From 0cfbf6abaccff178f3e76d6c59d61b0fc84786b6 Mon Sep 17 00:00:00 2001 From: mandrono Date: Thu, 22 Apr 2021 15:57:26 +0300 Subject: [PATCH] remove deprecated test --- .../tests_deprecated/CMakeLists.txt | 18 +- .../config_param_test/config_param_test.cpp | 52 - .../functional/mkldnn/dummy.cpp | 4 + .../extensions_tests/extensions_test.cpp | 274 - .../network_tests/ngraph_network_test.cpp | 359 - .../regression_tests/regression_reference.cpp | 13 - .../single_layer_tests.cpp | 233 - .../graph_tools_functional_tests.cpp | 26 - .../common_dyn_batch_regression.cpp | 16 - .../input_tests/parser_tests.cpp | 36 - .../io_blob_tests/cropResize_tests.cpp | 250 - .../io_blob_tests/dims_tests.cpp | 7 - .../io_blob_tests/layout_tests.cpp | 15 - .../lstm/lstm_cell_test.cpp | 7 - .../lstm/lstm_ir_test.cpp | 10 - .../lstm/rnn_seq_test.cpp | 7 - .../network_tests/network_test.cpp | 202 - ...ecision_transformer_single_layer_tests.cpp | 862 --- .../single_layer_tests/argmax_tests.cpp | 211 - .../single_layer_tests/concat_tests.cpp | 277 - .../single_layer_tests/conv_int8_tests.cpp | 363 - .../mkldnn/single_layer_tests/conv_tests.cpp | 429 -- .../single_layer_tests/conv_tests_int8.cpp | 452 -- .../single_layer_tests/detectionout_tests.cpp | 189 - .../single_layer_tests/fullycon_tests.cpp | 185 - .../mkldnn_batchnorm_tests.cpp | 175 - .../mkldnn_deconv_tests.cpp | 231 - .../mkldnn_logistic_tests.cpp | 139 - .../single_layer_tests/mkldnn_power_tests.cpp | 152 - .../mkldnn_roipooling_tests.cpp | 101 - .../mkldnn_scaleshift_tests.cpp | 170 - .../mkldnn_simplernms_tests.cpp | 151 - .../mkldnn/single_layer_tests/norm_tests.cpp | 182 - .../single_layer_tests/pooling_tests.cpp | 213 - .../single_layer_tests/priorbox_tests.cpp | 369 - .../single_layer_tests/region_yolo_tests.cpp | 234 - .../mkldnn/snippet_test/multi_out_test.cpp | 125 - .../mkldnn/snippet_test/tripple_test.cpp | 118 - .../functional/mkldnn/test_model_repo.cpp | 17 - .../tests_deprecated/unit/CMakeLists.txt | 44 +- .../mkldnn/constant_propagation_test.cpp | 304 - .../unit/engines/mkldnn/convert_desc_test.cpp | 75 - .../unit/engines/mkldnn/dummy.cpp | 4 + .../unit/engines/mkldnn/dump_test.cpp | 138 - .../unit/engines/mkldnn/dumper_test.cpp | 98 - .../layers/extensions/broadcast_tests.cpp | 276 - .../layers/extensions/bucketize_tests.cpp | 249 - .../graph/layers/extensions/fake_layer.cpp | 131 - .../graph/layers/extensions/fill_tests.cpp | 194 - .../graph/layers/extensions/gather_tests.cpp | 684 -- .../layers/extensions/graph_generic_test.cpp | 1521 ---- .../layers/extensions/log_softmax_tests.cpp | 273 - .../graph/layers/extensions/math_tests.cpp | 319 - .../graph/layers/extensions/mvn_tests.cpp | 646 -- .../extensions/non_max_suppression_tests.cpp | 568 -- .../layers/extensions/normalize_tests.cpp | 640 -- .../graph/layers/extensions/onehot_tests.cpp | 854 --- .../graph/layers/extensions/range_tests.cpp | 247 - .../graph/layers/extensions/reduce_tests.cpp | 535 -- .../extensions/reverse_sequence_tests.cpp | 265 - .../graph/layers/extensions/scatter_tests.cpp | 203 - .../graph/layers/extensions/select_tests.cpp | 280 - .../extensions/shuffle_channels_tests.cpp | 205 - .../sparse_fill_empty_rows_tests.cpp | 545 -- .../sparse_segment_reduce_tests.cpp | 302 - .../extensions/sparse_to_dense_tests.cpp | 279 - .../sparse_weighted_reduce_tests.cpp | 416 - .../layers/extensions/strided_slice_tests.cpp | 487 -- .../graph/layers/extensions/topk_tests.cpp | 519 -- .../graph/layers/extensions/unique_tests.cpp | 370 - .../layers/internal/graph_activation_test.cpp | 
422 -- .../graph_batchnorm_scaleshift_test.cpp | 344 - .../layers/internal/graph_batchnorm_test.cpp | 312 - .../layers/internal/graph_concat_test.cpp | 1030 --- .../graph/layers/internal/graph_conv_test.cpp | 531 -- .../layers/internal/graph_deconv_test.cpp | 555 -- .../layers/internal/graph_depthwise_test.cpp | 456 -- .../layers/internal/graph_eltwise_test.cpp | 379 - .../internal/graph_fullyconnected_test.cpp | 337 - .../graph/layers/internal/graph_gemm_test.cpp | 662 -- .../layers/internal/graph_input_test.cpp | 471 -- .../graph/layers/internal/graph_lrn_test.cpp | 301 - .../layers/internal/graph_permute_test.cpp | 635 -- .../layers/internal/graph_pooling_test.cpp | 504 -- .../layers/internal/graph_power_test.cpp | 332 - .../graph/layers/internal/graph_relu_test.cpp | 244 - .../layers/internal/graph_reorder_test.cpp | 256 - .../layers/internal/graph_reshape_test.cpp | 304 - .../internal/graph_roi_pooling_test.cpp | 313 - .../layers/internal/graph_simplernms_test.cpp | 473 -- .../layers/internal/graph_softmax_test.cpp | 419 -- .../layers/internal/graph_split_test.cpp | 501 -- .../graph/layers/internal/graph_tile_test.cpp | 281 - .../structure/graph_conv_concat_tests.cpp | 267 - .../graph_conv_depthwise_fusing_test.cpp | 337 - .../structure/graph_deconv_concat_tests.cpp | 397 - .../structure/graph_dw_conv_fusing_test.cpp | 334 - .../structure/graph_optimization_test.cpp | 421 -- .../graph/structure/graph_structure_test.cpp | 6671 ----------------- .../unit/engines/mkldnn/graph/test_graph.hpp | 362 - .../engines/mkldnn/mkldnn_primitive_test.cpp | 54 - .../unit/engines/mkldnn/test_layers.cpp | 183 - 102 files changed, 39 insertions(+), 37594 deletions(-) delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp create mode 100644 inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp delete mode 100644 
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp delete mode 100644 inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/constant_propagation_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/convert_desc_test.cpp create mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/dump_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/dumper_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/broadcast_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/bucketize_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fill_tests.cpp delete mode 100644 
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/log_softmax_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/math_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/non_max_suppression_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/onehot_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reduce_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reverse_sequence_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/scatter_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/select_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/shuffle_channels_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_fill_empty_rows_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_segment_reduce_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_to_dense_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_weighted_reduce_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/strided_slice_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/topk_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/unique_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp delete mode 100644 
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_concat_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_deconv_concat_tests.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/graph/test_graph.hpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/test_layers.cpp diff --git a/inference-engine/tests_deprecated/CMakeLists.txt b/inference-engine/tests_deprecated/CMakeLists.txt index e64f14eca43779..31f5a9f7f96add 100644 --- a/inference-engine/tests_deprecated/CMakeLists.txt +++ b/inference-engine/tests_deprecated/CMakeLists.txt @@ -14,14 +14,14 @@ if (ENABLE_GAPI_TESTS) add_subdirectory(fluid_preproc) endif() -# if (ENABLE_FUNCTIONAL_TESTS) -# add_subdirectory(functional) -# endif() +if (ENABLE_FUNCTIONAL_TESTS) + add_subdirectory(functional) +endif() -# if (ENABLE_BEH_TESTS) -# add_subdirectory(behavior) -# endif() +if (ENABLE_BEH_TESTS) + add_subdirectory(behavior) +endif() -# if(ENABLE_TESTS) -# add_subdirectory(unit) -# endif() +if(ENABLE_TESTS) + add_subdirectory(unit) +endif() diff --git 
a/inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp
deleted file mode 100644
index 57a7a235ff4170..00000000000000
--- a/inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-using namespace ::testing;
-using namespace InferenceEngine;
-
-class smoke_PropertyTest : public TestsCommon, public TestsCommonFunc{};
-
-TEST_F(smoke_PropertyTest, onSplitConvConcat) {
-    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat({1, 4, 100, 100});
-
-    CNNNetwork net(fnPtr);
-    auto ieCore = PluginCache::get().ie();
-    InferenceEngine::ExecutableNetwork exeNet = ieCore->LoadNetwork(net, CommonTestUtils::DEVICE_CPU);
-    InferenceEngine::InferRequest inferRequest0 = exeNet.CreateInferRequest();
-
-    auto blob0 = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc());
-
-    inferRequest0.SetBlob(net.getInputsInfo().begin()->first, blob0);
-    inferRequest0.Infer();
-    float* outRawData = inferRequest0.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
-
-
-    exeNet = ieCore->LoadNetwork(net, CommonTestUtils::DEVICE_CPU,
-                                 {{PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, PluginConfigParams::CPU_THROUGHPUT_AUTO}});
-    InferenceEngine::InferRequest inferRequest1 = exeNet.CreateInferRequest();
-
-    auto blob1 = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc());
-
-    inferRequest1.SetBlob(net.getInputsInfo().begin()->first, blob1);
-    inferRequest1.Infer();
-    float* outRawDataWithConfig = inferRequest1.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
-
-    float thr1, thr2;
-    FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32, thr1, thr2);
-
-    size_t outElementsCount = std::accumulate(begin(fnPtr->get_output_shape(0)), end(fnPtr->get_output_shape(0)), 1,
-                                              std::multiplies<size_t>());
-
-    FuncTestUtils::compareRawBuffers(outRawData, outRawDataWithConfig, outElementsCount, outElementsCount,
-                                     FuncTestUtils::CompareType::ABS_AND_REL,
-                                     thr1, thr2);
-}
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp b/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp
new file mode 100644
index 00000000000000..ffe853f7697581
--- /dev/null
+++ b/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp
@@ -0,0 +1,4 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
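The deleted config_param_test boils down to one invariant: loading the same network with and without CPU_THROUGHPUT_STREAMS=CPU_THROUGHPUT_AUTO must produce numerically equivalent outputs. For anyone porting it to the new test suites, a minimal sketch of that check follows; it assumes a CNNNetwork `net` built elsewhere (e.g. by makeSplitConvConcat as above), and uses the public CONFIG_KEY/CONFIG_VALUE macros from <ie_plugin_config.hpp> in place of the PluginConfigParams constants:

    #include <ie_core.hpp>

    // Sketch only: load twice, with and without throughput streams.
    void checkStreamsKeepResults(InferenceEngine::CNNNetwork& net) {
        InferenceEngine::Core ie;
        auto exeDefault = ie.LoadNetwork(net, "CPU");
        auto exeStreams = ie.LoadNetwork(net, "CPU",
            {{CONFIG_KEY(CPU_THROUGHPUT_STREAMS), CONFIG_VALUE(CPU_THROUGHPUT_AUTO)}});
        auto reqDefault = exeDefault.CreateInferRequest();
        auto reqStreams = exeStreams.CreateInferRequest();
        // Feed both requests identical inputs, call Infer(), then compare the raw
        // FP32 output buffers within the ABS_AND_REL thresholds used by the test.
    }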
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp
deleted file mode 100644
index 79c8892152c156..00000000000000
--- a/inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-using namespace ::testing;
-using namespace InferenceEngine;
-
-struct extension_params {
-    std::string pluginName;
-    std::shared_ptr<InferenceEngine::IExtension> extension;
-    std::string plugin() { return pluginName + "Plugin"; }
-    // optional config (used for multi-device)
-    std::map<std::string, std::string> config;
-};
-
-class NewFakePrimitiveImpl : public InferenceEngine::ILayerExecImpl {
-public:
-    NewFakePrimitiveImpl(const std::shared_ptr<ngraph::Node>& node): node(node) {}
-
-    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc *resp) noexcept override {
-        InferenceEngine::LayerConfig config;
-        config.dynBatchSupport = true;
-        if (node->outputs().size() != 1 && node->inputs().size() != 1)
-            return InferenceEngine::GENERAL_ERROR;
-        InferenceEngine::DataConfig cfg;
-        cfg.constant = false;
-        cfg.inPlace = 0;
-        InferenceEngine::SizeVector order;
-        auto partialShape = node->get_output_partial_shape(0);
-        if (partialShape.is_dynamic())
-            return InferenceEngine::GENERAL_ERROR;
-        auto shape = node->get_output_shape(0);
-        for (size_t i = 0; i < shape.size(); i++) {
-            order.push_back(i);
-        }
-        cfg.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32,
-                                               shape, {shape, order});
-        config.outConfs.push_back(cfg);
-        config.inConfs.push_back(cfg);
-        conf.push_back(config);
-        return InferenceEngine::OK;
-    }
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override {
-        return InferenceEngine::OK;
-    }
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs, InferenceEngine::ResponseDesc *resp) noexcept override {
-        return InferenceEngine::OK;
-    }
-
-private:
-    const std::shared_ptr<ngraph::Node> node;
-};
-
-class FakeTestOp: public ngraph::op::Op {
-public:
-    static constexpr ngraph::NodeTypeInfo type_info{"Fake", 0};
-    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
-
-    FakeTestOp() = default;
-    explicit FakeTestOp(const ngraph::Output<ngraph::Node>& arg): Op({arg}) {
-        constructor_validate_and_infer_types();
-    }
-
-    void validate_and_infer_types() override {
-        auto input_shape = get_input_partial_shape(0).to_shape();
-
-        ngraph::Shape output_shape(input_shape);
-        for (int i = 0; i < input_shape.size(); ++i) {
-            output_shape[i] = input_shape[i];
-        }
-
-        set_output_type(0, get_input_element_type(0), ngraph::PartialShape(output_shape));
-    }
-
-    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
-        if (new_args.size() != 1) {
-            throw ngraph::ngraph_error("Incorrect number of new arguments");
-        }
-
-        return std::make_shared<FakeTestOp>(new_args.at(0));
-    }
-
-    bool visit_attributes(ngraph::AttributeVisitor& visitor) override {
-        return true;
-    }
-};
-
-constexpr ngraph::NodeTypeInfo FakeTestOp::type_info;
-
-class NewTestExtension : public InferenceEngine::IExtension {
-public:
-    NewTestExtension() {
-        impls["Fake"] = [](const std::shared_ptr<ngraph::Node>& node) -> InferenceEngine::ILayerImpl::Ptr {
-            return std::make_shared<NewFakePrimitiveImpl>(node);
-        };
-    }
-
-    void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {
-        static const InferenceEngine::Version VERSION{{}, "", ""};
-        versionInfo = &VERSION;
-    }
-
-    void Unload() noexcept override {}
-
-    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
-        if (impls.find(node->description()) == impls.end())
-            return {};
-        return {"CPU"};
-    }
-
-    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override {
-        if (impls.find(node->description()) == impls.end() || implType != "CPU")
-            return nullptr;
-        return impls[node->description()](node);
-    }
-
-    std::map<std::string, ngraph::OpSet> getOpSets() override {
-        static std::map<std::string, ngraph::OpSet> opsets;
-        if (opsets.empty()) {
-            ngraph::OpSet opset;
-            opset.insert<FakeTestOp>();
-            opsets["custom_opset"] = opset;
-        }
-        return opsets;
-    }
-private:
-    std::map<std::string, std::function<InferenceEngine::ILayerImpl::Ptr(const std::shared_ptr<ngraph::Node>&)>> impls;
-};
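NewTestExtension is the complete custom-op recipe this deleted test exercised: an nGraph op (FakeTestOp), a CPU kernel stub (NewFakePrimitiveImpl), and an IExtension that maps the op name to the kernel and publishes the op in a "custom_opset". A hedged usage sketch, mirroring the calls made by the test below (the IR file name is hypothetical):

    // Register the extension so ReadNetwork can resolve the "Fake" op.
    InferenceEngine::Core ie;
    ie.AddExtension(std::make_shared<NewTestExtension>(), "CPU");
    InferenceEngine::CNNNetwork net = ie.ReadNetwork("model_with_fake_layer.xml");  // hypothetical path
    InferenceEngine::ExecutableNetwork exe = ie.LoadNetwork(net, "CPU");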
-
-class smoke_ExtensionTest : public TestsCommon,
-                            public TestsCommonFunc {
-
-protected:
-    void checkExtensionRemoved(extension_params p) {
-        try {
-            std::unique_ptr<InferenceEnginePluginPtr> score_engine;
-            score_engine.reset(new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
-            (*score_engine)->SetConfig(p.config);
-            ASSERT_EQ(p.extension.use_count(), 2);
-
-            (*score_engine)->AddExtension(p.extension);
-            // multi-device holds additional reference of the extension ptr
-            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi") == std::string::npos ? 3 : 4);
-            score_engine.reset();
-
-            ASSERT_EQ(p.extension.use_count(), 2);
-        } catch (const InferenceEngine::Exception& e) {
-            FAIL() << e.what();
-        }
-    }
-    void checkExtensionNotRemovedFromAnotherEngineObject(extension_params p) {
-        try {
-            std::unique_ptr<InferenceEnginePluginPtr> score_engine1;
-            score_engine1.reset(new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
-            (*score_engine1)->SetConfig(p.config);
-
-            std::unique_ptr<InferenceEnginePluginPtr> score_engine2;
-            score_engine2.reset(new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
-            (*score_engine2)->SetConfig(p.config);
-            ASSERT_EQ(p.extension.use_count(), 2);
-
-            (*score_engine1)->AddExtension(p.extension);
-            // multi-device holds additional reference of the extension ptr
-            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi") == std::string::npos ? 3 : 4);
-            score_engine2.reset();
-
-            // multi-device holds additional reference of the extension ptr
-            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi") == std::string::npos ? 3 : 4);
-            score_engine1.reset();
-            ASSERT_EQ(p.extension.use_count(), 2);
-        } catch (const InferenceEngine::Exception& e) {
-            FAIL() << e.what();
-        }
-    }
-
-    void checkNotSharedExtensions(std::shared_ptr<InferenceEngine::IExtension> extension, std::string device) {
-        std::string model = R"V0G0N(
-        (IR XML of a small 1x3x5x5 network containing the custom "Fake" layer elided:
-         the markup was lost in extraction, so only the dimension literals survived)
-        )V0G0N";
-
-        try {
-            Core ie;
-            ie.AddExtension(extension, "CPU");
-            Core ie2;
-
-            Blob::Ptr weights;
-            CNNNetwork cnnNet1 = ie.ReadNetwork(model, weights);
-            ASSERT_NO_THROW(ie.LoadNetwork(cnnNet1, device));
-            ASSERT_THROW(ie2.ReadNetwork(model, weights), InferenceEngine::Exception);
-        } catch (const InferenceEngine::Exception& e) {
-            FAIL() << e.what();
-        }
-    }
-};
-
-#ifndef ENABLE_MKL_DNN
-    #include "disable_tests.hpp"
-#endif
-
-TEST_F(smoke_ExtensionTest, MKLDNN_delete_extension) {
-    std::shared_ptr<InferenceEngine::IExtension> ext(new NewTestExtension());
-    checkExtensionRemoved({"MKLDNN", ext});
-}
-
-TEST_F(smoke_ExtensionTest, MKLDNN_no_delete_extension_from_another_engine) {
-    std::shared_ptr<InferenceEngine::IExtension> ext(new NewTestExtension());
-    checkExtensionNotRemovedFromAnotherEngineObject({"MKLDNN", ext});
-}
-
-TEST_F(smoke_ExtensionTest, MKLDNN_no_share_extension_between_engines) {
-    std::shared_ptr<InferenceEngine::IExtension> ext(new NewTestExtension());
-    checkNotSharedExtensions(ext, "CPU");
-}
-
-TEST_F(smoke_ExtensionTest, MKLDNN_no_share_new_extension_between_engines) {
-    std::shared_ptr<InferenceEngine::IExtension> ext(new NewTestExtension());
-    checkNotSharedExtensions(ext, "CPU");
-}
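The use_count() assertions in this deleted test encode the ownership contract being checked. A hedged sketch of that accounting (my reading of the assertions, not documented API behavior):

    // Expected shared_ptr reference counts at each step:
    std::shared_ptr<InferenceEngine::IExtension> ext(new NewTestExtension());
    extension_params p{"MKLDNN", ext};    // ext + p.extension copy      -> use_count() == 2
    // plugin->AddExtension(p.extension)  // plugin adds one reference   -> use_count() == 3
    //                                    // (MULTI caches one more      -> use_count() == 4)
    // plugin destroyed                   // plugin reference released   -> use_count() == 2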
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp
deleted file mode 100644
index 5ae7de8ae7c7cf..00000000000000
--- a/inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include
-#include
-#include
-
-#define XBYAK_NO_OP_NAMES
-#define XBYAK_UNDEF_JNL
-
-using namespace ::testing;
-using namespace InferenceEngine;
-
-struct ngraph_network_param {
-    std::string modelFile;
-    std::string imageName;
-    std::string ngraphModel;
-
-    std::string model() {
-        ModelsPath result;
-        result += kPathSeparator;
-        result += modelFile;
-        return result;
-    }
-
-    std::string weights() {
-        ModelsPath result;
-        result += kPathSeparator;
-        result += FileUtils::fileNameNoExt(modelFile);
-        result += ".bin";
-        return result;
-    }
-
-    std::string image() {
-        std::string result = TestDataHelpers::get_data_path();
-        result += kPathSeparator;
-        result += imageName;
-        return result;
-    }
-
-    std::string v7model() {
-        ModelsPath result;
-        result += kPathSeparator;
-        result += ngraphModel;
-        return result;
-    }
-};
-
-class smoke_NGraphNetworkTest : public TestsCommon, public TestsCommonFunc {
-protected:
-    Blob::Ptr classifyV7(ngraph_network_param p, size_t batch_size = 1, float threshold = 0.005f) {
-        Core ie;
-        CNNNetwork network = ie.ReadNetwork(p.v7model());
-
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
-        InferRequest inferRequest = exeNetwork.CreateInferRequest();
-
-        Blob::Ptr src = readInput(p.image(), batch_size);
-
-        OutputsDataMap outInfo = network.getOutputsInfo();
-        InputsDataMap inputInfo = network.getInputsInfo();
-
-        auto dst = make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
-        dst->allocate();
-        inferRequest.SetBlob(inputInfo.begin()->first, src);
-        inferRequest.SetBlob(outInfo.begin()->first, dst);
-        inferRequest.Infer();
-
-        return dst;
-    }
-
-    Blob::Ptr classifyV5(ngraph_network_param p, size_t batch_size = 1, float threshold = 0.005f) {
-        Core ie;
-        CNNNetwork network = ie.ReadNetwork(p.model(), p.weights());
-        if (batch_size != 1)
-            network.setBatchSize(batch_size);
-
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
-        InferRequest inferRequest = exeNetwork.CreateInferRequest();
-
-        Blob::Ptr src = readInput(p.image(), batch_size);
-
-        OutputsDataMap outInfo;
-        outInfo = network.getOutputsInfo();
-
-        auto dst = make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
-        dst->allocate();
-        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
-        inferRequest.SetBlob(outInfo.begin()->first, dst);
-        inferRequest.Infer();
-
-        return dst;
-    }
-
-    void classify(ngraph_network_param p) {
-        try {
-            auto v7blb = classifyV7(p);
-            auto v5blb = classifyV5(p);
-
-            auto* v7data = v7blb->buffer().as<float*>();
-            auto* v5data = v5blb->buffer().as<float*>();
-
-            ASSERT_EQ(v7blb->size(), v5blb->size());
-            for (size_t i = 0; i < v7blb->size(); i++) {
-                ASSERT_EQ(v7data[i], v5data[i]);
-            }
-        } catch (const InferenceEngine::Exception& e) {
-            FAIL() << e.what();
-        }
-    }
-};
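classify() pinned the two IR generations (v7 nGraph IR vs. v5 IR of the same model) to bitwise-identical outputs. A minimal standalone sketch of that comparison, assuming two FP32 output blobs; note the deleted test compared exactly (ASSERT_EQ), its unused `threshold` parameters notwithstanding:

    // Element-wise comparison pattern used by classify() above.
    bool blobsBitwiseEqual(const InferenceEngine::Blob::Ptr& a, const InferenceEngine::Blob::Ptr& b) {
        if (a->size() != b->size()) return false;          // shapes must match first
        const float* pa = a->cbuffer().as<const float*>();
        const float* pb = b->cbuffer().as<const float*>();
        for (size_t i = 0; i < a->size(); ++i)
            if (pa[i] != pb[i]) return false;              // bitwise-equal floats required
        return true;
    }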
-
-/*************************************************
- * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
- * All ref values were obtained from Caffe scoring
- * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
- *************************************************/
-#ifndef ENABLE_MKL_DNN
-    #include "disable_tests.hpp"
-#endif
-
-TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
-    std::string model = R"V0G0N(
-    (IR XML of a small LeNet-style network elided: the markup was lost in extraction;
-     only shape literals such as 1x1x28x28, 20x1x5x5 and 50x20x5x5 survive)
-)V0G0N";
-    InferenceEngine::Blob::Ptr weights = make_shared_blob<uint8_t>({InferenceEngine::Precision::U8, {1724336}, InferenceEngine::C});
-    weights->allocate();
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    std::map<std::string, std::vector<size_t>> shape;
-    shape["data"] = {1, 1, 28, 28};
-
-    Core ie;
-    CNNNetwork network = ie.ReadNetwork(model, weights);
-    for (size_t i = 0; i < 10; i++) {
-        network.reshape(shape);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
-    }
-}
-
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp b/inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp
deleted file mode 100644
index 634cb9c7e71919..00000000000000
--- a/inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "regression_reference.hpp"
-
-namespace Regression {
-    namespace Reference {
-
-        std::map> values = {
-        };
-    } // namespace Reference
-} // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
deleted file mode 100644
index 69fc62c23daa8f..00000000000000
--- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "single_layer_tests.hpp"
-
-
-static CommonTestUtils::conv_common_params convParams =
-        {
-                PropertyVector<unsigned int>{{2, 2}},  // stride
-                PropertyVector<unsigned int>{{3, 3}},  // kernel
-                {},                                    // pad_begin
-                {},                                    // pad_end
-                PropertyVector<unsigned int>{{1, 1}},  // dilation
-                "same_upper",                          // auto_pad
-                1,                                     // group
-                2                                      // out_c
-        };
-
-static CommonTestUtils::pool_common_params poolParams =
-        {
-                PropertyVector<unsigned int>{{2, 2}},  // stride
-                PropertyVector<unsigned int>{{3, 3}},  // kernel
-                {},                                    // pad_begin
-                {},                                    // pad_end
-                "same_upper",                          // auto_pad
-                true,                                  // avg
-                false                                  // exclude_pad
-        };
-
-static CommonTestUtils::conv_common_params defConvParamsHeavy =
-        {
-                PropertyVector<unsigned int>{{1, 1}},  // stride
-                PropertyVector<unsigned int>{{3, 3}},  // kernel
-                {},                                    // pad_begin
-                {},                                    // pad_end
-                PropertyVector<unsigned int>{{2, 2}},  // dilation
-                "same_upper",                          // auto_pad
-                1,                                     // group
-                128                                    // out_c
-        };
-
-static CommonTestUtils::conv_common_params defConvParamsLight0 =
-        {
-                PropertyVector<unsigned int>{{1, 1}},  // stride
-                PropertyVector<unsigned int>{{3, 3}},  // kernel
-                {},                                    //
pad_begin - {}, // pad_end - PropertyVector{{2, 2}}, // dilation - "same_upper", // auto_pad - 1, // group - 4 // out_c - }; - -static CommonTestUtils::conv_common_params defConvParamsLight1 = - { - PropertyVector{{2, 2}}, // stride - PropertyVector{{3, 3}}, // kernel - {}, // pad_begin - {}, // pad_end - PropertyVector{{1, 1}}, // dilation - "same_upper", // auto_pad - 1, // group - 16 // out_c - }; - - -static CommonTestUtils::conv_common_params defConvParamsLight2 = - { - PropertyVector{{2, 2}}, // stride - PropertyVector{{3, 3}}, // kernel - {}, // pad_begin - {}, // pad_end - PropertyVector{{2, 2}}, // dilation - "same_upper", // auto_pad - 1, // group - 15 // out_c - }; - - -static CommonTestUtils::conv_common_params defConvParamsLight3 = - { - PropertyVector{{1, 1}}, // stride - PropertyVector{{3, 3}}, // kernel - {}, // pad_begin - {}, // pad_end - PropertyVector{{2, 2}}, // dilation - "same_upper", // auto_pad - 2, // group - 4 // out_c - }; - -static std::vector pluginParams = { - PluginDependentParam{"CPU", Layout::NCHW, Precision::FP32, 0.001f} -}; - -std::string -getTestCaseName(testing::TestParamInfo> obj) { - auto params = obj.param; - LayerTestHelper::Ptr helper = std::get<3>(params); - return "MKLDNN" + helper->getType(); -} - -INSTANTIATE_TEST_CASE_P( - // TODO: rewrite to ngraph to have reshape functionality - DISABLED_Conv_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 2, 16, 16}}, // input - {{1, 2, 8, 8}} // output - })), - ::testing::Values(NewShapes({ - {{1, 2, 15, 15}}, // input - {{1, 2, 8, 8}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(convParams))) -), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - // TODO: rewrite to ngraph to have reshape functionality - DISABLED_Deconv_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 2, 8, 8}}, // input - {{1, 2, 16, 16}} // output - })), - ::testing::Values(NewShapes({ - {{1, 2, 7, 7}}, // input - {{1, 2, 14, 14}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(convParams))) -), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - // TODO: rewrite to ngraph to have reshape functionality - DISABLED_Pool_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 2, 16, 16}}, // input - {{1, 2, 8, 8}} // output - })), - ::testing::Values(NewShapes({ - {{1, 2, 15, 15}}, // input - {{1, 2, 8, 8}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(poolParams))) -), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - DISABLED_DefConvLight0_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans - {{1, 4, 4, 4}} // output - })), - ::testing::Values(NewShapes({ - {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans - {{1, 4, 4, 4}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(defConvParamsLight0, 2))) - ), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - DISABLED_DefConvLight1_WithBatch_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans - {{2, 16, 4, 4}} // output - })), - ::testing::Values(NewShapes({ - {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans - {{2, 16, 4, 4}} // output - })), - ::testing::ValuesIn(pluginParams), - 
::testing::Values(Helper(std::make_shared(defConvParamsLight1, 2))) - ), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - DISABLED_DefConvLight2_WithBatch_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans - {{2, 15, 4, 4}} // output - })), - ::testing::Values(NewShapes({ - {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans - {{2, 15, 4, 4}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(defConvParamsLight2, 1))) - ), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - DISABLED_DefConvLight3_WithGroups_smoke, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans - {{1, 4, 4, 4}} // output - })), - ::testing::Values(NewShapes({ - {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans - {{1, 4, 4, 4}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(defConvParamsLight3, 1))) - ), getTestCaseName -); - -INSTANTIATE_TEST_CASE_P( - DISABLED_smoke_DefConvHeavy, CommonSingleLayerTest, - ::testing::Combine( - ::testing::Values(InitialShapes({ - {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans - {{1, 128, 38, 38}} // output - })), - ::testing::Values(NewShapes({ - {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans - {{1, 128, 38, 38}} // output - })), - ::testing::ValuesIn(pluginParams), - ::testing::Values(Helper(std::make_shared(defConvParamsHeavy, 4))) - ), getTestCaseName -); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp deleted file mode 100644 index 2863bdbea2779b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "graph_tools_functional_tests.hpp" -#include - -using namespace testing; -using namespace InferenceEngine::details; -using namespace InferenceEngine; -using namespace std; - -TEST_F(GraphToolsFncTest, smoke_canSortSplitConvConcat) { - CNNNetwork network(ngraph::builder::subgraph::makeSplitConvConcat()); - checkSort(CNNNetSortTopologically(network)); -} - - -TEST_F(GraphToolsFncTest, smoke_canSortTIwithLstm) { - CNNNetwork network(ngraph::builder::subgraph::makeTIwithLSTMcell()); - checkSort(CNNNetSortTopologically(network)); - - checkSort(CNNNetSortTopologically(network)); -} \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp deleted file mode 100644 index 5514971502c422..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "common_dyn_batch_regression.hpp" - -std::vector supportedDynBatchValues = { - { "CPU", 4, 3 }, - { "CPU", 4, 2 }, - { "CPU", 4, 1 }, - { "CPU", 8, 5 }, - { "CPU", 8, 
4 }, - { "CPU", 8, 3 } -}; - -INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, TestNoRegressionDynBatchFP32, ValuesIn(supportedDynBatchValues), getTestCaseName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp deleted file mode 100644 index 916fb9a3de8828..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "parser_tests.hpp" - -ir_test_params ir_test_cases[] = { - ir_test_params("CPU", "FP32", negative_conv_kernel_x_case), - ir_test_params("CPU", "FP32", negative_conv_kernel_y_case), - ir_test_params("CPU", "FP32", negative_conv_stride_x_case), - ir_test_params("CPU", "FP32", negative_conv_weights_case), - ir_test_params("CPU", "FP32", negative_conv_biases_case), - - ir_test_params("CPU", "FP32", negative_fc_out_size_case), - ir_test_params("CPU", "FP32", negative_fc_weights_case), - ir_test_params("CPU", "FP32", negative_fc_biases_case), - - ir_test_params("CPU", "FP32", negative_deconv_kernel_x_case), - ir_test_params("CPU", "FP32", negative_deconv_kernel_y_case), - ir_test_params("CPU", "FP32", negative_deconv_stride_x_case), - ir_test_params("CPU", "FP32", negative_deconv_weights_case), - ir_test_params("CPU", "FP32", negative_deconv_biases_case), - - ir_test_params("CPU", "FP32", negative_pool_kernel_x_case), - ir_test_params("CPU", "FP32", negative_pool_kernel_y_case), - ir_test_params("CPU", "FP32", negative_pool_stride_x_case), - ir_test_params("CPU", "FP32", incorrect_pool_type_case), - - ir_test_params("CPU", "FP32", negative_norm_local_size_case), - ir_test_params("CPU", "FP32", negative_norm_k_case) -}; - -INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, IncorrectIRTests, - ::testing::ValuesIn(ir_test_cases), - getTestName); - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp deleted file mode 100644 index b13922560c494f..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cropResize_tests.hpp" - -#ifdef USE_OPENCV - -#define COMBINE_WITH_DEFAULT(_dims, _in_layouts, _color_formats) \ - Combine(Values(Precision::FP32), \ - Values(_dims), \ - Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), \ - Values(_in_layouts), \ - Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), \ - Values(_color_formats), \ - Values(ROI({0, 40, 50, 220, 220})), \ - Values(false, true)) - -// test resize-only for all dims (as before) -// test resize + color conversion for smaller number of dims (simple upscale/downscale scenarios only) -namespace smoke { -static auto params_resize_only = COMBINE_WITH_DEFAULT( - TESTED_DIMS(1), - MULTI_VALUE(NCHW, NHWC), - COLOR_FORMATS_RAW); - -static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS_SMALL(1), - MULTI_VALUE(NCHW, NHWC), - COLOR_FORMATS_3CH); - -static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS_SMALL(1), - 
NHWC, - COLOR_FORMATS_4CH); - -// batch preprocessing parameters: -static auto batch_params_resize_only = COMBINE_WITH_DEFAULT( - TESTED_DIMS(2), - MULTI_VALUE(NCHW, NHWC), - COLOR_FORMATS_RAW); - -static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS_SMALL(2), - MULTI_VALUE(NCHW, NHWC), - COLOR_FORMATS_3CH); - -static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS_SMALL(2), - NHWC, - COLOR_FORMATS_4CH); -} // namespace smoke - - -// test everything in nightly (as before) -namespace nightly { -static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS(1), - MULTI_VALUE(NCHW, NHWC), - MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH)); - -static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT( - TESTED_DIMS(1), - NHWC, - COLOR_FORMATS_4CH); - -// batch preprocessing parameters: -static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT( - MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)), - MULTI_VALUE(NCHW, NHWC), - MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH)); - -static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT( - MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)), - NHWC, - COLOR_FORMATS_4CH); -} // namespace nightly - -// reorder preprocessing parameters: -static auto reorder_params = Combine( - Values(Precision::FP32), // network precision - Values(SizeVector({1, 3, 300, 300})), // sizes of the network - Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), // precision and threshold - Values(std::make_pair(NCHW, NHWC), std::make_pair(NHWC, NCHW)), // Input/network data layout - Values(ResizeAlgorithm::NO_RESIZE), - Values(ColorFormat::BGR), - Values(ROI({0, 0, 0, 300, 300})), // cropped ROI params (id, x, y, width, height) - Values(false, true) // Infer mode sync/async -); - -// nv12 preprocessing parameters: -static auto nv12_params = Combine( - Values(Precision::FP32), // network precision - Values(cv::Size(300, 300)), // input image size - Values(TESTED_DIMS(1)), // sizes of the network - Values(std::make_pair(Precision::U8, 1)), // precision and threshold - Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), - Values(ColorFormat::NV12), - Values(ROI({0, 0, 0, 300, 300}), ROI({0, 15, 10, 210, 210})), // cropped ROI params (id, x, y, width, height) - Values(false, true) // Infer mode sync/async -); - -static auto random_roi_3c = Combine( - Values(Precision::FP32), - Values(TESTED_DIMS(1)), - Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), - Values(MULTI_VALUE(NCHW, NHWC)), - Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), - Values(COLOR_FORMATS_3CH), - Values(ROI({0, 0, 0, 0, 0})), - Values(false, true) -); - -static auto random_roi_4c = Combine( - Values(Precision::FP32), - Values(TESTED_DIMS(1)), - Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), - Values(NHWC), - Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), - Values(COLOR_FORMATS_4CH), - Values(ROI({0, 0, 0, 0, 0})), - Values(false, true) -); - -static auto random_roi_nv12 = Combine( - Values(Precision::FP32), - Values(TESTED_DIMS(1)), - Values(std::make_pair(Precision::U8, 1)), - Values(NHWC), - Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), - Values(ColorFormat::NV12), - Values(ROI({0, 0, 0, 0, 0})), - Values(false, true) -); -struct PreprocessRegression: public TestsCommon {}; - -TEST_F(PreprocessRegression, smoke_DifferentSizes) { - // 
Reproduce "object was compiled for different meta" problem. - // When G-API/Fluid is used as a preprocessing engine, - // its state wasn't updated internally if input dimensions changed. - // Thus while graph itself continued working properly on all dimensions, - // it wan't reshaped when it had to: - // * On first call (frame size = X), _lastCall is initialized with size X - // * On second call (frame size = Y), graph is reshaped to size Y but _lastCall is still X - // * On third call (frame size = X), graph is NOT reshaped since this X matches _lastCall, - // exception is thrown since a graph reshaped to input size Y is asked to process input size X. - - Blob::Ptr in_blob; - Blob::Ptr out_blob; - - std::vector in_sizes = { - cv::Size(256, 256), - cv::Size(72, 72), - cv::Size(256, 256), - }; - - SizeVector out_dims = {1, 3, 64, 64}; - out_blob = make_shared_blob(TensorDesc(Precision::U8, out_dims, Layout::NCHW)); - out_blob->allocate(); - - PreProcessInfo info; - info.setResizeAlgorithm(RESIZE_BILINEAR); - - PreProcessDataPtr preprocess = CreatePreprocDataHelper(); - for (auto sz : in_sizes) { - cv::Mat in_mat = cv::Mat::eye(sz, CV_8UC3)*255; - in_blob = img2Blob(in_mat, Layout::NHWC); - preprocess->setRoiBlob(in_blob); - EXPECT_NO_THROW(preprocess->execute(out_blob, info, false)); - } - - // Not thrown = test is green. -}; - -struct IEPreprocessTest : public TestsCommon {}; -TEST_F(IEPreprocessTest, smoke_NetworkInputSmallSize) { - const size_t num_threads = parallel_get_max_threads(); - - std::vector out_sizes = { - cv::Size(num_threads, num_threads - 1), - cv::Size(num_threads - 1, num_threads), - cv::Size(1, 1), - cv::Size(1, 0), - cv::Size(0, 1) - }; - - SizeVector in_dims = {1, 3, num_threads * 2, num_threads * 2}; - cv::Mat in_mat = cv::Mat::eye(cv::Size(in_dims[3], in_dims[2]), CV_8UC3)*255; - Blob::Ptr in_blob = img2Blob(in_mat, Layout::NHWC); - - PreProcessInfo info; - info.setResizeAlgorithm(RESIZE_BILINEAR); - - PreProcessDataPtr preprocess = CreatePreprocDataHelper(); - preprocess->setRoiBlob(in_blob); - - for (const auto& sz : out_sizes) { - SizeVector out_dims = {1, 3, static_cast(sz.height), static_cast(sz.width)}; - Blob::Ptr out_blob = make_shared_blob(TensorDesc(Precision::U8, out_dims, Layout::NHWC)); - out_blob->allocate(); - // FIXME: sz with 0 dims must be a separate test - if (sz.width > 0 && sz.height > 0) { - EXPECT_NO_THROW(preprocess->execute(out_blob, info, false)); - } else { - EXPECT_THROW(preprocess->execute(out_blob, info, false), - InferenceEngine::Exception); - } - } -} - -// smoke: -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_3c_smoke, RandomROITest, random_roi_3c); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_4c_smoke, RandomROITest, random_roi_4c); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_nv12_smoke, RandomROITest, random_roi_nv12); - -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_resize_only_smoke, CropResizeTest, smoke::params_resize_only); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_3ch_and_resize_smoke, CropResizeTest, smoke::params_csc_3ch_and_resize); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_4ch_and_resize_smoke, CropResizeTest, smoke::params_csc_4ch_and_resize); - -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_resize_only_smoke, DynamicBatchResizeTest, smoke::batch_params_resize_only); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_3ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_3ch_and_resize); -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_4ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_4ch_and_resize); - 
-PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_reorder_smoke, ReorderTest, reorder_params); - -PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_nv12_and_resize_smoke, NV12ColorConvertTest, nv12_params); - -//////////////////////////////////////////////////////////////////////////////////////////////////// - -// nightly: - -// FIXME: enable these once smoke/nightly concepts are introduced in CI -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_3c_nightly, RandomROITest, random_roi_3c); -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_4c_nightly, RandomROITest, random_roi_4c); -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_nv12_nightly, RandomROITest, random_roi_nv12); - -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, CropResizeTest, nightly::params_csc_3ch_and_resize); -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, CropResizeTest, nightly::params_csc_4ch_and_resize); - -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_3ch_and_resize); -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_4ch_and_resize); - -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_3ch_and_resize); -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_4ch_and_resize); - -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_reorder_nightly, ReorderTest, reorder_params); - -PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_nv12_and_resize_nightly, NV12ColorConvertTest, nv12_params); - -#endif // USE_OPENCV diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp deleted file mode 100644 index 963b6676c2d8a6..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "dims_tests.hpp" - -PLUGING_CASE_WITH_SUFFIX(CPU, _smoke, IO_BlobTest, params); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp deleted file mode 100644 index 084f69911ed0a9..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "layout_tests.hpp" - -static auto params = ::testing::Combine( - ::testing::Values(conv_p), - ::testing::Values(std::make_pair(Precision::FP32, 1e-5)), - ::testing::Values(NCHW, NHWC), - ::testing::Values(NCHW, NHWC), - ::testing::Values(Precision::FP32, Precision::U8) // TODO: What about U16/I8/FP16? 
-); - -PLUGING_CASE_WITH_SUFFIX(CPU, _smoke, LayoutTTTest, params); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp deleted file mode 100644 index 622259f4f22c6c..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_cell_test.hpp" - -RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, LSTMCellTest, workload); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp deleted file mode 100644 index a4c00554258ea2..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_ir_test.hpp" - -RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, LSTM_IR_Test, workload); - -static std::vector hetero_workload { workload }; -RUN_CASE_P_WITH_SUFFIX(HETERO_CPU, _smoke, LSTM_IR_Test, hetero_workload); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp deleted file mode 100644 index 882ecdd955dd76..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "rnn_seq_test.hpp" - -RUN_CASE_CP_WITH_SUFFIX(CPU, _smoke, RNNSeqTest, workload); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp deleted file mode 100644 index cae97a1177fcd4..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include "ie_precision.hpp" -#include -#include -#include "low_precision_transformations/transformer.hpp" -#include "common/validation.hpp" -#include - -#include "network_i8.hpp" - -/************************************************* - * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! - * All ref values was obtained from Caffe scoring - * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! 
- *************************************************/ - -TEST_P(ModelTransformationsTest, LPT) {} - -static void checkLayerInputPrecision(const CNNNetwork& network, const std::string& layerName, Precision expectedPrecision, int inputIndex = -1) { - CNNLayerPtr layer = getLayer(network, layerName); - if (layer == nullptr) { - IE_THROW() << "layer '" << layerName << "' was not found"; - } - for (size_t index = 0ul; index < layer->insData.size(); ++index) { - if ((inputIndex != -1) && (index != inputIndex)) { - continue; - } - - const DataWeakPtr weakData = layer->insData[index]; - ASSERT_EQ(expectedPrecision, weakData.lock()->getPrecision()) << " unexpected precision " << weakData.lock()->getPrecision() << " for layer " << layerName; - } -} - -ModelParams getModelParams(const std::string modelName) { -std::map modelParams = { - // { - // "inception_v2_tf", - // ModelParams( - // "inception_v2_tf", - // "inception_v2/inception_v2_i8.xml", - // "validation_set/224x224/dog.bmp", - // {{157, 9.49783 }, // 157 row: 'Blenheim spaniel' - // { 219, 7.13866 }, // 219 row: 'Welsh springer spaniel', - // { 216, 5.60607 }, // 153 row: 'Japanese spaniel', - // { 220, 5.23158 }} - // ) - // }, - - { - "inception_v3_tf", - ModelParams( - "inception_v3_tf", - "inception_v3/inception_v3_i8.xml", - "validation_set/299x299/dog.bmp", - {{157, 10.1683}, // 157 row: 'Blenheim spaniel' - { 219, 5.751 }, // 219 row: 'Welsh springer spaniel', - { 153, 4.9502 }, // 153 row: 'Japanese spaniel', - { 216, 4.79769 }} - ) - }, - { - "mobilenet_v2_tf_depthwise", - ModelParams( - "mobilenet_v2_tf_depthwise", - "mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_i8.xml", - "validation_set/224x224/dog.bmp", - // original (FP32, no LPT) output tensor - {{ 157, 8.63748 }, - { 219, 6.29954 }, - { 216, 4.7303 }, // Windows, Linux: {218, 4.75413} - { 218, 4.69319 }, // Windows, Linux: {216, 4.75355} - { 220, 3.67249 }}, - {}, - [](const TransformationsParams& transformationsParam, CNNNetworkImplPtr usedNetwork) { - if (transformationsParam.transformationsInTestEnabled && transformationsParam.params.updatePrecisions) { - const static std::vector> fakeQuantizeAndConcolutionItems = { - // U8 with shift on activations - {"MobilenetV2/Conv/Conv2D/fq_input_0", ""}, - {"MobilenetV2/expanded_conv/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"}, - // I8 on activations - {"MobilenetV2/expanded_conv_1/expand/Conv2D/fq_input_0", ""}, - {"MobilenetV2/expanded_conv_1/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv_1/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"}, - // I8 on activations - {"MobilenetV2/expanded_conv_2/add/fq_input_1", ""}, - {"MobilenetV2/expanded_conv_2/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv_2/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"}, - // I8 on activations - {"MobilenetV2/expanded_conv_3/expand/Conv2D/fq_input_0", ""} - }; - - for (const std::pair item : fakeQuantizeAndConcolutionItems) { - TestsCommonFunc::checkLayerOuputPrecision(usedNetwork, item.first, Precision::U8); - if (!item.second.empty()) { - checkLayerInputPrecision(usedNetwork, item.second, Precision::U8, 0); - } - } - } - }) - }, - { - "resnet_50_tf", - ModelParams( - "resnet_50_tf", - "resnet_v1_50/resnet_v1_50_i8.xml", - "validation_set/224x224/dog.bmp", - {{ 156, 16.1796 }, - { 218, 11.9186 }, - { 219, 10.8054 }, - { 217, 10.1224 }, - { 152, 9.60148 }}, - {}, - [](const TransformationsParams& transformationsParam, CNNNetwork usedNetwork) { - if 
(transformationsParam.transformationsInTestEnabled && transformationsParam.params.updatePrecisions) { - const Precision originalPrecision = Precision::FP32; - const Precision targetPrecision = Precision::U8; - - //Eltwise CPU/GPU specific - TestsCommonFunc::checkLayerOuputPrecision(usedNetwork, "resnet_v1_50/block1/unit_1/bottleneck_v1/add/fq_input_0", originalPrecision); - TestsCommonFunc::checkLayerOuputPrecision(usedNetwork, "resnet_v1_50/block1/unit_1/bottleneck_v1/add/fq_input_1", Precision::I8); - - TestsCommonFunc::checkLayerOuputPrecision(usedNetwork, "resnet_v1_50/block2/unit_1/bottleneck_v1/add/fq_input_0", originalPrecision); - TestsCommonFunc::checkLayerOuputPrecision(usedNetwork, "resnet_v1_50/block2/unit_1/bottleneck_v1/add/fq_input_1", Precision::I8); - } - }) - }, - }; - - const auto it = modelParams.find(modelName); - if (it == modelParams.end()) { - IE_THROW() << "parameters for model '" << modelName << "' were not found"; - } - return it->second; -} - -//0.005f, -INSTANTIATE_TEST_CASE_P( - smoke_Inception, - ModelTransformationsTest, - ::testing::Values( - // TransformationsParams("CPU", getModelParams("inception_v2_tf"), 1ul, true, false, createParam()), - - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, false, createParam(), {}, 3ul), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, true, createParamI8I8(), {}, 0, false), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, true, createParamU8I8(), {}, 0), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, true, createParamU8U8(), {}, 0), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, true, createParamCpu().setQuantizedTensorAlignmentOnActivations(LayerTransformation::QuantizedTensorAlignment::UpdateLevel)), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, false, true, createParamCpu().setQuantizedTensorAlignmentOnActivations(LayerTransformation::QuantizedTensorAlignment::UpdateIntervals)), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 1ul, true, false, createParam()), - TransformationsParams("CPU", getModelParams("inception_v3_tf"), 2ul, true, false, createParam()) - ), - TransformationsParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_MobileNet, - ModelTransformationsTest, - ::testing::Values( - TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, false, createParamU8I8(), {}, 2), -// TODO: eshoguli: fix this issue -// TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamI8I8()), -// TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamU8I8()), -// TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamU8U8(), {}, 2), -// TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamCpu(), { "464/Pool", "465/Pool" }), - TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, true, false, createParamU8I8(), {}, 2), - TransformationsParams("CPU", getModelParams("mobilenet_v2_tf_depthwise"), 2ul, true, false, createParamU8I8(), {}, 2) - ), - TransformationsParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_ResNet, - ModelTransformationsTest, - ::testing::Values( - TransformationsParams("CPU", 
getModelParams("resnet_50_tf"), 1ul, false), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 1ul, false, true, createParamI8I8(), { - // TODO: remove when eltwise validation was added - "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars", - "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars" - }), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 1ul, false, true, createParamU8I8(), { -// // TODO: remove when eltwise validation was added - "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars", - "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars" - }), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 1ul, false, true, createParamU8U8(), { - // TODO: remove when eltwise validation was added - "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars", - "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars" - }), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 1ul, false, true, createParamCpu(), { - // TODO: remove when eltwise validation was added - "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars", - "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars" - }), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 1ul, true), - TransformationsParams("CPU", getModelParams("resnet_50_tf"), 2ul, true) - ), - TransformationsParams::getLowPrecisionTransformerSingleLayerTestName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp deleted file mode 100644 index 090fc1bf99d524..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp +++ /dev/null @@ -1,862 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "low_precision_transformer_single_layer_tests.hpp" -#include -#include -#include - -using namespace ::testing; -using namespace InferenceEngine; - - -TEST_P(SingleLayerTransformationsTest, LPT) { -} - -INSTANTIATE_TEST_CASE_P( - smoke_SingleLayerTransformationsTestFP32, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - PowerTestModel::Ptr(new PowerTestModel(1.f, 1.f, 0)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - PowerTestModel::Ptr(new PowerTestModel(1.f, 2.89f, 64)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - PowerTestModel::Ptr(new PowerTestModel(1.f, -32.f, 0)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - PowerTestModel::Ptr(new PowerTestModel(1.f, 1.f, -64.f)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - PowerTestModel::Ptr(new PowerTestModel(3.5f, 1.f, 0)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ResampleTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - 
SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()), - { { 1, 2048 } }, - { { 1, 1000 } }), - - // TODO: uncomment later - //SingleLayerTransformationsTestParams( - // "MKLDNNPlugin", - // SingleLayerTestModel::Ptr(new FullyConnectedTestModel({ 1, 128, 12, 64 }, { 128, 768 })), - // { { 1, 128, 12, 64 } }, - // { { 128, 768 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FullyConnectedTestModel({ 1, 128, 12, 64 }, { 1, 128, 768 })), - { { 1, 128, 12, 64 } }, - { { 1, 128, 768 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()), - { { 1, 1280, 7 } }, - { { 1, 1280, 7 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()), - { { 1, 1280, 7 } }, - { { 1, 1280, 7 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeReshapeTestModelWithConstants()), - { { 1, 256, 6, 6 } }, - { { 1, 9216 } }), - - // TODO: fix asymmetric pattern creation issue for NC layout and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()), - // { { 1, 32, 1, 1 } }, - // { { 1, 32, 1, 1 } }), - - // TODO: uncomment when bias correction with absent biases is fixed - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PoolingTestModel()), - { { 149, 149, 32, 1 } }, - { { 149, 149, 32, 1 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()), - { { 1, 32, 147, 147 } }, - { { 1, 64, 147, 147 } }), - - // Const transformation is disabled - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()), - { { 1, 64, 147, 147 } }, - { { 1, 80, 73, 73 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 32, 149, 149 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new
ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 32, 149, 149 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 32, 149, 149 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()), - { { 1, 32, 112, 112 } }, - { { 1, 32, 112, 112 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()), - { { 1, 32, 112, 112 } }, - { { 1, 32, 112, 112 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()), - // { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } }, - // { { 299, 299, 12, 1 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - // TODO: fix later - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAsOutputTest()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeWithMultiOutputsTest()), - { { 1, 32, 149, 149 } }, - { { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-10.25, 10.1641} })), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-0.00174255, 0.00174255} })), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-329.688, 327.188} })), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeScalesTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeSlopeTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ScaleShiftAndFakeQuantizeTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeWithTwoScaleShiftsAsOutput()), - { { 1, 32, 28, 28 }, { } }, - { { } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new MvnTestModel(0ul, 0ul)), - { { 1, 4, 128, 128, 128 } }, - { { 1, 4, 128, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new 
MvnTestModel(1ul, 0ul)), - { { 1, 4, 128, 128, 128 } }, - { { 1, 4, 128, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new MvnTestModel(0ul, 1ul)), - { { 1, 4, 128, 128, 128 } }, - { { 1, 4, 128, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new MvnTestModel(1ul, 1ul)), - { { 1, 4, 128, 128, 128 } }, - { { 1, 4, 128, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchPreservedTestModel(true)), - { { 1, 32, 149, 149 } }, - { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchPreservedTestModel(false)), - { { 1, 32, 149, 149 } }, - { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchNotPreservedTestModel(true)), - { { 1, 32, 149, 149 } }, - { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchNotPreservedTestModel(false)), - { { 1, 32, 149, 149 } }, - { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_EltwiseTestFP32, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "sum", true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "sum", false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "mul", true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "mul", false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "sum", true)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "sum", false)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "mul", true)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "mul", false)), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_ConcatTestFP32, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, false)), - { { 1, 3, 299, 
299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(true, false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(false, true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConcatTestModel(false, false)), - // { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - // { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, true, { 100, 1 })), - { { 100, 1 }, { 100, 1 } }, - { { 100, 2 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, false, { 100, 1 })), - { { 100, 1 }, { 100, 1 } }, - { { 100, 2 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(false, true, true, { 100, 1 })), - { { 100, 1 }, { 100, 1 } }, - { { 100, 2 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(false, true, false, { 100, 1 })), - { { 100, 1 }, { 100, 1 } }, - { { 100, 2 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(false, false, false, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(false, true, false, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(true, false, false, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(true, true, false, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(false, false, true, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(false, true, true, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(true, false, true, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatWithPoolingTestModel(true, true, true, 2.0)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_ScaleShiftToConvolutionFP32, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel()), - { { 1, 64, 112, 112 } }, - { { 1, 64, 112, 112 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new 
ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel()), - { { 1, 64, 112, 112 } }, - { { 1, 64, 112, 112 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel(true)), - { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } }, - { { 1, 64, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel(false)), - { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } }, - { { 1, 64, 299, 299 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_UpdateBiases, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(false)), - { { 1, 32, 112, 112 } }, - { { 1, 32, 112, 112 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(true)), - { { 1, 32, 112, 112 } }, - { { 1, 32, 112, 112 } }) - - // TODO: uncomment later - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new UpdateBiasesFullyConnectedTestModel(false)), - // { { 1, 32, 112, 112 } }, - // { { 1, 100 } }), - - // TODO: uncomment later - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new UpdateBiasesFullyConnectedTestModel(true)), - // { { 1, 32, 112, 112 } }, - // { { 1, 100 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_EltwiseCpuWithPooling, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "mul", false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "mul", true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }) - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); - -INSTANTIATE_TEST_CASE_P( - smoke_Eltwise, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", false)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()), - { { 1, 3, 299, 299 } }, - { {} }), - -// SingleLayerTransformationsTestParams( -// "CPU", -// SingleLayerTestModel::Ptr(new EltwiseTestModel()), -// { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, -// { {} }, -// "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - 
SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()), - { { 1, 3, 299, 299 } }, - { {} }, - "FP16"), - -// SingleLayerTransformationsTestParams( -// "CPU", -// SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()), -// { { 1, 128, 128 }, { 1, 128, 128 } }, -// { { 1, 128, 128 } }), - - SingleLayerTransformationsTestParams( // 5 - "CPU", - SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()), - { { 1, 1, 128 }, { 1, 128, 128 } }, - { { 1, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()), - { { 1, 128, 128 }, { 1, 128, 1 } }, - { { 1, 128, 128 } }), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()), - { { 1, 1, 128 }, { 1, 128, 1 } }, - { { 1, 128, 128 } }))); - -INSTANTIATE_TEST_CASE_P( - smoke_SingleLayerTransformationsTestFP16, - SingleLayerTransformationsTest, - ::testing::Values( - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()), - { { 1, 2048 } }, - { { 1, 1000 } }, - "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - // TODO: uncomment after fix -// SingleLayerTransformationsTestParams( -// "CPU", -// SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()), -// { { 1, 1280, 7 } }, -// { { 1, 1280, 7 } }, -// "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()), - { { 1, 1280, 7 } }, - { { 1, 1280, 7 } }, - "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new FakeQuantizeReshapeTestModelWithConstants()), - // { { 1, 256, 6, 6 } }, - // { { 1, 9216 } }, - // "FP16"), - - //Not parametrized yet. 
Executed on FP32 - - // TODO: fix asymmetric pattern creation issue for NC layout and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new PoolingTestModel()), - { { 149, 149, 32, 1 } }, - { { 149, 149, 32, 1 } }, - "FP16"), - - // TODO: failed on I8 on activations - uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()), - // { { 1, 32, 147, 147 } }, - // { { 1, 64, 147, 147 } }, - // "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()), - // { { 1, 64, 147, 147 } }, - // { { 1, 80, 73, 73 } }, - // "FP16"), - - // TODO: uncomment after fix - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()), - // { { 1, 3, 299, 299 } }, - // { { 1, 32, 149, 149 } }, - // "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 32, 149, 149 } }, - "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 32, 149, 149 } }, - "FP16"), - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()), - // { { 1, 32, 112, 112 } }, - // { { 1, 32, 112, 112 } }, - // "FP16"), - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()), - // { { 1, 32, 112, 112 } }, - // { { 1, 32, 112, 112 } }, - // "FP16"), - - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConcatTestModel(true)), - // { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - // { { 1, 6, 299, 299 } }, - // "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatTestModel(false, true)), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 } }, - "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()), - { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } }, - { { 1, 6, 299, 299 }, }, - "FP16"), - - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()), - // { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } }, - // { { 299, 299, 12, 1 } }, - // "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()), - { { 1,
32, 149, 149 } }, - { { 1, 32, 147, 147 } }, - "FP16"), - - // TODO: fix later - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()), - // { { 1, 32, 149, 149 } }, - // { { 1, 32, 147, 147 } }, - // "FP16"), - - SingleLayerTransformationsTestParams( - "CPU", - SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()), - { { 1, 3, 299, 299 } }, - { { 1, 3, 299, 299 } }, - "FP16") - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel()), - // { { 1, 64, 112, 112 } }, - // { { 1, 64, 112, 112 } }, - // "FP16") - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel()), - // { { 1, 64, 112, 112 } }, - // { { 1, 64, 112, 112 } }, - // "FP16") - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel()), - // { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } }, - // { { 1, 64, 299, 299 } }, - // "FP16") - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(false)), - // { { 1, 32, 112, 112 } }, - // { { 1, 32, 112, 112 } }, - // "FP16"), - - // TODO: fix and uncomment - //SingleLayerTransformationsTestParams( - // "CPU", - // SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(true)), - // { { 1, 32, 112, 112 } }, - // { { 1, 32, 112, 112 } }, - // "FP16") - ), - SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp deleted file mode 100644 index 66e5b21c39e0ef..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include "common_test_utils/data_utils.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct argmax_test_params { - std::vector src_dims; - std::vector dst_dims; - int has_axis; - int axis; - int out_max_val; - int top_k; -}; - -static inline int count(std::vector dims, size_t start_ind, size_t end_ind) { - size_t count = 1; - for (size_t i = start_ind; i < end_ind; i++) - count *= dims[i]; - return static_cast(count); -} - -static inline int count(std::vector dims, size_t start_ind = 0) { - return count(dims, start_ind, dims.size()); -} - -static void ref_argmax(InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, argmax_test_params p) { - float *src_data = src.data(); - float* dst_data = dst.data(); - - int dim, axis_dist; - if (p.has_axis) { - int axis_ = (p.axis < 0) ? 
p.axis + static_cast(p.src_dims.size()) : p.axis; - dim = static_cast(p.src_dims[axis_]); - axis_dist = count(p.src_dims, axis_) / dim; - } else { - dim = count(p.src_dims, 1); - axis_dist = 1; - } - - int num = count(p.src_dims) / dim; - std::vector > src_vector(dim); - - for (int i = 0; i < num; ++i) { - for (int j = 0; j < dim; ++j) { - src_vector[j] = std::make_pair( - src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j); - } - - std::partial_sort(src_vector.begin(), src_vector.begin() + p.top_k, - src_vector.end(), std::greater >()); - - for (int j = 0; j < p.top_k; ++j) { - if (p.out_max_val) { - if (p.has_axis) { - // Produces max_val per axis - dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].first; - } else { - // Produces max_ind and max_val - dst_data[2 * i * p.top_k + j] = src_vector[j].second; - dst_data[2 * i * p.top_k + p.top_k + j] = src_vector[j].first; - } - } else { - // Produces max_ind per axis - dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].second; - } - } - } -} - -class smoke_CPU_ArgmaxOnlyTest: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - - - __SRC_DIMS__ - - - - __DST_DIMS__ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(argmax_test_params p) { - std::string model = model_t; - - std::string src_dims; - for (auto &dim : p.src_dims) { - src_dims += "\n "; - src_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims); - - std::string dst_dims; - for (auto &dim : p.dst_dims) { - dst_dims += "\n "; - dst_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims); - - std::string axis; - if (p.has_axis) { - axis += "axis=\"" + std::to_string(p.axis) + "\""; - } - REPLACE_WITH_STR(model, "_AXIS_", axis); - - REPLACE_WITH_STR(model, "__OUT_MAX_VAL__", std::to_string(p.out_max_val)); - REPLACE_WITH_STR(model, "__TOP_K__", std::to_string(p.top_k)); - - return model; - } - - virtual void SetUp() { - try { - argmax_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - - Blob::Ptr src = make_shared_blob({Precision::FP32, p.src_dims, Layout::ANY}); - src->allocate(); - - TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - CommonTestUtils::fill_data_sine(src->buffer(), src->size(), 0.5, 0.5, 1); - - BlobMap srcs; - srcs.insert(std::pair("input", src)); - - OutputsDataMap out; - out = net.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_argmax(*srcPtr, dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(net, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - compare(*outputBlobs.begin()->second, dst_ref); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPU_ArgmaxOnlyTest, TestsArgmax) {} - -INSTANTIATE_TEST_CASE_P( - TestsArgmax, smoke_CPU_ArgmaxOnlyTest, - ::testing::Values( - argmax_test_params{{1, 3, 1024, 2048}, {1, 1, 1024, 2048}, 1, 1, 0, 1}, - argmax_test_params{{1, 
5, 1024, 2048}, {1, 1, 1024, 2048}, 1, 1, 1, 1}, - argmax_test_params{{3, 1, 10, 512}, {3}, 0, 1, 0, 1}, - argmax_test_params{{3, 1, 10, 512}, {3, 2}, 0, 1, 1, 1}, - argmax_test_params{{1, 20, 128, 128}, {1, 3, 128, 128}, 1, 1, 0, 3}, - argmax_test_params{{1, 20, 128, 128}, {1, 3, 128, 128}, 1, 1, 1, 3}, - argmax_test_params{{3, 1, 10, 512}, {3, 5}, 0, 1, 0, 5}, - argmax_test_params{{3, 1, 10, 512}, {3, 5, 2}, 0, 1, 1, 5}, - argmax_test_params{{1, 20, 128, 128}, {1, 18, 128, 128}, 1, 1, 0, 18}, - argmax_test_params{{1, 20, 128, 128}, {1, 18, 128, 128}, 1, 1, 1, 18} - )); - -INSTANTIATE_TEST_CASE_P( - TestsArgmaxOddDims, smoke_CPU_ArgmaxOnlyTest, - ::testing::Values( - argmax_test_params{{1, 3, 1025, 2049}, {1, 1, 1025, 2049}, 1, 1, 0, 1}, - argmax_test_params{{1, 5, 1025, 2049}, {1, 1, 1025, 2049}, 1, 1, 1, 1}, - argmax_test_params{{1, 20, 129, 129}, {1, 3, 129, 129}, 1, 1, 0, 3}, - argmax_test_params{{1, 20, 129, 129}, {1, 3, 129, 129}, 1, 1, 1, 3} - )); \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp deleted file mode 100644 index 55e4c18b1c3272..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - - -struct concat_base_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in1; - - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in2; - - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } out; - - size_t axis; -}; - -struct concat_test_params : concat_base_params { - std::string device_name; - - concat_test_params(std::string name, concat_base_params params) - : concat_base_params(params), device_name(name) {} -}; - -template -void check_concat_fwd(const TBlob &src, concat_test_params prm) -{ -} - -class smoke_CPU_ConcatOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IN1_ - _IC1_ - _IH1_ - _IW1_ - - - - - - - _IN2_ - _IC2_ - _IH2_ - _IW2_ - - - - - - - - _IN1_ - _IC1_ - _IH1_ - _IW1_ - - - _IN2_ - _IC2_ - _IH2_ - _IW2_ - - - - - _ON_ - _OC_ - _OH_ - _OW_ - - - - - - - - - -)V0G0N"; - - std::string getModel(concat_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IN1_", p.in1.n); - REPLACE_WITH_NUM(model, "_IC1_", p.in1.c); - REPLACE_WITH_NUM(model, "_IW1_", p.in1.w); - REPLACE_WITH_NUM(model, "_IH1_", p.in1.h); - - REPLACE_WITH_NUM(model, "_IN2_", p.in2.n); - REPLACE_WITH_NUM(model, "_IC2_", p.in2.c); - REPLACE_WITH_NUM(model, "_IW2_", p.in2.w); - REPLACE_WITH_NUM(model, "_IH2_", p.in2.h); - - REPLACE_WITH_NUM(model, "_ON_", p.out.n); - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - return model; - } - -protected: - - static void fill_data_ints(float *data, size_t size, int start) { - for (size_t i = 0; i < size; i++) { - data[i] = (float) (start + i); - } - } - - virtual void SetUp() { - - try { - concat_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = 
ie.ReadNetwork(model, Blob::CPtr()); - - SizeVector dims_src1 = {p.in1.n, - p.in1.c, - p.in1.h, - p.in1.w - }; - - SizeVector dims_src2 = {p.in2.n, - p.in2.c, - p.in2.h, - p.in2.w}; - - SizeVector dims_dst = {p.out.n, - p.out.c, - p.out.h, - p.out.w}; - - Blob::Ptr src1 = make_shared_blob({Precision::FP32, dims_src1, Layout::NCHW}); - src1->allocate(); - fill_data_ints(src1->buffer(), src1->size(), 0); - Blob::Ptr src2 = make_shared_blob({Precision::FP32, dims_src2, Layout::NCHW}); - src2->allocate(); - fill_data_ints(src2->buffer(), src2->size(), 10000); - BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - //compare(src, dst); - - float *src1_ptr = src1->buffer(); - float *src2_ptr = src2->buffer(); - float *dst_ptr = output->buffer(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = p.axis; dim < output->getTensorDesc().getDims().size(); dim++) { - len1 *= src1->getTensorDesc().getDims()[dim]; - len2 *= src2->getTensorDesc().getDims()[dim]; - } - cycles = p.axis; - - - int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (src1_ptr[index1] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (src2_ptr[index2] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -#define case_1 concat_base_params({\ - {1, 7, 2, 5},\ - {1, 7, 2, 5},\ - {2, 7, 2, 5},\ - 0}) -#define case_2 concat_base_params({\ - {1, 7, 2, 5},\ - {1, 7, 2, 5},\ - {1, 7, 4, 5},\ - 2}) -#define case_3 concat_base_params({\ - {1, 7, 2, 5},\ - {1, 13, 2, 5},\ - {1, 20, 2, 5},\ - 1}) -#define case_4 concat_base_params({\ - {1, 7, 2, 13},\ - {1, 7, 2, 17},\ - {1, 7, 2, 30},\ - 3}) -#define case_5 concat_base_params({\ - {1, 8, 8, 16},\ - {1, 16, 8, 16},\ - {1, 24, 8, 16},\ - 1}) - -TEST_P(smoke_CPU_ConcatOnlyTest, TestsConcat) { -} - -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device_name + - "_out_w" + std::to_string(obj.param.out.w) + - "_out_h" + std::to_string(obj.param.out.h) + - "_out_c" + std::to_string(obj.param.out.c) + - "_out_n" + std::to_string(obj.param.out.n); -} - -concat_test_params concat_only_test_cases[] = { - concat_test_params("CPU", case_1), - concat_test_params("CPU", case_2), - concat_test_params("CPU", case_3), - concat_test_params("CPU", case_4), - concat_test_params("CPU", case_5), -}; - -INSTANTIATE_TEST_CASE_P(TestConcat, smoke_CPU_ConcatOnlyTest, ::testing::ValuesIn(concat_only_test_cases), getTestCaseName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp 
b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp deleted file mode 100644 index cf31c1e7be6b4e..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "../common_single_layer_tests/conv_ref.hpp" -#include -#include - -#include "common_test_utils/common_layers_params.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using std::vector; - -struct conv_base_params { - vector in_dims; - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - vector dilations; - - size_t out_c; - size_t grp_c; - - vector out_dims; -}; - -struct conv_test_params : conv_base_params { - std::string device_name; - - conv_test_params(std::string name, conv_base_params params) : - conv_base_params(params), device_name(name) {} -}; - -template -static void fill_int_data_even(data_t *data, size_t size, bool is_signed) { - for (size_t i = 0 ; i < size; i++) { - data[i] = (i * 13 % 21 - 10 * is_signed) * 2; - } -} - -template -static void fill_int_data(data_t *data, size_t size, bool is_signed) { - for (size_t i = 0 ; i < size; i++) { - data[i] = i * 13 % 21 - 10 * is_signed; - } -} - -template -class smoke_ConvolutionInt8OnlyTest : public TestsCommon, - public WithParamInterface { - - std::string model_t = (std::string)R"V0G0N( - - - - - - _INPUT_DIMS_ - - - - - - - - - - - - _INPUT_DIMS_ - - - - - _OUTPUT_DIMS_ - - - - - - - - -)V0G0N"; - -protected: - - size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) { - return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu; - } - - void createBlobs(const conv_test_params &p, typename TBlob::Ptr &src, TBlob::Ptr &dst, TBlob::Ptr &dst_ref) { - auto in_size = p.in_dims.size(); - auto out_size = p.out_dims.size(); - SizeVector dims_dst = { - p.out_dims[out_size - 1] == 0 ? - calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1], - p.out_dims[out_size - 2] == 0 ? - calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2], - p.out_c, - 1lu}; - SizeVector dims_src; - for (int i = in_size; i > 0; i--) { - dims_src.push_back(p.in_dims[i - 1]); - } - - Layout layout = NCHW; - if (in_size == 5) { - layout = NCDHW; - dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ? - (p.out_dims[out_size - 3] == 0 ? - calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu); - } - - std::reverse(dims_src.begin(), dims_src.end()); - std::reverse(dims_dst.begin(), dims_dst.end()); - - Precision src_precision = (typeid(src_data_t) == typeid(int8_t)) ? Precision::I8 : Precision::U8; - src = make_shared_blob(TensorDesc({src_precision, dims_src, layout})); - src->allocate(); - - dst = make_shared_blob(TensorDesc({Precision::FP32, dims_dst, layout})); - dst->allocate(); - - dst_ref = make_shared_blob(TensorDesc({Precision::FP32, dims_dst, layout})); - dst_ref->allocate(); - } - - TBlob::Ptr fillWeights(const conv_test_params &p) { - auto KZ = p.kernel.size() > Z_AXIS ? 
p.kernel[Z_AXIS] : 1lu; - TBlob *weights_ptr = new TBlob(TensorDesc({Precision::U8, - {p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c * sizeof(uint8_t) - + p.out_c * sizeof(int32_t)}, C})); - weights_ptr->allocate(); - size_t bias_size = p.out_c; - size_t weights_size = (weights_ptr->size() - bias_size * sizeof(int32_t)) / sizeof(uint8_t); - int8_t *weights_data = (int8_t *) weights_ptr->buffer(); - auto *bias_data = (int32_t *)(weights_data + weights_size); - - if (typeid(src_data_t) == typeid(int8_t)) { - // If input data is signed, weight data is divided by 2 due to the specifics of implementation in mkl-dnn - fill_int_data_even(weights_data, weights_size, true); - } else { - fill_int_data(weights_data, weights_size, true); - } - fill_int_data(bias_data, bias_size, true); - - return TBlob::Ptr(weights_ptr); - } - - void calculateRef(const TBlob::Ptr &weights, const conv_test_params &p, const typename TBlob::Ptr &src, - TBlob::Ptr &dst_ref) { - const int8_t *weights_data = (const int8_t *) weights->buffer(); - size_t bias_size = p.out_c; - size_t weights_size = (weights->size() - bias_size * sizeof(int32_t)) / sizeof(uint8_t); - auto *bias_data = (const int32_t *)(weights_data + weights_size); - CommonTestUtils::conv_common_params params; - for (int i = 0; i < p.kernel.size(); i++) - params.kernel.insert(i, p.kernel[i]); - for (int i = 0; i < p.strides.size(); i++) - params.stride.insert(i, p.strides[i]); - for (int i = 0; i < p.pads_begin.size(); i++) - params.pads_begin.insert(i, p.pads_begin[i]); - for (int i = 0; i < p.dilations.size(); i++) - params.dilation.insert(i, p.dilations[i]); - params.group = p.grp_c; - params.out_c = p.out_c; - ref_conv_common<>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params); - } - - void SetUp() override { - try { - conv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - typename TBlob::Ptr src; - TBlob::Ptr dst, dst_ref; - createBlobs(p, src, dst, dst_ref); - auto *src_data = src->cbuffer().template as(); - size_t src_size = src->size() / sizeof(src_data_t); - if (typeid(src_data_t) == typeid(int8_t)) { - fill_int_data(src_data, src_size, true); - } else { - fill_int_data(src_data, src_size, false); - } - - auto weights = fillWeights(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, weights); - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - outputBlobs[item.first] = dst; - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - calculateRef(weights, p, src, dst_ref); - compare(*dst, *dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } - - virtual std::string getModel(conv_test_params p) { - std::string model = model_t; - - auto in_dims_size = p.in_dims.size(); - std::string input_dims = "" + std::to_string(p.in_dims[0]) + ""; - for (int i = 1; i < in_dims_size; i++) { - input_dims += "\n " + std::to_string(p.in_dims[i]) + ""; - } - REPLACE_WITH_STR(model, "_INPUT_DIMS_", input_dims); - - auto out_dims_size = p.out_dims.size(); - std::string output_dims = "" + std::to_string(p.in_dims[0]) + ""; - output_dims += "\n " + std::to_string(p.out_c) + ""; - if (out_dims_size > 2) { - 
size_t od = (p.out_dims[out_dims_size - 3] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]); - output_dims += "\n " + std::to_string(od) + ""; - } - size_t oh = p.out_dims[out_dims_size - 2] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2]; - output_dims += "\n " + std::to_string(oh) + ""; - size_t ow = p.out_dims[out_dims_size - 1] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1]; - output_dims += "\n " + std::to_string(ow) + ""; - REPLACE_WITH_STR(model, "_OUTPUT_DIMS_", output_dims); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations); - - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - - std::string ip = (typeid(src_data_t) == typeid(int8_t)) ? "I8" : "U8"; - REPLACE_WITH_STR(model, "_IP_", ip); - - size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu; - size_t w_data_size = (p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(uint8_t); - size_t b_data_size = p.out_c; - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - return model; - } -}; - -// conv_base_params ({in_dims, kernel, strides, pads_begin, pads_end, dilations, out_c, grp_c, out_dims}) -// If out_dims are zero, they are calculated automatically. 
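// An illustrative aside (not part of the original sources): the out_dims convention above
// can be checked with the same formula the tests' calculateOutDim helper uses,
// (in + 2 * pad_begin - kernel) / stride + 1; calc_out_dim below is a hypothetical name
// introduced only for this sketch. For case_6 below: (224 + 2*2 - 7) / 2 + 1 = 111,
// matching its expected out_dims of {111, 111}.
#include <cstddef>
constexpr std::size_t calc_out_dim(std::size_t in, std::size_t kernel,
                                   std::size_t stride, std::size_t pad_begin) {
    return (in + 2 * pad_begin - kernel) / stride + 1;  // integer division, as in calculateOutDim
}
static_assert(calc_out_dim(224, 7, 2, 2) == 111, "case_6 yields a 111x111 output");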
-// 2D -#define case_1 conv_base_params({{1, 9, 16, 32}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}}) -#define case_2 conv_base_params({{1, 9, 32, 16}, {2, 4}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}}) -#define case_3 conv_base_params({{1, 9, 32, 16}, {2, 4}, {2, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}}) -#define case_4 conv_base_params({{1, 3, 40, 40}, {3, 3}, {1, 2}, {0, 0}, {0, 0}, {1, 1}, 20, 1, {0, 0}}) -#define case_5 conv_base_params({{1, 9, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {1, 1}, 17, 1, {0, 0}}) -#define case_6 conv_base_params({{1, 3, 224, 224}, {7, 7}, {2, 2}, {2, 2}, {0, 0}, {1, 1}, 64, 1, {111, 111}}) -#define case_7 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 16, 16, {0, 0}}) -#define case_8 conv_base_params({{1, 32, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {1, 1}, 32, 32, {0, 0}}) -#define case_9 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {9, 9}, 16, 16, {0, 0}}) -#define case_10 conv_base_params({{1, 32, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {3, 3}, 32, 32, {2, 10}}) -#define case_11 conv_base_params({{1, 4, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {2, 2}, 4, 4, {5, 13}}) -#define case_12 conv_base_params({{1, 3, 224, 224}, {10, 10}, {1, 1}, {4, 4}, {0, 0}, {1, 1}, 4, 1, {223, 223}}) -#define case_13 conv_base_params({{1, 32, 1, 15000}, {11, 1}, {1, 1}, {20, 0}, {0, 0}, {4, 1}, 32, 1, {1, 15000}}) -#define case_14 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 16, 8, {0, 0}}) -#define case_15 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 8, 2, {0, 0}}) -#define case_16 conv_base_params({{1, 3, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 9, 3, {0, 0}}) -// 3D -#define case_3d_0 conv_base_params({{1, 3, 16, 32, 32}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 17, 1, {0, 0, 0}}) -#define case_3d_1 conv_base_params({{1, 3, 16, 32, 32}, {3, 3, 3}, {2, 2, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 64, 1, {0, 0, 0}}) -#define case_3d_2 conv_base_params({{1, 32, 8, 8, 8}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 32, 32, {0, 0, 0}}) -#define case_3d_3 conv_base_params({{1, 32, 10, 10, 10}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 32, 32, {0, 0, 0}}) -#define case_3d_4 conv_base_params({{1, 32, 8, 8, 8}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 32, 32, {0, 0, 0}}) -#define case_3d_5 conv_base_params({{1, 32, 8, 8, 8}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 16, 16, {0, 0, 0}}) -#define case_3d_6 conv_base_params({{1, 32, 10, 10, 10}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 16, 8, {0, 0, 0}}) -#define case_3d_7 conv_base_params({{1, 4, 8, 8, 8}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 16, 4, {0, 0, 0}}) - -using smoke_conv_u8s32 = smoke_ConvolutionInt8OnlyTest; - -TEST_P(smoke_conv_u8s32, TestsConvolution) { -} - -std::string getTestCaseInt8Name(testing::TestParamInfo obj) { - auto in_dims_size = obj.param.in_dims.size(); - return obj.param.device_name + - "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) + - "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) + - (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") + - "_c" + std::to_string(obj.param.in_dims[1]) + - "_kw" + std::to_string(obj.param.kernel[X_AXIS]) + - "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) + - (obj.param.kernel.size() > Z_AXIS ? 
"_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") + - "_sw" + std::to_string(obj.param.strides[X_AXIS]) + - "_sh" + std::to_string(obj.param.strides[Y_AXIS]) + - (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") + - "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) + - "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) + - (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") + - "_grpc" + std::to_string(obj.param.grp_c); -} - -conv_test_params conv_only_int8_test_cases[] = { - conv_test_params("CPU", case_1), - conv_test_params("CPU", case_2), - conv_test_params("CPU", case_3), - conv_test_params("CPU", case_4), - conv_test_params("CPU", case_5), - conv_test_params("CPU", case_6), -//// todo: it does not work on AVX-512 -// conv_test_params("CPU", case_7), -// conv_test_params("CPU", case_8), -// conv_test_params("CPU", case_9), -// conv_test_params("CPU", case_10), -// conv_test_params("CPU", case_11), - conv_test_params("CPU", case_12), - conv_test_params("CPU", case_13), - conv_test_params("CPU", case_14), - conv_test_params("CPU", case_15), - conv_test_params("CPU", case_16), -}; - -conv_test_params conv_only_int8_3d_test_cases[] = { - conv_test_params("CPU", case_3d_0), - conv_test_params("CPU", case_3d_1), - conv_test_params("CPU", case_3d_2), - conv_test_params("CPU", case_3d_3), - conv_test_params("CPU", case_3d_4), - conv_test_params("CPU", case_3d_5), - conv_test_params("CPU", case_3d_6), - conv_test_params("CPU", case_3d_7), -}; - -INSTANTIATE_TEST_CASE_P( - TestConvolution, smoke_conv_u8s32, ::testing::ValuesIn(conv_only_int8_test_cases), getTestCaseInt8Name); - -INSTANTIATE_TEST_CASE_P( - TestConvolution_3d, smoke_conv_u8s32, ::testing::ValuesIn(conv_only_int8_3d_test_cases), getTestCaseInt8Name); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp deleted file mode 100644 index 490e155f6b3f51..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "../common_single_layer_tests/conv_ref.hpp" -#include -#include -#include "common_test_utils/common_layers_params.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using std::vector; - -struct conv_base_params { - vector in_dims; - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - vector dilations; - - size_t out_c; - size_t grp_c; - - vector out_dims; -}; - -struct conv_test_params : conv_base_params { - std::string device_name; - - conv_test_params(std::string name, conv_base_params params) : - conv_base_params(params), device_name(name) {} -}; - -class smoke_ConvolutionOnlyTest : public TestsCommon, - public WithParamInterface { - - std::string model_t_4D = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - - std::string model_t_5D = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OD_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - -protected: - - size_t calculateOutDim(size_t in_dim, size_t 
kernel, size_t stride, size_t pad_begin) { - return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu; - } - - void createBlobs(const conv_test_params &p, TBlob::Ptr &src, TBlob::Ptr &dst, TBlob::Ptr &dst_ref) { - auto in_size = p.in_dims.size(); - auto out_size = p.out_dims.size(); - SizeVector dims_dst = { - p.out_dims[out_size - 1] == 0 ? - calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1], - p.out_dims[out_size - 2] == 0 ? - calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2], - p.out_c, - 1lu}; - SizeVector dims_src; - for (int i = in_size; i > 0; i--) { - dims_src.push_back(p.in_dims[i - 1]); - } - - Layout layout = NCHW; - if (in_size == 5) { - layout = NCDHW; - dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ? - (p.out_dims[out_size - 3] == 0 ? - calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu); - } - - src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), layout)); - src->allocate(); - - dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout)); - dst->allocate(); - - dst_ref = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout)); - dst_ref->allocate(); - } - - TBlob::Ptr fillWeights(const conv_test_params &p) { - auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu; - TBlob *weights_ptr = new TBlob(TensorDesc(Precision::U8, - {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c) - * sizeof(float)}, C)); - weights_ptr->allocate(); - fill_data((float *) weights_ptr->buffer(), weights_ptr->size() / sizeof(float)); - return TBlob::Ptr(weights_ptr); - } - - void calculateRef(const TBlob::Ptr &weights, const conv_test_params &p, const TBlob::Ptr &src, - TBlob::Ptr &dst_ref) { - const float *weights_data = (const float *) weights->buffer(); - size_t bias_size = p.out_c; - size_t weights_size = weights->size() / sizeof(float) - bias_size; - const float *bias_data = weights_data + weights_size; - CommonTestUtils::conv_common_params params; - for (int i = 0; i < p.kernel.size(); i++) - params.kernel.insert(i, p.kernel[i]); - for (int i = 0; i < p.strides.size(); i++) - params.stride.insert(i, p.strides[i]); - for (int i = 0; i < p.pads_begin.size(); i++) - params.pads_begin.insert(i, p.pads_begin[i]); - for (int i = 0; i < p.dilations.size(); i++) - params.dilation.insert(i, p.dilations[i]); - params.group = p.grp_c; - params.out_c = p.out_c; - ref_conv_common<>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params); - } - - CNNNetwork getNetwork(const TBlob::Ptr &weights, const conv_test_params &p) { - Core ie; - return ie.ReadNetwork(getModel(p), weights); - } - - virtual void - infer(CNNNetwork &network, const conv_test_params &p, TBlob::Ptr &src, TBlob::Ptr &dst) { - Blob::Ptr srcPtr = std::shared_ptr(src); - Blob::Ptr dstPtr = std::shared_ptr(dst); - - Core ie; - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, srcPtr); - 
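-        // Bind the pre-allocated output blob as well, then run inference synchronously.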
inferRequest.SetBlob(outInfo.begin()->first, dstPtr); - inferRequest.Infer(); - } - - void SetUp() override { - try { - conv_test_params p = ::testing::WithParamInterface::GetParam(); - TBlob::Ptr src, dst, dst_ref; - createBlobs(p, src, dst, dst_ref); - fill_data(src->data(), src->size()); - auto weights = fillWeights(p); - calculateRef(weights, p, src, dst_ref); - CNNNetwork network = getNetwork(weights, p); - infer(network, p, src, dst); - compare(*dst, *dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } - - virtual std::string getModel(conv_test_params p) { - std::string model; - auto in_dims_size = p.in_dims.size(); - if (in_dims_size == 4) { - model = model_t_4D; - } else if (in_dims_size == 5) { - model = model_t_5D; - } - - REPLACE_WITH_NUM(model, "_IW_", p.in_dims[in_dims_size - 1]); - REPLACE_WITH_NUM(model, "_IH_", p.in_dims[in_dims_size - 2]); - REPLACE_WITH_NUM(model, "_ID_", p.in_dims[in_dims_size - 3]); - REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]); - REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations); - - auto out_dims_size = p.out_dims.size(); - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ? - (p.out_dims[out_dims_size - 3] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]) : - 1lu); - REPLACE_WITH_NUM(model, "_OH_", p.out_dims[out_dims_size - 2] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2]); - REPLACE_WITH_NUM(model, "_OW_", p.out_dims[out_dims_size - 1] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1]); - - size_t KD = p.kernel.size() > Z_AXIS ? 
p.kernel[Z_AXIS] : 1lu; - size_t w_data_size = (p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(float); - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - return model; - } -}; - -class smoke_ConvolutionReshapeTest : public smoke_ConvolutionOnlyTest { -protected: - void SetUp() override { - try { - conv_test_params p = ::testing::WithParamInterface::GetParam(); - TBlob::Ptr src, dst, dst_ref; - auto weights = fillWeights(p); - CNNNetwork network = getNetwork(weights, p); - infer(network, p, src, dst); - updatePaddings(network, p); - dst_ref = std::make_shared>(dst->getTensorDesc()); - dst_ref->allocate(); - calculateRef(weights, p, src, dst_ref); - compare(*dst, *dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } - - void updatePaddings(const CNNNetwork &network, conv_test_params& p) { - details::CNNNetworkIterator i(network), end; - auto found = std::find_if(i, end, [](const CNNLayer::Ptr& layer) { - return layer->type == "Convolution"; - }); - ASSERT_NE(found, end); - auto convLayer = std::dynamic_pointer_cast(*found); - auto allPad = getPaddings(*convLayer.get()); - p.pads_begin[X_AXIS] = allPad.begin[X_AXIS]; - p.pads_begin[Y_AXIS] = allPad.begin[Y_AXIS]; - if (p.pads_begin.size() > Z_AXIS) - p.pads_begin[Z_AXIS] = allPad.begin[Z_AXIS]; - } - void - infer(CNNNetwork &network, const conv_test_params &p, TBlob::Ptr &src, TBlob::Ptr &dst) override { - Core ie; - auto firstInputInfo = *network.getInputsInfo().begin(); - std::string inputName = firstInputInfo.first; - auto firstOutputInfo = *network.getOutputsInfo().begin(); - std::string outputName = firstOutputInfo.first; - auto inputShapes = network.getInputShapes(); - IE_ASSERT(inputShapes.size() == 1); - inputShapes.begin()->second = p.in_dims; - ASSERT_NO_THROW(network.reshape(inputShapes)); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest request = exeNetwork.CreateInferRequest(); - Blob::Ptr src_b = request.GetBlob(inputName); - - src = std::dynamic_pointer_cast>(src_b); - fill_data(src->data(), src->size()); - request.Infer(); - Blob::Ptr dst_b = request.GetBlob(outputName); - dst = std::dynamic_pointer_cast>(dst_b); - } - - std::string getModel(conv_test_params p) override { - std::string model = smoke_ConvolutionOnlyTest::getModel(p); - REPLACE_WITH_STR(model, "convolution", "convolution auto_pad=\"same_upper\""); - std::string pads_pattern = "pads_begin=\""; - for (int i = p.pads_begin.size(); i > 0; i--) { - pads_pattern += std::to_string(p.pads_begin[i - 1]) + ","; - } - std::string pads = "pads_begin=\"0,0\""; - if (p.pads_begin.size() == 3) { - pads = "pads_begin=\"0,0,0\""; - } - REPLACE_WITH_NUM_VECTOR(model, pads_pattern, pads); - return model; - } -}; - -#define case_1 conv_base_params({{1lu, 9lu, 16lu, 32lu}, {1lu, 1lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}}) -#define case_2 conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}}) -#define case_3 conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}}) -#define case_4 conv_base_params({{1lu, 3lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}}) -#define case_5 conv_base_params({{1lu, 9lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 
0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}}) -#define case_6 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {7lu, 7lu}, {2lu, 2lu}, {2lu, 2lu}, {0lu, 0lu}, {1lu, 1lu}, 64lu, 1lu, {112lu, 112lu}}) -#define case_7 conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}}) -#define case_8 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}}) -#define case_9 conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}}) -#define case_10 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}}) -#define case_11 conv_base_params({{1lu, 4lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 4lu, 4lu, {0lu, 0lu}}) -#define case_12 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {10lu, 10lu}, {1lu, 1lu}, {4lu, 4lu}, {0lu, 0lu}, {1lu, 1lu}, 4lu, 1lu, {224lu, 224lu}}) -#define case_13 conv_base_params({{1lu, 32lu, 1lu, 15000lu}, {11lu, 1lu}, {1lu, 1lu}, {5lu, 0lu}, {0lu, 0lu}, {4lu, 1lu}, 32lu, 1lu, {15000lu, 1lu}}) - - -#define case_14 conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu}, {1lu, 1lu, 1lu}, {1lu, 1lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 17lu, 1lu, {0lu, 0lu, 0lu}}) -#define case_15 conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu}, {3lu, 3lu, 3lu}, {2lu, 2lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 64lu, 1lu, {0lu, 0lu, 0lu}}) - -// NOTE: always auto_pad = same_upper. IR with zero_pads, pad from params is used for ref_conv after reshape -#define case_si_1 conv_base_params({{1lu, 144lu, 75lu, 75lu}, {3lu, 3lu}, {2lu, 2lu}, {1lu, 1lu}, {0lu, 0lu}, {1lu, 1lu}, 144lu, 144lu, {1lu, 1lu}}) - -// TODO: rewrite to ngraph to have reshape functionality -TEST_P(smoke_ConvolutionReshapeTest, DISABLED_TestsReshapeConvolution) { -} - -std::string getTestCaseName(testing::TestParamInfo obj) { - auto in_dims_size = obj.param.in_dims.size(); - return obj.param.device_name + - "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) + - "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) + - (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") + - "_c" + std::to_string(obj.param.in_dims[1]) + - "_kw" + std::to_string(obj.param.kernel[X_AXIS]) + - "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) + - (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") + - "_sw" + std::to_string(obj.param.strides[X_AXIS]) + - "_sh" + std::to_string(obj.param.strides[Y_AXIS]) + - (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") + - "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) + - "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) + - (obj.param.dilations.size() > Z_AXIS ? 
"_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") + - "_grpc" + std::to_string(obj.param.grp_c); -} - -conv_test_params conv_only_test_cases[] = { - conv_test_params("CPU", case_1), - conv_test_params("CPU", case_2), - conv_test_params("CPU", case_3), - conv_test_params("CPU", case_4), - conv_test_params("CPU", case_5), - conv_test_params("CPU", case_6), - conv_test_params("CPU", case_7), - conv_test_params("CPU", case_8), - conv_test_params("CPU", case_9), - conv_test_params("CPU", case_10), - conv_test_params("CPU", case_11), - conv_test_params("CPU", case_12), - conv_test_params("CPU", case_13), - conv_test_params("CPU", case_14), - conv_test_params("CPU", case_15) -}; - -INSTANTIATE_TEST_CASE_P( - TestConvolution, smoke_ConvolutionOnlyTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName); - -INSTANTIATE_TEST_CASE_P( - TestSameUpperConvolution, smoke_ConvolutionReshapeTest, - ::testing::Values(conv_test_params("CPU", case_si_1)), - getTestCaseName); - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp deleted file mode 100644 index 2ca739cab5e00d..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -#include - -#include -#include "common_test_utils/data_utils.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -struct conv_int8_base_params { - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t krn_w; - size_t krn_h; - size_t str_w; - size_t str_h; - size_t pad_w; - size_t pad_h; - size_t dil_w; - size_t dil_h; - - size_t out_c; - size_t grp_c; - - struct { - size_t w; - size_t h; - } out; -}; - -struct conv_test_int8_params : conv_int8_base_params { - std::string device_name; - - conv_test_int8_params(std::string name, conv_int8_base_params params) : - conv_int8_base_params(params), device_name(name) {} -}; - -template -void ref_conv_relu(const TBlob &src, const data_t *weights, const size_t weightsSize, - TBlob &dst, conv_test_int8_params prm) { - size_t KW = prm.krn_w; - size_t KH = prm.krn_h; - size_t GC = prm.grp_c; - - size_t IW = src.getTensorDesc().getDims()[3]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IC = src.getTensorDesc().getDims()[1]; - - size_t OW = prm.out.w == 0 ? (IW + 2 * prm.pad_w - prm.krn_w) / prm.str_w + 1 : prm.out.w; - size_t OH = prm.out.h == 0 ? 
(IH + 2 * prm.pad_h - prm.krn_h) / prm.str_h + 1 : prm.out.h; - size_t OC = prm.out_c; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + KW * KH * OC * IC / GC; - data_t *dst_data = dst.data(); - - IE_ASSERT(KW * KH * OC * IC / GC + OC == weightsSize); - IE_ASSERT(OW == dst.getTensorDesc().getDims()[3]); - IE_ASSERT(OH == dst.getTensorDesc().getDims()[2]); - - for (uint32_t g = 0; g < GC; g++) { - for (uint32_t oc = 0; oc < OC / GC; oc++) { - for (uint32_t oh = 0; oh < OH; oh++) { - for (uint32_t ow = 0; ow < OW; ow++) { - size_t oidx = g * OC / GC * OH * OW - + oc * OH * OW + oh * OW + ow; - dst_data[oidx] = bias_data[g * OC / GC + oc]; - - for (size_t ic = 0; ic < IC / GC; ic++) { - for (size_t kh = 0; kh < KH; kh++) { - for (size_t kw = 0; kw < KW; kw++) { - int32_t iw = ow * prm.str_w - prm.pad_w + kw * (1 + prm.dil_w); - int32_t ih = oh * prm.str_h - prm.pad_h + kh * (1 + prm.dil_h); - if (iw < 0 || iw >= (int32_t)IW || ih < 0 - || ih >= (int32_t)IH) - continue; - size_t iidx = g * IC / GC * IH * IW - + ic * IH * IW + ih * IW + iw; - size_t widx = g * OC / GC * IC / GC * KH * KW - + oc * IC / GC * KH * KW - + ic * KH * KW + kh * KW + kw; - - dst_data[ oidx] += src_data[iidx] * weights_data[widx]; - } - } - } - - // Applying ReLU - if (dst_data[oidx] < 0) dst_data[oidx] = 0; - - } - } - } - } -} - -class smoke_ConvolutionInt8Test: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - 1 - _IC_ - _IH_ - _IW_ - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - - - - - -)V0G0N"; - - std::string getModel(conv_test_int8_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - - REPLACE_WITH_NUM(model, "_KW_", p.krn_w); - REPLACE_WITH_NUM(model, "_KH_", p.krn_h); - REPLACE_WITH_NUM(model, "_SW_", p.str_w); - REPLACE_WITH_NUM(model, "_SH_", p.str_h); - REPLACE_WITH_NUM(model, "_PW_", p.pad_w); - REPLACE_WITH_NUM(model, "_PH_", p.pad_h); - - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - REPLACE_WITH_NUM(model, "_OH_", p.out.h == 0 ? (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1 : p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w == 0 ? 
(p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1 : p.out.w); - - size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c )* sizeof(float); - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - return model; - } - -protected: - const char* DEFAULT_PATH_P = "./lib"; - - static void compare_NRMSD(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_nrmsd = 0.01f) { - float *res_ptr = res.buffer().as(); - size_t res_size = res.size(); - - float *ref_ptr = ref.buffer().as(); - size_t ref_size = ref.size(); - - ASSERT_EQ(res_size, ref_size); - - float sum = 0; - - float mmin = ref_ptr[0], mmax = ref_ptr[0]; - - for (size_t i = 0; i < ref_size; i++) { - float sqr = (ref_ptr[i] - res_ptr[i]); - sqr *= sqr; - sum += sqr; - - mmin = (std::min)(mmin, ref_ptr[i]); - mmax = (std::max)(mmax, ref_ptr[i]); - - if (i % 10007 == 0) { - std::cout << i << ": " << res_ptr[i] << "\t" << ref_ptr[i] << "\t" << "\tdiv: " << ref_ptr[i] / res_ptr[i] << std::endl; - } - - } - sum /= ref_size; - - sum = pow(sum, 0.5f); - - sum /= mmax - mmin; - - ASSERT_LE(sum, max_nrmsd); - } - - virtual void SetUp() { - try { - conv_test_int8_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - TBlob *weights = new TBlob(TensorDesc(Precision::U8, {(p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c + p.out_c) - * sizeof(float)}, C)); - weights->allocate(); - - //fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 0.00, 0.005, 0.1); - CommonTestUtils::fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 1, 4, 0.3); - //fill_data_dbgval((float *) weights->buffer(), weights->size() / sizeof(float)); - //size_t bias_start = p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c; - //fill_data_const((float *) weights->buffer() + bias_start, p.out_c, 0.00); - - // Set biases to 0 - /*for (int i = weights->size() / sizeof(float) - C - 1; i < weights->size() / sizeof(float); i++) { - ((float *) weights->buffer())[i] = 0; - }*/ - - - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - // Collecting statistics - - // TODO Load nodes stats from file - std::string imageFilename = TestDataHelpers::get_data_path() + "/validation_set/224x224/dog.bmp"; - std::cout << "Using image file: " << imageFilename << std::endl; - - Core ie; - auto network = ie.ReadNetwork(model, weights_ptr); - - SizeVector dims_dst = {p.out.w == 0 ? (p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1 : p.out.w, - p.out.h == 0 ? (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1 : p.out.h, - p.out_c, - 1}; - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - - // Setting the statistics data - - CNNNetwork myNetwork = ie.ReadNetwork(model, weights_ptr); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; // 1 is a batch size - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - - - - - - std::vector imageNames = { imageFilename }; - - /** Taking information about all topology inputs **/ - InputsDataMap inputInfo(myNetwork.getInputsInfo()); - - if (inputInfo.size() != 1) throw std::logic_error("Sample supports topologies only with 1 input"); - auto inputInfoItem = *inputInfo.begin(); - - /** Specifying the precision of input data provided by the user. 
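- * FP32 is requested here; the plugin then converts user input to the network's internal precision as needed.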
- * This should be called before load of the network to the plugin **/ - inputInfoItem.second->setPrecision(Precision::FP32); - inputInfoItem.second->setLayout(Layout::NCHW); - - - std::vector> imagesData; - for (auto & i : imageNames) { - FormatReader::ReaderPtr reader(i.c_str()); - if (reader.get() == nullptr) { - std::cout << "Image " + i + " cannot be read!" << std::endl; - continue; - } - /** Store image data **/ - SizeVector dims = inputInfoItem.second->getTensorDesc().getDims(); - std::shared_ptr data(reader->getData(dims.back(), dims.at(dims.size() - 2))); - if (data.get() != nullptr) { - imagesData.push_back(data); - } - } - if (imagesData.empty()) throw std::logic_error("Valid input images were not found!"); - - OutputsDataMap outputInfo(myNetwork.getOutputsInfo()); - for (auto itOut : outputInfo) { - itOut.second->setPrecision(Precision::FP32); - } - - /** Filling input tensor with images. First b channel, then g and r channels **/ - size_t num_chanels = src->getTensorDesc().getDims()[1]; - size_t image_size = src->getTensorDesc().getDims()[2] * src->getTensorDesc().getDims()[3]; - - float* data = src->buffer().as::value_type*>(); - - /** Iterate over all input images **/ - for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) { - /** Iterate over all pixel in image (b,g,r) **/ - for (size_t pid = 0; pid < image_size; pid++) { - /** Iterate over all channels **/ - for (size_t ch = 0; ch < num_chanels; ++ch) { - /** [images stride + channels stride + pixel id ] all in bytes **/ - data[image_id * image_size * num_chanels + ch * image_size + pid ] = (float)(imagesData.at(image_id).get()[pid*num_chanels + ch]); - } - } - } - - // Inferring the converted network and comparing the result with the reference - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - - std::cout << "Inferring int8" << std::endl; - inferRequest.Infer(); - - // Calculating FP32 reference - TBlob dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst_ref.allocate(); - auto * srcPtr = dynamic_cast*>(src.get()); - ref_conv_relu(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p); - - // Comparing the result with the reference - compare_NRMSD(*dst, dst_ref, 0.17); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -/* - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t krn_w; - size_t krn_h; - size_t str_w; - size_t str_h; - size_t pad_w; - size_t pad_h; - size_t dil_w; - size_t dil_h; - - size_t out_c; - size_t grp_c; - - struct { - size_t w; - size_t h; - } out; -*/ -// Wo=(Wi−F+2P)/S+1 - -#define case_1 conv_int8_base_params({{4, 4, 3}, 1, 1, 1, 1, 0, 0, 0, 0, 3, 1}) -#define case_2 conv_int8_base_params({{16, 32, 3}, 2, 4, 1, 1, 0, 0, 0, 0, 17, 1}) -#define case_3 conv_int8_base_params({{16, 32, 3}, 2, 4, 2, 1, 0, 0, 0, 0, 17, 1}) -#define case_4 conv_int8_base_params({{40, 40, 3}, 3, 3, 1, 2, 0, 0, 0, 0, 20, 1}) -#define case_5 conv_int8_base_params({{32, 16, 3}, 7, 7, 2, 2, 3, 3, 0, 0, 17, 1}) -#define case_6 conv_int8_base_params({{224, 224, 3}, 7, 7, 2, 2, 2, 2, 0, 0, 64, 1, {112, 112}}) -/*#define case_7 conv_int8_base_params({{40, 40, 16}, 
3, 3, 1, 1, 0, 0, 0, 0, 16, 16}) -#define case_8 conv_int8_base_params({{32, 16, 32}, 7, 7, 2, 2, 3, 3, 0, 0, 32, 32})*/ - -// These tests use dilated convolution and don't work yet -/*#define case_9 conv_int8_base_params({{40, 40, 16}, 3, 3, 1, 1, 0, 0, 8, 8, 16, 16}) -#define case_10 conv_int8_base_params({{32, 16, 32}, 7, 7, 2, 2, 3, 3, 8, 8, 32, 32}) -#define case_11 conv_int8_base_params({{32, 16, 4}, 7, 7, 2, 2, 3, 3, 8, 8, 4, 4})*/ - -TEST_P(smoke_ConvolutionInt8Test, TestsConvolution) { -} - -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device_name + - "_w" + std::to_string(obj.param.in.w) + - "_h" + std::to_string(obj.param.in.h) + - "_c" + std::to_string(obj.param.in.c) + - "_krnw" + std::to_string(obj.param.krn_w) + - "_krnh" + std::to_string(obj.param.krn_h) + - "_strw" + std::to_string(obj.param.str_w) + - "_strh" + std::to_string(obj.param.str_h) + - "_dilw" + std::to_string(obj.param.dil_w) + - "_dilh" + std::to_string(obj.param.dil_h) + - "_grpc" + std::to_string(obj.param.grp_c); -} - -conv_test_int8_params conv_int8_test_cases[] = { - conv_test_int8_params("CPU", case_1), - conv_test_int8_params("CPU", case_2), - conv_test_int8_params("CPU", case_3), - conv_test_int8_params("CPU", case_4), - conv_test_int8_params("CPU", case_5), - // conv_test_int8_params("CPU", case_6), - //conv_test_int8_params("CPU", case_7), - //conv_test_int8_params("CPU", case_8), - //conv_test_int8_params("CPU", case_9), - //conv_test_int8_params("CPU", case_10), - //conv_test_int8_params("CPU", case_11), -}; - -INSTANTIATE_TEST_CASE_P( - TestConvolution, smoke_ConvolutionInt8Test, ::testing::ValuesIn(conv_int8_test_cases), getTestCaseName); \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp deleted file mode 100644 index 3e718de151093b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct detectionout_test_params { - std::string device_name; - - size_t mb; - - struct { - size_t c; - } in1; - - struct { - size_t c; - } in2; - - struct { - size_t c; - size_t h; - size_t w; - } in3; - - struct { - size_t c; - size_t h; - size_t w; - } out; -}; - -class smoke_CPUDetectionOutOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - _IC1_ - - - - - - - 1 - _IC2_ - - - - - - - 1 - _IC3_ - _IH3_ - _IW3_ - - - - - - - - 1 - _IC1_ - - - 1 - _IC2_ - - - 1 - _IC3_ - _IH3_ - _IW3_ - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - - - - - - - -)V0G0N"; - - std::string getModel(detectionout_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IC1_", p.in1.c); - REPLACE_WITH_NUM(model, "_IC2_", p.in2.c); - - REPLACE_WITH_NUM(model, "_IC3_", p.in3.c); - REPLACE_WITH_NUM(model, "_IH3_", p.in3.h); - REPLACE_WITH_NUM(model, "_IW3_", p.in3.w); - - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } - -protected: - virtual void SetUp() { - - try { - detectionout_test_params p = 
::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - network.setBatchSize(p.mb); - - InputsDataMap inputs = network.getInputsInfo(); - - DataPtr inputPtr1 = inputs["input1"]->getInputData(); - DataPtr inputPtr2 = inputs["input2"]->getInputData(); - DataPtr inputPtr3 = inputs["input3"]->getInputData(); - - InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob(inputPtr1->getTensorDesc()); - input1->allocate(); - - InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob(inputPtr2->getTensorDesc()); - input2->allocate(); - - InferenceEngine::Blob::Ptr input3 = InferenceEngine::make_shared_blob(inputPtr3->getTensorDesc()); - input3->allocate(); - - InferenceEngine::BlobMap inputBlobs; - inputBlobs["input1"] = input1; - inputBlobs["input2"] = input2; - inputBlobs["input3"] = input3; - - OutputsDataMap outputs = network.getOutputsInfo(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(outputs["detection_out"]->getTensorDesc()); - output->allocate(); - - InferenceEngine::BlobMap outputBlobs; - outputBlobs["detection_out"] = output; - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(inputBlobs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUDetectionOutOnlyTest, TestsDetectionOut) {} - -INSTANTIATE_TEST_CASE_P( - TestsDetectionOut, smoke_CPUDetectionOutOnlyTest, - ::testing::Values( - detectionout_test_params{ "CPU", - 10, {147264}, {147264}, {2, 1, 147264}, {1, 200, 7} })); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp deleted file mode 100644 index 5730a3a3c3520b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct fc_base_params { - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t out_c; -}; - -struct fc_test_params : fc_base_params { - std::string device_name; - - fc_test_params(std::string name, fc_base_params params) : - fc_base_params(params), device_name(name) {} -}; - -template -void ref_innerproduct(const TBlob &src, const data_t *weights, const size_t weightsSize, - TBlob &dst, fc_test_params prm) -{ - size_t IW = src.getTensorDesc().getDims()[3]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IC = src.getTensorDesc().getDims()[1]; - - size_t OC = prm.out_c; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + IW*IH*IC*OC; - data_t *dst_data = dst.data(); - - IE_ASSERT( IW*IH*IC*OC + OC == weightsSize); - IE_ASSERT( OC == dst.getTensorDesc().getDims()[1]); - - for (size_t oc = 0; oc < OC; oc++) { - dst_data[oc] = bias_data[oc]; - for (size_t ic = 0; ic < IC; ic++) { - for (size_t kh = 0; kh < IH; kh++) { - for (size_t kw = 0; kw < IW; kw++) { - size_t iidx = ic * IH * IW + 
kh * IW + kw; - size_t widx = oc * IC * IH * IW - + ic * IH * IW + kh * IW + kw; - - dst_data[oc] += src_data[iidx] * weights_data[widx]; - } - } - } - } -} - -class smoke_FullyConnectedOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string layers_t = R"V0G0N( - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(fc_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - - size_t w_data_size = (p.in.w * p.in.h * p.in.c * p.out_c )* sizeof(float); - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - model = IRTemplateGenerator::getIRTemplate("FullyConnected_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - - try { - fc_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - TBlob *weights = new TBlob({Precision::U8, {(p.in.w * p.in.h * p.in.c * p.out_c + p.out_c) * sizeof(float)}, Layout::C}); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, weights_ptr); - - SizeVector dims_src = {1, - p.in.c, - p.in.h, - p.in.w}; - Blob::Ptr src = make_shared_blob(TensorDesc({ Precision::FP32, dims_src, Layout::NCHW })); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = {1, p.out_c}; - Blob::Ptr dst = make_shared_blob(TensorDesc({ Precision::FP32, dims_dst, Layout::NC })); - dst->allocate(); - - TBlob dst_ref({Precision::FP32, dims_dst, Layout::NC}); - dst_ref.allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_innerproduct(*srcPtr, weights->readOnly().as(), weights->size() / sizeof(float), dst_ref, p); - compare(*dst, dst_ref, 0.9f); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -#define case_1 fc_base_params({{227, 227, 3}, 96}) -#define case_2 fc_base_params({{227, 227, 4}, 8}) - -TEST_P(smoke_FullyConnectedOnlyTest, TestsFullyConnected) {} - -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device_name + - "_w" + std::to_string(obj.param.in.w) + - "_h" + std::to_string(obj.param.in.h) + - "_c" + std::to_string(obj.param.in.c) + - "_outc" + std::to_string(obj.param.out_c); -} - -fc_test_params fc_only_test_cases[] = { - fc_test_params("CPU", case_1), - fc_test_params("CPU", case_2), -}; - -INSTANTIATE_TEST_CASE_P( - TestsFullyConnected, smoke_FullyConnectedOnlyTest, ::testing::ValuesIn(fc_only_test_cases), getTestCaseName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp 
b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp deleted file mode 100644 index 88a58955c484af..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct batchnorm4D_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - double epsilon; -}; - -template -void ref_batchnorm4D(const TBlob &src, const data_t *variance, const data_t *mean, - TBlob &dst, batchnorm4D_test_params prm) { - size_t IW = src.getTensorDesc().getDims()[3]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t MB = src.getTensorDesc().getDims()[0]; - - const double eps = prm.epsilon; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int c = 0; c < IC; ++c) { - data_t v_mean = mean[c]; - data_t v_variance = variance[c]; - data_t sqrt_variance = 0; - - sqrt_variance = 1. / sqrt(v_variance + eps); - - for (int n = 0; n < MB; ++n) - for (int h = 0; h < IH; ++h) - for (int w = 0; w < IW; ++w) { - size_t idx = n * IC * IH * IW - + c * IH * IW - + h * IW + w; - dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance; - } - } -} - -class smoke_CPUBatchNorn4DOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OH_ - _OW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - std::string getModel(batchnorm4D_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon); - - REPLACE_WITH_NUM(model, "_OW_", p.in.w); - REPLACE_WITH_NUM(model, "_OH_", p.in.h); - REPLACE_WITH_NUM(model, "_OC_", p.in.c); - - size_t w_data_size = p.in.c * sizeof(float); - size_t b_data_size = p.in.c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - model = IRTemplateGenerator::getIRTemplate("BatchNorm4D_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - try { - batchnorm4D_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - TBlob *weights = new TBlob(TensorDesc(Precision::U8, {p.in.c * 2 * sizeof(float)}, C)); - weights->allocate(); - fill_data(weights->buffer(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, weights_ptr); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; // 1 is a batch size - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - Blob::Ptr dst = 
make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - dst->allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - TBlob dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - dst_ref.allocate(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_batchnorm4D(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), dst_ref, p); - - compare(*dst, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUBatchNorn4DOnlyTest, TestsBatchNorm4D) {} - -INSTANTIATE_TEST_CASE_P( - TestBatchNorm4D, smoke_CPUBatchNorn4DOnlyTest, - ::testing::Values( - batchnorm4D_test_params{ "CPU", - {256, 128, 32}, 1e-6})); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp deleted file mode 100644 index 1b247042ddfd7b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "../common_single_layer_tests/deconv_ref.hpp" -#include "ir_gen_helper.hpp" -#include "common_test_utils/common_layers_params.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct deconv_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t krn_w; - size_t krn_h; - size_t str_w; - size_t str_h; - size_t pad_w; - size_t pad_h; - - size_t out_c; - - bool with_bias; -}; - -template -void ref_deconv(const Blob::Ptr &src, const Blob::Ptr &weights, const Blob::Ptr &bias, - Blob::Ptr &dst_ref, deconv_test_params p) { - const float *weights_data = (const float *) weights->buffer(); - size_t bias_size = p.out_c; - size_t weights_size = weights->size() / sizeof(float) - bias_size; - const float *bias_data = p.with_bias ? 
(const float *) bias->buffer() : nullptr; - CommonTestUtils::conv_common_params params; - params.kernel.insert(X_AXIS, p.krn_w); - params.kernel.insert(Y_AXIS, p.krn_h); - params.stride.insert(X_AXIS, p.str_w); - params.stride.insert(Y_AXIS, p.str_h); - params.pads_begin.insert(X_AXIS, p.pad_w); - params.pads_begin.insert(Y_AXIS, p.pad_h); - params.out_c = p.out_c; - ref_deconv_common({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params); -} - -class smoke_CPUDeconvolutionOnlyTest : public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OH_ - _OW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(deconv_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - - REPLACE_WITH_NUM(model, "_KW_", p.krn_w); - REPLACE_WITH_NUM(model, "_KH_", p.krn_h); - REPLACE_WITH_NUM(model, "_SW_", p.str_w); - REPLACE_WITH_NUM(model, "_SH_", p.str_h); - REPLACE_WITH_NUM(model, "_PW_", p.pad_w); - REPLACE_WITH_NUM(model, "_PH_", p.pad_h); - - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - REPLACE_WITH_NUM(model, "_OH_", p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h); - REPLACE_WITH_NUM(model, "_OW_", p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w); - - if (!p.with_bias) REMOVE_LINE(model, ""); - - size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * p.in.c) * sizeof(float); - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_OFF2_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - model = IRTemplateGenerator::getIRTemplate("Deconvolution_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - try { - deconv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - std::vector blob_to_model; - Blob::Ptr weights = make_shared_blob(TensorDesc(Precision::FP32, - {p.krn_w * p.krn_h * p.out_c * p.in.c}, C)); - weights->allocate(); - fill_data(weights->buffer().as(), weights->size()); - blob_to_model.push_back(weights); - - Blob::Ptr bias = nullptr; - if (p.with_bias) { - bias = make_shared_blob(TensorDesc(Precision::FP32, - {p.krn_w * p.krn_h * p.out_c * p.in.c}, C)); - bias->allocate(); - fill_data(bias->buffer().as(), bias->size()); - blob_to_model.push_back(bias); - } - - size_t total_size_in_bytes = 0; - for (Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize(); - - TBlob::Ptr model_blob = make_shared_blob(TensorDesc(Precision::U8, { total_size_in_bytes }, C)); - model_blob->allocate(); - uint8_t *model_blob_ptr = model_blob->buffer().as(); - for (Blob::Ptr blb : blob_to_model) { - memcpy(model_blob_ptr, blb->buffer().as(), blb->byteSize()); - model_blob_ptr += blb->byteSize(); - } - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, model_blob); - - SizeVector dims_src = {p.in.w, p.in.h, p.in.c, 1}; // 1 is a batch size - - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - size_t OW = p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w; - size_t OH = p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h; - - SizeVector dims_dst = {OW, OH, p.out_c, 1}; - 
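-            // Deconvolution output size follows O = stride * (I - 1) + kernel - 2 * pad;
-            // e.g. the first {3, 3, 3} case with kernel 3, stride 1, pad 0 gives 1 * (3 - 1) + 3 = 5.
-            // dims_dst lists W, H, C, N and is reversed into NCHW order below.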
- Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - fill_data(dst->buffer().as(), dst->size()); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - Blob::Ptr dst_ref = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst_ref->allocate(); - - ref_deconv(src, weights, bias, dst_ref, p); - - compare(*dst.get(), *dst_ref.get()); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUDeconvolutionOnlyTest, TestsDeconvolution) {} - -INSTANTIATE_TEST_CASE_P( - TestDeconvolution, smoke_CPUDeconvolutionOnlyTest, - ::testing::Values( - deconv_test_params{"CPU", - {3, 3, 3}, - 3, 3, 1, 1, 0, 0, 2, true}, - deconv_test_params{"CPU", - {3, 3, 3}, - 4, 3, 1, 1, 0, 0, 2, true}, - deconv_test_params{"CPU", - {3, 3, 3}, - 4, 3, 1, 2, 0, 0, 2, true}, - deconv_test_params{"CPU", - {4, 4, 3}, - 3, 3, 1, 2, 0, 0, 2, true}, // jit impl should work - deconv_test_params{"CPU", - {4, 4, 3}, - 3, 3, 1, 2, 0, 0, 2, false}, // jit impl should work - deconv_test_params{"CPU", - {3, 3, 3}, - 3, 3, 1, 1, 0, 0, 2, false}, - deconv_test_params{"CPU", - {3, 3, 3}, - 4, 3, 1, 1, 0, 0, 2, false}, - deconv_test_params{"CPU", - {3, 3, 3}, - 4, 3, 1, 2, 0, 0, 2, false})); - - -/*** TBD ***/ - - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp deleted file mode 100644 index 99363c68877211..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -#include - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct logistic_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; -}; - -template -T logistic_fwd(T s) { - T v = ::expf((float)(s)); - return v / (v + 1); -} - -template -void ref_logistic(const TBlob &src, TBlob &dst, logistic_test_params prm) { - data_t *dst_data = dst.data(); - - const data_t *src_data = src.readOnly(); - - for (int i = 0; i < src.size(); i++) { - dst_data[i] = logistic_fwd(src_data[i]); - } -} - -class smoke_CPULogisticOnlyTest : public TestsCommon, - public WithParamInterface { - - std::string layers_t = R"V0G0N( - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(logistic_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - model = IRTemplateGenerator::getIRTemplate("Logistic_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, 
edges_t); - return model; - } - - protected: - virtual void SetUp() { - - try { - logistic_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network; - ASSERT_NO_THROW(network = ie.ReadNetwork(model, Blob::CPtr())); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; - - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = dims_src; - - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - - TBlob dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst_ref.allocate(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_logistic(*srcPtr, dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(network.getOutputsInfo().begin()->first, dst); - inferRequest.Infer(); - - compare(*dst, dst_ref); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPULogisticOnlyTest, TestsLogistic) {} - -INSTANTIATE_TEST_CASE_P( - TestLogistic, smoke_CPULogisticOnlyTest, - ::testing::Values( - logistic_test_params{"CPU", - {13, 13, 8}} - ) -); - -/*** TBD ***/ diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp deleted file mode 100644 index 8437a12f1d252a..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct power_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - float power; - float scale; - float shift; -}; - -template -void ref_power(const TBlob &src, TBlob &dst, power_test_params prm) { - - data_t *dst_data = dst.data(); - const data_t *src_data = src.readOnly(); - - const double scale = prm.scale; - const double power = prm.power; - const double shift = prm.shift; - - for(int i = 0; i < src.size(); i++) { - dst_data[i] = (float)std::pow(shift + src_data[i] * scale, power); - } -} - -class smoke_CPUPowerOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(power_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_POWER_", p.power); - REPLACE_WITH_NUM(model, "_SCALE_", p.scale); - REPLACE_WITH_NUM(model, "_SHIFT_", p.shift); - - model = IRTemplateGenerator::getIRTemplate("Power_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, 
edges_t); - - return model; - } - -protected: - virtual void SetUp() { - - try { - power_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; - - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = dims_src; - - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - - TBlob dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst_ref.allocate(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_power(*srcPtr, dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - compare(*dst, dst_ref); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUPowerOnlyTest, TestsPower) {} - -INSTANTIATE_TEST_CASE_P( - TestPower, smoke_CPUPowerOnlyTest, - ::testing::Values( - power_test_params{ "CPU", - {13, 13, 3}, 1, 2, 0.5f }, - power_test_params{ "CPU", - {23, 23, 1}, 3, 8, 2 }, - power_test_params{ "CPU", - {23, 23, 8}, 8, 2, 1 })); - -/*** TBD ***/ - - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp deleted file mode 100644 index 12f39d4ed47e3b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct roipooling_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t pooled_h; - size_t pooled_w; - float spatial_scale; -}; - -template -void ref_roipool(const TBlob &src, TBlob &dst, roipooling_test_params prm) -{ -} - -class MKLDNNROIPoolingOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - _IN_ - _IC_ - _IW_ - _IH_ - - - 300 - 5 - - - - - 300 - 256 - 6 - 6 - - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(roipooling_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - - REPLACE_WITH_NUM(model, "_POOLED_H_", p.pooled_h); - REPLACE_WITH_NUM(model, "_POOLED_W_", p.pooled_w); - REPLACE_WITH_NUM(model, "_SPATIAL_SCALE_", p.spatial_scale); - - model = IRTemplateGenerator::getIRTemplate("ROIPooling_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - 
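-        // Note: ref_roipool above is intentionally empty; SetUp below only checks that this IR parses.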
} - -protected: - virtual void SetUp() { - - try { - roipooling_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core ie; - ASSERT_NO_THROW(ie.ReadNetwork(model, Blob::CPtr())); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNROIPoolingOnlyTest, nightly_TestsROIPooling) {} \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp deleted file mode 100644 index bc1915393ffbc1..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct scaleshift_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - int broadcast; -}; - -template -void ref_scaleshift(const TBlob &src, const data_t *weights, const size_t weightsSize, - TBlob &dst, scaleshift_test_params prm) { - - size_t IW = src.getTensorDesc().getDims()[3]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t MB = src.getTensorDesc().getDims()[0]; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + IC; - data_t *dst_data = dst.data(); - - for(int mb = 0; mb < MB; mb++) { - for(int c = 0; c < IC; c++) { - for(int h = 0; h < IH; h++) { - for(int w = 0; w < IW; w++) { - int idx = mb * IC * IH * IW - + c * IH * IW - + h * IW + w; - - int widx = c; - int bidx = c; - - dst_data[idx] = src_data[idx] * weights_data[widx] + bias_data[bidx]; - } - } - } - } -} - -class smoke_CPUScaleShiftOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(scaleshift_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_BROADCAST_", p.broadcast); - - size_t w_data_size = p.in.c * sizeof(float); - size_t b_data_size = p.in.c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - model = IRTemplateGenerator::getIRTemplate("ScaleShift_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - - try { - scaleshift_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - TBlob *weights = new TBlob(TensorDesc(Precision::U8, { p.in.c * 2 * sizeof(float) }, C)); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, weights_ptr); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; // 1 is 
a batch size - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - dst->allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - - TBlob dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - dst_ref.allocate(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_scaleshift(*srcPtr, weights->readOnly().as(), weights->size() / sizeof(float), dst_ref, p); - - compare(*dst, dst_ref); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUScaleShiftOnlyTest, TestsScaleShift) {} - -INSTANTIATE_TEST_CASE_P( - TestScaleShift, smoke_CPUScaleShiftOnlyTest, - ::testing::Values( - scaleshift_test_params{ "CPU", - {256, 128, 32}, 0})); - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp deleted file mode 100644 index 1ddfe5a80e7d49..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct simplernms_test_params { - std::string device_name; - - struct { - size_t w; - size_t h; - size_t c; - } in; - - float cls_threshold; - size_t max_num_proposals; - float iou_threshold; - size_t min_bbox_size; - size_t feat_stride; - size_t pre_nms_topn; - size_t post_nms_topn; - float scale1; - float scale2; - float scale3; -}; - -template -void ref_simplernms(const TBlob &src, TBlob &dst, simplernms_test_params prm) -{ -} - -class MKLDNNSimplerNMSOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string layers_t = R"V0G0N( - - - - - - - - - - - - - - 18 - 39 - 64 - - - 18 - 39 - 64 - - - - - 300 - 5 - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - - -)V0G0N"; - - std::string getModel(simplernms_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", p.in.w); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - - REPLACE_WITH_NUM(model, "_CLS_THR_", p.cls_threshold); - REPLACE_WITH_NUM(model, "_MAX_NUM_", p.max_num_proposals); - REPLACE_WITH_NUM(model, "_IOU_THR_", p.iou_threshold); - REPLACE_WITH_NUM(model, "_MIN_BB_SIZE_", p.min_bbox_size); - REPLACE_WITH_NUM(model, "_FEAT_STRIDE_", p.feat_stride); - REPLACE_WITH_NUM(model, "_PRE_NMS_TOPN_", p.pre_nms_topn); - REPLACE_WITH_NUM(model, "_POST_NMS_TOPN_", p.post_nms_topn); - REPLACE_WITH_NUM(model, "_SCALE1_", p.scale1); - REPLACE_WITH_NUM(model, "_SCALE2_", p.scale2); - 
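// (annotation, refers to the ScaleShift test above) ref_scaleshift reads the
// weights blob as 2*C floats: C per-channel scales followed by C per-channel
// biases, computing dst[n,c,h,w] = src[n,c,h,w] * scale[c] + bias[c]. A
// condensed sketch of that reference under the same packed layout:
//
//     void scale_shift_ref(const float *src, const float *wb, float *dst,
//                          std::size_t N, std::size_t C, std::size_t HW) {
//         const float *scales = wb;       // first C floats of the weights blob
//         const float *biases = wb + C;   // next C floats
//         for (std::size_t n = 0; n < N; ++n)
//             for (std::size_t c = 0; c < C; ++c)
//                 for (std::size_t s = 0; s < HW; ++s) {
//                     const std::size_t i = (n * C + c) * HW + s;
//                     dst[i] = src[i] * scales[c] + biases[c];
//                 }
//     }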
REPLACE_WITH_NUM(model, "_SCALE3_", p.scale3); - - model = IRTemplateGenerator::getIRTemplate("SimplerNMS_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - - try { - simplernms_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - SizeVector dims_src = {p.in.w, - p.in.h, - p.in.c, - 1}; - - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = {300, 5, 1}; - - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNSimplerNMSOnlyTest, nightly_TestSimplerNMS) {} \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp deleted file mode 100644 index d950ce5492e3f9..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - - -struct norm_base_params { - struct { - size_t w; - size_t h; - size_t c; - } in; - - size_t local_size; - float alpha; - float beta; - size_t k; - -}; - -struct norm_test_params : norm_base_params { - std::string device_name; - - norm_test_params(std::string name, norm_base_params params) : - norm_base_params(params), device_name(name) {} -}; - - -template -void ref_norm(const TBlob &src, TBlob &dst, norm_test_params prm) -{ - size_t IW = prm.in.w; - size_t IH = prm.in.h; - size_t IC = prm.in.c; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (uint32_t c = 0; c < IC; c++) { - for (uint32_t h = 0; h < IH; h++) { - for (uint32_t w = 0; w < IW; w++) { - uint32_t oidx = c * IH * IW - + h * IW + w; - - uint32_t sz = prm.local_size; - int32_t c_start = c - sz / 2; - int32_t c_end = c_start + sz; - if (c_start < 0) c_start = 0; - if (c_end > (int32_t)IC) c_end = IC; - data_t sum = 0.0; - for (int32_t c1 = c_start; c1 < c_end; c1++) { - uint32_t idx = c1 * IH * IW + h * IW + w; - data_t s = src_data[idx]; - - sum += s * s; - } - - data_t norm_coef = powf(1. 
+ prm.alpha * sum / sz, -prm.beta); - dst_data[oidx] = norm_coef * src_data[oidx]; - } - } - } -} - -class smoke_NormOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(norm_test_params p) { - std::string model = layers_t; - - REPLACE_WITH_NUM(model, "_IN_", 1); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - - REPLACE_WITH_NUM(model, "_LS_", p.local_size); - REPLACE_WITH_NUM(model, "_A__", p.alpha); - REPLACE_WITH_NUM(model, "_B__", p.beta); - REPLACE_WITH_NUM(model, "_K__", p.k); - - model = IRTemplateGenerator::getIRTemplate("FullyConnected_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - - try { - norm_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - SizeVector dims_src = {1, - p.in.c, - p.in.h, - p.in.w}; - Blob::Ptr src = make_shared_blob(TensorDesc({ Precision::FP32, dims_src, Layout::NCHW })); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = dims_src; - Blob::Ptr dst = make_shared_blob(TensorDesc({ Precision::FP32, dims_dst, Layout::NCHW })); - dst->allocate(); - - TBlob dst_ref({Precision::FP32, dims_dst, Layout::NCHW}); - dst_ref.allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - auto * srcPtr = dynamic_cast*>(src.get()); - ref_norm(*srcPtr, dst_ref, p); - compare(*dst, dst_ref); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -#define case_1 norm_base_params({{228, 228, 3}, 5, 0.0001f, 0.75f, 1}) - -TEST_P(smoke_NormOnlyTest, TestsNorm) {} - -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device_name + - "_w" + std::to_string(obj.param.in.w) + - "_h" + std::to_string(obj.param.in.h) + - "_c" + std::to_string(obj.param.in.c); -} - -norm_test_params norm_only_test_cases[] = { - norm_test_params("CPU", case_1), -}; - -INSTANTIATE_TEST_CASE_P( - TestsNorm, smoke_NormOnlyTest, ::testing::ValuesIn(norm_only_test_cases), getTestCaseName); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp deleted file mode 100644 index 8db87e9c59bcf6..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ie_core.hpp" -#include "../common_single_layer_tests/pool_ref.hpp" -#include "common_test_utils/common_layers_params.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct pooling_base_params { - struct { size_t 
n, c, h, w; } in; - struct { size_t h, w; } out; - - size_t krn_h; - size_t krn_w; - size_t str_h; - size_t str_w; - size_t pad_h; - size_t pad_w; - - bool avg; - bool exclude_pad; -}; - -struct pooling_test_params : pooling_base_params { - std::string device_name; - - pooling_test_params(std::string name, pooling_base_params params) : - pooling_base_params(params), device_name(name) {} -}; - -template -void ref_pool(const Blob::Ptr &src, Blob::Ptr &dst, pooling_test_params p) -{ - CommonTestUtils::pool_common_params params; - params.kernel.insert(X_AXIS, p.krn_w); - params.kernel.insert(Y_AXIS, p.krn_h); - params.stride.insert(X_AXIS, p.str_w); - params.stride.insert(Y_AXIS, p.str_h); - params.pads_begin.insert(X_AXIS, p.pad_w); - params.pads_begin.insert(Y_AXIS, p.pad_h); - params.exclude_pad = p.exclude_pad; - params.avg = p.avg; - ref_pool_common({ src }, *dst.get(), params); -} - -class smoke_CPU_PoolingOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - - std::string getModel(pooling_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - - REPLACE_WITH_NUM(model, "_KH_", p.krn_h); - REPLACE_WITH_NUM(model, "_KW_", p.krn_w); - REPLACE_WITH_NUM(model, "_SH_", p.str_h); - REPLACE_WITH_NUM(model, "_SW_", p.str_w); - REPLACE_WITH_NUM(model, "_PH_", p.pad_h); - REPLACE_WITH_NUM(model, "_PW_", p.pad_w); - - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - REPLACE_WITH_STR(model, "_ALG_", p.avg ? "avg":"max"); - REPLACE_WITH_STR(model, "_EXCL_PAD_", p.exclude_pad ? 
"true":"false"); - - return model; - } - -protected: - virtual void SetUp() { - - try { - pooling_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - SizeVector dims_src = {p.in.w, p.in.h, p.in.c, p.in.n}; - Blob::Ptr src = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW)); - src->allocate(); - fill_data(src->buffer().as(), src->size()); - - SizeVector dims_dst = {p.out.w, p.out.h, p.in.c, p.in.n}; - Blob::Ptr dst = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst->allocate(); - - Blob::Ptr dst_ref = make_shared_blob(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW)); - dst_ref->allocate(); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - inferRequest.SetBlob(network.getInputsInfo().begin()->first, src); - inferRequest.SetBlob(outInfo.begin()->first, dst); - inferRequest.Infer(); - - ref_pool(src, dst_ref, p); - compare(*dst.get(), *dst_ref.get()); - - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -#define case_0 pooling_base_params({{1, 3, 228, 228}, {114, 114}, 2, 2, 2, 2, 0, 0}) -#define case_1 pooling_base_params({{1, 3, 228, 228}, {113, 114}, 4, 2, 2, 2, 0, 0}) -#define case_2 pooling_base_params({{1, 3, 228, 228}, {113, 227}, 4, 2, 2, 1, 0, 0}) -#define case_3 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, false, false}) -#define case_4 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, true, false}) -#define case_5 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, true, true}) - -#define case_6 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, false, false}) -#define case_7 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, true, false}) -#define case_8 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, true, true}) - -#define case_9 pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, false, false}) -#define case_10 pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, true, false}) -#define case_11 pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, true, true}) - - -TEST_P(smoke_CPU_PoolingOnlyTest, TestsPooling) {} - -std::string getTestCaseName(testing::TestParamInfo obj) { - return obj.param.device_name + - "_w" + std::to_string(obj.param.in.w) + - "_h" + std::to_string(obj.param.in.h) + - "_c" + std::to_string(obj.param.in.c) + - "_krnw" + std::to_string(obj.param.krn_w) + - "_krnh" + std::to_string(obj.param.krn_h) + - "_strw" + std::to_string(obj.param.str_w) + - "_strh" + std::to_string(obj.param.str_h); -} - -pooling_test_params pooling_only_test_cases[] = { - pooling_test_params("CPU", case_0), - pooling_test_params("CPU", case_1), - pooling_test_params("CPU", case_2), - pooling_test_params("CPU", case_3), - pooling_test_params("CPU", case_4), - pooling_test_params("CPU", case_5), - pooling_test_params("CPU", case_6), - pooling_test_params("CPU", case_7), - pooling_test_params("CPU", case_8), - pooling_test_params("CPU", case_9), -// pooling_test_params("CPU", case_10), - pooling_test_params("CPU", case_11), 
-}; - -INSTANTIATE_TEST_CASE_P( - TestsPooling, smoke_CPU_PoolingOnlyTest, ::testing::ValuesIn(pooling_only_test_cases)); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp deleted file mode 100644 index 44a6a976db967d..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct priorbox_test_params { - std::string device_name; - - size_t mb; - - struct { - size_t c; - size_t h; - size_t w; - } in1; - - struct { - size_t c; - size_t h; - size_t w; - } in2; - - struct { - size_t c; - size_t h; - size_t w; - } out; - - int offset; - int stride; - int min_size; - int max_size; - bool flip; - bool clip; -}; - -class smoke_CPUPriorBoxOnlyTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - _IC1_ - _IH1_ - _IW1_ - - - - - - - 1 - _IC2_ - _IH2_ - _IW2_ - - - - - - - - 1 - _IC1_ - _IH1_ - _IW1_ - - - 1 - _IC2_ - _IH2_ - _IW2_ - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(priorbox_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW1_", p.in1.w); - REPLACE_WITH_NUM(model, "_IH1_", p.in1.h); - REPLACE_WITH_NUM(model, "_IC1_", p.in1.c); - - REPLACE_WITH_NUM(model, "_IW2_", p.in2.w); - REPLACE_WITH_NUM(model, "_IH2_", p.in2.h); - REPLACE_WITH_NUM(model, "_IC2_", p.in2.c); - - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - - return model; - } - -protected: - virtual void SetUp() { - - try { - priorbox_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - network.setBatchSize(p.mb); - - InputsDataMap inputs = network.getInputsInfo(); - - DataPtr inputPtr1 = inputs["input1"]->getInputData(); - DataPtr inputPtr2 = inputs["input2"]->getInputData(); - - InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob(inputPtr1->getTensorDesc()); - input1->allocate(); - - InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob(inputPtr2->getTensorDesc()); - input2->allocate(); - - InferenceEngine::BlobMap inputBlobs; - inputBlobs["input1"] = input1; - inputBlobs["input2"] = input2; - - OutputsDataMap outputs = network.getOutputsInfo(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(outputs["prior"]->getTensorDesc()); - output->allocate(); - - InferenceEngine::BlobMap outputBlobs; - outputBlobs["prior"] = output; - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(inputBlobs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - // Check results - - const TBlob::Ptr outputArray = std::dynamic_pointer_cast>(output); - float* dst_ptr = outputArray->data(); - - const float eps = 1e-6; - - // pick a few generated priors and compare against the expected number. 
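// (annotation) The expected values below can be derived from the standard SSD
// PriorBox arithmetic with the parameters of the instantiation further down
// (min_size = 4, max_size = 9, 10x10 feature map, 100x100 image): the step is
// 100/10 px = 0.1 in normalized units, so the first cell is centered at
// (0.05, 0.05); the min-size box spans 4/100 = 0.04, half-extent 0.02, giving
// [0.03, 0.03, 0.07, 0.07], and the max-size box uses sqrt(4*9) = 6 px,
// half-extent 0.03, giving [0.02, 0.02, 0.08, 0.08]. The prior in the 5th
// row/column just shifts the center by 4 steps to 0.45: [0.43, 0.43, 0.47, 0.47].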
- // first prior - EXPECT_NEAR(dst_ptr[0], 0.03, eps); - EXPECT_NEAR(dst_ptr[1], 0.03, eps); - EXPECT_NEAR(dst_ptr[2], 0.07, eps); - EXPECT_NEAR(dst_ptr[3], 0.07, eps); - // second prior - EXPECT_NEAR(dst_ptr[4], 0.02, eps); - EXPECT_NEAR(dst_ptr[5], 0.02, eps); - EXPECT_NEAR(dst_ptr[6], 0.08, eps); - EXPECT_NEAR(dst_ptr[7], 0.08, eps); - // prior in the 5-th row and 5-th col - EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4], 0.43, eps); - EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+1], 0.43, eps); - EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+2], 0.47, eps); - EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+3], 0.47, eps); - - // check variance - dst_ptr += p.out.h * p.out.w; - for (int d = 0; d < p.out.h * p.out.w; ++d) { - EXPECT_NEAR(dst_ptr[d], 0.1, eps); - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUPriorBoxOnlyTest, TestsPriorBox) {} - -INSTANTIATE_TEST_CASE_P( - TestsPriorBox, smoke_CPUPriorBoxOnlyTest, - ::testing::Values( - priorbox_test_params{ "CPU", - 10, {10, 10, 10}, {3, 100, 100}, {2, 1, 800}, 0, 0, 4, 9, true, true })); - - -class smoke_CPUPriorBoxDensityTest : public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - _IC1_ - _IH1_ - _IW1_ - - - - - - - 1 - _IC2_ - _IH2_ - _IW2_ - - - - - - - - 1 - _IC1_ - _IH1_ - _IW1_ - - - 1 - _IC2_ - _IH2_ - _IW2_ - - - - - 1 - _OC_ - _OH_ - _OW_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(priorbox_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW1_", p.in1.w); - REPLACE_WITH_NUM(model, "_IH1_", p.in1.h); - REPLACE_WITH_NUM(model, "_IC1_", p.in1.c); - - REPLACE_WITH_NUM(model, "_IW2_", p.in2.w); - REPLACE_WITH_NUM(model, "_IH2_", p.in2.h); - REPLACE_WITH_NUM(model, "_IC2_", p.in2.c); - - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - - return model; - } - -protected: - virtual void SetUp() { - - try { - priorbox_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - network.setBatchSize(p.mb); - - InputsDataMap inputs = network.getInputsInfo(); - - DataPtr inputPtr1 = inputs["input1"]->getInputData(); - DataPtr inputPtr2 = inputs["input2"]->getInputData(); - - InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob(inputPtr1->getTensorDesc()); - input1->allocate(); - - InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob(inputPtr2->getTensorDesc()); - input2->allocate(); - - InferenceEngine::BlobMap inputBlobs; - inputBlobs["input1"] = input1; - inputBlobs["input2"] = input2; - - OutputsDataMap outputs = network.getOutputsInfo(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(outputs["prior"]->getTensorDesc()); - output->allocate(); - - InferenceEngine::BlobMap outputBlobs; - outputBlobs["prior"] = output; - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(inputBlobs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - // Check results - - const TBlob::Ptr outputArray = std::dynamic_pointer_cast>(output); - float* dst_ptr = outputArray->data(); - - // pick a few generated priors and compare against the expected number. 
- // first prior - EXPECT_NEAR(dst_ptr[0], 0.03, 1e-6); - EXPECT_NEAR(dst_ptr[1], 0.03, 1e-6); - EXPECT_NEAR(dst_ptr[2], 0.07, 1e-6); - EXPECT_NEAR(dst_ptr[3], 0.07, 1e-6); - // second prior - EXPECT_NEAR(dst_ptr[4], 0.03, 0.1); - EXPECT_NEAR(dst_ptr[5], 0.03, 0.1); - EXPECT_NEAR(dst_ptr[6], 0.17, 0.1); - EXPECT_NEAR(dst_ptr[7], 0.03, 0.1); - // prior in the 5-th row and 5-th col - EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4], 0.83, 0.1); - EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 1], 0.83, 0.1); - EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 2], 0.84, 0.1); - EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 3], 0.84, 0.1); - - // check variance - dst_ptr += p.out.h * p.out.w; - for (int d = 0; d < p.out.h * p.out.w; ++d) { - EXPECT_NEAR(dst_ptr[d], 0.1, 1e-6); - } - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPUPriorBoxDensityTest, TestsPriorBoxDensity) {} - -INSTANTIATE_TEST_CASE_P( - TestsPriorBoxDensity, smoke_CPUPriorBoxDensityTest, - ::testing::Values( - priorbox_test_params{ "CPU", - 10,{ 10, 10, 10 },{ 3, 100, 100 },{ 2, 1, 400 }, 0, 0, 4, 9, true, true })); - diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp deleted file mode 100644 index 9e998088948038..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include "common_test_utils/data_utils.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct region_yolo_test_params { - std::vector src_dims; - std::vector dst_dims; - int classes; - int coords; - int num; - float do_softmax; - std::vector mask; -}; - -static inline int entry_index(int width, int height, int coords, int classes, int outputs, int batch, int location, - int entry) { - int n = location / (width * height); - int loc = location % (width * height); - return batch * outputs + n * width * height * (coords + classes + 1) + - entry * width * height + loc; -} - -static inline float logistic_activate(float x) { - return 1.f / (1.f + exp(-x)); -} - -static inline -void softmax_generic(const float *src_data, float *dst_data, int B, int C, int H, int W) { - int start = 0; - for (int b = 0; b < B; b++) { - for (int i = start; i < H * W; i++) { - float max = src_data[b * C * H * W + i]; - for (int c = 0; c < C; c++) { - float val = src_data[b * C * H * W + c * H * W + i]; - if (val > max) max = val; - } - - float expSum = 0; - for (int c = 0; c < C; c++) { - dst_data[b * C * H * W + c * H * W + i] = exp(src_data[b * C * H * W + c * H * W + i] - max); - expSum += dst_data[b * C * H * W + c * H * W + i]; - } - - for (int c = 0; c < C; c++) { - dst_data[b * C * H * W + c * H * W + i] = dst_data[b * C * H * W + c * H * W + i] / expSum; - } - } - } -} - -static void ref_region_yolo(InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, region_yolo_test_params p) { - float* src_data = src.data(); - float* dst_data = dst.data(); - - int mask_size = p.mask.size();; - - int IW = (src.getTensorDesc().getDims().size() > 3) ? src.getTensorDesc().getDims()[3] : 1; - int IH = (src.getTensorDesc().getDims().size() > 2) ? 
src.getTensorDesc().getDims()[2] : 1; - int B = (src.getTensorDesc().getDims().size() > 0) ? src.getTensorDesc().getDims()[0] : 1; - - for (int i = 0; i < src.size(); i++) { - dst_data[i] = src_data[i]; - } - - int end_index = 0; - int num_ = 0; - if (p.do_softmax) { - // Region layer (Yolo v2) - end_index = IW * IH; - num_ = p.num; - } else { - // Yolo layer (Yolo v3) - end_index = IW * IH * (p.classes + 1); - num_ = mask_size; - } - int inputs_size = IH * IW * num_ * (p.classes + p.coords + 1); - - for (int b = 0; b < B; b++) { - for (int n = 0; n < num_; n++) { - int index = entry_index(IW, IH, p.coords, p.classes, inputs_size, b, n * IW * IH, 0); - for (int i = index; i < index + 2 * IW * IH; i++) { - dst_data[i] = logistic_activate(dst_data[i]); - } - - index = entry_index(IW, IH, p.coords, p.classes, inputs_size, b, n * IW * IH, p.coords); - for (int i = index; i < index + end_index; i++) { - dst_data[i] = logistic_activate(dst_data[i]); - } - } - } - - if (p.do_softmax) { - int index = entry_index(IW, IH, p.coords, p.classes, inputs_size, 0, 0, p.coords + 1); - int batch_offset = inputs_size / p.num; - for (int b = 0; b < B * p.num; b++) - softmax_generic(src_data + index + b * batch_offset, dst_data + index + b * batch_offset, 1, p.classes, - IH, IW); - } -} - -class smoke_CPU_RegionYoloOnlyTest: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - - - __SRC_DIMS__ - - - - __DST_DIMS__ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(region_yolo_test_params p) { - std::string model = model_t; - - - std::string src_dims; - for (auto &dim : p.src_dims) { - src_dims += "\n "; - src_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims); - - std::string dst_dims; - for (auto &dim : p.dst_dims) { - dst_dims += "\n "; - dst_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims); - - std::string mask; - for (auto &n : p.mask) { - mask += std::to_string(n) + ","; - } - mask.pop_back(); - REPLACE_WITH_STR(model, "_MASK_", mask); - - - REPLACE_WITH_STR(model, "_CLASSES_", std::to_string(p.classes)); - REPLACE_WITH_STR(model, "_COORDS_", std::to_string(p.coords)); - REPLACE_WITH_STR(model, "_DO_SOFTMAX_", std::to_string(p.do_softmax)); - REPLACE_WITH_STR(model, "_NUM_", std::to_string(p.num)); - - - return model; - } - - virtual void SetUp() { - try { - region_yolo_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - - Blob::Ptr src = make_shared_blob({Precision::FP32, p.src_dims, Layout::ANY}); - src->allocate(); - - TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - CommonTestUtils::fill_data_sine(src->buffer(), src->size(), 10, 30, 1); - - BlobMap srcs; - srcs.insert(std::pair("input", src)); - - OutputsDataMap out; - out = net.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_region_yolo(*srcPtr, dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(net, "CPU"); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(outputBlobs); - 
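// (annotation) In the Yolo-v3 style cases instantiated below (do_softmax = 0)
// the channel count factors as mask.size() * (coords + 1 + classes):
// 255 = 3 * (4 + 1 + 80). ref_region_yolo above copies the input and applies
// the logistic in place to the x/y and objectness/class entries of each of the
// 3 boxes per cell; only the Yolo-v2 case (do_softmax = 1, num = 5, classes = 20,
// hence 125 = 5 * (4 + 1 + 20)) additionally runs softmax_generic over the
// class scores.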
inferRequest.Infer(); - - compare(*outputBlobs.begin()->second, dst_ref); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(smoke_CPU_RegionYoloOnlyTest, TestsRegionYolo) {} - -INSTANTIATE_TEST_CASE_P( - TestsRegionYolo, smoke_CPU_RegionYoloOnlyTest, - ::testing::Values( - region_yolo_test_params{{1, 255, 52, 52}, {1, 255, 52, 52}, 80, 4, 9, 0, {0, 1, 2}}, - region_yolo_test_params{{1, 255, 26, 26}, {1, 255, 26, 26}, 80, 4, 9, 0, {3, 4, 5}}, - region_yolo_test_params{{1, 255, 13, 13}, {1, 255, 13, 13}, 80, 4, 9, 0, {6, 7, 8}}, - region_yolo_test_params{{1, 125, 13, 13}, {1, 21125}, 20, 4, 5, 1, {0, 1, 2}} - )); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp deleted file mode 100644 index c94817cd87bfe3..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "common_test_utils/xml_net_builder/xml_net_builder.hpp" -#include "tests_common.hpp" -#include "precision_utils.h" -#include - -using namespace InferenceEngine; -using std::string; -using std::pair; -using std::map; -using std::vector; - -const static size_t _H = 16; -const static size_t _W = 16; -const static size_t _C = 1; -const static size_t _B = 2; - -const static SizeVector dims {_B, _C, _H, _W}; - -class MultiOutConnectNet : CommonTestUtils::V2NetBuilder { - std::string model; - TBlob::Ptr weightsPtr; - -public: - MultiOutConnectNet(): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput( - "MultiOutNet", {_B, 3*_C, _H, _W}, "FP32")) { - weightsPtr = make_shared_blob(TensorDesc(Precision::U8, SizeVector{0}, Layout::C)); - weightsPtr->allocate(); - - /** - * [in] - * | - * [__split__] - * | | | - * [out1] | [out2] - * |_______ - * | | - * [power1] [power2] - * | | - * [out3] [out4] - */ - addLayer("Split", "FP32", nullptr, - { {{_B, 3*_C, _H, _W}}, - {dims, dims, dims}}); - - map pow_params = { {"scale", "-1"}, {"shift", "0"}, {"power", "1"} }; - addLayer("Power", "FP32", &pow_params, - { {dims}, {dims} }); - - addLayer("Power", "FP32", &pow_params, - { {dims}, {dims} }); - - vector> edges = { - {"0,0", "1,1"}, - {"1,3", "2,5"}, - {"1,3", "3,7"} - }; - model = finish(&edges); - } - - CNNNetwork net(Core & ie) { - return ie.ReadNetwork(model, weightsPtr); - } -}; - -using test_param = std::tuple; - -class smoke_MultiOutConnectTest : public ::testing::TestWithParam { -protected: - string device_name; - MultiOutConnectNet topology; - - void SetUp() override { - device_name = std::get<0>(GetParam()); - } -}; - -static void fill_with(Blob::Ptr &blob, std::vector vals) { - float* ptr = blob->buffer().as(); - const size_t size = blob->size(); - const size_t fill_size = vals.size(); - - for (int i = 0; i < size; i++) - ptr[i] = vals[i%fill_size]; -} - -static bool check_with(Blob::Ptr &blob, std::vector vals) { - float* ptr = blob->buffer().as(); - const size_t size = blob->size(); - const size_t fill_size = vals.size(); - - bool res = true; - for (int i = 0; i < size; i++) - if (ptr[i] != vals[i%fill_size]) - res = false; - return res; -} - -TEST_P(smoke_MultiOutConnectTest, canLoad) { - Core ie; - CNNNetwork net = topology.net(ie); - - auto execNet = ie.LoadNetwork(net, device_name); - auto req = execNet.CreateInferRequest(); - - auto input = 
req.GetBlob("Input0"); - fill_with(input, {1,2,3,4}); - - req.Infer(); - - auto output1 = req.GetBlob("Power2"); - auto output2 = req.GetBlob("Power3"); - ASSERT_TRUE(check_with(output1, {-1,-2,-3,-4})); - ASSERT_TRUE(check_with(output2, {-1,-2,-3,-4})); -} - -#define PLUGING_CASE(_plugin, _test) \ - INSTANTIATE_TEST_CASE_P(_plugin##_run, _test, ::testing::Values(#_plugin) ) - -PLUGING_CASE(CPU, smoke_MultiOutConnectTest); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp deleted file mode 100644 index cf544daac38744..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "common_test_utils/xml_net_builder/xml_net_builder.hpp" -#include "tests_common.hpp" -#include "precision_utils.h" -#include - -using namespace InferenceEngine; -using std::string; -using std::pair; -using std::map; -using std::vector; - -const static size_t _H = 16; -const static size_t _W = 16; -const static size_t _C = 1; -const static size_t _B = 2; - -const static SizeVector dims {_B, _C, _H, _W}; - -class TripleConnectNet : CommonTestUtils::V2NetBuilder { - std::string model; - TBlob::Ptr weightsPtr; - -public: - TripleConnectNet(): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput( - "Triple_Net", {_B, _C, _H, _W}, "FP32")) { - weightsPtr = make_shared_blob(TensorDesc(Precision::U8, SizeVector{0}, Layout::C)); - weightsPtr->allocate(); - - /** - * [in] - * ___|___ - * | | | - * [0] [1] [2] - * [__Concat___] - * | - * [out] - */ - map lstm_params = {}; - addLayer("Concat", "FP32", - &lstm_params, - { // input dims - {dims, dims, dims}, - // output dims - {{_B, 3*_C, _H, _W}} - }); - - vector> edges = { - {"0,0", "1,1"}, - {"0,0", "1,2"}, - {"0,0", "1,3"} - }; - model = finish(&edges); - } - - CNNNetwork net(Core & ie) { - return ie.ReadNetwork(model, weightsPtr); - } -}; - -using test_param = std::tuple; - -class smoke_TripleConnectTest : public ::testing::TestWithParam { -protected: - string device_name; - TripleConnectNet topology; - - void SetUp() override { - device_name = std::get<0>(GetParam()); - } -}; - -static void fill_with(Blob::Ptr &blob, std::vector vals) { - float* ptr = blob->buffer().as(); - const size_t size = blob->size(); - const size_t fill_size = vals.size(); - - for (int i = 0; i < size; i++) - ptr[i] = vals[i%fill_size]; -} - -static bool check_with(Blob::Ptr &blob, std::vector vals) { - float* ptr = blob->buffer().as(); - const size_t size = blob->size(); - const size_t fill_size = vals.size(); - - bool res = true; - for (int i = 0; i < size; i++) - if (ptr[i] != vals[i%fill_size]) - res = false; - return res; -} - -TEST_P(smoke_TripleConnectTest, canLoad) { - Core ie; - CNNNetwork net = topology.net(ie); - - auto execNet = ie.LoadNetwork(net, device_name); - auto req = execNet.CreateInferRequest(); - - auto input = req.GetBlob("Input0"); - fill_with(input, {1,2,3,4}); - - req.Infer(); - - auto output = req.GetBlob("Concat1"); - ASSERT_TRUE(check_with(output, {1,2,3,4})); -} - -#define PLUGING_CASE(_plugin, _test) \ - INSTANTIATE_TEST_CASE_P(_plugin##_run, _test, ::testing::Values(#_plugin) ) - -PLUGING_CASE(CPU, smoke_TripleConnectTest); diff --git a/inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp 
b/inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp deleted file mode 100644 index 97ab83c5c31e6b..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_model_repo.hpp" - -std::string get_model_repo() { - return "models:"; -}; - -const char* TestDataHelpers::getModelPathNonFatal() noexcept { - return TestDataHelpers::getModelPathNonFatalDefault(); -} - -std::string TestDataHelpers::get_data_path() { - return TestDataHelpers::get_data_path_default(); -} \ No newline at end of file diff --git a/inference-engine/tests_deprecated/unit/CMakeLists.txt b/inference-engine/tests_deprecated/unit/CMakeLists.txt index 9746f06602ff2f..ef103ba3cad172 100644 --- a/inference-engine/tests_deprecated/unit/CMakeLists.txt +++ b/inference-engine/tests_deprecated/unit/CMakeLists.txt @@ -52,28 +52,28 @@ if (ENABLE_GNA) endif() endif() -if (ENABLE_MKL_DNN) - if (GEMM STREQUAL "MKL") - add_definitions(-DUSE_MKL) - endif () - file(GLOB - MKLDNN_TESTS - engines/mkldnn/*.cpp - engines/mkldnn/graph/layers/extensions/*.cpp - engines/mkldnn/graph/layers/internal/*.cpp - engines/mkldnn/graph/structure/*.cpp - engines/mkldnn/graph/*.cpp) - file(GLOB - MKLDNN_TESTS_INCLUDE engines/mkldnn/graph/*.hpp) - - source_group("mkldnn" FILES ${MKLDNN_TESTS} ${MKLDNN_TESTS_INCLUDE}) - - include_directories(engines/mkldnn/graph) - - list(APPEND TEST_SRC ${MKLDNN_TESTS}) - list(APPEND TEST_INCLUDE ${MKLDNN_TESTS_INCLUDE}) - list(APPEND TEST_DEPS MKLDNNPlugin_obj) -endif () +# if (ENABLE_MKL_DNN) +# if (GEMM STREQUAL "MKL") +# add_definitions(-DUSE_MKL) +# endif () +# file(GLOB +# MKLDNN_TESTS +# engines/mkldnn/*.cpp +# engines/mkldnn/graph/layers/extensions/*.cpp +# engines/mkldnn/graph/layers/internal/*.cpp +# engines/mkldnn/graph/structure/*.cpp +# engines/mkldnn/graph/*.cpp) +# file(GLOB +# MKLDNN_TESTS_INCLUDE engines/mkldnn/graph/*.hpp) + +# source_group("mkldnn" FILES ${MKLDNN_TESTS} ${MKLDNN_TESTS_INCLUDE}) + +# include_directories(engines/mkldnn/graph) + +# list(APPEND TEST_SRC ${MKLDNN_TESTS}) +# list(APPEND TEST_INCLUDE ${MKLDNN_TESTS_INCLUDE}) +# list(APPEND TEST_DEPS MKLDNNPlugin_obj) +# endif () if (ENABLE_MYRIAD) include(${XLINK_DIR}/XLink.cmake) diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/constant_propagation_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/constant_propagation_test.cpp deleted file mode 100644 index dfe0789481bef0..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/constant_propagation_test.cpp +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include "graph/test_graph.hpp" -#include - -using namespace ::testing; - -class ConstLayerImpl : public InferenceEngine::ILayerExecImpl { -public: - explicit ConstLayerImpl(const InferenceEngine::CNNLayer *layer): cnnLayer(*layer) {} - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = 0; - if (cnnLayer.outData.size() != 1 && cnnLayer.insData.size() != 1) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = true; - cfg.inPlace = 0; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < 
cnnLayer.outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer.outData[0]->getTensorDesc().getPrecision(), - cnnLayer.outData[0]->getTensorDesc().getDims(), - {cnnLayer.outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - value = cnnLayer.GetParamAsInt("const_val", 1); - if (config.dynBatchSupport) - return InferenceEngine::NOT_IMPLEMENTED; - for(auto input : config.inConfs) { - if (!input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (!output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, InferenceEngine::ResponseDesc *resp) noexcept override { - float *dst_data = outputs[0]->buffer(); - - size_t data_size = outputs[0]->size(); - for (size_t i = 0; i < data_size; i++) { - dst_data[i] = value; - } - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer cnnLayer; - int value = 0; -}; - -class ConstLayerFactory : public InferenceEngine::ILayerImplFactory { -public: - ConstLayerFactory(const InferenceEngine::CNNLayer *layer): cnnLayer(*layer) {} - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new ConstLayerImpl(&cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer cnnLayer; -}; - -using fake_ext_factory = std::function; - -class FakeConstExtensionFabric : public InferenceEngine::Extensions::Cpu::MKLDNNExtensions { -public: - FakeConstExtensionFabric() { - factories["ConstLayer"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new ConstLayerFactory(cnnLayer); }; - } - - virtual ~FakeConstExtensionFabric() { - factories.clear(); - } - - void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {} - void Unload() noexcept override {} - InferenceEngine::StatusCode getPrimitiveTypes(char**& types, unsigned int& size, InferenceEngine::ResponseDesc* resp) noexcept override { - types = new char *[factories.size()]; - size_t count = 0; - for (auto it = factories.begin(); it != factories.end(); it++, count ++) { - types[count] = new char[it->first.size() + 1]; - std::copy(it->first.begin(), it->first.end(), types[count]); - types[count][it->first.size() ] = '\0'; - } - return InferenceEngine::OK; - }; - InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory *&factory, - const InferenceEngine::CNNLayer *cnnLayer, - InferenceEngine::ResponseDesc *resp) noexcept override { - if (factories.find(cnnLayer->type) == factories.end()) { - std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!"; - errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); - return InferenceEngine::NOT_FOUND; - } - factory = factories[cnnLayer->type](cnnLayer); - return InferenceEngine::OK; - } - -private: - std::map factories; -}; - -class MKLDNNConstantPropagationTests: public TestsCommon { -protected: - virtual void SetUp() { - TestsCommon::SetUp(); - extension.reset(new 
FakeConstExtensionFabric()); - extMgr.reset(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - } - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr; - std::shared_ptr extension; -}; - -TEST_F(MKLDNNConstantPropagationTests, ConcatAfterConstLayers) { - std::string model = R"V0G0N( - - - - - - 1 - 2 - 10 - 5 - - - - - - - 1 - 2 - 5 - 5 - - - - - - - 1 - 2 - 10 - 5 - - - - - 1 - 2 - 10 - 5 - - - - - - - - 1 - 2 - 5 - 5 - - - - - 1 - 2 - 5 - 5 - - - - - - - - 1 - 2 - 10 - 5 - - - 1 - 2 - 5 - 5 - - - - - 1 - 2 - 15 - 5 - - - - - - - - - - - - )V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src1 = {1, 2, 10, 5}; - - InferenceEngine::Blob::Ptr src1 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - - InferenceEngine::SizeVector dims_src2 = {1, 2, 5, 5}; - - InferenceEngine::Blob::Ptr src2 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - auto it = out.begin(); - - std::pair item = *it; - - InferenceEngine::TensorDesc outputDesc1 = item.second->getTensorDesc(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(outputDesc1); - output->allocate(); - outputBlobs[item.first] = output; - - auto& nodes = graph.getNodes(); - bool existConcat = false; - for (auto& node : nodes) { - if (node->getType() != MKLDNNPlugin::Concatenation && node->getType() != MKLDNNPlugin::Generic) - continue; - if (node->getName() == "con" && node->getType() == MKLDNNPlugin::Concatenation) - existConcat = true; - ASSERT_TRUE(node->isConstant()); - } - - ASSERT_TRUE(existConcat); - - graph.Infer(srcs, outputBlobs); - - // Compare - float *dst_ptr = output->buffer(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = 2; dim < output->getTensorDesc().getDims().size(); dim++) { - len1 *= src1->getTensorDesc().getDims()[dim]; - len2 *= src2->getTensorDesc().getDims()[dim]; - } - cycles = 2; - - int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (1 != dst_ptr[index]) { - FAIL() << "index: " << index << " src: " << 1 << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (4 != dst_ptr[index]) { - FAIL() << "index: " << index << " src: " << 4 << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/convert_desc_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/convert_desc_test.cpp deleted file mode 100644 index 89da83f8cf2171..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/convert_desc_test.cpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include -#include - -#include - -#include - -namespace IE = InferenceEngine; -using Tag = mkldnn::memory::format_tag; -using RefDesc = mkldnn::impl::memory_desc_wrapper; -using 
MKLDNNPlugin::MKLDNNMemory; -using MKLDNNPlugin::MKLDNNMemoryDesc; - -TEST(TensorDescTests, checkOff) { - auto workload = std::vector>{ - {{5}, Tag::a}, - {{10, 3}, Tag::ab}, - {{5, 3}, Tag::ba}, - {{1, 3, 8, 8}, Tag::abcd}, - {{1, 3, 5, 2}, Tag::acdb}, - {{1, 24, 5, 7}, Tag::aBcd8b}, - {{2, 10, 3, 3}, Tag::aBcd8b}, - {{1, 3, 8, 8}, Tag::aBcd8b}, - {{1, 32, 8, 8}, Tag::aBcd16b}, - {{1, 32, 8, 8}, Tag::aBcd16b}, - {{2, 3, 5, 2, 1}, Tag::abcde}, - }; - - for (const auto &p : workload) { - mkldnn::memory::dims dims {p.first.begin(), p.first.end()}; - - const auto cpu_tDesc = MKLDNNMemoryDesc {dims, mkldnn::memory::data_type::f32, p.second}; - const auto ie_tDesc = IE::TensorDesc {cpu_tDesc}; - - mkldnn::memory::desc dnnl_tdesc = cpu_tDesc; - const RefDesc ref(dnnl_tdesc.data); - size_t total_size = cpu_tDesc.getDims().size(); - - for (size_t i = 0; i < total_size; i++) { - ASSERT_EQ(ie_tDesc.offset(i), ref.off_l(i)) << "Offset calculation are different"; - } - } -} - -TEST(TensorDescTests, convertToFrom) { - struct Param { IE::SizeVector dims, blk, ord; }; - auto workload = std::vector{ - {{5}, {5}, {0}}, - {{10, 3}, {10, 3}, {0, 1}}, - {{1, 3, 8, 8}, {1, 8, 8, 3}, {0, 2, 3, 1}}, - {{1, 3, 8, 8}, {1, 3, 8, 8}, {0, 1, 2, 3}}, - {{1, 8, 8, 8}, {1, 1, 8, 8, 8}, {0, 1, 2, 3, 1}}, - {{1, 32, 8, 8}, {1, 2, 8, 8, 16}, {0, 1, 2, 3, 1}}, - {{1, 3, 8}, {1, 3, 8}, {0, 1, 2}} - }; - - for (const auto &p : workload) { - const auto ie_tDesc = IE::TensorDesc(IE::Precision::FP32, p.dims, {p.blk, p.ord}); - const auto cpu_tDesc = MKLDNNMemoryDesc {ie_tDesc}; - - mkldnn::memory::desc dnnl_tdesc = cpu_tDesc; - const RefDesc ref(dnnl_tdesc.data); - size_t total_size = cpu_tDesc.getDims().size(); - - for (size_t i = 0; i < total_size; i++) { - ASSERT_EQ(ie_tDesc.offset(i), ref.off_l(i)) << "Offset calculation are different"; - } - } -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp new file mode 100644 index 00000000000000..ffe853f7697581 --- /dev/null +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp @@ -0,0 +1,4 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/dump_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/dump_test.cpp deleted file mode 100644 index 9373b61a7c2357..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/dump_test.cpp +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "ie_blob.h" -#include "blob_factory.hpp" -#include "utils/blob_dump.h" - -using namespace InferenceEngine; -using namespace MKLDNNPlugin; - -TEST(MKLDNNDumpTests, UnallocatedBlob_NoDump) { - SizeVector dims {2,3,4,5}; - Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NHWC}); - - std::stringstream buff; - - EXPECT_THROW({ - BlobDumper(blob).dump(buff); - }, Exception); -} - -TEST(MKLDNNDumpTests, EmptyBlob_NoDump) { - SizeVector dims {2,3,4,5}; - Blob::Ptr blob; - - std::stringstream buff; - - EXPECT_THROW({ - BlobDumper(blob).dump(buff); - }, Exception); -} - -TEST(MKLDNNDumpTests, Ser) { - SizeVector dims {2,3,4,5}; - Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NHWC}); - blob->allocate(); - - std::stringstream buff; - BlobDumper(blob).dump(buff); - - ASSERT_GT(buff.str().size(), blob->byteSize()); -} - -TEST(MKLDNNDumpTests, SerDeser) { - 
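// (annotation, refers to the checkOff cases above) Those tests assert that
// InferenceEngine::TensorDesc::offset matches oneDNN's off_l for blocked tags.
// Assuming the usual oneDNN plain-blocking scheme (and C divisible by the block
// size), the physical offset for aBcd16b can be sketched as:
//
//     inline std::size_t off_aBcd16b(std::size_t n, std::size_t c, std::size_t h,
//                                    std::size_t w, std::size_t C, std::size_t H,
//                                    std::size_t W) {
//         return (((n * (C / 16) + c / 16) * H + h) * W + w) * 16 + c % 16;
//     }
//
// e.g. with dims {1, 32, 8, 8}, element (0, 17, 2, 3) maps to block position
// ((0*2 + 1)*8 + 2)*8 + 3 = 83, i.e. offset 83 * 16 + (17 % 16) = 1329.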
SizeVector dims {2,3,4,5}; - Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NCHW}); - blob->allocate(); - - std::stringstream buff; - - BlobDumper(blob).dump(buff); - Blob::Ptr deser_blob = BlobDumper::read(buff).get(); - - ASSERT_EQ(deser_blob->getTensorDesc().getDims(), blob->getTensorDesc().getDims()); - ASSERT_EQ(deser_blob->getTensorDesc().getPrecision(), blob->getTensorDesc().getPrecision()); - - std::vector data(blob->buffer().as(), blob->buffer().as() + blob->size()); - std::vector deser_data(deser_blob->buffer().as(), deser_blob->buffer().as() - + deser_blob->size()); - ASSERT_EQ(deser_data, data); -} - -TEST(MKLDNNDumpTests, SerDeserWithScales) { - SizeVector dims {2,3,4,5}; - auto blob = make_blob_with_precision({Precision::U8, dims, NCHW}); - blob->allocate(); - - auto scls = make_blob_with_precision({Precision::FP32, {3}, C}); - scls->allocate(); - - std::stringstream buff; - - BlobDumper(blob).withScales(scls).dump(buff); - auto deser = BlobDumper::read(buff); - auto deser_blob = deser.get(); - auto deser_scls = deser.getScales(); - - ASSERT_EQ(deser_blob->getTensorDesc().getDims(), blob->getTensorDesc().getDims()); - ASSERT_EQ(deser_blob->getTensorDesc().getPrecision(), blob->getTensorDesc().getPrecision()); - - std::vector data(blob->buffer().as(), blob->buffer().as() + blob->size()); - std::vector deser_data(deser_blob->buffer().as(), deser_blob->buffer().as() - + deser_blob->size()); - ASSERT_EQ(deser_data, data); - - std::vector scls_data(scls->buffer().as(), scls->buffer().as() + scls->size()); - std::vector deser_scls_data(deser_scls->buffer().as(), deser_scls->buffer().as() - + deser_scls->size()); - ASSERT_EQ(deser_scls_data, scls_data); -} - - -TEST(MKLDNNDumpTests, SerU8AsTxt) { - SizeVector dims {2,3,4,5}; - - Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NCHW}); - blob->allocate(); - - Blob::Ptr scls = make_blob_with_precision({Precision::FP32, {dims[1]}, C}); - scls->allocate(); - - std::stringstream buff; - BlobDumper(blob).withScales(scls).dumpAsTxt(buff); - - std::string deser_header, ref_header = "U8 4D shape: 2 3 4 5 (120)"; - std::getline(buff, deser_header); - deser_header = deser_header.substr(0, ref_header.length()); - ASSERT_EQ(deser_header, ref_header); - - auto num_line = std::count(std::istreambuf_iterator(buff), - std::istreambuf_iterator(), '\n'); - ASSERT_EQ(num_line, blob->size()); -} - -TEST(MKLDNNDumpTests, SerAsTxt) { - SizeVector dims {2,3}; - - Blob::Ptr blob = make_blob_with_precision({Precision::FP32, dims, NC}); - blob->allocate(); - - Blob::Ptr scls = make_blob_with_precision({Precision::FP32, {dims[1]}, C}); - scls->allocate(); - - std::stringstream buff; - BlobDumper(blob).withScales(scls).dumpAsTxt(buff); - - std::string deser_header, ref_header = "FP32 2D shape: 2 3 (6)"; - std::getline(buff, deser_header); - deser_header = deser_header.substr(0, ref_header.length()); - ASSERT_EQ(deser_header, ref_header); - - auto num_line = std::count(std::istreambuf_iterator(buff), - std::istreambuf_iterator(), '\n'); - ASSERT_EQ(num_line, blob->size()); -} \ No newline at end of file diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/dumper_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/dumper_test.cpp deleted file mode 100644 index 3c95375c969bf7..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/dumper_test.cpp +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - 
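// (annotation on the dumpAsTxt tests above) The expected text header encodes
// precision, rank, dims, and the total element count: for dims {2,3,4,5} that
// is 2*3*4*5 = 120 elements, hence "U8 4D shape: 2 3 4 5 (120)". A hypothetical
// formatter reproducing that layout (buildTxtHeader is an assumed name, not
// part of the BlobDumper API):

#include <functional>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

static std::string buildTxtHeader(const std::string &prec,
                                  const std::vector<std::size_t> &dims) {
    std::ostringstream oss;
    oss << prec << " " << dims.size() << "D shape: ";
    for (std::size_t d : dims) oss << d << " ";   // space-separated dims
    const std::size_t total = std::accumulate(dims.begin(), dims.end(), std::size_t{1},
                                              std::multiplies<std::size_t>());
    oss << "(" << total << ")";                   // element count in parentheses
    return oss.str();
}
// buildTxtHeader("U8", {2, 3, 4, 5}) yields "U8 4D shape: 2 3 4 5 (120)"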
-#include "mkldnn_graph.h" -#include "mkldnn_graph_dumper.h" -#include "ie_blob.h" -#include -#include "common_test_utils/xml_net_builder/xml_net_builder.hpp" -#include - -#include -#include - -using namespace InferenceEngine; -using namespace MKLDNNPlugin; -using std::string; -using std::map; - -class NetGen : CommonTestUtils::V2NetBuilder { - string model; - TBlob::Ptr weights; - -public: - NetGen(): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput( - "SomeNet", {2,3,16,16}, "FP32")) { - using prm_t = map; - - CommonTestUtils::InOutShapes inout = {{{2,3,16,16}},{{2,16,16,16}}}; - - prm_t conv_prm = { - {"stride-x", std::to_string(1)}, - {"stride-y", std::to_string(1)}, - {"pad-x", std::to_string(1)}, - {"pad-y", std::to_string(1)}, - {"kernel-x", std::to_string(3)}, - {"kernel-y", std::to_string(3)}, - {"output", std::to_string(16)}, - {"group", std::to_string(1)} - }; - size_t wght = 3*16*3*3*sizeof(float); - size_t bias = 16*sizeof(float); - - prm_t relu_prm = {{"negative_slope", std::to_string(0)}}; - - addLayer("Convolution", "FP32", &conv_prm, {{{2,3,16,16}},{{2,16,16,16}}}, wght, bias); - addLayer("Relu", "FP32", &relu_prm, {{{2,16,16,16}},{{2,16,16,16}}}); - - model = finish(); - - weights.reset(new TBlob({Precision::U8, {wght+bias}, C})); - weights->allocate(); - } - - CNNNetwork net() { - InferenceEngine::Core core; - return core.ReadNetwork(model, weights); - } -}; - -TEST(MKLDNNLayersTests, DumpSimpleGraph) { - auto net = NetGen().net(); - MKLDNNGraph graph; - MKLDNNExtensionManager::Ptr extMgr; - MKLDNNWeightsSharing::Ptr cache; - - graph.CreateGraph(net, extMgr, cache); - - auto dump_net = dump_graph_as_ie_net(graph); - auto layers = details::CNNNetSortTopologically(dump_net); - - ASSERT_EQ(layers.size(), 4); - ASSERT_EQ(layers[0]->type, "Input"); - ASSERT_EQ(layers[1]->type, "Convolution"); - ASSERT_EQ(layers[2]->type, "Reorder"); - ASSERT_EQ(layers[3]->type, "Output"); -} - -TEST(MKLDNNLayersTests, DumpSimpleGraphToDot) { - auto net = NetGen().net(); - MKLDNNGraph graph; - MKLDNNExtensionManager::Ptr extMgr; - MKLDNNWeightsSharing::Ptr cache; - graph.CreateGraph(net, extMgr, cache); - - std::stringstream buff; - dump_graph_as_dot(graph, buff); - - std::string dot = buff.str(); - std::cout << dot; - ASSERT_EQ(std::count(dot.begin(), dot.end(), '{'), 1); // 1-graph - ASSERT_EQ(std::count(dot.begin(), dot.end(), '}'), 1); - ASSERT_EQ(std::count(dot.begin(), dot.end(), '['), 10); // 4-node 3-data 3-shape - ASSERT_EQ(std::count(dot.begin(), dot.end(), ']'), 10); - ASSERT_EQ(std::count(dot.begin(), dot.end(), '>'), 6); // connection -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/broadcast_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/broadcast_tests.cpp deleted file mode 100644 index 8f5377589994ab..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/broadcast_tests.cpp +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct broadcast_test_params { - std::string shape_precision; - std::string precision; - InferenceEngine::SizeVector in_shape; - InferenceEngine::SizeVector out_shape; - - std::vector> comp; -}; - - -template -void ref_broadcast(InferenceEngine::TBlob &src, 
InferenceEngine::TBlob<data_t> &dst) {
-    size_t i;
-    const data_t *src_data = src.data();
-    InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();
-    InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
-
-    if (!src_dims.size())
-        src_dims = InferenceEngine::SizeVector(1, 1);
-    if (!srcStrides.size())
-        srcStrides = InferenceEngine::SizeVector(1, 1);
-    data_t* dst_data = dst.data();
-    InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims();
-    InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
-
-    if (src_dims.size() > dst_dims.size())
-        FAIL() << "Output tensor dimension is smaller than input tensor dimension";
-
-    size_t prefix_size = dst_dims.size() - src_dims.size();
-    for (i = 0; i < src_dims.size(); i++) {
-        if (src_dims[i] != 1 && src_dims[i] != dst_dims[i + prefix_size])
-            FAIL() << "Each input dimension must match the corresponding output dimension or be equal to 1";
-    }
-
-    InferenceEngine::SizeVector src_aligned(dst_dims.size());
-    InferenceEngine::SizeVector srcStrides_aligned(dst_dims.size());
-    for (i = 0; i < dst_dims.size(); i++) {
-        if (i < prefix_size) {
-            src_aligned[i] = 1;
-            srcStrides_aligned[i] = srcStrides[0];
-        } else {
-            src_aligned[i] = src_dims[i - prefix_size];
-            srcStrides_aligned[i] = srcStrides[i - prefix_size];
-        }
-    }
-
-    size_t src_idx, work_amount_dst = dstStrides[0] * dst_dims[0];
-    InferenceEngine::SizeVector counters(dst_dims.size(), 0);
-
-    for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
-        for (i = 0, src_idx = 0; i < dst_dims.size(); ++i)
-            src_idx += counters[i] ? ((counters[i] % src_aligned[i]) * srcStrides_aligned[i]) : 0;
-
-        dst_data[iwork] = src_data[src_idx];
-
-        for (int j = dst_dims.size() - 1; j >= 0; j--) {
-            counters[j] = (counters[j] + 1) % dst_dims[j];
-            if (counters[j] != 0) break;
-        }
-    }
-}
-
-
-class MKLDNNCPUExtBroadcastTests : public TestsCommon, public WithParamInterface<broadcast_test_params> {
-    std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - _DIM_SIZE_ - - - - - - - - _IN_ - - - _DIM_SIZE_ - - - - - _OUT_ - - - - - - - - - -)V0G0N";
-
-    std::string getModel(broadcast_test_params p) {
-        std::string model = model_t;
-        std::string in_shape = "";
-        std::string out_shape;
-
-        REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
-        REPLACE_WITH_STR(model, "_ISDXP_", p.shape_precision);
-        for (size_t i = 0; i < p.in_shape.size(); i++) {
-            in_shape += "<dim>";
-            in_shape += std::to_string(p.in_shape[i]) + "</dim>\n";
-        }
-        REPLACE_WITH_STR(model, "_IN_", in_shape);
-        for (size_t i = 0; i < p.out_shape.size(); i++) {
-            out_shape += "<dim>";
-            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
-        }
-        REPLACE_WITH_STR(model, "_OUT_", out_shape);
-        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.out_shape.size());
-
-        return model;
-    }
-
-protected:
-    virtual void TearDown() {
-    }
-
-    virtual void SetUp() {
-        try {
-            TestsCommon::SetUp();
-            broadcast_test_params p = ::testing::WithParamInterface<broadcast_test_params>::GetParam();
-            std::string model = getModel(p);
-
-            InferenceEngine::Core core;
-            InferenceEngine::CNNNetwork network;
-            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
-
-            MKLDNNGraphTestClass graph;
-            graph.CreateGraph(network);
-
-            // Output Data
-            InferenceEngine::OutputsDataMap out;
-            out = network.getOutputsInfo();
-            InferenceEngine::BlobMap outputBlobs;
-
-            // Input Data
-            InferenceEngine::Blob::Ptr dims;
-            InferenceEngine::SizeVector vector_dim(1, p.out_shape.size());
-            if (p.shape_precision == "I32") {
-                dims =
InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, vector_dim, InferenceEngine::TensorDesc::getLayoutByDims(vector_dim) }); - dims->allocate(); - for (size_t i = 0; i < p.out_shape.size(); i++) { - static_cast(dims->buffer())[i] = static_cast(p.out_shape[i]); - } - auto * dimsPtr = dynamic_cast*>(dims.get()); - if (dimsPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - } else if (p.shape_precision == "FP32") { - dims = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, vector_dim, InferenceEngine::TensorDesc::getLayoutByDims(vector_dim) }); - dims->allocate(); - for (size_t i = 0; i < p.out_shape.size(); i++) { - static_cast(dims->buffer())[i] = static_cast(p.out_shape[i]); - } - auto * dimsPtr = dynamic_cast*>(dims.get()); - if (dimsPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - } - - InferenceEngine::BlobMap srcs; - InferenceEngine::Blob::Ptr src; - std::pair item = *out.begin(); - if (p.precision == "I32") { - src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::I32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape)}); - src->allocate(); - for (size_t i = 0; i < src->size(); i++) - static_cast(src->buffer())[i] = static_cast(i); - auto * srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("input", src)); - srcs.insert(std::pair("shape", dims)); - - // Output Blob - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_broadcast(*srcPtr, dst_ref); - - // Infer - graph.Infer(srcs, outputBlobs); - for (int i = 0; i < dst_ref.size(); i++) { - if (dst_ref.data()[i] != (*output).data()[i]) - FAIL() << "The difference between res_ptr[i] and ref_ptr[i]"; - } - } else if (p.precision == "FP32") { - src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape)}); - src->allocate(); - fill_data_dbgval(src->buffer(), src->size()); - auto * srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("input", src)); - srcs.insert(std::pair("shape", dims)); - - // Output Blob - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_broadcast(*srcPtr, dst_ref); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } - else { - return; - } - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtBroadcastTests, TestsBroadcast) {} - -INSTANTIATE_TEST_CASE_P( - TestsBroadcast, MKLDNNCPUExtBroadcastTests, - ::testing::Values( - // Params: shape_precision, precision, in_shape, out_shape - broadcast_test_params{ "I32", "I32",{},{ 2, 3, 4 } }, - broadcast_test_params{ "I32", "I32",{ 4, 1, 2 },{ 4, 2, 2 } }, - broadcast_test_params{ "I32", "I32",{ 4, 2, 1 },{ 4, 2, 2 } }, - broadcast_test_params{ "I32", "I32",{ 4, 2 },{ 2, 4, 2 } }, - broadcast_test_params{ "I32", "I32",{ 4, 1, 1 },{ 4, 2, 1 } }, - broadcast_test_params{ "I32", "I32",{ 2, 1, 3, 1 },{ 2, 2, 2, 3, 1 } 
}, - broadcast_test_params{ "I32","FP32",{},{ 2, 3, 4 } }, - broadcast_test_params{ "I32","FP32",{ 4, 1, 2 },{ 4, 2, 2 } }, - broadcast_test_params{ "I32","FP32",{ 4, 2, 1 },{ 4, 2, 2 } }, - broadcast_test_params{ "I32","FP32",{ 4, 2 },{ 2, 4, 2 } }, - broadcast_test_params{ "I32","FP32",{ 4, 1, 1 },{ 4, 2, 1 } }, - broadcast_test_params{ "I32","FP32", { 2, 1, 3, 1 },{ 2, 2, 2, 3, 1 } }, - broadcast_test_params{"FP32","FP32",{ 2, 1, 3, 1 },{ 2, 2, 2, 3, 1 } } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/bucketize_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/bucketize_tests.cpp deleted file mode 100644 index f8b03c9fef643f..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/bucketize_tests.cpp +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" -#include - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct bucketize_test_params { - std::string model; - std::string precision; - std::string right; - - InferenceEngine::SizeVector input_shape; - std::vector input_value; - - bool with_second_input; - InferenceEngine::SizeVector boundaries_shape; - std::vector boundaries_value; - - InferenceEngine::SizeVector output_shape; - std::vector output_value_ref; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -class MKLDNNCPUExtBucketizeTests : public TestsCommon, public WithParamInterface { - std::string getModel(bucketize_test_params p) { - std::string model = p.model; - - std::string input_shape; - std::string boundaries_shape; - std::string output_shape; - - for (auto& shape : p.input_shape) { - input_shape += ""; - input_shape += std::to_string(shape) + "\n"; - } - if (p.with_second_input) { - for (auto& shape : p.boundaries_shape) { - boundaries_shape += ""; - boundaries_shape += std::to_string(shape) + "\n"; - } - } - - for (auto& shape : p.output_shape) { - output_shape += ""; - output_shape += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_RIGHT_", p.right); - REPLACE_WITH_STR(model, "_INPUT_SHAPE_", input_shape); - REPLACE_WITH_STR(model, "_BOUNDARIES_SHAPE_", boundaries_shape); - REPLACE_WITH_STR(model, "_OUTPUT_SHAPE_", output_shape); - - return model; - } - -protected: - static void compare_int( - InferenceEngine::Blob &res, - InferenceEngine::Blob &ref, - int max_diff = 0, - const std::string assertDetails = "") { - int *res_ptr = res.buffer().as(); - size_t res_size = res.size(); - - int *ref_ptr = ref.buffer().as(); - size_t ref_size = ref.size(); - - ASSERT_EQ(res_size, ref_size) << assertDetails; - - for (size_t i = 0; i < ref_size; i++) { - ASSERT_EQ(res_ptr[i], ref_ptr[i]) << assertDetails; - } - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - bucketize_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "Bucketize") { - ASSERT_EQ(p.num_prim_desc, 
node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // prepare input blob and input blob map - InferenceEngine::Blob::Ptr input = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_shape) }); - input->allocate(); - auto *input_ptr = dynamic_cast*>(input.get()); - std::copy(p.input_value.begin(), p.input_value.end(), (float *)input_ptr->data()); - InferenceEngine::BlobMap input_blob_map; - input_blob_map["InputValues"] = input; - - InferenceEngine::Blob::Ptr boundaries = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.boundaries_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.boundaries_shape) }); - boundaries->allocate(); - auto *boundaries_ptr = dynamic_cast*>(boundaries.get()); - std::copy(p.boundaries_value.begin(), p.boundaries_value.end(), (float *)boundaries_ptr->data()); - input_blob_map["BoundariesValues"] = boundaries; - - // prepare output blob map - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - InferenceEngine::BlobMap output_blob_map; - for (auto iter = out.begin(); iter != out.end(); iter++) { - std::pair item = *iter; - InferenceEngine::Blob::Ptr output_blob_ptr = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_blob_ptr->allocate(); - output_blob_map[item.first] = output_blob_ptr; - } - - // prepare blobs with reference data - InferenceEngine::Blob::Ptr output_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.output_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_shape) }); - output_blob_ref->allocate(); - auto *output_blob_ref_ptr = dynamic_cast*>(output_blob_ref.get()); - std::copy(p.output_value_ref.begin(), p.output_value_ref.end(), (int *)output_blob_ref_ptr->data()); - - // infer - graph.Infer(input_blob_map, output_blob_map); - - // check the result - auto iter = out.begin(); - compare_int(*output_blob_map[iter->first], *output_blob_ref, 0); - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtBucketizeTests, TestsBucketize) {} - -// model 1 that contains one Bucketize layer -std::string bucketize_model1 = R"V0G0N( - - - - - - _INPUT_SHAPE_ - - - - - - - _BOUNDARIES_SHAPE_ - - - - - - - - _INPUT_SHAPE_ - - - _BOUNDARIES_SHAPE_ - - - - - _OUTPUT_SHAPE_ - - - - - - - - - -)V0G0N"; - -// case 1 - the right attribute equal to False -InferenceEngine::SizeVector bucketize_input_shape_case1 = { 10 }; -std::vector bucketize_input_value_case1 = { 8.f, 1.f, 2.f, 1.f, 8.f, 5.f, 1.f, 5.f, 0.f, 20.f }; -std::string bucketize_right_case1 = "\"false\""; -bool bucketize_with_second_input_case1 = true; -InferenceEngine::SizeVector bucketize_boundaries_shape_case1 = { 4 }; -std::vector bucketize_boundaries_value_case1 = { 1.f, 4.f, 10.f, 20.f}; -InferenceEngine::SizeVector bucketize_output_shape_case1 = { 10 }; -std::vector bucketize_output_value_ref_case1 = { 2, 1, 1, 1, 2, 2, 1, 2, 0, 4 }; - -// case 2 - the right attribute equal to True -InferenceEngine::SizeVector bucketize_input_shape_case2 = { 10 }; -std::vector bucketize_input_value_case2 = { 8.f, 1.f, 2.f, 1.f, 8.f, 5.f, 1.f, 5.f, 0.f, 20.f }; 
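(Editorial aside: the reference outputs in these bucketize cases follow the standard bucketization semantics sketched below; this is a hedged standalone illustration, not part of the deleted file, and ref_bucketize is a hypothetical helper name.)

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // With right == "false" a value equal to a boundary goes into the bucket
    // above it (std::upper_bound); with right == "true" it stays in the bucket
    // below (std::lower_bound). Boundaries are assumed to be sorted ascending.
    static std::vector<int32_t> ref_bucketize(const std::vector<float> &values,
                                              const std::vector<float> &boundaries,
                                              bool right) {
        std::vector<int32_t> out(values.size());
        for (size_t i = 0; i < values.size(); ++i) {
            auto it = right ? std::lower_bound(boundaries.begin(), boundaries.end(), values[i])
                            : std::upper_bound(boundaries.begin(), boundaries.end(), values[i]);
            out[i] = static_cast<int32_t>(it - boundaries.begin());
        }
        return out;
    }

    // E.g. with boundaries {1, 4, 10, 20}: value 8.f maps to bucket 2 in both
    // modes, while the boundary value 1.f maps to 1 (right == "false") or
    // 0 (right == "true"), matching bucketize_output_value_ref_case1/_case2.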
-std::string bucketize_right_case2 = "\"true\""; -bool bucketize_with_second_input_case2 = true; -InferenceEngine::SizeVector bucketize_boundaries_shape_case2 = { 4 }; -std::vector bucketize_boundaries_value_case2 = { 1.f, 4.f, 10.f, 20.f }; -InferenceEngine::SizeVector bucketize_output_shape_case2 = { 10 }; -std::vector bucketize_output_value_ref_case2 = { 2, 0, 1, 0, 2, 2, 0, 2, 0, 3 }; - -INSTANTIATE_TEST_CASE_P( - TestsBucketize, MKLDNNCPUExtBucketizeTests, - ::testing::Values( - bucketize_test_params { - bucketize_model1, "I32", bucketize_right_case1, - bucketize_input_shape_case1, bucketize_input_value_case1, - bucketize_with_second_input_case1, bucketize_boundaries_shape_case1, bucketize_boundaries_value_case1, - bucketize_output_shape_case1, bucketize_output_value_ref_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - bucketize_test_params{ - bucketize_model1, "I32", bucketize_right_case2, - bucketize_input_shape_case2, bucketize_input_value_case2, - bucketize_with_second_input_case2, bucketize_boundaries_shape_case2, bucketize_boundaries_value_case2, - bucketize_output_shape_case2, bucketize_output_value_ref_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown - } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp deleted file mode 100644 index e59bbb0dbfacef..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include "nodes/base.hpp" - -using namespace InferenceEngine; -using namespace Extensions; - -struct TestExtensionsHolder { - std::map list; -}; - - -class FakeExtensions : public Cpu::MKLDNNExtensions { - public: - void Unload() noexcept override {}; - - static std::shared_ptr GetExtensionsHolder() { - static std::shared_ptr localHolder; - if (localHolder == nullptr) { - localHolder = std::shared_ptr(new TestExtensionsHolder()); - } - return localHolder; - } - - static void AddExt(std::string name, Cpu::ext_factory factory) { - GetExtensionsHolder()->list[name] = factory; - } - - void GetVersion(const Version *&versionInfo) const noexcept override { - static Version ExtensionDescription = { - {2, 1}, // extension API version - "2.1", - "ie-cpu-ext" // extension description message - }; - - versionInfo = &ExtensionDescription; - } - - StatusCode getPrimitiveTypes(char **&types, unsigned int &size, ResponseDesc *resp) noexcept override { - collectTypes(types, size, GetExtensionsHolder()->list); - return OK; - }; - StatusCode getFactoryFor(ILayerImplFactory *&factory, const CNNLayer *cnnLayer, ResponseDesc *resp) noexcept override { - auto &factories = GetExtensionsHolder()->list; - if (factories.find(cnnLayer->type) == factories.end()) { - std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!"; - errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); - return NOT_FOUND; - } - factory = factories[cnnLayer->type](cnnLayer); - return OK; - } - - template - void collectTypes(char **&types, unsigned int &size, const std::map &factories) { - types = new char *[factories.size()]; - unsigned count = 0; - for (auto it = factories.begin(); it != factories.end(); it++, count++) { - types[count] = new char[it->first.size() + 1]; - std::copy(it->first.begin(), 
it->first.end(), types[count]); - types[count][it->first.size()] = '\0'; - } - size = count; - } -}; - -class FakeLayerPLNImpl: public Cpu::ExtLayerBase { -public: - explicit FakeLayerPLNImpl(const CNNLayer* layer) { - try { - addConfig(layer, {{ConfLayout::PLN, false, 0}}, {{ConfLayout::PLN, false, 0}}); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - } - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, - ResponseDesc *resp) noexcept override { - return OK; - } -}; - -class FakeLayerBLKImpl: public Cpu::ExtLayerBase { -public: - explicit FakeLayerBLKImpl(const CNNLayer* layer) { - try { -#if defined(HAVE_AVX512F) - auto blk_layout = ConfLayout::BLK16; -#else - auto blk_layout = ConfLayout::BLK8; -#endif - addConfig(layer, {{blk_layout, false, 0}}, {{blk_layout, false, 0}}); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - } - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, - ResponseDesc *resp) noexcept override { - return OK; - } -}; - -template -class FakeRegisterBase { - public: - explicit FakeRegisterBase(const std::string& type) { - FakeExtensions::AddExt(type, - [](const CNNLayer* layer) -> InferenceEngine::ILayerImplFactory* { - return new Ext(layer); - }); - } -}; - -#define REG_FAKE_FACTORY_FOR(__prim, __type) \ -static FakeRegisterBase<__prim> __reg__##__type(#__type) - -REG_FAKE_FACTORY_FOR(Cpu::ImplFactory, FakeLayerPLN); -REG_FAKE_FACTORY_FOR(Cpu::ImplFactory, FakeLayerBLK); - - -InferenceEngine::IExtensionPtr make_FakeExtensions() { - return InferenceEngine::IExtensionPtr(new FakeExtensions()); -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fill_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fill_tests.cpp deleted file mode 100644 index b3b44fb37ca5ba..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fill_tests.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct fill_test_params { - std::string precision; - InferenceEngine::SizeVector out_shape; - float value; - - std::vector> comp; -}; - -class MKLDNNCPUExtFillTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _DIM_SIZE_ - - - - - - - 1 - - - - - - - - _DIM_SIZE_ - - - 1 - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(fill_test_params p) { - std::string model = model_t; - std::string out_shape; - - REPLACE_WITH_STR(model, "_IIDXP_", p.precision); - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_OUT_", out_shape); - REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.out_shape.size()); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - fill_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap 
out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - // Input Data - InferenceEngine::Blob::Ptr dims; - InferenceEngine::SizeVector vector_dim(1, p.out_shape.size()); - dims = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, vector_dim, InferenceEngine::TensorDesc::getLayoutByDims(vector_dim) }); - dims->allocate(); - for (size_t i = 0; i < p.out_shape.size(); i++) { - static_cast(dims->buffer())[i] = static_cast(p.out_shape[i]); - } - auto * srcPtr = dynamic_cast*>(dims.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - InferenceEngine::Blob::Ptr value_scalar; - InferenceEngine::SizeVector value_scalar_dim(1, 1); - std::pair item = *out.begin(); - if (p.precision == "I32") { - value_scalar = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, value_scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(value_scalar_dim) }); - value_scalar->allocate(); - static_cast(value_scalar->buffer())[0] = static_cast(p.value); - auto * value_scalarPtr = dynamic_cast*>(value_scalar.get()); - if (value_scalarPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("dims", dims)); - srcs.insert(std::pair("value", value_scalar)); - - // Output Blob - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - std::fill_n(static_cast(dst_ref.data()), dst_ref.size(), static_cast(p.value)); - - // Infer - graph.Infer(srcs, outputBlobs); - for (int i = 0; i < dst_ref.size(); i++) { - if(dst_ref.data()[i] != (*output).data()[i]) - FAIL() << "The difference between res_ptr[i] and ref_ptr[i]"; - } - } else if (p.precision == "FP32") { - value_scalar = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, value_scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(value_scalar_dim) }); - value_scalar->allocate(); - static_cast(value_scalar->buffer())[0] = p.value; - auto * value_scalarPtr = dynamic_cast*>(value_scalar.get()); - if (value_scalarPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("dims", dims)); - srcs.insert(std::pair("value", value_scalar)); - - // Output Blob - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - std::fill_n(static_cast(dst_ref.data()), dst_ref.size(), p.value); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } else { - return; - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtFillTests, TestsFill) {} - -INSTANTIATE_TEST_CASE_P( - TestsFill, MKLDNNCPUExtFillTests, - ::testing::Values( -// Params: precision, value, out_shape - fill_test_params{ "I32", { 1 }, 1.f }, - fill_test_params{ "I32", { 1, 3, 1 }, 1.f }, - fill_test_params{ "I32", { 2, 3, 6 }, -1.f }, - fill_test_params{"FP32", { 2, 3, 6 }, -1.f }, - fill_test_params{"FP32", { 1 }, 1.f }, - fill_test_params{"FP32", { 1, 3, 1, 2 }, .5f }, - fill_test_params{"FP32", { 4, 3, 2, 5, 4, 2 }, .25f } - )); diff --git 
a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp deleted file mode 100644 index 42276c246f41db..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct gather_test_params { - std::string inIdxPrecision; - InferenceEngine::SizeVector inDict; - InferenceEngine::SizeVector inIdx; - - int axis; - InferenceEngine::SizeVector out; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -template -void ref_gather(InferenceEngine::TBlob &srcIdx, InferenceEngine::TBlob &srcDct, InferenceEngine::TBlob &dst, size_t axis) { - size_t i, j; - const data_t *src_dataIdx = srcIdx.data(); - float* src_dataDict = srcDct.data(); - float *dst_data = dst.data(); - size_t src_size = srcIdx.size(); - - std::vector dictionary_dims = srcDct.getTensorDesc().getDims(); - - // Find number of dictionaries, index range and data length - size_t numDictionaries = 1; - for (i = 0; i < axis; i++) - numDictionaries *= dictionary_dims[i]; - size_t indexRange = dictionary_dims[axis]; - size_t dataLength = 1; - for (i = axis + 1; i < dictionary_dims.size(); i++) - dataLength *= dictionary_dims[i]; - - // The gathering process - for (i = 0; i < src_size; i++) { - unsigned int idx = static_cast(src_dataIdx[i]); - - // Index clipping - if (idx < indexRange) { - // Copying data to destination from Dictionary - for (j = 0; j < numDictionaries; j++) { - memcpy(&dst_data[dataLength * (i + j * src_size)], - &src_dataDict[dataLength * (idx + j * indexRange)], sizeof(float) * dataLength); - } - } else { - for (j = 0; j < numDictionaries; j++) { - std::fill_n(&dst_data[dataLength * (i + j * src_size)], dataLength, 0.0f); - } - } - } -} - -class MKLDNNCPUExtGatherTests: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IDICT_ - - - - - - - _IIDX_ - - - - - - - - _IDICT_ - - - _IIDX_ - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(gather_test_params p) { - std::string model = model_t; - std::string inIdx = ""; - std::string inDict; - std::string out = ""; - - for (auto& idx : p.inIdx) { - inIdx += ""; - inIdx += std::to_string(idx) + "\n"; - } - - for (auto& dct : p.inDict) { - inDict += ""; - inDict += std::to_string(dct) + "\n"; - } - - for (auto& dst : p.out) { - out += ""; - out += std::to_string(dst) + "\n"; - } - - REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision); - REPLACE_WITH_STR(model, "_IIDX_", inIdx); - REPLACE_WITH_STR(model, "_IDICT_", inDict); - REPLACE_WITH_NUM(model, "_AX_", p.axis); - REPLACE_WITH_STR(model, "_OUT_", out); - - return model; - } - - template - static void fill_data_dbgval(data_t *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = static_cast(i & (sizeof(data_t) * 8 - 1)); - } - } -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gather_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network 
= core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "gather") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // Input Dictionary - InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.inDict, InferenceEngine::TensorDesc::getLayoutByDims(p.inDict) }); - srcDict->allocate(); - fill_data(srcDict->buffer(), srcDict->size()); - auto * srcDictPtr = dynamic_cast*>(srcDict.get()); - if (srcDictPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Input Indexes - InferenceEngine::Blob::Ptr srcIdx; - if (p.inIdxPrecision == "I32") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data_dbgval(static_cast(srcIdx->buffer()), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else if (p.inIdxPrecision == "FP32") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data(srcIdx->buffer(), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else if (p.inIdxPrecision == "U16") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data_dbgval(static_cast(srcIdx->buffer()), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else if (p.inIdxPrecision == "I16") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data_dbgval(static_cast(srcIdx->buffer()), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else if (p.inIdxPrecision == "U8") { - srcIdx = InferenceEngine::make_shared_blob({ 
InferenceEngine::Precision::U8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data_dbgval(static_cast(srcIdx->buffer()), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else if (p.inIdxPrecision == "I8") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) }); - srcIdx->allocate(); - fill_data_dbgval(static_cast(srcIdx->buffer()), srcIdx->size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis); - } - else { - return; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("InputDictionary", srcDict)); - srcs.insert(std::pair("InputText", srcIdx)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtGatherTests, TestsGather) {} - -INSTANTIATE_TEST_CASE_P( - TestsGather, MKLDNNCPUExtGatherTests, - ::testing::Values( -// Params: inIdxPrecision, inDict, inIdx, axis, out, num_prim_desc, selectedType - gather_test_params{ "I32",{ 31 },{}, 0,{}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "FP32",{ 31 },{}, 0,{}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "FP32",{ 1, 31, 4 },{ 10 }, 1,{ 1, 10, 4 }, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "FP32",{ 31, 7 },{ 1,12,1 }, 0,{ 1, 12, 1, 7 }, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "FP32", {71, 16}, {1, 12, 256}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {71, 16}, {1, 12, 256}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {71, 16}, {12, 256}, 0, {12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {2, 5, 6}, {3, 4}, 0, {3, 4, 5, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {5, 1}, {3, 4}, 0, {3, 4, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "FP32", {71, 16}, {1, 12, 256}, 1, {1, 71, 12, 256}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {2, 5, 6}, {1, 1, 3, 4}, 1, {2, 3, 4, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {2, 5, 6}, {1, 1, 3, 4}, 2, {2, 5, 3, 4}, 1, MKLDNNPlugin::impl_desc_type::unknown }, - gather_test_params{ "I32", {6, 13, 10, 3}, {12, 4, 9, 8}, 1, {6, 12, 4, 9, 8, 10, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown } - )); - - - - -struct gatherTF_test_params { - InferenceEngine::SizeVector dct_dim; - std::vector dct; - - InferenceEngine::SizeVector in_dim; - std::vector in; - - int axis; - - InferenceEngine::SizeVector ref_dim; - std::vector ref; - - std::vector> comp; -}; - -class MKLDNNCPUExtGatherTFTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IDICT_ - - - - - - - _IIDX_ - - - - - - - - _IDICT_ - - - _IIDX_ - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(gatherTF_test_params p) { - std::string model = model_t; - std::string inIdx; - std::string inDict; - std::string out; - - for (auto& idx : p.in_dim) { - inIdx += ""; - inIdx 
+= std::to_string(idx) + "\n"; - } - - for (auto& dct : p.dct_dim) { - inDict += ""; - inDict += std::to_string(dct) + "\n"; - } - - for (auto& dst : p.ref_dim) { - out += ""; - out += std::to_string(dst) + "\n"; - } - - REPLACE_WITH_STR(model, "_IIDX_", inIdx); - REPLACE_WITH_STR(model, "_IDICT_", inDict); - REPLACE_WITH_NUM(model, "_AX_", p.axis); - REPLACE_WITH_STR(model, "_OUT_", out); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gatherTF_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Indexes - InferenceEngine::Blob::Ptr srcIdx; - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) }); - srcIdx->allocate(); - memcpy(static_cast(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*p.in.size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Input Dictionary - InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) }); - srcDict->allocate(); - memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size()); - auto * srcDictPtr = dynamic_cast*>(srcDict.get()); - if (srcDictPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Infer - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("InputDictionary", srcDict)); - srcs.insert(std::pair("InputText", srcIdx)); - graph.Infer(srcs, outputBlobs); - - // Check results - if (memcmp((*output).data(), &p.ref[0], output->byteSize()) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtGatherTFTests, TestsGather) {} - -// Test data vectors -std::vector dict = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f }; -std::vector ref_in0_a0_d223 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; // 2x2x2x3 -std::vector ref_in0_a2_d232 = { 1.f, 2.f, 2.f, 1.f, 3.f, 4.f, 4.f, 3.f, 5.f, 6.f, 6.f, 5.f, 7.f, 8.f, 8.f, 7.f, 9.f, 10.f, 10.f, 9.f, 11.f, 12.f, 12.f, 11.f }; // 2x3x2x2 -std::vector ref_in1_a0_d322 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 5.f, 6.f, 7.f, 8.f }; // 2x2x2x2 -std::vector ref_in1_a1_d232 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 3.f, 4.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 9.f, 10.f }; // 2x2x2x2 -std::vector ref_in1_a2_d223 = { 1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f, 11.f }; // 2x2x2x2 - -INSTANTIATE_TEST_CASE_P( - TestsGather, MKLDNNCPUExtGatherTFTests, - ::testing::Values( -// Params: dct_dim, dct, in_dim, in, axis, ref_dim, ref - gatherTF_test_params{ { 3,2 }, {1.0, 1.2, 2.3, 3.4, 4.5, 5.7 }, { 2, 
2 }, { 0, 1, 1, 2 },0, { 2, 2, 2 }, {1.0, 1.2, 2.3, 3.4,2.3, 3.4,4.5, 5.7 } }, - gatherTF_test_params{ { 3,3 },{ 1.0, 1.2, 1.9,2.3, 3.4, 3.9,4.5, 5.7, 5.9 }, { 1, 2 }, { 0, 2 },1,{ 3, 2 },{ 1.0, 1.9,2.3, 3.9,4.5, 5.9 } }, - gatherTF_test_params{ { 2, 2, 3 }, dict, { 2, 2 }, { 0, 1, 1, 0 },0, { 2, 2, 2, 3 }, ref_in0_a0_d223 }, - gatherTF_test_params{ { 2, 2, 3 }, dict,{ 2, 2 }, { 0, 1, 1, 0 },-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 }, - gatherTF_test_params{ { 2, 3, 2 }, dict, { 2, 2 }, { 0, 1, 1, 0 },2, { 2, 3, 2, 2 }, ref_in0_a2_d232 }, - gatherTF_test_params{ { 2, 3, 2 }, dict,{ 2, 2 }, { 0, 1, 1, 0 },-1, { 2, 3, 2, 2 }, ref_in0_a2_d232 }, - gatherTF_test_params{ { 3, 2, 2 }, dict,{ 2, 2 }, { 0, 1, 2, 1 }, 0, { 2, 2, 2, 2 }, ref_in1_a0_d322 }, - gatherTF_test_params{ { 3, 2, 2 }, dict,{ 2, 2 }, { 0, 1, 2, 1 },-3, { 2, 2, 2, 2 }, ref_in1_a0_d322 }, - gatherTF_test_params{ { 2, 3, 2 }, dict,{ 2, 2 }, { 0, 1, 2, 1 }, 1, { 2, 2, 2, 2 }, ref_in1_a1_d232 }, - gatherTF_test_params{ { 2, 3, 2 }, dict,{ 2, 2 }, { 0, 1, 2, 1 },-2, { 2, 2, 2, 2 }, ref_in1_a1_d232 }, - gatherTF_test_params{ { 2, 2, 3 }, dict,{ 2, 2 }, { 0, 1, 2, 1 }, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 }, - gatherTF_test_params{ { 2, 2, 3 }, dict,{ 2, 2 }, { 0, 1, 2, 1 },-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 })); - - -class MKLDNNCPUExtGatherHolesTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - 3 - 2 - 2 - - - - - - - 2 - 2 - - - - - - - 2 - 1 - 2 - 2 - - - - - - - - 3 - 2 - 2 - - - 2 - 2 - - - - - 2 - 2 - 2 - 2 - - - - - - - - 2 - 2 - 2 - 2 - - - 2 - 1 - 2 - 2 - - - - - 2 - 3 - 2 - 2 - - - - - - - - - - - -)V0G0N"; - - std::string getModel(gatherTF_test_params p) { - std::string model = model_t; - std::string inIdx; - std::string inDict; - std::string out; - - for (auto& idx : p.in_dim) { - inIdx += ""; - inIdx += std::to_string(idx) + "\n"; - } - - for (auto& dct : p.dct_dim) { - inDict += ""; - inDict += std::to_string(dct) + "\n"; - } - - for (auto& dst : p.ref_dim) { - out += ""; - out += std::to_string(dst) + "\n"; - } - - REPLACE_WITH_STR(model, "_OUTC_", inIdx); - REPLACE_WITH_STR(model, "_IDICT_", inDict); - REPLACE_WITH_NUM(model, "_AX_", p.axis); - REPLACE_WITH_STR(model, "_OUT_", out); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gatherTF_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Indexes - InferenceEngine::Blob::Ptr srcIdx; - int32_t in_size = 4; - InferenceEngine::SizeVector in_dim = {2, 2}; - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, in_dim, InferenceEngine::TensorDesc::getLayoutByDims(in_dim) }); - srcIdx->allocate(); - memcpy(static_cast(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*in_size); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Input Dictionary - InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) }); - srcDict->allocate(); - memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size()); - auto * srcDictPtr = dynamic_cast*>(srcDict.get()); - if (srcDictPtr == 
nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Input3 - InferenceEngine::SizeVector src3_dim = { 2, 1, 2, 2 }; - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, src3_dim, InferenceEngine::TensorDesc::getLayoutByDims(src3_dim) }); - src3->allocate(); - memcpy(src3->buffer(), &p.dct[0], sizeof(float) * src3_dim.size()); - auto* src3Ptr = dynamic_cast*>(src3.get()); - if (src3Ptr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Infer - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("InputDictionary", srcDict)); - srcs.insert(std::pair("InputText", srcIdx)); - srcs.insert(std::pair("Input3", src3)); - graph.Infer(srcs, outputBlobs); - - // Check results - if (memcmp((*output).data(), &p.ref[0], 8 * sizeof(float)) != 0) - FAIL() << "Wrong result with compare TF reference!"; - if (memcmp(&((float*)(*output).data())[12], &p.ref[8], 8 * sizeof(float)) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtGatherHolesTests, TestsGather) {} - -INSTANTIATE_TEST_CASE_P( - TestsGather, MKLDNNCPUExtGatherHolesTests, - ::testing::Values( - // Params: dct_dim, dct, in_dim, in, axis, ref_dim, ref - gatherTF_test_params{ { 3, 2, 2 }, dict,{ 1, 5, 2, 2 },{ 0, 1, 2, 1 }, 1,{ 2, 2, 2, 2 }, ref_in1_a0_d322 })); - diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp deleted file mode 100644 index a46bb4f2d5a702..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp +++ /dev/null @@ -1,1521 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include -#include -#include -#include "tests_common.hpp" - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -class FakeGenericPrimitiveImpl : public InferenceEngine::ILayerExecImpl { -public: - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - return InferenceEngine::OK; - } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, InferenceEngine::ResponseDesc *resp) noexcept override { - return InferenceEngine::NOT_IMPLEMENTED; - } -}; - -class FakeGenericPrimitiveFactory : public InferenceEngine::ILayerImplFactory { -public: - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new FakeGenericPrimitiveImpl())); - return InferenceEngine::OK; - } -}; - -class DoublePrimitiveImpl : public InferenceEngine::ILayerExecImpl { -public: - DoublePrimitiveImpl(const 
InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = true; - if (cnnLayer->outData.size() != 1 && cnnLayer->insData.size() != 1) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = false; - cfg.inPlace = 0; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(), - cnnLayer->outData[0]->getTensorDesc().getDims(), - {cnnLayer->outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - for(auto input : config.inConfs) { - if (input.inPlace < 0) - return InferenceEngine::GENERAL_ERROR; - if (input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, InferenceEngine::ResponseDesc *resp) noexcept override { - const float *src_data = inputs[0]->buffer(); - float *dst_data = outputs[0]->buffer(); - if (src_data != dst_data) - return InferenceEngine::GENERAL_ERROR; - - size_t data_size = inputs[0]->size(); - for (size_t i = 0; i < data_size; i++) { - dst_data[i] = src_data[i]*2; - } - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer* cnnLayer; -}; - -class ConstPrimitiveImpl : public InferenceEngine::ILayerExecImpl { -public: - ConstPrimitiveImpl(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = 0; - if (cnnLayer->outData.size() != 1 && cnnLayer->insData.size() != 1) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = true; - // Cannot be in-place because memory will change a memory. 
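        // (Editorial note, not in the original file: the comment above means an
        // in-place consumer would overwrite the cached constant data. A config
        // with constant == true tells the plugin the blob never changes between
        // infer requests, so the graph can presumably execute such a node once
        // up front and reuse the result; inPlace == -1 keeps that cached buffer
        // private.)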
- cfg.inPlace = -1; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(), - cnnLayer->outData[0]->getTensorDesc().getDims(), - {cnnLayer->outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - if (config.dynBatchSupport) - return InferenceEngine::NOT_IMPLEMENTED; - for(auto input : config.inConfs) { - if (input.inPlace >= 0) - return InferenceEngine::GENERAL_ERROR; - if (!input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (output.inPlace >= 0) - return InferenceEngine::GENERAL_ERROR; - if (!output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, InferenceEngine::ResponseDesc *resp) noexcept override { - float *dst_data = outputs[0]->buffer(); - - size_t data_size = outputs[0]->size(); - for (size_t i = 0; i < data_size; i++) { - dst_data[i] = 2; - } - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer *cnnLayer; -}; - -class ConstPrimitiveFactory : public InferenceEngine::ILayerImplFactory { -public: - ConstPrimitiveFactory(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new ConstPrimitiveImpl(cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class DoublePrimitiveFactory : public InferenceEngine::ILayerImplFactory { -public: - DoublePrimitiveFactory(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new DoublePrimitiveImpl(cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class TwoDifferentOutputsImpl : public InferenceEngine::ILayerExecImpl { -public: - TwoDifferentOutputsImpl(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = 0; - if (cnnLayer->outData.size() != 2 && cnnLayer->insData.size() != 1) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = false; - cfg.inPlace = -1; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(), - cnnLayer->outData[0]->getTensorDesc().getDims(), - {cnnLayer->outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - cfg.desc = 
InferenceEngine::TensorDesc(cnnLayer->outData[1]->getTensorDesc().getPrecision(), - cnnLayer->outData[1]->getTensorDesc().getDims(), - {cnnLayer->outData[1]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->insData[0].lock()->getTensorDesc().getPrecision(), - cnnLayer->insData[0].lock()->getTensorDesc().getDims(), - {cnnLayer->insData[0].lock()->getTensorDesc().getDims(), order}); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - if (config.dynBatchSupport) - return InferenceEngine::NOT_IMPLEMENTED; - for(auto input : config.inConfs) { - if (input.inPlace >= 0) - return InferenceEngine::GENERAL_ERROR; - if (input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (output.inPlace >= 0) - return InferenceEngine::GENERAL_ERROR; - if (output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, InferenceEngine::ResponseDesc *resp) noexcept override { - const float *src_data = inputs[0]->buffer(); - float *dst_data0 = outputs[0]->buffer(); - float *dst_data1 = outputs[1]->buffer(); - - size_t out_data_size0 = outputs[0]->size(); - size_t out_data_size1 = outputs[1]->size(); - for (size_t i = 0; i < out_data_size0; i++) { - dst_data0[i] = (*(src_data++))*2; - } - - for (size_t i = 0; i < out_data_size1; i++) { - dst_data1[i] = (*(src_data++))*3; - } - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer* cnnLayer; -}; - -class TwoDifferentOutputsFactory : public InferenceEngine::ILayerImplFactory { -public: - TwoDifferentOutputsFactory(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new TwoDifferentOutputsImpl(cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class CustomConcatImpl : public InferenceEngine::ILayerExecImpl { -public: - CustomConcatImpl(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = 0; - if (cnnLayer->outData.size() != 1 && cnnLayer->insData.size() != 2) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = false; - cfg.inPlace = -1; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(), - cnnLayer->outData[0]->getTensorDesc().getDims(), - {cnnLayer->outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - cfg.inPlace = 0; - InferenceEngine::SizeVector dims = cnnLayer->insData[0].lock()->getTensorDesc().getDims(); - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->insData[0].lock()->getTensorDesc().getPrecision(), - dims, {dims, order}); - size_t dataSize = 
std::accumulate(std::begin(dims), std::end(dims), (size_t) 1, std::multiplies()); - config.inConfs.push_back(cfg); - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->insData[1].lock()->getTensorDesc().getPrecision(), - cnnLayer->insData[1].lock()->getTensorDesc().getDims(), - {cnnLayer->insData[1].lock()->getTensorDesc().getDims(), order, - dataSize}); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - if (config.dynBatchSupport) - return InferenceEngine::NOT_IMPLEMENTED; - for(auto input : config.inConfs) { - if (input.inPlace < 0) - return InferenceEngine::GENERAL_ERROR; - if (input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (output.inPlace >= 0) - return InferenceEngine::GENERAL_ERROR; - if (output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - - InferenceEngine::StatusCode execute(std::vector& inputs, - std::vector& outputs, - InferenceEngine::ResponseDesc *resp) noexcept override { - return InferenceEngine::OK; - } -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class CustomConcatFactory : public InferenceEngine::ILayerImplFactory { -public: - CustomConcatFactory(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new CustomConcatImpl(cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class CustomSplitImpl : public InferenceEngine::ILayerExecImpl { -public: - CustomSplitImpl(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc *resp) noexcept override { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = 0; - if (cnnLayer->outData.size() != 2 && cnnLayer->insData.size() != 1) - return InferenceEngine::GENERAL_ERROR; - InferenceEngine::DataConfig cfg; - cfg.constant = false; - cfg.inPlace = 0; - InferenceEngine::SizeVector order; - for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(), - cnnLayer->outData[0]->getTensorDesc().getDims(), - {cnnLayer->outData[0]->getTensorDesc().getDims(), order}); - config.outConfs.push_back(cfg); - size_t dataSize = std::accumulate(std::begin(cnnLayer->outData[0]->getTensorDesc().getDims()), - std::end(cnnLayer->outData[0]->getTensorDesc().getDims()), - (size_t) 1, std::multiplies()); - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[1]->getTensorDesc().getPrecision(), - cnnLayer->outData[1]->getTensorDesc().getDims(), - {cnnLayer->outData[1]->getTensorDesc().getDims(), order, dataSize}); - config.outConfs.push_back(cfg); - cfg.inPlace = -1; - cfg.desc = InferenceEngine::TensorDesc(cnnLayer->insData[0].lock()->getTensorDesc().getPrecision(), - cnnLayer->insData[0].lock()->getTensorDesc().getDims(), - {cnnLayer->insData[0].lock()->getTensorDesc().getDims(), order}); - config.inConfs.push_back(cfg); - conf.push_back(config); - return InferenceEngine::OK; - } - 
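Both the in-place concat and the in-place split above use the same trick: the second tensor's blocking descriptor carries an element offset equal to the element count of the first tensor, so the two logical tensors alias one flat buffer and execute() can be a no-op. A hedged sketch of the offset arithmetic (inPlaceOffset is an illustrative name, not an Inference Engine API):

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Element offset at which tensor #2 starts when packed right after
    // tensor #1 in one shared dense buffer: the element count of tensor #1.
    size_t inPlaceOffset(const std::vector<size_t>& firstDims) {
        return std::accumulate(std::begin(firstDims), std::end(firstDims),
                               (size_t)1, std::multiplies<size_t>());
    }
    // For a {1, 3, 5, 5} first tensor this yields 75, matching the dataSize
    // passed as the third BlockingDesc argument above.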
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override { - if (config.dynBatchSupport) - return InferenceEngine::NOT_IMPLEMENTED; - for(auto input : config.inConfs) { - if (!input.inPlace) - return InferenceEngine::GENERAL_ERROR; - if (input.constant) - return InferenceEngine::GENERAL_ERROR; - } - for(auto output : config.outConfs) { - if (output.constant) - return InferenceEngine::GENERAL_ERROR; - } - return InferenceEngine::OK; - } - InferenceEngine::StatusCode execute(std::vector& inputs, - std::vector& outputs, - InferenceEngine::ResponseDesc *resp) noexcept override { - return InferenceEngine::OK; - } -private: - InferenceEngine::CNNLayer * cnnLayer; -}; - -class CustomSplitFactory : public InferenceEngine::ILayerImplFactory { -public: - CustomSplitFactory(const InferenceEngine::CNNLayer *layer) { - cnnLayer = const_cast(layer); - } - // First implementation has more priority than next - InferenceEngine::StatusCode getImplementations(std::vector& impls, InferenceEngine::ResponseDesc *resp) noexcept override { - impls.push_back(InferenceEngine::ILayerImpl::Ptr(new CustomSplitImpl(cnnLayer))); - return InferenceEngine::OK; - } - -private: - InferenceEngine::CNNLayer * cnnLayer; -}; -using fake_ext_factory = std::function; - -class FakeExtensionFabric : public InferenceEngine::Extensions::Cpu::MKLDNNExtensions { -public: - FakeExtensionFabric() { - factories["CustomNewConvolution"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new FakeGenericPrimitiveFactory(); }; - factories["NewDoubleLayer"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new DoublePrimitiveFactory(cnnLayer); }; - factories["NewTwoDifferentOutputs"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new TwoDifferentOutputsFactory(cnnLayer); }; - factories["ConstPrim"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new ConstPrimitiveFactory(cnnLayer); }; - factories["CustomInPlaceConcat"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new CustomConcatFactory(cnnLayer); }; - factories["CustomInPlaceSplit"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new CustomSplitFactory(cnnLayer); }; - } - - virtual ~FakeExtensionFabric() { - factories.clear(); - } - - void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {} - void Unload() noexcept override {} - InferenceEngine::StatusCode getPrimitiveTypes(char**& types, unsigned int& size, InferenceEngine::ResponseDesc* resp) noexcept override { - types = new char *[factories.size()]; - size_t count = 0; - for (auto it = factories.begin(); it != factories.end(); it++, count ++) { - types[count] = new char[it->first.size() + 1]; - std::copy(it->first.begin(), it->first.end(), types[count]); - types[count][it->first.size() ] = '\0'; - } - return InferenceEngine::OK; - }; - InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory *&factory, - const InferenceEngine::CNNLayer *cnnLayer, - InferenceEngine::ResponseDesc *resp) noexcept override { - if (factories.find(cnnLayer->type) == factories.end()) { - std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!"; - errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); - return 
InferenceEngine::NOT_FOUND; - } - factory = factories[cnnLayer->type](cnnLayer); - return InferenceEngine::OK; - } - -private: - std::map factories; -}; - -class MKLDNNGraphGenericTests: public TestsCommon { -protected: - virtual void SetUp() { - TestsCommon::SetUp(); - extension.reset(new FakeExtensionFabric()); - } - std::shared_ptr extension; -}; - -TEST_F(MKLDNNGraphGenericTests, canGetPrimitiveDescriptorsList) { - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - std::shared_ptr node; - InferenceEngine::DataPtr dataPtr; - dataPtr.reset(new InferenceEngine::Data("test", { InferenceEngine::Precision::FP32, {5, 4, 3, 1}, InferenceEngine::Layout::NCHW })); - InferenceEngine::CNNLayerPtr layerPtr; - layerPtr.reset(new InferenceEngine::CNNLayer({"name", "CustomNewConvolution", InferenceEngine::Precision::FP32})); - layerPtr->outData.push_back(dataPtr); - - mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)); - MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache; - node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache)); - ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType()); - - ASSERT_NO_THROW(node->getSupportedDescriptors()); -} - -template -void ref_double(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int i=0; i < src.size(); i++) - dst_data[i] = src_data[i]*2; -} - -template -void ref_double_batch1(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int i= 0; i < src.size() / 2; i++) - dst_data[i] = src_data[i]*2; - - for (int i= src.size() / 2; i < src.size(); i++) - dst_data[i] = 0; -} - -template -void ref_twoDifferent(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst1, InferenceEngine::TBlob &dst2) { - const data_t *src_data = src.readOnly(); - data_t *dst_data1 = dst1.data(); - data_t *dst_data2 = dst2.data(); - - for (int i=0; i < dst1.size(); i++) - dst_data1[i] = (*(src_data++))*2; - - for (int i=0; i < dst2.size(); i++) - dst_data2[i] = (*(src_data++))*6; -} - -TEST_F(MKLDNNGraphGenericTests, DontCreateGPUGenericPrimitive) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 5 - 5 - - - - - - - 1 - 3 - 5 - 5 - - - - - 1 - 3 - 5 - 5 - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - ASSERT_THROW(graph.CreateGraph(network, extMgr), InferenceEngine::Exception); -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteConstGenericPrimitive) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 5 - 5 - - - - - - - 1 - 3 - 5 - 5 - - - - - 1 - 3 - 5 - 5 - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src = {1, 3, 5, 5}; - - InferenceEngine::Blob::Ptr src = - 
InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - float * dst_data = dst_ref.buffer(); - for (size_t i = 0; i < dst_ref.size(); i++) { - dst_data[i] = 2; - } - - compare(*output, dst_ref); -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitive) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 5 - 5 - - - - - - - 1 - 3 - 5 - 5 - - - - - 1 - 3 - 5 - 5 - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src = {1, 3, 5, 5}; - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_double(*srcPtr, dst_ref); - - compare(*output, dst_ref); -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithTwoOutputs) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 5 - 5 - - - - - - - 1 - 3 - 5 - 5 - - - - - 1 - 1 - 5 - 5 - - - 1 - 2 - 5 - 5 - - - - - - - - 1 - 2 - 5 - 5 - - - - - 1 - 2 - 5 - 5 - - - - - - - - 1 - 1 - 5 - 5 - - - 1 - 2 - 5 - 5 - - - - - 1 - 3 - 5 - 5 - - - - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src = {1, 3, 5, 5}; - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - - float * data_src = src->buffer(); - for (size_t i = 0; i < src->size(); 
i++) - data_src[i] = 1; - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - auto it = out.begin(); - - std::pair item = *it; - InferenceEngine::DataPtr data1 = item.second; - - InferenceEngine::TensorDesc outputDesc1 = item.second->getTensorDesc(); - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(outputDesc1); - output1->allocate(); - outputBlobs[item.first] = output1; - - graph.Infer(srcs, outputBlobs); - - float * data = outputBlobs.begin()->second->buffer(); - for (size_t i = 0; i < 25; i++) { - ASSERT_EQ(*data, 2); - data++; - } - for (size_t i = 0; i < 50; i++) { - ASSERT_EQ(*data, 6); - data++; - } -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 5 - 5 - - - - - - - 1 - 2 - 5 - 5 - - - - - - - 1 - 3 - 5 - 5 - - - 1 - 2 - 5 - 5 - - - - - 1 - 5 - 5 - 5 - - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src1 = {1, 3, 5, 5}; - - InferenceEngine::Blob::Ptr src1 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - - float * data_src1 = src1->buffer(); - for (size_t i = 0; i < src1->size(); i++) - data_src1[i] = 1; - - InferenceEngine::SizeVector dims_src2 = {1, 2, 5, 5}; - - InferenceEngine::Blob::Ptr src2 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - - float * data_src2 = src2->buffer(); - for (size_t i = 0; i < src2->size(); i++) - data_src2[i] = 2; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - auto it = out.begin(); - - std::pair item = *it; - - InferenceEngine::TensorDesc outputDesc1 = item.second->getTensorDesc(); - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(outputDesc1); - output1->allocate(); - outputBlobs[item.first] = output1; - - graph.Infer(srcs, outputBlobs); - - float * data = outputBlobs.begin()->second->buffer(); - for (size_t i = 0; i < 75; i++) { - ASSERT_EQ(*data, 1); - data++; - } - for (size_t i = 0; i < 50; i++) { - ASSERT_EQ(*data, 2); - data++; - } -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceSplit) { - std::string model = R"V0G0N( - - - - - - 1 - 4 - 4 - 4 - - - - - - - 1 - 4 - 4 - 4 - - - - - 1 - 2 - 4 - 4 - - - 1 - 2 - 4 - 4 - - - - - - - - 1 - 2 - 4 - 4 - - - - - 1 - 2 - 4 - 4 - - - - - - - - 1 - 2 - 4 - 4 - - - - - 1 - 2 - 4 - 4 - - - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - 
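Each of these graph tests verifies inference the same way: run Infer, fill a reference blob by hand, then compare outputs element-wise within a tolerance. A minimal sketch of that comparison step, with plain arrays standing in for the removed TBlob type (allClose is an illustrative stand-in for the tests' compare helper):

    #include <cmath>
    #include <cstddef>

    // Element-wise comparison with an absolute tolerance, mirroring what the
    // deleted compare(output, dst_ref) helper does for float blobs.
    bool allClose(const float* actual, const float* expected, size_t n,
                  float threshold = 1e-5f) {
        for (size_t i = 0; i < n; i++)
            if (std::fabs(actual[i] - expected[i]) > threshold)
                return false;
        return true;
    }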
MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src = {1, 4, 4, 4}; - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - - float * data_src = src->buffer(); - for (size_t i = 0; i < src->size(); i++) { - if (i < src->size() / 2) - data_src[i] = 1; - else - data_src[i] = 2; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - auto it = out.begin(); - - std::pair item = *it; - - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output1->allocate(); - outputBlobs[item.first] = output1; - - item = *(++it); - InferenceEngine::TBlob::Ptr output2; - output2 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output2->allocate(); - outputBlobs[item.first] = output2; - - graph.Infer(srcs, outputBlobs); - - float * data = output1->buffer(); - for (size_t i = 0; i < output1->size(); i++) { - ASSERT_EQ(*data, 4); - data++; - } - data = output2->buffer(); - for (size_t i = 0; i < output2->size(); i++) { - ASSERT_EQ(*data, 4); - data++; - } -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithDynamicBatch) { - std::string model = R"V0G0N( - - - - - - 2 - 3 - 5 - 5 - - - - - - - 2 - 3 - 5 - 5 - - - - - 2 - 3 - 5 - 5 - - - - - - - - - )V0G0N"; - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - extMgr->AddExtension(extension); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - InferenceEngine::SizeVector dims_src = {2, 3, 5, 5}; - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - float *dstData = output->data(); - - for (size_t i = 0; i < output->size(); i++) { - dstData[i] = 0; - } - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_double(*srcPtr, dst_ref); - - compare(*output, dst_ref); - - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "1"}}); - - for (size_t i = 0; i < output->size(); i++) { - dstData[i] = 0; - } - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref2(item.second->getTensorDesc()); - dst_ref2.allocate(); - - ref_double_batch1(*srcPtr, dst_ref2); - - compare(*output, dst_ref2); -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteNotInLineGRN) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 
- 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - 1 - 3 - 2 - 2 - - - - - 1 - 6 - 2 - 2 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {1, 3, 2, 2}; - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst = {0.000f, 0.503f, 0.659f, 0.117f, -0.474f, -0.573f, -0.202f, 0.545f, 0.619f, 0.246f, - 0.000f, 0.000f, 0.000f, 0.503f, 0.659f, 0.117f, -0.474f, -0.573f, -0.202f, 0.545f, - 0.619f, 0.246f, 0.000f, 0.000f}; - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphGenericTests, ExecuteInLineGRN) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - 1 - 3 - 2 - 2 - - - - - 1 - 6 - 2 - 2 - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {1, 3, 2, 2}; - - InferenceEngine::Blob::Ptr src1 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src1->allocate(); - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::Blob::Ptr src2 = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data1", src1)); - srcs.insert(std::pair("data2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst = {0.000f, 0.503f, 0.659f, 0.117f, -0.474f, -0.573f, -0.202f, 0.545f, 0.619f, 0.246f, - 0.000f, 0.000f, 0.000f, 0.503f, 0.659f, 0.117f, -0.474f, -0.573f, -0.202f, 0.545f, - 0.619f, 0.246f, 0.000f, 0.000f}; - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/log_softmax_tests.cpp 
b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/log_softmax_tests.cpp deleted file mode 100644 index bd9978dcc72b6c..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/log_softmax_tests.cpp +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct log_softmax_test_params { - InferenceEngine::SizeVector in_out; - std::vector src; - int axis; - std::vector reference; - - std::vector> comp; -}; - -void ref_log_softmax(InferenceEngine::TBlob &src, int axis, InferenceEngine::TBlob &dst) { - float *src_data = src.data(); - float *dst_data = dst.data(); - InferenceEngine::SizeVector dims = src.getTensorDesc().getDims(); - - if (axis < 0) axis += dims.size(); - - size_t W = dims[3]; - size_t H = dims[2]; - size_t C = dims[1]; - size_t MB = dims[0]; - - auto off = [=](int n, int c, int h, int w) - { - return (n * W * H * C + c * W * H + h * W + w); - }; - - if(axis == 0) { - for (int c = 0; c < C; ++c) { - for (int h = 0; h < H; ++h) { - for (int w = 0; w < W; ++w) { - float result = 0.0f; - for (int n = 0; n < MB; ++n) { - result += expf(src_data[off(n, c, h, w)]); - } - result = logf(result); - for (int n = 0; n < MB; ++n) { - dst_data[off(n, c, h, w)] = src_data[off(n, c, h, w)] - result; - } - } - } - } - } else if(axis == 1) { - for (int n = 0; n < MB; ++n) { - for (int h = 0; h < H; ++h) { - for (int w = 0; w < W; ++w) { - float result = 0.0f; - for (int c = 0; c < C; ++c) { - result += expf(src_data[off(n, c, h, w)]); - } - result = logf(result); - for (int c = 0; c < C; ++c) { - dst_data[off(n, c, h, w)] = src_data[off(n, c, h, w)] - result; - } - } - } - } - } else if(axis == 2) { - for (int n = 0; n < MB; ++n) { - for (int c = 0; c < C; ++c) { - for (int w = 0; w < W; ++w) { - float result = 0.0f; - for (int h = 0; h < H; ++h) { - result += expf(src_data[off(n, c, h, w)]); - } - result = logf(result); - for (int h = 0; h < H; ++h) { - dst_data[off(n, c, h, w)] = src_data[off(n, c, h, w)] - result; - } - } - } - } - } else if(axis == 3) { - for (int n = 0; n < MB; ++n) { - for (int c = 0; c < C; ++c) { - for (int h = 0; h < H; ++h) { - float result = 0.0f; - for (int w = 0; w < W; ++w) { - result += expf(src_data[off(n, c, h, w)]); - } - result = logf(result); - for (int w = 0; w < W; ++w) { - dst_data[off(n, c, h, w)] = src_data[off(n, c, h, w)] - result; - } - } - } - } - } -} - -void ref_log_softmax_any_dims(InferenceEngine::TBlob &src, int axis, InferenceEngine::TBlob &dst) { - size_t i, j, k, axis_step = 1, reduced_axis_size, reduced_axis_stride = 1; - InferenceEngine::SizeVector dims = src.getTensorDesc().getDims(); - float *src_data = src.data(); - float *dst_data = dst.data(); - - if (axis < 0) axis += dims.size(); - for (i = 0; i < axis; i++) axis_step *= dims[i]; - reduced_axis_size = dims[axis]; - for (i = (axis + 1); i < dims.size(); i++) reduced_axis_stride *= dims[i]; - - for (k = 0; k < axis_step; k++) { - for (i = 0; i < reduced_axis_stride; i++) { - float reduce_prod = 0.0f; - const float *src_dataPtr = &src_data[k * reduced_axis_stride * reduced_axis_size + i]; - for (j = 0; j < reduced_axis_size; ++j) { - reduce_prod += expf((*src_dataPtr)); - src_dataPtr += reduced_axis_stride; - } - - reduce_prod = logf(reduce_prod); - src_dataPtr = 
&src_data[k * reduced_axis_stride * reduced_axis_size + i]; - float *dst_dataPtr = (float*)&dst_data[k * reduced_axis_stride * reduced_axis_size + i]; - for (j = 0; j < reduced_axis_size; ++j) { - (*dst_dataPtr) = (*src_dataPtr) - reduce_prod; - src_dataPtr += reduced_axis_stride; - dst_dataPtr += reduced_axis_stride; - } - } - } -} - -class MKLDNNCPUExtLogSoftmaxTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_OUT_ - - - - - - - - _IN_OUT_ - - - - - _IN_OUT_ - - - - - - - - -)V0G0N"; - - std::string getModel(log_softmax_test_params p) { - std::string model = model_t; - std::string in_out; - - for (auto& dst : p.in_out) { - in_out += ""; - in_out += std::to_string(dst) + "\n"; - } - - REPLACE_WITH_STR(model, "_IN_OUT_", in_out); - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - log_softmax_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Data - InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.in_out, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out) }); - srcData->allocate(); - if (p.src.size()) - memcpy(srcData->buffer(), &p.src[0], sizeof(float)*p.src.size()); - else - fill_data(srcData->buffer(), srcData->size()); - auto * srcDataPtr = dynamic_cast*>(srcData.get()); - if (srcDataPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Check results - if (p.in_out.size() == 4) { - ref_log_softmax(*srcDataPtr, p.axis, dst_ref); - if (p.reference.size()) { - for (size_t i = 0; i < p.reference.size(); i++) { - ASSERT_NEAR(dst_ref.data()[i], p.reference[i], 0.00001f); - } - } - } - ref_log_softmax_any_dims(*srcDataPtr, p.axis, dst_ref); - if (p.reference.size()) { - for (size_t i = 0; i < p.reference.size(); i++) { - ASSERT_NEAR(dst_ref.data()[i], p.reference[i], 0.00001f); - } - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("Input", srcData)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref, 0.00001f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtLogSoftmaxTests, TestsLogSoftmax) {} - -INSTANTIATE_TEST_CASE_P( - TestsLogSoftmax, MKLDNNCPUExtLogSoftmaxTests, - ::testing::Values( - // Params: in_out, src, axis, reference - log_softmax_test_params{ { 1, 1, 1, 3 },{ -0.5f, 0.f, 0.5f },3,{ -1.68026966f, -1.1802697f, -0.68026966 } }, - log_softmax_test_params{ { 1, 1, 1, 3 },{ -0.5f, 0.f, 0.5f },-1,{ -1.68026966f, -1.1802697f, -0.68026966 } }, - log_softmax_test_params{ { 3, 1, 1, 1 },{ -0.5f, 0.f, 0.5f },0,{ -1.68026966f, -1.1802697f, -0.68026966 } }, - log_softmax_test_params{ { 1, 1, 2, 2 },{ 1.0f, 0.5f, 0.f, -0.5f },3,{ -0.474077f, 
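Note that ref_log_softmax and ref_log_softmax_any_dims above evaluate log(sum(exp(x))) directly, which overflows for large inputs; the test data stays small, so it is harmless here. For comparison, a numerically stable log-softmax over one contiguous axis (a standalone sketch, not part of the removed file):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    // Stable form: log_softmax(x_i) = x_i - (m + log(sum_j exp(x_j - m))),
    // where m = max_j x_j keeps every exponent non-positive.
    void logSoftmax(const float* src, float* dst, size_t n) {
        float m = *std::max_element(src, src + n);
        float sum = 0.f;
        for (size_t j = 0; j < n; j++)
            sum += std::exp(src[j] - m);
        const float lse = m + std::log(sum);
        for (size_t i = 0; i < n; i++)
            dst[i] = src[i] - lse;
    }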
-0.974077f, -0.474077f, -0.974077f } }, - log_softmax_test_params{ { 2, 2, 1, 1 },{ 1.0f, 0.5f, 0.f, -0.5f },1,{ -0.474077f, -0.974077f, -0.474077f, -0.974077f } }, - log_softmax_test_params{ { 2, 2, 1, 1 },{ 1.0f, 0.5f, 0.f, -0.5f },-3,{ -0.474077f, -0.974077f, -0.474077f, -0.974077f } }, - log_softmax_test_params{ { 2, 3, 3, 2 },{ },3,{ } }, - log_softmax_test_params{ { 1, 1, 2, 2 },{ 1.0f, 0.5f, 0.f, -0.5f },2,{ -0.31326166f, -0.31326166f, -1.3132616f, -1.3132616f } }, - log_softmax_test_params{ { 2, 3, 3, 2 },{},0,{} }, - log_softmax_test_params{ { 2, 3, 3, 2 },{},1,{} }, - log_softmax_test_params{ { 2, 3, 3, 2 },{},2,{} }, - log_softmax_test_params{ { 2, 3, 3, 2, 4, 5, 1, 2 },{},4,{} } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/math_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/math_tests.cpp deleted file mode 100644 index 573ed839dc93d1..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/math_tests.cpp +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" -#include "common_test_utils/data_utils.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct math_test_params { - std::string math_function; - InferenceEngine::SizeVector in_out; - std::vector input_tensor; - std::vector alpha; - std::vector beta; - std::vector gamma; - std::vector reference; - - std::vector> comp; -}; - -void ref_math( - std::string math_function, - InferenceEngine::TBlob &src, - std::vector alpha, - std::vector beta, - std::vector gamma, - InferenceEngine::TBlob &dst -) { - size_t i; - float* src_data = src.data(); - float *dst_data = dst.data(); - size_t dst_size = dst.size(); - - if (math_function == "Erf") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = std::erf(src_data[i]); - } - } else if (math_function == "Abs") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = (std::abs)(src_data[i]); - } - } else if (math_function == "Acos") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = acosf(src_data[i]); - } - } else if (math_function == "Acosh") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = acoshf(src_data[i]); - } - } else if (math_function == "Asin") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = asinf(src_data[i]); - } - } else if (math_function == "Asinh") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = asinhf(src_data[i]); - } - } else if (math_function == "Atan") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = atanf(src_data[i]); - } - } else if (math_function == "Atanh") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = atanhf(src_data[i]); - } - } else if (math_function == "Ceil") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = ceilf(src_data[i]); - } - } else if (math_function == "Cos") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = cosf(src_data[i]); - } - } else if (math_function == "Cosh") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = coshf(src_data[i]); - } - } else if (math_function == "Floor") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = floorf(src_data[i]); - } - } else if (math_function == "HardSigmoid") { - alpha[0] = (alpha[0] == 0.0f) ? 0.2f : alpha[0]; - beta[0] = (beta[0] == 0.0f) ? 
0.5f : beta[0]; - for (i = 0; i < dst_size; i++) { - dst_data[i] = (std::max)(0.f, (std::min)(1.f, alpha[0] * src_data[i] + beta[0])); - } - } else if (math_function == "Log") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = logf(src_data[i]); - } - } else if (math_function == "Neg") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = -src_data[i]; - } - } else if (math_function == "Reciprocal") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = 1.0f / src_data[i]; - } - } else if (math_function == "Selu") { - alpha[0] = (alpha[0] == 0.0f) ? 1.67326f : alpha[0]; - gamma[0] = (gamma[0] == 0.0f) ? 1.0507f : gamma[0]; - for (i = 0; i < dst_size; i++) { - float x = src_data[i]; - dst_data[i] = (x > 0.0f) ? (gamma[0] * x) : (gamma[0] * alpha[0] * (exp(x) - 1.0f)); - } - } else if (math_function == "Sign") { - for (i = 0; i < dst_size; i++) { - if (src_data[i] > 0.0f) dst_data[i] = 1.0f; - else if (src_data[i] < 0.0f) dst_data[i] = -1.0f; - else dst_data[i] = 0.0f; - } - } else if (math_function == "Sin") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = sinf(src_data[i]); - } - } else if (math_function == "Sinh") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = sinhf(src_data[i]); - } - } else if (math_function == "SoftPlus") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = logf(expf(src_data[i]) + 1); - } - } else if (math_function == "Softsign") { - for (i = 0; i < dst_size; i++) { - float x = src_data[i]; - dst_data[i] = x / (1.f + (std::abs)(x)); - } - } else if (math_function == "Tan") { - for (i = 0; i < dst_size; i++) { - dst_data[i] = tanf(src_data[i]); - } - } -} - -class MKLDNNCPUExtMathTests: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_OUT_ - - - - - - - - _IN_OUT_ - - - - - _IN_OUT_ - - - - - - - - -)V0G0N"; - - std::string getModel(math_test_params p) { - std::string model = model_t; - std::string in_out = ""; - std::string alpha; - std::string beta; - std::string gamma; - - for (auto& dst : p.in_out) { - in_out += ""; - in_out += std::to_string(dst) + "\n"; - } - - REPLACE_WITH_STR(model, "_IN_OUT_", in_out); - REPLACE_WITH_STR(model, "_MATH_FUNCTION_", p.math_function); - - if (p.alpha.size()) { - alpha = "alpha=\"" + to_string_c_locale(p.alpha[0]) + "\""; - } - REPLACE_WITH_STR(model, "_ALPHA_", alpha); - - if (p.beta.size()) { - beta = "beta=\"" + to_string_c_locale(p.beta[0]) + "\""; - } - REPLACE_WITH_STR(model, "_BETA_", beta); - - if (p.gamma.size()) { - gamma = "gamma=\"" + to_string_c_locale(p.gamma[0]) + "\""; - } - REPLACE_WITH_STR(model, "_GAMMA_", gamma); - return model; - } - - template - static void fill_data_dbgval(data_t *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = static_cast(i & (sizeof(data_t) * 8 - 1)); - } - } -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - math_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Data - InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.in_out, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out) }); - srcData->allocate(); - if (p.input_tensor.size()) - memcpy(srcData->buffer(), &p.input_tensor[0], sizeof(float)*p.input_tensor.size()); - else { - 
if (p.math_function == "Erf") - CommonTestUtils::fill_data_sine(srcData->buffer(), srcData->size(), 0.f, 3.f, 1.f); - else - CommonTestUtils::fill_data(srcData->buffer(), srcData->size()); - } - auto * srcDataPtr = dynamic_cast*>(srcData.get()); - if (srcDataPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Check results - ref_math(p.math_function, *srcDataPtr, p.alpha, p.beta, p.gamma, dst_ref); - if (p.reference.size()) { - for (size_t i = 0; i < p.reference.size(); i++) { - ASSERT_NEAR(dst_ref.data()[i], p.reference[i], 0.00001f); - } - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("Input", srcData)); - - // Infer - graph.Infer(srcs, outputBlobs); - float threshold = p.math_function == "Erf" ? 0.0001f : 0.00001f; - compare(*output, dst_ref, threshold); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtMathTests, TestsMath) {} - -INSTANTIATE_TEST_CASE_P( - TestsMath, MKLDNNCPUExtMathTests, - ::testing::Values( - // Params: math_function, in_out, input_tensor, alpha, beta, gamma, reference - math_test_params{ "Erf", {},{},{},{},{},{} }, - math_test_params{ "Erf", { 1, 1, 12, 256 }, {},{},{},{}, {} }, - math_test_params{ "Erf", { 12, 256, 3 },{},{},{},{},{} }, - math_test_params{ "Erf", { 3, 4 },{},{},{},{},{} }, - math_test_params{ "Erf", { 20 },{},{},{},{},{} }, - math_test_params{ "Erf", { 12, 4, 9, 8 },{},{},{},{},{} }, - math_test_params{ "Erf", { 6, 12, 4, 9, 8, 10, 3 },{},{},{},{},{} }, - math_test_params{ "Abs",{ 3 },{ -1, 0, 1 },{},{},{},{ 1, 0, 1 } }, - math_test_params{ "Acos",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ 2.09439516f, 1.57079637f, 1.04719758f } }, - math_test_params{ "Acosh",{ 3 },{ 1.f, 2.0f, 3.0f },{},{},{},{} }, - math_test_params{ "Asin",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ -0.523598790f, 0.0f, 0.523598790f } }, - math_test_params{ "Asinh",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ } }, - math_test_params{ "Atan",{ 3 },{ -1, 0, 1 },{},{},{},{ -0.785398185f, 0.0f, 0.785398185f } }, - math_test_params{ "Atanh",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ } }, - math_test_params{ "Ceil",{ 2 },{ -1.5f, 1.2f },{},{},{},{ -1, 2 } }, - math_test_params{ "Cos",{ 3 },{ -1, 0, 1 },{},{},{},{ 0.540302336f, 1.0f, 0.540302336f } }, - math_test_params{ "Cosh",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ } }, - math_test_params{ "Floor",{ 3 },{-1.5f, 1.2f, 2.f},{},{},{},{-2, 1, 2} }, - math_test_params{ "HardSigmoid",{ 3 },{ -1, 0, 1 },{0.5f},{0.6f},{},{ 0.1f, 0.6f, 1.f } }, - math_test_params{ "Log",{ 2 },{ 1, 10 },{},{},{},{ 0.f, 2.30258512f } }, - math_test_params{ "Neg",{ 3 },{ -1, 0, 1 },{},{},{},{ 1, 0, -1 } }, - math_test_params{ "Reciprocal",{ 3 },{ -1, 0.1, 1 },{2},{},{3},{-1, 10, 1} }, - math_test_params{ "Selu",{ 3 },{ -1, 0, 1 },{2},{},{3},{ -3.79272318f, 0.f, 3.f } }, - math_test_params{ "Sign",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{-1, 0, 1} }, - math_test_params{ "Sin",{ 3 },{ -1, 0, 1 },{},{},{},{ -0.841470957f, 0.0f, 0.841470957f } }, - math_test_params{ "Sinh",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ } }, - math_test_params{ "SoftPlus",{ 3 },{ -1, 0, 1 
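A similar caveat applies to the SoftPlus branch of ref_math above: logf(expf(x) + 1) overflows to +inf for moderately large x. The generated test inputs stay small, but a stable formulation is (standalone sketch):

    #include <cmath>

    // log(1 + e^x) = max(x, 0) + log1p(e^{-|x|}) avoids overflow in exp().
    float softplusStable(float x) {
        return std::fmax(x, 0.f) + std::log1p(std::exp(-std::fabs(x)));
    }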
},{},{},{},{ 0.31326166f, 0.69314718f, 1.31326163f } }, - math_test_params{ "Softsign",{ 3 },{ -1, 0, 1 },{},{},{},{ -0.5f, 0.f, 0.5f } }, - math_test_params{ "Tan",{ 3 },{ -1, 0, 1 },{},{},{},{ -1.55740774f, 0.0f, 1.55740774f } } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp deleted file mode 100644 index 7e1179fbf5e4ac..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include "ir_gen_helper.hpp" -#include - -#include -#include - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace single_layer_tests; - -using namespace Extensions; -using namespace ::Cpu; - -namespace { - -OV_CC_DOMAINS(MVNTests); - -} // namespace - -struct mvn_test_params { - vector dims; - - int across_channels; - int normalize_variance; - float eps; - - size_t num_prim_desc; - bool isBlockedFormat; - int selectedType; - - Precision prec_in; - Precision prec_out; - - vector> comp; -}; - -extern InferenceEngine::IExtensionPtr make_FakeExtensions(); - -template -void ref_mvn(const TBlob &src, TBlob &dst, mvn_test_params prm) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - size_t dims_size = prm.dims.size(); - - size_t N = prm.dims[0]; - size_t C = prm.dims[1]; - size_t D = dims_size > 4 ? prm.dims[dims_size - 3lu] : 1lu; - size_t H = dims_size > 3 ? prm.dims[dims_size - 2lu] : 1lu; - size_t W = prm.dims[dims_size - 1lu]; - - float eps = prm.eps; - - size_t C1 = H * W; - size_t C2 = C1 * D; - size_t C3 = C2 * C; - - float C2inv = 1.f / static_cast(C2); - float C3inv = 1.f / static_cast(C3); - - for (size_t b = 0lu; b < N; b++) { - size_t cb = b * C3; - // Calculate mean value - if (prm.across_channels) { - float mean = 0.0f; - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + c * C2; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - mean += src_data[ch + w]; - } - } - } - } - mean *= C3inv; - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + c * C2; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - size_t index = ch + w; - dst_data[index] = src_data[index] - mean; - } - } - } - } - } else { - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + c * C2; - float mean = 0.0f; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - mean += src_data[ch + w]; - } - } - } - - mean *= C2inv; - - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - size_t index = ch + w; - dst_data[index] = src_data[index] - mean; - } - } - } - } - } - } - - if (prm.normalize_variance) { - for (size_t b = 0; b < N; b++) { - size_t cb = b * C3; - // Calculate variances value - if (prm.across_channels) { - float variance = 0.f; - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + 
c * C2; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - variance += dst_data[ch + w] * dst_data[ch + w]; - } - } - } - } - variance = 1.f / sqrtf(variance * C3inv + eps); - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + c * C2; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - dst_data[ch + w] *= variance; - } - } - } - } - } else { - for (size_t c = 0lu; c < C; c++) { - size_t cc = cb + c * C2; - float variance = 0.0f; - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - variance += dst_data[ch + w] * dst_data[ch + w]; - } - } - } - variance = 1.f / sqrtf(variance * C2inv + eps); - for (size_t d = 0lu; d < D; d++) { - size_t cd = cc + d * C1; - for (size_t h = 0lu; h < H; h++) { - size_t ch = cd + h * W; - for (size_t w = 0lu; w < W; w++) { - dst_data[ch + w] *= variance; - if (prm.prec_out == Precision::U8) { - dst_data[ch + w] = (dst_data[ch + w] > 0) ? roundf(dst_data[ch + w]) : 0; - } else if (prm.prec_out == Precision::I8) { - dst_data[ch + w] = roundf(dst_data[ch + w]); - } - } - } - } - } - } - } - } -} - -class MKLDNNCPUExtMVNTests: public TestsCommon, public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - __SRC_DIMS__ - - - - - __SRC_DIMS__ - - - - - - - - __SRC_DIMS__ - - - - - __SRC_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - -)V0G0N"; - - std::string getModel(mvn_test_params p) { - std::string model = layers_t; - if (p.isBlockedFormat) - REPLACE_WITH_STR(model, "_FL_", "FakeLayerBLK"); - else - REPLACE_WITH_STR(model, "_FL_", "FakeLayerPLN"); - - std::string s_dims; - for (auto& dim : p.dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_AC_", p.across_channels); - REPLACE_WITH_NUM(model, "_NV_", p.normalize_variance); - REPLACE_WITH_NUM(model, "_EPS_", p.eps); - - model = IRTemplateGenerator::getIRTemplate("MVN_Only", p.dims, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - mvn_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - auto defaultExtensions = std::make_shared(); - extMgr->AddExtension(defaultExtensions); - extMgr->AddExtension(make_FakeExtensions()); - - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "mvn") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - SizeVector dims_src = p.dims; - - Layout layout = 
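ref_mvn above normalizes each batch (optionally across channels) in two passes: subtract the mean, then, if normalize_variance is set, scale by 1/sqrt(variance + eps). Collapsed to a single contiguous span, the computation is (standalone sketch):

    #include <cmath>
    #include <cstddef>

    // y_i = (x_i - mean) / sqrt(var + eps), with var the mean squared
    // deviation, matching the variance * C2inv + eps expression above.
    void mvnSpan(const float* src, float* dst, size_t n, float eps) {
        float mean = 0.f;
        for (size_t i = 0; i < n; i++) mean += src[i];
        mean /= static_cast<float>(n);
        float var = 0.f;
        for (size_t i = 0; i < n; i++) {
            const float d = src[i] - mean;
            var += d * d;
        }
        var /= static_cast<float>(n);
        const float scale = 1.f / std::sqrt(var + eps);
        for (size_t i = 0; i < n; i++)
            dst[i] = (src[i] - mean) * scale;
    }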
ANY; - switch (p.dims.size()) { - case 4: - layout = NCHW; - break; - case 5: - layout = NCDHW; - break; - } - - Blob::Ptr src = make_shared_blob({ Precision::FP32, dims_src, layout }); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_mvn(*srcPtr, dst_ref, p); - compare(*output, dst_ref, 0.0001f); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtMVNTests, TestsMVN) {} - -INSTANTIATE_TEST_CASE_P( - TestsMVN, MKLDNNCPUExtMVNTests, - ::testing::Values( - /*0*/ mvn_test_params{{2, 64, 15, 15}, 0, 0, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 0, 0, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 1, 0, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 1, 0, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 1, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 1, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 0, 0, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - /*9*/ mvn_test_params{{2, 2, 33, 65}, 0, 0, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 15, 15}, 1, 0, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 1, 0, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - /*14*/ mvn_test_params{{2,640, 15, 15}, 1, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 2, 33, 65}, 1, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - - // 5D - /*16*/ mvn_test_params{{2, 64, 24, 32, 40}, 0, 0, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 1, 0, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 1, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 0, 0, 0.00001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 0, 1, 0.00001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{2, 64, 24, 32, 40}, 1, 0, 0.00001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - /*23*/ mvn_test_params{{2, 64, 24, 32, 40}, 1, 1, 0.00001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown }, - mvn_test_params{{1, 
64, 32, 32, 32}, 0, 1, 0.001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown } - )); - -static std::string precToStr (Precision prec) { - return prec == Precision::U8 ? "U8" : prec == Precision::I8 ? "I8" : "FP32"; -} - -template -static void fill_int_data(data_t *data, int size, bool is_signed) { - for (int i = 0 ; i < size; i++) { - data[i] = i * 13 % 21 - 10 * is_signed; - } -} - -class FakeLayerImpl_MVN: public Cpu::ExtLayerBase, - public WithParamInterface { -public: - explicit FakeLayerImpl_MVN(const CNNLayer* layer) { - try { - is_blocked = layer->GetParamAsBool("is_blocked"); - addConfig(layer); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - } - } - - bool is_blocked; - - void addConfig(const CNNLayer* layer) { - LayerConfig config; - - // Fill tensor parameters into config - auto fill_port = [&] (std::vector& port, const DataPtr& data) { - auto div_up = [](const int a, const int b) -> int { - if (!b) - return 0; - return (a + b - 1) / b; - }; - if (!data) IE_THROW() << "Cannot get input data!"; - - DataConfig dataConfig; - dataConfig.inPlace = 0; - dataConfig.constant = false; - - const TensorDesc& data_desc = data->getTensorDesc(); - const SizeVector& data_dims = data_desc.getDims(); - - InferenceEngine::Precision precision = data_desc.getPrecision(); - Layout layout; - if (is_blocked) { - int blk_size = InferenceEngine::with_cpu_x86_avx512f() ? 16 : 8; - - std::vector blocks = data_dims; - std::vector order(blocks.size()); - for (size_t i = 0; i < order.size(); i++) order[i] = i; - - order.push_back(1); - blocks[1] = div_up(blocks[1], blk_size); - blocks.push_back(blk_size); - - dataConfig.desc = TensorDesc(precision, data_dims, {blocks, order}); - } else { - dataConfig.desc = TensorDesc(precision, data_dims, data_dims.size() == 5 ? 
NDHWC : NHWC); - } - - port.push_back(dataConfig); - }; - - fill_port(config.inConfs, layer->insData[0].lock()); - fill_port(config.outConfs, layer->outData[0]); - config.inConfs[0].desc.setPrecision(config.outConfs[0].desc.getPrecision()); - confs.push_back(config); - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, - ResponseDesc *resp) noexcept override { - return OK; - } -}; - -class MKLDNNCPUExtMVNTests_Blocked: public TestsCommon, public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - __SRC_DIMS__ - - - - - - - - __SRC_DIMS__ - - - - - __SRC_DIMS__ - - - - - - - - __SRC_DIMS__ - - - - - __SRC_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - - -)V0G0N"; - - std::string getModel(mvn_test_params p) { - std::string model = layers_t; - - std::string s_dims; - for (auto& dim : p.dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_AC_", p.across_channels); - REPLACE_WITH_NUM(model, "_NV_", p.normalize_variance); - REPLACE_WITH_NUM(model, "_EPS_", p.eps); - REPLACE_WITH_STR(model, "_PREC_IN_", precToStr(p.prec_in)); - REPLACE_WITH_STR(model, "_PREC_OUT_", precToStr(p.prec_out)); - REPLACE_WITH_NUM(model, "_IS_BLOCKED_", p.isBlockedFormat); - - model = IRTemplateGenerator::getIRTemplate("MVN_Only", p.dims, "FP32", model, edges_t, 7); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - mvn_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - auto manager = std::make_shared(); - { - auto defaultExt = std::make_shared(); - defaultExt->layersFactory.registerNodeIfRequired(MVNTests, FakeLayer_MVN, "FakeLayer_MVN", Cpu::ImplFactory); - manager->AddExtension(defaultExt); - } - graph.CreateGraph(network, manager); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "mvn") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - SizeVector dims_src = p.dims; - - Layout layout = ANY; - switch (p.dims.size()) { - case 4: - layout = NCHW; - break; - case 5: - layout = NCDHW; - break; - } - - Blob::Ptr src = make_shared_blob({ Precision::FP32, dims_src, layout }); - src->allocate(); - if (p.prec_in == Precision::U8) { - fill_int_data(src->buffer().as(), src->size(), false); - } else if (p.prec_in == Precision::I8) { - fill_int_data(src->buffer().as(), src->size(), true); - } else { - fill_data(src->buffer(), src->size()); - } - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - 
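The blocked branch of FakeLayerImpl_MVN::addConfig above builds a channel-blocked (nChw8c/nChw16c style) descriptor: the channel count is divided up by the block size and the block becomes an extra innermost dimension that re-indexes axis 1. A standalone sketch of just that dims/order transformation:

    #include <cstddef>
    #include <vector>

    // {N, C, H, W} with blk = 8 becomes dims {N, ceil(C/8), H, W, 8} and
    // order {0, 1, 2, 3, 1}; the trailing 1 marks the innermost dimension
    // as a split of the channel axis.
    void makeChannelBlocked(std::vector<size_t>& dims,
                            std::vector<size_t>& order, size_t blk) {
        order.resize(dims.size());
        for (size_t i = 0; i < order.size(); i++)
            order[i] = i;
        order.push_back(1);                   // inner block indexes axis 1 too
        dims[1] = (dims[1] + blk - 1) / blk;  // div_up, as in the code above
        dims.push_back(blk);
    }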
outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_mvn(*srcPtr, dst_ref, p); - compare(*output, dst_ref, 0.0001f); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtMVNTests_Blocked, TestsMVN) {} - -INSTANTIATE_TEST_CASE_P( - TestsMVN, MKLDNNCPUExtMVNTests_Blocked, - ::testing::Values( - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - /*4*/ // mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - /*7*/ // mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - /*13*/ // mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - /*16*/ // mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - mvn_test_params{{2, 64, 8, 8, 8}, 0, 1, 0.00001f, 3, false, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - - mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 3, true, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - 
mvn_test_params{{2, 64, 24, 32, 40}, 0, 1, 0.00001f, 3, true, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/non_max_suppression_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/non_max_suppression_tests.cpp deleted file mode 100644 index 1b7972eff63ca0..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/non_max_suppression_tests.cpp +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct nmsTF_test_params { - int center_point_box; - int sort_result_descending; - InferenceEngine::SizeVector scoresDim; - std::vector boxes; - std::vector scores; - std::vector max_output_boxes_per_class; - std::vector iou_threshold; - std::vector score_threshold; - - int num_selected_indices; - std::vector ref; - - std::vector> comp; -}; - -static float intersectionOverUnion(float* boxesI, float* boxesJ, bool center_point_box) { - float yminI, xminI, ymaxI, xmaxI, yminJ, xminJ, ymaxJ, xmaxJ; - if (center_point_box) { - // box format: x_center, y_center, width, height - yminI = boxesI[1] - boxesI[3] / 2.f; - xminI = boxesI[0] - boxesI[2] / 2.f; - ymaxI = boxesI[1] + boxesI[3] / 2.f; - xmaxI = boxesI[0] + boxesI[2] / 2.f; - yminJ = boxesJ[1] - boxesJ[3] / 2.f; - xminJ = boxesJ[0] - boxesJ[2] / 2.f; - ymaxJ = boxesJ[1] + boxesJ[3] / 2.f; - xmaxJ = boxesJ[0] + boxesJ[2] / 2.f; - } else { - // box format: y1, x1, y2, x2 - yminI = (std::min)(boxesI[0], boxesI[2]); - xminI = (std::min)(boxesI[1], boxesI[3]); - ymaxI = (std::max)(boxesI[0], boxesI[2]); - xmaxI = (std::max)(boxesI[1], boxesI[3]); - yminJ = (std::min)(boxesJ[0], boxesJ[2]); - xminJ = (std::min)(boxesJ[1], boxesJ[3]); - ymaxJ = (std::max)(boxesJ[0], boxesJ[2]); - xmaxJ = (std::max)(boxesJ[1], boxesJ[3]); - } - - float areaI = (ymaxI - yminI) * (xmaxI - xminI); - float areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ); - if (areaI <= 0.f || areaJ <= 0.f) - return 0.f; - - float intersection_area = - (std::max)((std::min)(ymaxI, ymaxJ) - (std::max)(yminI, yminJ), 0.f) * - (std::max)((std::min)(xmaxI, xmaxJ) - (std::max)(xminI, xminJ), 0.f); - return intersection_area / (areaI + areaJ - intersection_area); -} - -typedef struct { - float score; - int batch_index; - int class_index; - int box_index; -} filteredBoxes; - -static void ref_nms( - InferenceEngine::TBlob &srcBoxes, - InferenceEngine::TBlob &srcScores, - InferenceEngine::TBlob &selected_idxs, - nmsTF_test_params p -) { - float *boxes = srcBoxes.data(); - float *scores = srcScores.data(); - - InferenceEngine::SizeVector scores_dims = srcScores.getTensorDesc().getDims(); - int num_boxes = static_cast(scores_dims[2]); - int max_output_boxes_per_class = num_boxes; - if (p.max_output_boxes_per_class.size()) - max_output_boxes_per_class = (std::min)(max_output_boxes_per_class, p.max_output_boxes_per_class[0]); - - float iou_threshold = 1.f; // Value range [0, 1] - if (p.iou_threshold.size()) - iou_threshold = (std::min)(iou_threshold, p.iou_threshold[0]); - - float score_threshold = 0.f; - if (p.score_threshold.size()) - score_threshold = p.score_threshold[0]; - - int* selected_indices = selected_idxs.data(); - InferenceEngine::SizeVector 
selected_indices_dims = selected_idxs.getTensorDesc().getDims(); - - InferenceEngine::SizeVector boxesStrides = srcBoxes.getTensorDesc().getBlockingDesc().getStrides(); - InferenceEngine::SizeVector scoresStrides = srcScores.getTensorDesc().getBlockingDesc().getStrides(); - - // boxes shape: {num_batches, num_boxes, 4} - // scores shape: {num_batches, num_classes, num_boxes} - int num_batches = static_cast(scores_dims[0]); - int num_classes = static_cast(scores_dims[1]); - std::vector fb; - - for (int batch = 0; batch < num_batches; batch++) { - float *boxesPtr = boxes + batch * boxesStrides[0]; - for (int class_idx = 0; class_idx < num_classes; class_idx++) { - float *scoresPtr = scores + batch * scoresStrides[0] + class_idx * scoresStrides[1]; - std::vector > scores_vector; - for (int box_idx = 0; box_idx < num_boxes; box_idx++) { - if (scoresPtr[box_idx] > score_threshold) - scores_vector.push_back(std::make_pair(scoresPtr[box_idx], box_idx)); - } - - if (scores_vector.size()) { - std::sort(scores_vector.begin(), scores_vector.end(), - [](const std::pair& l, const std::pair& r) { return l.first > r.first; }); - - int io_selection_size = 1; - fb.push_back({ scores_vector[0].first, batch, class_idx, scores_vector[0].second }); - for (int box_idx = 1; (box_idx < static_cast(scores_vector.size()) && io_selection_size < max_output_boxes_per_class); box_idx++) { - bool box_is_selected = true; - for (int idx = io_selection_size - 1; idx >= 0; idx--) { - float iou = intersectionOverUnion(&boxesPtr[scores_vector[box_idx].second * 4], - &boxesPtr[scores_vector[idx].second * 4], (p.center_point_box == 1)); - if (iou > iou_threshold) { - box_is_selected = false; - break; - } - } - - if (box_is_selected) { - scores_vector[io_selection_size] = scores_vector[box_idx]; - io_selection_size++; - fb.push_back({ scores_vector[box_idx].first, batch, class_idx, scores_vector[box_idx].second }); - } - } - } - } - } - - if(p.sort_result_descending) - std::sort(fb.begin(), fb.end(), [](const filteredBoxes& l, const filteredBoxes& r) { return l.score > r.score; }); - int selected_indicesStride = selected_idxs.getTensorDesc().getBlockingDesc().getStrides()[0]; - int* selected_indicesPtr = selected_indices; - size_t idx; - for (idx = 0; idx < (std::min)(selected_indices_dims[0], fb.size()); idx++) { - selected_indicesPtr[0] = fb[idx].batch_index; - selected_indicesPtr[1] = fb[idx].class_index; - selected_indicesPtr[2] = fb[idx].box_index; - selected_indicesPtr += selected_indicesStride; - } - for (; idx < selected_indices_dims[0]; idx++) { - selected_indicesPtr[0] = -1; - selected_indicesPtr[1] = -1; - selected_indicesPtr[2] = -1; - selected_indicesPtr += selected_indicesStride; - } -} - -class MKLDNNCPUExtNonMaxSuppressionTFTests : public TestsCommon, public WithParamInterface { - std::string model_t2 = R"V0G0N( - - - - - - _IBOXES_ - - - - - - - _ISCORES_ - - - - - - - - _IBOXES_ - - - _ISCORES_ - - - - - _IOUT_ - - - - - - - - - -)V0G0N"; - - std::string model_t3 = R"V0G0N( - - - - - - _IBOXES_ - - - - - - - _ISCORES_ - - - - - - - - - - - - - _IBOXES_ - - - _ISCORES_ - - - - - - _IOUT_ - - - - - - - - - - -)V0G0N"; - std::string model_t4 = R"V0G0N( - - - - - - _IBOXES_ - - - - - - - _ISCORES_ - - - - - - - - - - - - - - - - - - _IBOXES_ - - - _ISCORES_ - - - - - - - _IOUT_ - - - - - - - - - - - -)V0G0N"; - - std::string model_t5 = R"V0G0N( - - - - - - _IBOXES_ - - - - - - - _ISCORES_ - - - - - - - - - - - - - - - - - - - - - - - _IBOXES_ - - - _ISCORES_ - - - - - - - - _IOUT_ - - - - - - - - - - - - -)V0G0N"; 
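    // To make the expected output easier to follow, here is a hand trace of ref_nms
    // on the shared `boxes`/`scores` vectors defined further down (derived from the
    // test data; each output row is {batch_index, class_index, box_index}):
    //   scores sorted descending: box 3 (0.95), box 0 (0.9), box 1 (0.75),
    //                             box 2 (0.6),  box 4 (0.5), box 5 (0.3)
    //   box 3 is kept first; box 0 has no overlap with it (IoU 0) and is kept;
    //   boxes 1 and 2 overlap box 0 with IoU ~0.82 > 0.5 and are suppressed;
    //   box 4 overlaps box 3 with IoU ~0.82 and is suppressed; box 5 overlaps
    //   nothing kept and survives. With max_output_boxes_per_class = 3 this gives
    //   {0,0,3, 0,0,0, 0,0,5} -- exactly the `reference` vector reused by most
    //   cases in INSTANTIATE_TEST_CASE_P below.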
- - std::string getModel(nmsTF_test_params p) { - std::string model; - if (!p.max_output_boxes_per_class.size()) - model = model_t2; - else if (!p.iou_threshold.size()) - model = model_t3; - else if (!p.score_threshold.size()) - model = model_t4; - else - model = model_t5; - - std::string inBoxes; - std::string inScores; - std::string out; - - inBoxes += "" + std::to_string(p.scoresDim[0]) + "\n"; - inBoxes += "" + std::to_string(p.scoresDim[2]) + "\n"; - inBoxes += "4"; - - - for (auto& scr : p.scoresDim) { - inScores += ""; - inScores += std::to_string(scr) + "\n"; - } - - out += "" + std::to_string(p.num_selected_indices) + "\n"; - out += "3"; - - REPLACE_WITH_STR(model, "_IBOXES_", inBoxes); - REPLACE_WITH_STR(model, "_ISCORES_", inScores); - REPLACE_WITH_STR(model, "_IOUT_", out); - REPLACE_WITH_NUM(model, "_CPB_", p.center_point_box); - REPLACE_WITH_NUM(model, "_SRD_", p.sort_result_descending); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - nmsTF_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - //std::cout << model << std::endl; - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input - InferenceEngine::BlobMap srcs; - - // Input Boxes - InferenceEngine::SizeVector boxesDim = {p.scoresDim[0], p.scoresDim[2], 4}; - InferenceEngine::Blob::Ptr srcBoxes = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, boxesDim, InferenceEngine::TensorDesc::getLayoutByDims(boxesDim) }); - srcBoxes->allocate(); - for (size_t i = 0; i < p.boxes.size(); i++) { - static_cast(srcBoxes->buffer())[i] = static_cast(p.boxes[i]); - } - //memcpy(srcBoxes->buffer(), &p.boxes[0], sizeof(float)*boxes.size()); - auto * srcBoxesPtr = dynamic_cast*>(srcBoxes.get()); - if (srcBoxesPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - srcs.insert(std::pair("InputBoxes", srcBoxes)); - - // Input Scores - InferenceEngine::Blob::Ptr srcScores = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.scoresDim, InferenceEngine::TensorDesc::getLayoutByDims(p.scoresDim) }); - srcScores->allocate(); - for (size_t i = 0; i < p.scores.size(); i++) { - static_cast(srcScores->buffer())[i] = static_cast(p.scores[i]); - } - auto * srcScoresPtr = dynamic_cast*>(srcScores.get()); - if (srcScoresPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - srcs.insert(std::pair("InputScores", srcScores)); - - // Input BoxesPerClass - InferenceEngine::Blob::Ptr srcBoxesPerClass; - InferenceEngine::Blob::Ptr srcIouThr; - InferenceEngine::Blob::Ptr srcScoreThr; - if (p.max_output_boxes_per_class.size()) { - srcBoxesPerClass = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, {}, InferenceEngine::TensorDesc::getLayoutByDims({}) }); - srcBoxesPerClass->allocate(); - memcpy(static_cast(srcBoxesPerClass->buffer()), &p.max_output_boxes_per_class[0], sizeof(int32_t)); - auto * srcBoxesPerClassPtr = dynamic_cast*>(srcBoxesPerClass.get()); - if (srcBoxesPerClassPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - srcs.insert(std::pair("InputBoxesPerClass", srcBoxesPerClass)); - } - - // Input IouThr - if (p.iou_threshold.size()) { - srcIouThr = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {}, InferenceEngine::TensorDesc::getLayoutByDims({}) }); - 
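            // Reading aid (inferred from getModel above, not part of the original
            // test): the four IR templates model_t2..model_t5 differ only in how many
            // of the optional NMS inputs are wired -- boxes and scores always, then
            // max_output_boxes_per_class, iou_threshold, and score_threshold in turn.
            // Each optional input is fed as a 0-D blob: empty dims {} select the
            // scalar layout via getLayoutByDims({}), and a single element is
            // memcpy'd into the allocated buffer, as for srcBoxesPerClass above and
            // srcIouThr here.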
srcIouThr->allocate(); - memcpy(static_cast(srcIouThr->buffer()), &p.iou_threshold[0], sizeof(float)); - auto * srcIouThrPtr = dynamic_cast*>(srcIouThr.get()); - if (srcIouThrPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - srcs.insert(std::pair("InputIouThr", srcIouThr)); - } - - // Input ScoreThr - if (p.score_threshold.size()) { - srcScoreThr = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {}, InferenceEngine::TensorDesc::getLayoutByDims({}) }); - srcScoreThr->allocate(); - memcpy(static_cast(srcScoreThr->buffer()), &p.score_threshold[0], sizeof(float)); - auto * srcScoreThrPtr = dynamic_cast*>(srcScoreThr.get()); - if (srcScoreThrPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - srcs.insert(std::pair("InputScoreThr", srcScoreThr)); - } - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Infer - graph.Infer(srcs, outputBlobs); - - // Output Reference - if (!p.ref.size()) { - InferenceEngine::TBlob selected_indices_ref(item.second->getTensorDesc()); - selected_indices_ref.allocate(); - ref_nms(*srcBoxesPtr, *srcScoresPtr, selected_indices_ref, p); - compare(*output, selected_indices_ref); - } else { - // Check results - if (p.ref.size() != output->size()) - FAIL() << "Wrong result vector size!"; - if (memcmp((*output).data(), &p.ref[0], output->byteSize()) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtNonMaxSuppressionTFTests, TestsNonMaxSuppression) {} - -static std::vector boxes = { 0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0 }; -static std::vector scores = { 0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f }; -static std::vector reference = { 0,0,3,0,0,0,0,0,5 }; - -INSTANTIATE_TEST_CASE_P( - TestsNonMaxSuppression, MKLDNNCPUExtNonMaxSuppressionTFTests, - ::testing::Values( -// Params: center_point_box, sort_result_descending, scoresDim, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, num_selected_indices, ref - - nmsTF_test_params{ 1, 1, {1,1,6}, { 0.5f, 0.5f, 1.0f, 1.0f,0.5f, 0.6f, 1.0f, 1.0f,0.5f, 0.4f, 1.0f, 1.0f,0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.6f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f }, - scores,{ 3 },{ 0.5f },{ 0.f }, 3, reference }, /*nonmaxsuppression_center_point_box_format*/ - - nmsTF_test_params{ 0, 1, {1,1,6}, { 1.0, 1.0, 0.0, 0.0, 0.0, 0.1, 1.0, 1.1, 0.0, 0.9, 1.0, -0.1, 0.0, 10.0, 1.0, 11.0, 1.0, 10.1, 0.0, 11.1, 1.0, 101.0, 0.0, 100.0 }, - scores,{ 3 },{ 0.5 },{ 0.0 }, 3, reference }, /*nonmaxsuppression_flipped_coordinates*/ - - nmsTF_test_params{ 0, 1, { 1,1,10 },{ 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, - 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0 }, - { 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9 },{ 3 },{ 0.5 },{ 0.0 }, 1,{ 0,0,0 } }, /*nonmaxsuppression_identical_boxes*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores,{ 2 },{ 0.5 },{ 0.0 }, 2,{ 0,0,3,0,0,0 } }, /*nonmaxsuppression_limit_output_size*/ - - nmsTF_test_params{ 0, 1,{ 1,1,1 },{ 0.0, 0.0, 1.0, 1.0 }, { 0.9 },{ 3 },{ 0.5 },{ 
0.0 }, 1, { 0,0,0 } }, /*nonmaxsuppression_single_box*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores, { 3 }, { 0.5 }, { 0.0 }, 3, reference }, /*nonmaxsuppression_suppress_by_IOU*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores, { 3 }, { 0.5 }, { 0.4 }, 2, { 0,0,3,0,0,0 } }, /*nonmaxsuppression_suppress_by_IOU_and_scores*/ - - nmsTF_test_params{ 0, 0, { 2,1,6 },{ 0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0, - 0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0 }, - { 0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3 },{ 2 },{ 0.5 },{ 0.0 }, 4,{ 0,0,3,0,0,0,1,0,3,1,0,0 } }, /*nonmaxsuppression_two_batches*/ - - nmsTF_test_params{ 0, 1, { 2,1,6 },{ 0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0, - 0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0 }, - { 0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3 },{ 2 },{ 0.5 },{ 0.0 }, 4,{ 0,0,3,1,0,3,0,0,0,1,0,0 } }, /*nonmaxsuppression_two_batches*/ - - nmsTF_test_params{ 0, 0, { 1,2,6 }, boxes, - { 0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3 },{ 2 },{ 0.5 },{ 0.0 }, 4,{ 0,0,3,0,0,0,0,1,3,0,1,0 } }, /*nonmaxsuppression_two_classes*/ - - nmsTF_test_params{ 0, 1, { 1,2,6 }, boxes, - { 0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3 },{ 2 },{ 0.5 },{ 0.0 }, 4,{ 0,0,3,0,1,3,0,0,0,0,1,0 } }, /*nonmaxsuppression_two_classes*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores, { 3 }, { 0.5 }, {}, 3, reference }, /*nonmaxsuppression_no_score_threshold*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores, { 3 }, {}, {}, 3, { 0,0,3,0,0,0,0,0,1 } }, /*nonmaxsuppression_no_iou_threshold_and_score_threshold*/ - - nmsTF_test_params{ 0, 1, { 1,1,6 }, boxes, scores, {}, {}, {}, 3, {} } /*nonmaxsuppression_no_max_output_boxes_per_class_and_iou_threshold_and_score_threshold*/ -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp deleted file mode 100644 index 8ad5d5bbd89070..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "common_test_utils/data_utils.hpp" -#include "ir_gen_helper.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace single_layer_tests; - -using namespace Extensions; -using namespace ::Cpu; - -namespace { - -OV_CC_DOMAINS(NormalizeTests); - -} // namespace - -struct normalize_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - int across_spatial; - int channel_shared; - float eps; - bool isBlockedFormat; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - Precision prec_in; - Precision prec_out; - - std::vector> comp; -}; - -extern InferenceEngine::IExtensionPtr make_FakeExtensions(); - -template -void ref_normalize(const InferenceEngine::TBlob &src, 
InferenceEngine::TBlob &dst, normalize_test_params prm, const float *weights) { - int B = static_cast(src.getTensorDesc().getDims()[0]); - int C = static_cast(src.getTensorDesc().getDims()[1]); - int H = static_cast(src.getTensorDesc().getDims()[2]); - int W = static_cast(src.getTensorDesc().getDims()[3]); - - float eps = prm.eps; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int b = 0; b < B; b++) { - const data_t *src_data_b = src_data + b * C * H * W; - data_t *dst_data_b = dst_data + b * C * H * W; - if (prm.across_spatial) { - float sqrt_sum = 0.f; - for (int i = 0; i < H * W * C; i++) { - sqrt_sum += (src_data_b[i] * src_data_b[i]); - } - - sqrt_sum = std::sqrt(sqrt_sum) + eps; - - for (int c = 0; c < C; c++) { - float s = prm.channel_shared ? weights[0] : weights[c]; - for (int hw = 0; hw < H * W; hw++) { - float dst_value = (src_data_b[c * H * W + hw] / sqrt_sum) * s; - if (prm.prec_out == Precision::FP32) { - dst_data_b[c * H * W + hw] = dst_value; - } else if (prm.prec_out == Precision::U8) { - dst_data_b[c * H * W + hw] = (dst_value > 0) ? roundf(dst_value) : 0; - } else if (prm.prec_out == Precision::I8) { - dst_data_b[c * H * W + hw] = roundf(dst_value); - } - } - } - } else { - for(int i = 0; i 0) ? roundf(dst_value) : 0; - } else if (prm.prec_out == Precision::I8) { - dst_data_b_c[offset] = roundf(dst_value); - } - } - } - } - } -} - -class MKLDNNCPUExtNormalizeTests: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - -)V0G0N"; - - std::string getModel(normalize_test_params p) { - std::string model = model_t; - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_AS_", p.across_spatial); - REPLACE_WITH_NUM(model, "_CS_", p.channel_shared); - - REPLACE_WITH_NUM(model, "_WS_", p.in.c*sizeof(float)); - REPLACE_WITH_NUM(model, "_EPS_", p.eps); - - if (p.isBlockedFormat) - REPLACE_WITH_STR(model, "_FL_", "FakeLayerBLK"); - else - REPLACE_WITH_STR(model, "_FL_", "FakeLayerPLN"); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - normalize_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager()); - auto defaultExtensions = std::make_shared(); - extMgr->AddExtension(defaultExtensions); - extMgr->AddExtension(make_FakeExtensions()); - - size_t weightSize = p.in.c*sizeof(float); - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {weightSize}, InferenceEngine::C }); - weights->allocate(); - float center = 0; - float ampl = 100; - float omega = 0.5; - CommonTestUtils::fill_data_sine( weights->data().as(), weights->size() / sizeof(float), center, ampl, omega); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network, extMgr); - - auto& nodes = graph.getNodes(); - nodes = 
graph.getNodes(); - for (auto &node : nodes) { - if (node->getName() == "normalize") { - ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - ASSERT_LE(3, nodes.size()); - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_normalize(*srcPtr, dst_ref, p, weights->readOnly().as()); - compare(*output, dst_ref); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtNormalizeTests, TestsNormalize) {} - -INSTANTIATE_TEST_CASE_P( - TestsNormalize, MKLDNNCPUExtNormalizeTests, - ::testing::Values( - normalize_test_params{{1, 22, 129, 323}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 22, 129, 323}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{5, 1, 128, 256}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{5, 1, 128, 256}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 2, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 2, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 35, 101, 127}, true, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 35, 101, 127}, true, false, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 128, 320, 320}, false, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, 
Precision::FP32 }, - normalize_test_params{{1, 22, 129, 323}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 22, 129, 323}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{5, 1, 128, 256}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{5, 1, 128, 256}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 2, 129, 323}, true, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 2, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 1, 21, 21}, true, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 35, 101, 127}, true, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 35, 101, 127}, true, false, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{1, 128, 320, 320}, false, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 } - )); - -static std::string precToStr (Precision prec) { - return prec == Precision::U8 ? "U8" : prec == Precision::I8 ? "I8" : "FP32"; -} - -template -static void fill_int_data(data_t *data, int size, bool is_signed) { - for (int i = 0 ; i < size; i++) { - data[i] = i * 13 % 21 - 10 * is_signed; - } -} - -class FakeLayerImpl_Normalize: public Cpu::ExtLayerBase, - public WithParamInterface { -public: - explicit FakeLayerImpl_Normalize(const CNNLayer* layer) { - try { - is_blocked = layer->GetParamAsBool("is_blocked"); - addConfig(layer); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - } - } - - bool is_blocked; - - void addConfig(const CNNLayer* layer) { - LayerConfig config; - - // Fill tensor parameters into config - auto fill_port = [&] (std::vector& port, const DataPtr& data) { - auto div_up = [](const int a, const int b) -> int { - if (!b) - return 0; - return (a + b - 1) / b; - }; - if (!data) IE_THROW() << "Cannot get input data!"; - - DataConfig dataConfig; - dataConfig.inPlace = 0; - dataConfig.constant = false; - - const TensorDesc& data_desc = data->getTensorDesc(); - const SizeVector& data_dims = data_desc.getDims(); - - InferenceEngine::Precision precision = data_desc.getPrecision(); - if (is_blocked) { - int blk_size = InferenceEngine::with_cpu_x86_avx512f() ? 16 : 8; - - std::vector blocks = data_dims; - std::vector order(blocks.size()); - for (size_t i = 0; i < order.size(); i++) order[i] = i; - - order.push_back(1); - blocks[1] = div_up(blocks[1], blk_size); - blocks.push_back(blk_size); - - dataConfig.desc = TensorDesc(precision, data_dims, {blocks, order}); - } else { - dataConfig.desc = TensorDesc(precision, data_dims, data_dims.size() == 5 ? 
NDHWC : NHWC); - } - - port.push_back(dataConfig); - }; - - fill_port(config.inConfs, layer->insData[0].lock()); - fill_port(config.outConfs, layer->outData[0]); - config.inConfs[0].desc.setPrecision(config.outConfs[0].desc.getPrecision()); - confs.push_back(config); - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, - ResponseDesc *resp) noexcept override { - return OK; - } -}; - -class MKLDNNCPUExtNormalizeTests_Blocked: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - - -)V0G0N"; - - std::string getModel(normalize_test_params p) { - std::string model = model_t; - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_AS_", p.across_spatial); - REPLACE_WITH_NUM(model, "_CS_", p.channel_shared); - - REPLACE_WITH_NUM(model, "_WS_", p.in.c*sizeof(float)); - REPLACE_WITH_NUM(model, "_EPS_", p.eps); - REPLACE_WITH_STR(model, "_PREC_IN_", precToStr(p.prec_in)); - REPLACE_WITH_STR(model, "_PREC_OUT_", precToStr(p.prec_out)); - REPLACE_WITH_NUM(model, "_IS_BLOCKED_", p.isBlockedFormat); - - model = IRTemplateGenerator::getIRTemplate("Normalize_Only", {p.in.n, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t, 7); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - normalize_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - - size_t weightSize = p.in.c*sizeof(float); - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {weightSize}, InferenceEngine::C }); - weights->allocate(); - float center = 0; - float ampl = 100; - float omega = 0.5; - CommonTestUtils::fill_data_sine( weights->data().as(), weights->size() / sizeof(float), center, ampl, omega); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - auto manager = std::make_shared(); - { - auto defaultExt = std::make_shared(); - defaultExt->layersFactory.registerNodeIfRequired(NormalizeTests, FakeLayer_Normalize, "FakeLayer_Normalize", Cpu::ImplFactory); - manager->AddExtension(defaultExt); - } - graph.CreateGraph(network, manager); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getName() == "normalize") { - ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, NCHW}); - src->allocate(); - if (p.prec_in == Precision::U8) { - 
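                // fill_int_data (defined above) writes the deterministic pattern
                // i * 13 % 21 - 10 * is_signed: values in [0, 20] for the unsigned
                // case and [-10, 10] for the signed one. These small, repeatable
                // integers fit U8/I8 exactly, so compare() against the FP32
                // reference is not perturbed by quantization of the inputs.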
fill_int_data(src->buffer().as(), src->size(), false); - } else if (p.prec_in == Precision::I8) { - fill_int_data(src->buffer().as(), src->size(), true); - } else { - fill_data(src->buffer(), src->size()); - } - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_normalize(*srcPtr, dst_ref, p, weights->readOnly().as()); - compare(*output, dst_ref); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtNormalizeTests_Blocked, TestsNormalize) {} - -INSTANTIATE_TEST_CASE_P( - TestsNormalize, MKLDNNCPUExtNormalizeTests_Blocked, - ::testing::Values( - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, 
Precision::I8, Precision::I8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, 
MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 }, - - normalize_test_params{{2, 33, 129, 323}, true, true, 0.0001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }, - normalize_test_params{{2, 67, 77, 78}, false, false, 0.0001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 } - )); \ No newline at end of file diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/onehot_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/onehot_tests.cpp deleted file mode 100644 index aec442364a7ea4..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/onehot_tests.cpp +++ /dev/null @@ -1,854 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include "single_layer_common.hpp" -#include - -using namespace ::testing; -using namespace InferenceEngine; - -struct one_hot_base_params { - struct { size_t n, c, h, w; } in; - struct { size_t d, n, c, h, w; } out; - int axis; - unsigned int depth; - float on, off; -}; - -struct one_hot_test_params : one_hot_base_params { - std::string device_name; - - one_hot_test_params(std::string name, one_hot_base_params params) : - one_hot_base_params(params), device_name(name) {} -}; - -class OneHotOnly1dTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - - - - - - - - - - 1 - - - - - _OW_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } - void ref_one_hot_1d(InferenceEngine::Blob &src, InferenceEngine::Blob &dst, one_hot_test_params p) - { - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - for (int ow = 0; ow < p.out.w; ow++) { - std::size_t src_offset = 0; - std::size_t dst_offset = ow; - - int hot_axis = -1; - if (p.axis == -1) { - hot_axis = ow; - src_offset = 0; - } else if (p.axis == 0) { - hot_axis = ow; - src_offset = 0; - } - int v = src_ptr[src_offset]; - - dst_ptr[dst_offset] = (v == hot_axis) ? 
p.on : p.off; - } - } - -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - one_hot_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - try { - network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - } catch (InferenceEngine::Exception &e) { - FAIL() << e.what(); - } catch (std::exception &e) { - FAIL() << e.what(); - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - SizeVector dims_src = {}; - TBlob src({Precision::FP32, dims_src, Layout::SCALAR}); - src.allocate(); - float * s = src.buffer().as(); - s[0] = 2; - - ref_one_hot_1d(src, dst_ref, p); - - InferenceEngine::Blob::Ptr pSrc = make_shared_blob(src); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", pSrc)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - - - -class OneHotOnly2dTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IW_ - - - - - - - - - - _IW_ - - - - - _OH_ - _OW_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } - void ref_one_hot_2d(InferenceEngine::Blob &src, InferenceEngine::Blob &dst, one_hot_test_params p) - { - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - for (int oh = 0; oh < p.out.h; oh++) { - for (int ow = 0; ow < p.out.w; ow++) { - std::size_t src_offset = 0; - - std::size_t dst_offset = ow + p.out.w * oh; - - int hot_axis = -1; - if (p.axis == -1) { - hot_axis = ow; - src_offset = oh; - } else if (p.axis == 0) { - hot_axis = oh; - src_offset = ow; - } else if (p.axis == 1) { - hot_axis = ow; - src_offset = oh; - } - int v = src_ptr[src_offset]; - - dst_ptr[dst_offset] = (v == hot_axis) ? 
p.on : p.off; - } - } - } - -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - one_hot_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - try { - network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - } catch (InferenceEngine::Exception &e) { - FAIL() << e.what(); - } catch (std::exception &e) { - FAIL() << e.what(); - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - SizeVector dims_src = {p.in.w}; - TBlob src({Precision::FP32, dims_src, Layout::C}); - src.allocate(); - float * s = src.buffer().as(); - for (int i = 0; i < src.size(); ++i) - s[i] = -1; - s[0] = 3; - s[2] = 2; - - // Check results - InferenceEngine::SizeVector out_dims = {p.out.w, p.out.h}; - ref_one_hot_2d(src, dst_ref, p); - - InferenceEngine::Blob::Ptr pSrc = make_shared_blob(src); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", pSrc)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - - -class OneHotOnly3dTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IH_ - _IW_ - - - - - - - - - - _IH_ - _IW_ - - - - - _OC_ - _OH_ - _OW_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - REPLACE_WITH_NUM(model, "_ON_VALUE_", p.on); - REPLACE_WITH_NUM(model, "_OFF_VALUE_", p.off); - - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } - void ref_one_hot_3d(InferenceEngine::Blob &src, InferenceEngine::Blob &dst, one_hot_test_params p) - { - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - for (int oc = 0; oc < p.out.c; oc++) { - for (int oh = 0; oh < p.out.h; oh++) { - for (int ow = 0; ow < p.out.w; ow++) { - std::size_t src_offset = 0; - - std::size_t dst_offset = ow + p.out.w * oh + p.out.w * p.out.h * oc; - - int hot_axis = -1; - if (p.axis == -1) { - hot_axis = ow; - src_offset = oh + p.in.w * oc; - } else if (p.axis == 0) { - hot_axis = oc; - src_offset = ow + p.in.w * oh; - } else if (p.axis == 1) { - hot_axis = oh; - src_offset = ow + p.in.w * oc; - } - int v = src_ptr[src_offset]; - - dst_ptr[dst_offset] = (v == hot_axis) ? 
p.on : p.off; - } - } - } - } - -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - one_hot_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - try { - network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - } catch (InferenceEngine::Exception &e) { - FAIL() << e.what(); - } catch (std::exception &e) { - FAIL() << e.what(); - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - SizeVector dims_src = {p.in.h, p.in.w}; - TBlob src({Precision::FP32, dims_src, Layout::HW}); - src.allocate(); - float * s = src.buffer().as(); - for (int i = 0; i < src.size(); ++i) - s[i] = -1; - s[0] = 3; - s[4] = 2; - - // Check results - InferenceEngine::SizeVector out_dims = {p.out.w, p.out.h, p.out.c}; - ref_one_hot_3d(src, dst_ref, p); - - InferenceEngine::Blob::Ptr pSrc = make_shared_blob(src); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", pSrc)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -class OneHotOnly4dTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IC_ - _IH_ - _IW_ - - - - - - - - - - _IC_ - _IH_ - _IW_ - - - - - _ON_ - _OC_ - _OH_ - _OW_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - - REPLACE_WITH_NUM(model, "_ON_", p.out.n); - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } -void ref_one_hot_4d(InferenceEngine::Blob &src, InferenceEngine::Blob &dst, one_hot_test_params p) -{ - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - for (int ob = 0; ob < p.out.n; ob++) { - for (int oc = 0; oc < p.out.c; oc++) { - for (int oh = 0; oh < p.out.h; oh++) { - for (int ow = 0; ow < p.out.w; ow++) { - std::size_t src_offset = 0; - - std::size_t dst_offset = ow + p.out.w * oh + p.out.w * p.out.h * oc + p.out.w * p.out.h * p.out.c * ob; - - int hot_axis = -1; - if (p.axis == -1) { - hot_axis = ow; - src_offset = oh + p.in.w * oc + p.in.w * p.in.h * ob; - } else if (p.axis == 0) { - hot_axis = ob; - src_offset = ow + p.in.w * oh + p.in.w * p.in.h * oc; - } else if (p.axis == 1) { - hot_axis = oc; - src_offset = ow + p.in.w * oh + p.in.w * p.in.h * ob; - } else if (p.axis == 2) { - hot_axis = oh; - src_offset = ow + p.in.w * oc + p.in.w * p.in.h * ob; - } - int v = src_ptr[src_offset]; - - dst_ptr[dst_offset] = (v == hot_axis) ? 
p.on : p.off; - } - } - } - } -} -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - one_hot_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - try { - network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - } catch (InferenceEngine::Exception &e) { - FAIL() << e.what(); - } catch (std::exception &e) { - FAIL() << e.what(); - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - SizeVector dims_src = {p.in.c, p.in.h, p.in.w}; - - TBlob src({Precision::FP32, dims_src, Layout::CHW}); - src.allocate(); - - float * s = src.buffer().as(); - for (int i = 0; i < src.size(); ++i) - s[i] = -1; - s[0] = 3; - s[4] = 2; - - // Check results - InferenceEngine::SizeVector out_dims = {p.out.w, p.out.h, p.out.c, p.out.n}; - ref_one_hot_4d(src, dst_ref, p); - - InferenceEngine::Blob::Ptr pSrc = make_shared_blob(src); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", pSrc)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - - -class OneHotOnly5dTest: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _ON_ - _OC_ - _OD_ - _OH_ - _OW_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - - REPLACE_WITH_NUM(model, "_ON_", p.out.n); - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_OD_", p.out.d); - REPLACE_WITH_NUM(model, "_OH_", p.out.h); - REPLACE_WITH_NUM(model, "_OW_", p.out.w); - - return model; - } -void ref_one_hot_5d(InferenceEngine::Blob &src, InferenceEngine::Blob &dst, one_hot_test_params p) -{ - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - for (int ob = 0; ob < p.out.n; ob++) { - for (int oc = 0; oc < p.out.c; oc++) { - for (int od = 0; od < p.out.d; od++) { - for (int oh = 0; oh < p.out.h; oh++) { - for (int ow = 0; ow < p.out.w; ow++) { - std::size_t src_offset = 0; - - std::size_t dst_offset = ow + p.out.w * oh + p.out.w * p.out.h * od \ - + p.out.w * p.out.h * p.out.d * oc + p.out.w * p.out.h * p.out.d * p.out.c * ob; - - int hot_axis = -1; - if (p.axis == -1 || p.axis == 4) { - hot_axis = ow; - src_offset = oh + p.in.w * od + p.in.w * p.in.h * oc + p.in.w * p.in.h * p.in.c * ob; - } else if (p.axis == 0) { - hot_axis = ob; - src_offset = ow + p.in.w * oh + p.in.w * p.in.h * od + p.in.w * p.in.h * p.in.c * oc; - } else if (p.axis == 1) { - hot_axis = oc; - src_offset = ow + p.in.w * oh + p.in.w * 
p.in.h * od + p.in.w * p.in.h * p.in.c * ob;
-                        } else if (p.axis == 2) {
-                            hot_axis = od;
-                            src_offset = ow + p.in.w * oh + p.in.w * p.in.h * oc + p.in.w * p.in.h * p.in.c * ob;
-                        } else if (p.axis == 3) {
-                            hot_axis = oh;
-                            src_offset = ow + p.in.w * od + p.in.w * p.in.h * oc + p.in.w * p.in.h * p.in.c * ob;
-                        }
-
-                        int v = src_ptr[src_offset];
-                        dst_ptr[dst_offset] = (v == hot_axis) ? p.on : p.off;
-                    }
-                }
-            }
-        }
-    }
-}
-protected:
-    virtual void SetUp() {
-        try {
-            TestsCommon::SetUp();
-            one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
-            std::string model = getModel(p);
-
-            InferenceEngine::Core core;
-            InferenceEngine::CNNNetwork network;
-            try {
-                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
-            } catch (InferenceEngine::Exception &e) {
-                FAIL() << e.what();
-            } catch (std::exception &e) {
-                FAIL() << e.what();
-            }
-
-            MKLDNNGraphTestClass graph;
-            graph.CreateGraph(network);
-
-            // Output Data
-            InferenceEngine::OutputsDataMap out;
-            out = network.getOutputsInfo();
-            InferenceEngine::BlobMap outputBlobs;
-
-            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
-
-            InferenceEngine::TBlob<float>::Ptr output;
-            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
-            output->allocate();
-            outputBlobs[item.first] = output;
-
-            // Output Reference
-            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
-            dst_ref.allocate();
-
-            SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
-
-            TBlob<float> src({Precision::FP32, dims_src, Layout::NCHW});
-            src.allocate();
-
-            float * s = src.buffer().as<float*>();
-            for (int i = 0; i < src.size(); ++i)
-                s[i] = -1;
-            s[3] = 3;
-            s[7] = 2;
-
-            // Check results
-            ref_one_hot_5d(src, dst_ref, p);
-
-            InferenceEngine::Blob::Ptr pSrc = make_shared_blob<float>(src);
-            InferenceEngine::BlobMap srcs;
-            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", pSrc));
-
-            // Infer
-            graph.Infer(srcs, outputBlobs);
-            compare(*output, dst_ref);
-        } catch (const InferenceEngine::Exception &e) {
-            FAIL() << e.what();
-        }
-    }
-};
-
-// 0d -> 1d, depth
-#define case_1d_0 one_hot_base_params({ {0, 0, 0, 0}, {0, 0, 0, 0, 3},-1, 3, 1.0f, 0.0f })
-#define case_1d_1 one_hot_base_params({ {0, 0, 0, 0}, {0, 0, 0, 0, 4}, 0, 4, 1.0f, 0.0f })
-// 1d -> 2d, axis default
-#define case_2d_0 one_hot_base_params({ {0, 0, 0, 3}, {0, 0, 0, 3, 6},-1, 6, 1.0f, 0.0f })
-#define case_2d_1 one_hot_base_params({ {0, 0, 0, 3}, {0, 0, 0, 6, 3}, 0, 6, 1.0f, 0.0f })
-#define case_2d_2 one_hot_base_params({ {0, 0, 0, 3}, {0, 0, 0, 3, 6}, 1, 6, 1.0f, 0.0f })
-// 2d -> 3d, on_value, off_value
-#define case_3d_0 one_hot_base_params({ {0, 0, 3, 2}, {0, 0, 3, 2, 4},-1, 4, 2.0f, -1.0f })
-#define case_3d_1 one_hot_base_params({ {0, 0, 3, 2}, {0, 0, 4, 3, 2}, 0, 4, 2.0f, -1.0f })
-#define case_3d_2 one_hot_base_params({ {0, 0, 3, 2}, {0, 0, 3, 4, 2}, 1, 4, 2.0f, -1.0f })
-// 3d -> 4d
-#define case_4d_0 one_hot_base_params({ {0, 1, 3, 2}, {0, 1, 3, 2, 4},-1, 4, 1.0f, 0.0f })
-#define case_4d_1 one_hot_base_params({ {0, 1, 3, 2}, {0, 4, 1, 3, 2}, 0, 4, 1.0f, 0.0f })
-#define case_4d_2 one_hot_base_params({ {0, 1, 3, 2}, {0, 1, 4, 3, 2}, 1, 4, 1.0f, 0.0f })
-#define case_4d_3 one_hot_base_params({ {0, 1, 3, 2}, {0, 1, 3, 4, 2}, 2, 4, 1.0f, 0.0f })
-// 4d -> 5d IE layouts are NCHW -> NCDHW, param layouts are {n, c, h, w} {d, n, c, h, w}
-#define case_5d_0 one_hot_base_params({ {1, 3, 2, 3}, {2, 1, 3, 3, 4},-1, 4, 1.0f, 0.0f })
-#define case_5d_1 one_hot_base_params({ {1, 3, 2, 3}, {3, 4, 1, 2, 3}, 0, 4, 1.0f, 0.0f })
-#define case_5d_2 one_hot_base_params({ {1, 3, 2, 3}, {3, 1, 4, 2, 3}, 1, 4, 1.0f, 0.0f 
}) -#define case_5d_3 one_hot_base_params({ {1, 3, 2, 3}, {4, 1, 3, 2, 3}, 2, 4, 1.0f, 0.0f }) -#define case_5d_4 one_hot_base_params({ {1, 3, 2, 3}, {2, 1, 3, 4, 3}, 3, 4, 1.0f, 0.0f }) - -one_hot_test_params one_hot_only_1d_test_cases[] = { - one_hot_test_params("CPU", case_1d_0), - one_hot_test_params("CPU", case_1d_1) -}; - -one_hot_test_params one_hot_only_2d_test_cases[] = { - one_hot_test_params("CPU", case_2d_0), - one_hot_test_params("CPU", case_2d_1), - one_hot_test_params("CPU", case_2d_2), -}; - -one_hot_test_params one_hot_only_3d_test_cases[] = { - one_hot_test_params("CPU", case_3d_0), - one_hot_test_params("CPU", case_3d_1), - one_hot_test_params("CPU", case_3d_2), -}; -one_hot_test_params one_hot_only_4d_test_cases[] = { - one_hot_test_params("CPU", case_4d_0), - one_hot_test_params("CPU", case_4d_1), - one_hot_test_params("CPU", case_4d_2), - one_hot_test_params("CPU", case_4d_3) -}; - -one_hot_test_params one_hot_only_5d_test_cases[] = { - one_hot_test_params("CPU", case_5d_0), - one_hot_test_params("CPU", case_5d_1), - one_hot_test_params("CPU", case_5d_2), - one_hot_test_params("CPU", case_5d_3), - one_hot_test_params("CPU", case_5d_4) -}; - -TEST_P(OneHotOnly1dTest, TestsOneHot) {} -INSTANTIATE_TEST_CASE_P(TestsOneHot, OneHotOnly1dTest, ::testing::ValuesIn(one_hot_only_1d_test_cases)); - -TEST_P(OneHotOnly2dTest, TestsOneHot) {} -INSTANTIATE_TEST_CASE_P(TestsOneHot, OneHotOnly2dTest, ::testing::ValuesIn(one_hot_only_2d_test_cases)); - -TEST_P(OneHotOnly3dTest, TestsOneHot) {} -INSTANTIATE_TEST_CASE_P(TestsOneHot, OneHotOnly3dTest, ::testing::ValuesIn(one_hot_only_3d_test_cases)); - -TEST_P(OneHotOnly4dTest, TestsOneHot) {} -INSTANTIATE_TEST_CASE_P(TestsOneHot, OneHotOnly4dTest, ::testing::ValuesIn(one_hot_only_4d_test_cases)); - -TEST_P(OneHotOnly5dTest, TestsOneHot) {} -INSTANTIATE_TEST_CASE_P(TestsOneHot, OneHotOnly5dTest, ::testing::ValuesIn(one_hot_only_5d_test_cases)); - diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp deleted file mode 100644 index f4d5c8a5de6ad4..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct range_test_params { - std::string precision; - float start; - float limit; - float delta; - InferenceEngine::SizeVector out_shape; - - std::vector> comp; -}; - -template -void ref_range( - float start, - float limit, - float delta, - InferenceEngine::TBlob &dst -) { - data_t* dst_data = dst.data(); - size_t work_amount_dst = std::floor(std::abs((limit - start) / delta)); - if (work_amount_dst != dst.size()) - FAIL() << "Range indexes exceeds data tensor dimension"; - - data_t dst_value = static_cast(start); - for (size_t iwork = 0; iwork < work_amount_dst; ++iwork, dst_value += static_cast(delta)) { - dst_data[iwork] = dst_value; - } -} - -class MKLDNNCPUExtRangeTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - 1 - - - - - - - 1 - - - - - - - 1 - - - - - - - - 1 - - - 1 - - - 1 - - - - - _OUT_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(range_test_params p) { - 
std::string model = model_t;
-        std::string out_shape;
-
-        REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
-        for (size_t i = 0; i < p.out_shape.size(); i++) {
-            out_shape += "<dim>";
-            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
-        }
-        REPLACE_WITH_STR(model, "_OUT_", out_shape);
-
-        return model;
-    }
-
-protected:
-    virtual void TearDown() {
-    }
-
-    virtual void SetUp() {
-        try {
-            TestsCommon::SetUp();
-            range_test_params p = ::testing::WithParamInterface<range_test_params>::GetParam();
-            std::string model = getModel(p);
-
-            InferenceEngine::Core core;
-            InferenceEngine::CNNNetwork network;
-            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
-
-            MKLDNNGraphTestClass graph;
-            graph.CreateGraph(network);
-
-            // Output Data
-            InferenceEngine::OutputsDataMap out;
-            out = network.getOutputsInfo();
-            InferenceEngine::BlobMap outputBlobs;
-
-            // Input Data
-            InferenceEngine::Blob::Ptr start_scalar;
-            InferenceEngine::Blob::Ptr limit_scalar;
-            InferenceEngine::Blob::Ptr delta_scalar;
-            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
-            InferenceEngine::SizeVector scalar_dim(1, 1);
-            InferenceEngine::BlobMap srcs;
-            InferenceEngine::SizeVector out_dims;
-            if (p.precision == "I32") {
-                start_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                start_scalar->allocate();
-                static_cast<int32_t*>(start_scalar->buffer())[0] = static_cast<int32_t>(p.start);
-                auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(start_scalar.get());
-                if (start_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";
-
-                limit_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                limit_scalar->allocate();
-                static_cast<int32_t*>(limit_scalar->buffer())[0] = static_cast<int32_t>(p.limit);
-                auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(limit_scalar.get());
-                if (limit_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";
-
-                delta_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                delta_scalar->allocate();
-                static_cast<int32_t*>(delta_scalar->buffer())[0] = static_cast<int32_t>(p.delta);
-                auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(delta_scalar.get());
-                if (delta_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";
-
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
-
-                // Output Blob
-                InferenceEngine::TBlob<int32_t>::Ptr output;
-                output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
-                output->allocate();
-                outputBlobs[item.first] = output;
-
-                // Output Reference
-                InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
-                dst_ref.allocate();
-                ref_range(p.start, p.limit, p.delta, dst_ref);
-
-                // Infer
-                graph.Infer(srcs, outputBlobs);
-                for (int i = 0; i < dst_ref.size(); i++) {
-                    if (dst_ref.data()[i] != (*output).data()[i])
-                        FAIL() << "The difference between res_ptr[i] and ref_ptr[i]";
-                }
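                // Added note, not part of the original file: the I32 branch above
                // checks outputs element-for-element with operator!=, i.e. exact
                // integer equality, while the FP32 branch below goes through the
                // harness's compare() helper, which permits a small float tolerance.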
-            } else if (p.precision == "FP32") {
-                start_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                start_scalar->allocate();
-                static_cast<float*>(start_scalar->buffer())[0] = p.start;
-                auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(start_scalar.get());
-                if (start_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<float>.";
-
-                limit_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                limit_scalar->allocate();
-                static_cast<float*>(limit_scalar->buffer())[0] = p.limit;
-                auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(limit_scalar.get());
-                if (limit_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<float>.";
-
-                delta_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
-                delta_scalar->allocate();
-                static_cast<float*>(delta_scalar->buffer())[0] = p.delta;
-                auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(delta_scalar.get());
-                if (delta_scalarPtr == nullptr)
-                    FAIL() << "Cannot cast blob to TBlob<float>.";
-
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
-                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
-
-                // Output Blob
-                InferenceEngine::Blob::Ptr output;
-                output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
-                output->allocate();
-                outputBlobs[item.first] = output;
-
-                // Output Reference
-                InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
-                dst_ref.allocate();
-                ref_range(p.start, p.limit, p.delta, dst_ref);
-
-                // Infer
-                graph.Infer(srcs, outputBlobs);
-                compare(*output, dst_ref);
-            } else {
-                return;
-            }
-        } catch (const InferenceEngine::Exception &e) {
-            FAIL() << e.what();
-        }
-    }
-};
-
-TEST_P(MKLDNNCPUExtRangeTests, TestsRange) {}
-
-INSTANTIATE_TEST_CASE_P(
-    TestsRange, MKLDNNCPUExtRangeTests,
-    ::testing::Values(
-// Params: precision, start, limit, delta, out_shape
-        range_test_params{ "I32", 3.f, 18.f, 3.f, { 5 } },
-        range_test_params{ "I32", 3.f, 1.f, -1.f, { 2 } },
-        range_test_params{ "I32", 3.f, -3.f, -1.f, { 6 } },
-        range_test_params{ "I32", 0.f, 5.f, 1.f, { 5 } },
-        range_test_params{"FP32", 3.f, 18.f, 3.f, { 5 } },
-        range_test_params{"FP32", 3.f, 1.f, -.5f, { 4 } },
-        range_test_params{"FP32", 3.f, -1.f, -.5f, { 8 } },
-        range_test_params{"FP32", 0.f, 5.f, 1.f, { 5 } }
-    ));
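The Range rows above all follow one size rule, and the semantics can be cross-checked without the deprecated harness. A minimal sketch, assuming plain C++17 and nothing from Inference Engine (make_range is a hypothetical helper mirroring the deleted ref_range, not IE API):

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Output holds floor(|(limit - start) / delta|) values, starting at `start`
    // and stepping by `delta`, cast to the destination type at each step.
    template <typename T>
    std::vector<T> make_range(float start, float limit, float delta) {
        const std::size_t n =
            static_cast<std::size_t>(std::floor(std::abs((limit - start) / delta)));
        std::vector<T> out(n);
        T value = static_cast<T>(start);
        for (std::size_t i = 0; i < n; ++i, value += static_cast<T>(delta))
            out[i] = value;
        return out;
    }

    int main() {
        // The I32 row { 3.f, 18.f, 3.f, { 5 } } from the table above.
        assert((make_range<int32_t>(3.f, 18.f, 3.f) == std::vector<int32_t>{3, 6, 9, 12, 15}));
        // The FP32 row { 3.f, 1.f, -.5f, { 4 } }: four steps of -0.5 starting at 3.
        assert(make_range<float>(3.f, 1.f, -.5f).size() == 4);
        return 0;
    }

Each row's out_shape matches floor(|(limit - start) / delta|); that is why { 3.f, -1.f, -.5f } yields 8 elements.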
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reduce_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reduce_tests.cpp
deleted file mode 100644
index 2e969771006045..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reduce_tests.cpp
+++ /dev/null
@@ -1,535 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "test_graph.hpp"
-
-#include "single_layer_common.hpp"
-#include "tests_common.hpp"
-#include <string>
-
-
-using namespace ::testing;
-using namespace std;
-using namespace mkldnn;
-
-struct reduce_test_params {
-    std::string reduce_type;
-    bool keep_dims;
-    InferenceEngine::SizeVector in_shape;
-    std::string inType;
-    std::vector<float> input_tensor;
-    std::vector<int32_t> axes_for_reduction;
-    InferenceEngine::SizeVector out_shape;
-    std::vector<float> reference;
-
-    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
-};
-
-template <typename src_t, typename dst_t, typename F>
-void reduce(
-    const src_t *src_data,
-    InferenceEngine::SizeVector src_dims,
-    InferenceEngine::SizeVector srcStrides,
-    dst_t* dst_data,
-    InferenceEngine::SizeVector dst_dims,
-    InferenceEngine::SizeVector dstStrides,
-    dst_t init_value,
-    bool keep_dims,
-    InferenceEngine::SizeVector skip_dims,
-    F func
-) {
-    size_t i, src_idx, dst_idx;
-    for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
-        dst_data[i] = init_value;
-
-    InferenceEngine::SizeVector counters(src_dims.size(), 0);
-    for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) {
-        if (keep_dims)
-            for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i)
-                dst_idx += (counters[i] % dst_dims[i]) * dstStrides[i];
-        else
-            for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i)
-                dst_idx += counters[skip_dims[i]] * dstStrides[i];
-
-        dst_data[dst_idx] = func(dst_data[dst_idx], src_data[src_idx]);
-        for (int j = src_dims.size() - 1; j >= 0; j--) {
-            counters[j] = (counters[j] + 1) % src_dims[j];
-            if (counters[j] != 0) break;
-        }
-    }
-}
-
-template <typename src_t, typename dst_t>
-void ref_reduce(
-    std::string reduce_type,
-    InferenceEngine::TBlob<src_t> &src,
-    bool keep_dims,
-    std::vector<int32_t> axes_for_reduction,
-    InferenceEngine::TBlob<dst_t> &dst,
-    InferenceEngine::SizeVector &out_dims
-) {
-    size_t i, src_idx, dst_idx;
-    const src_t *src_data = src.data();
-    InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();
-    InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
-    dst_t* dst_data = dst.data();
-    InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims();
-    InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
-    InferenceEngine::SizeVector skip_dims;
-
-    if (!dst_dims.size())
-        dst_dims = InferenceEngine::SizeVector(1, 1);
-
-    if (!dstStrides.size())
-        dstStrides = InferenceEngine::SizeVector(1, 1);
-
-    if (axes_for_reduction.size() == 0)
-        FAIL() << " Index vector should be 1 dimension";
-
-    for (i = 0; i < axes_for_reduction.size(); i++) {
-        int32_t axis = axes_for_reduction[i];
-        if (axis < 0)
-            axis += src_dims.size();
-
-        if (axis > src_dims.size())
-            FAIL() << " Index to squeeze exceeds data tensor dimension";
-        axes_for_reduction[i] = axis;
-    }
-
-    for (size_t j = 0; j < src_dims.size(); j++) {
-        bool found = false;
-        for (size_t axis : axes_for_reduction)
-            if (j == axis) found = true;
-
-        if (!found) {
-            out_dims.push_back(src_dims[j]);
-            if (!keep_dims) skip_dims.push_back(j);
-        }
-        else {
-            if (keep_dims) out_dims.push_back(1);
-        }
-    }
-
-    if (reduce_type == "ReduceAnd") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x && y; } );
-        } else {
-            dst_data[0] = 1;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] = dst_data[0] && src_data[src_idx];
-        }
-    } else if (reduce_type == "ReduceL1") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x + (std::abs)(y); } );
-        } else {
-            dst_data[0] = 0;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] += (std::abs)(src_data[src_idx]);
-        }
-    } else if (reduce_type == "ReduceL2") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x + y * y; } );
-
-            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
-                dst_data[i] = (std::sqrt)(dst_data[i]);
-        } else {
-            dst_data[0] = 0.0f;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] += src_data[src_idx] * src_data[src_idx];
-            dst_data[0] = sqrt(dst_data[0]);
-        }
-    } else if (reduce_type == "ReduceLogSum") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x + y; });
-
-            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
-                dst_data[i] = logf(dst_data[i]);
-        } else {
-            dst_data[0] = 0;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] += src_data[src_idx];
-            dst_data[0] = logf(dst_data[0]);
-        }
-    } else if (reduce_type == "ReduceLogSumExp") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x + expf(y); });
-
-            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
-                dst_data[i] = logf(dst_data[i]);
-        } else {
-            dst_data[0] = 0;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] += expf(src_data[src_idx]);
-            dst_data[0] = logf(dst_data[0]);
-        }
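// Added aside, not part of the deleted file: the ReduceLogSum/ReduceLogSumExp
// references above exponentiate and sum in float before taking the log, which
// overflows once inputs exceed roughly 88.7 (exp(88.7) is about FLT_MAX, 3.4e38).
// A port of these checks can shift by the maximum first, using the identity
// log(sum(exp(x_i))) == m + log(sum(exp(x_i - m))) with m = max(x_i), e.g.:
//
//     // assumes n > 0; needs <algorithm> and <cmath>
//     float stable_log_sum_exp(const float* x, size_t n) {
//         float m = *std::max_element(x, x + n);
//         float acc = 0.f;
//         for (size_t k = 0; k < n; ++k)
//             acc += std::exp(x[k] - m);   // every exp() argument is <= 0
//         return m + std::log(acc);
//     }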
-    } else if (reduce_type == "ReduceMax") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, (std::numeric_limits<dst_t>::min)(), keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x > y ? x : y; });
-        } else {
-            dst_data[0] = (std::numeric_limits<dst_t>::min)();
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] = dst_data[0] > src_data[src_idx] ? dst_data[0] : src_data[src_idx];
-        }
-    } else if (reduce_type == "ReduceMean") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x + y; });
-            float reduced_dims_work_amount = 1.f;
-            for (size_t axis : axes_for_reduction) {
-                reduced_dims_work_amount *= static_cast<float>(src_dims[axis]);
-            }
-            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
-                dst_data[i] /= reduced_dims_work_amount;
-        } else {
-            dst_data[0] = 0;
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] += src_data[src_idx];
-            dst_data[0] /= static_cast<float>(srcStrides[0] * src_dims[0]);
-        }
-    } else if (reduce_type == "ReduceMin") {
-        if (out_dims.size()) {
-            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, (std::numeric_limits<dst_t>::max)(), keep_dims, skip_dims,
-                   [](dst_t x, src_t y)->dst_t { return x < y ? x : y; });
-        } else {
-            dst_data[0] = (std::numeric_limits<dst_t>::max)();
-            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
-                dst_data[0] = dst_data[0] < src_data[src_idx] ? 
dst_data[0] : src_data[src_idx]; - } - } else if (reduce_type == "ReduceOr") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims, - [](dst_t x, src_t y)->dst_t { return x || y; }); - } else { - dst_data[0] = 0; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] = dst_data[0] || src_data[src_idx]; - } - } else if (reduce_type == "ReduceProd") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1, keep_dims, skip_dims, - [](dst_t x, src_t y)->dst_t { return x * y; }); - } else { - dst_data[0] = 1; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] *= src_data[src_idx]; - } - } else if (reduce_type == "ReduceSum") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims, - [](dst_t x, src_t y)->dst_t { return x + y; }); - } else { - dst_data[0] = 0; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx]; - } - } else if (reduce_type == "ReduceSumSquare") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0, keep_dims, skip_dims, - [](dst_t x, src_t y)->dst_t { return x + y * y; }); - } else { - dst_data[0] = 0; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx] * src_data[src_idx]; - } - } -} - -class MKLDNNCPUExtReducesTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - _DIM_SIZE_ - - - - - - - - _IN_ - - - _DIM_SIZE_ - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(reduce_test_params p) { - std::string model = model_t; - std::string in_shape; - std::string out_shape = ""; - - for (size_t i = 0; i < p.in_shape.size(); i++) { - in_shape += ""; - in_shape += std::to_string(p.in_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_STR(model, "_IP_", p.inType); - REPLACE_WITH_STR(model, "_OP_", p.inType); - REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.axes_for_reduction.size()); - REPLACE_WITH_STR(model, "_REDUCE_TYPE_", p.reduce_type); - REPLACE_WITH_NUM(model, "_KEEP_DIMS_", p.keep_dims); - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_OUT_", out_shape); - - return model; - } - -protected: - virtual void TearDown() { - } - - template - static void fill_data_dbgval(T *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = i + 1; - } - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - reduce_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - // Input Data - InferenceEngine::Blob::Ptr src; - InferenceEngine::SizeVector out_dims; - - InferenceEngine::BlobMap srcs; - - InferenceEngine::Blob::Ptr seq_lengthsIdx; - InferenceEngine::SizeVector seq_lengths_dim(1, p.axes_for_reduction.size()); - seq_lengthsIdx = 
InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) }); - seq_lengthsIdx->allocate(); - if (p.axes_for_reduction.size()) - memcpy(static_cast(seq_lengthsIdx->buffer()), &p.axes_for_reduction[0], sizeof(int32_t)*p.axes_for_reduction.size()); - auto * seq_lengthsIdxPtr = dynamic_cast*>(seq_lengthsIdx.get()); - if (seq_lengthsIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("axes_for_reduction", seq_lengthsIdx)); - if (p.inType == "FP32") { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.in_shape, - InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape)}); - src->allocate(); - if (p.input_tensor.size()) - for (int i = 0; i < p.input_tensor.size(); i++) { - static_cast(src->buffer())[i] = static_cast(p.input_tensor[i]); - } - else - fill_data_dbgval(src->buffer(), src->size()); - auto *srcPtr = dynamic_cast *>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - ref_reduce(p.reduce_type, *srcPtr, p.keep_dims, p.axes_for_reduction, dst_ref, out_dims); - if (p.reference.size()) - if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0) - FAIL() << "Wrong result with compare reference vector!"; - // Infer - srcs.insert(std::pair("input", src)); - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } else if (p.inType == "I32") { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - InferenceEngine::TBlob dst_ref({ InferenceEngine::Precision::I32, p.out_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.out_shape) }); - dst_ref.allocate(); - - src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::I32, p.in_shape, - InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape)}); - src->allocate(); - if (p.input_tensor.size()) - for (int i = 0; i < p.input_tensor.size(); i++) { - static_cast(src->buffer())[i] = static_cast(p.input_tensor[i]); - } - else - fill_data_dbgval(src->buffer(), src->size()); - auto *srcPtr = dynamic_cast *>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - ref_reduce(p.reduce_type, *srcPtr, p.keep_dims, p.axes_for_reduction, dst_ref, out_dims); - if (p.reference.size()) { - for (int i = 0; i < p.reference.size(); i++) { - if (dst_ref.data()[i] != p.reference[i]) - FAIL() << "Wrong result with compare reference vector!"; - //std::cout << p.reference[i] << " " << dst_ref.data()[i] << std::endl; - } - } - - // Infer - srcs.insert(std::pair("input", src)); - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } - // Check results - if (out_dims.size() != p.out_shape.size()) - FAIL() << "Wrong out_shape size!"; - for (size_t i = 0; i < p.out_shape.size(); i++) { - if (out_dims[i] != p.out_shape[i]) - FAIL() << "Wrong out_shape dimensions!"; - } - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtReducesTests, TestsReduceSum) {} - -INSTANTIATE_TEST_CASE_P( - TestsReduceSum, MKLDNNCPUExtReducesTests, - ::testing::Values( - // Params: reduce_type, keep_dims, 
in_shape, inType, input_tensor, axes_for_reduction, out_shape, reference - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 0 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ -3 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 2 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ -1 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 0, 2 },{ 1, 3, 1 },{ 68, 100, 132 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 1, 2 },{ 2, 1, 1 },{ 78, 222 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 2, 1 },{ 2, 1, 1 },{ 78, 222 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 0, 1, 2 },{ 1, 1, 1 },{ 300 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 0, -2, 2 },{ 1, 1, 1 },{ 300 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 2, 2, 2, 2, 2, 2 },"FP32",{},{ 0, 1, 2, 3, 4, 5, 6 },{ 1, 1, 1, 1, 1, 1, 1 },{ 8256 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 2, 2, 2, 2, 2, 2 },"FP32",{},{ 6, 3, 1, 4, 0 },{ 1, 1, 2, 1, 1, 2, 1 },{ 1776, 1840, 2288, 2352 } }, - reduce_test_params{ "ReduceSum", true,{ 2, 3, 4 },"FP32",{},{ 2, 2, 0, 2, 0 },{ 1, 3, 1 },{ 68, 100, 132 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 0 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ -3 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 2 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ -1 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 0, 2 },{ 3 },{ 68, 100, 132 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 1, 2 },{ 2 },{ 78, 222 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 2, 1 },{ 2 },{ 78, 222 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 0, 1, 2 },{},{ 300 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 0, -2, 2 },{},{ 300 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 2, 2, 2, 2, 2, 2 },"FP32",{},{ 0, 1, 2, 3, 4, 5, 6 },{},{ 8256 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"FP32",{},{ 2, 2, 0, 2, 0 },{ 3 },{ 68, 100, 132 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 2, 2, 2, 2, 2, 2 },"FP32",{},{ 6, 3, 1, 4, 0 },{ 2, 2 },{ 1776, 1840, 2288, 2352 } }, - reduce_test_params{ "ReduceSum", true,{ 1, 2, 3, 4, 1 },"FP32",{},{ 1 },{ 1, 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, - reduce_test_params{ "ReduceSum", false,{ 1, 2, 3, 4, 1 },"FP32",{},{ 1 },{ 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }, -// I32 tests - reduce_test_params{ "ReduceAnd", true,{ 2, 2, 2 },"I32",{1, 0, 1, 1, 0, 1, 1, 0},{ 2 },{ 2, 2, 1 },{ 0, 1, 0, 0} }, - reduce_test_params{ "ReduceL1", true, { 3, 2, 2 },"I32",{},{ 2 },{ 3, 2, 1 },{ 3, 7, 11, 15, 19, 23 } }, - reduce_test_params{ "ReduceL1", false, { 3, 2, 2 },"I32",{},{ 0, 1, 2 },{ },{ 78 } }, - reduce_test_params{ "ReduceL2", false,{ 3, 2, 2 },"I32",{},{ 2 },{ 3, 2 },{ 2, 5, 7, 10, 13, 16 } }, - reduce_test_params{ "ReduceL2", false,{ 3, 2, 2 },"I32",{},{ 0, 1, 2 },{ },{ 25 } }, 
- reduce_test_params{ "ReduceLogSum", true,{ 10, 10, 2 },"I32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceLogSumExp", true,{ 5, 5, 2 },"I32",{},{ 2 },{ 5, 5, 1 },{} }, - reduce_test_params{ "ReduceMax", true,{ 3, 2, 2 },"I32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20, 2, 40, 2, 60, 2 } }, - reduce_test_params{ "ReduceMean", true, { 3, 2, 2 },"I32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 12, 1, 35, 1, 57, 1 } }, - reduce_test_params{ "ReduceMin", false,{ 3, 2, 2 },"I32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 5, 1, 30, 1, 55, 1 } }, - reduce_test_params{ "ReduceOr", true,{ 2, 2, 2 },"I32",{1, 0, 1, 1, 0, 0, 1, 0},{ 2 },{ 2, 2, 1 },{1, 1, 0, 1 } }, - reduce_test_params{ "ReduceProd", true,{ 3, 2, 2 },"I32",{},{ 1 },{ 3, 1, 2 },{ 3, 8, 35, 48, 99, 120 } }, - reduce_test_params{ "ReduceSum", false,{ 2, 3, 4 },"I32",{},{ 2, 2, 0, 2, 0 },{ 3 },{ 68, 100, 132 } }, - reduce_test_params{ "ReduceSumSquare", true, { 3, 2, 2 },"I32",{},{ 1 },{ 3, 1, 2 },{ 10, 20, 74, 100, 202, 244 } }, - reduce_test_params{ "ReduceSumSquare", false, { 3, 2, 2 },"I32",{},{ 0, 1, 2 },{ },{ 650 } } -)); - - -TEST_P(MKLDNNCPUExtReducesTests, TestsReduceAll) {} - -INSTANTIATE_TEST_CASE_P( - TestsReduceAll, MKLDNNCPUExtReducesTests, - ::testing::Values( -// Params: reduce_type, keep_dims, in_shape, inType, input_tensor, axes_for_reduction, out_shape, reference - reduce_test_params{ "ReduceAnd", true,{ 2, 2, 2 },"FP32",{1, 0, 1, 1, 0, 1, 1, 0},{ 2 },{ 2, 2, 1 },{ 0, 1, 0, 0} }, - reduce_test_params{ "ReduceAnd", false, { 2, 2, 2 },"FP32",{1, 0, 1, 1, 0, 1, 1, 0},{ 0, 1, 2 },{ },{ 0 } }, - reduce_test_params{ "ReduceL1", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{ } }, - reduce_test_params{ "ReduceL1", true, { 3, 2, 2 },"FP32",{},{ 2 },{ 3, 2, 1 },{ 3, 7, 11, 15, 19, 23 } }, - reduce_test_params{ "ReduceL1", false, { 3, 2, 2 },"FP32",{},{ 2 },{ 3, 2 },{ 3, 7, 11, 15, 19, 23 } }, - reduce_test_params{ "ReduceL1", false, { 3, 2, 2 },"FP32",{},{ 0, 1, 2 },{ },{ 78 } }, - reduce_test_params{ "ReduceL2", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceL2", true,{ 3, 2, 2 },"FP32",{},{ 2 },{ 3, 2, 1 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } }, - reduce_test_params{ "ReduceL2", false,{ 3, 2, 2 },"FP32",{},{ 2 },{ 3, 2 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } }, - reduce_test_params{ "ReduceL2", false,{ 3, 2, 2 },"FP32",{},{ 0, 1, 2 },{ },{ 25.49509757f } }, - reduce_test_params{ "ReduceLogSum", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceLogSum", true,{ 3, 2, 2 },"FP32",{ },{ 1 },{ 3, 1, 2 },{ } }, - reduce_test_params{ "ReduceLogSum", false,{ 3, 2, 2 },"FP32",{ },{ 1 },{ 3, 2 },{ } }, - reduce_test_params{ "ReduceLogSum", false,{ 3, 2, 2 },"FP32",{ },{ 0, 1, 2 },{},{ } }, - reduce_test_params{ "ReduceLogSumExp", true,{ 5, 5, 2 },"FP32",{},{ 2 },{ 5, 5, 1 },{} }, - reduce_test_params{ "ReduceLogSumExp", true,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } }, - reduce_test_params{ "ReduceLogSumExp", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } }, - reduce_test_params{ "ReduceLogSumExp", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 
60.00671387f } }, - reduce_test_params{ "ReduceMax", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceMax", true,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20, 2, 40, 2, 60, 2 } }, - reduce_test_params{ "ReduceMax", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20, 2, 40, 2, 60, 2 } }, - reduce_test_params{ "ReduceMax", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60 } }, - reduce_test_params{ "ReduceMean", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceMean", true, { 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } }, - reduce_test_params{ "ReduceMean", false, { 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } }, - reduce_test_params{ "ReduceMean", false, { 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{ },{ 18.25f } }, - reduce_test_params{ "ReduceMin", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceMin", true,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 5, 1, 30, 1, 55, 1 } }, - reduce_test_params{ "ReduceMin", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 5, 1, 30, 1, 55, 1 } }, - reduce_test_params{ "ReduceMin", false,{ 3, 2, 2 },"FP32",{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 1 } }, - reduce_test_params{ "ReduceOr", true,{ 2, 2, 2 },"FP32",{1, 0, 1, 1, 0, 0, 1, 0},{ 2 },{ 2, 2, 1 },{1, 1, 0, 1 } }, - reduce_test_params{ "ReduceOr", false, { 2, 2, 2 },"FP32",{},{ 0, 1, 2 },{ },{ 1 } }, - reduce_test_params{ "ReduceProd", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceProd", true,{ 3, 2, 2 },"FP32",{},{ 1 },{ 3, 1, 2 },{ 3, 8, 35, 48, 99, 120 } }, - reduce_test_params{ "ReduceProd", false,{ 3, 2, 2 },"FP32",{},{ 1 },{ 3, 2 },{ 3, 8, 35, 48, 99, 120 } }, - reduce_test_params{ "ReduceProd", false,{ 3, 2, 2 },"FP32",{},{ 0, 1, 2 },{ },{ 4.790016e+08 } }, - reduce_test_params{ "ReduceSumSquare", true,{ 10, 10, 2 },"FP32",{},{ 2 },{ 10, 10, 1 },{} }, - reduce_test_params{ "ReduceSumSquare", true, { 3, 2, 2 },"FP32",{},{ 1 },{ 3, 1, 2 },{ 10, 20, 74, 100, 202, 244 } }, - reduce_test_params{ "ReduceSumSquare", false, { 3, 2, 2 },"FP32",{},{ 1 },{ 3, 2 },{ 10, 20, 74, 100, 202, 244 } }, - reduce_test_params{ "ReduceSumSquare", false, { 3, 2, 2 },"FP32",{},{ 0, 1, 2 },{ },{ 650 } } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reverse_sequence_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reverse_sequence_tests.cpp deleted file mode 100644 index c31ba958bba316..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reverse_sequence_tests.cpp +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct reverse_sequence_test_params { - std::string inIdxPrecision; - InferenceEngine::SizeVector in_out_shape; - std::vector seq_lengths; - int seq_axis; - int batch_axis; - std::vector 
reference; - - std::vector> comp; -}; - -template -void ref_reverse_sequence( - InferenceEngine::TBlob &src, - InferenceEngine::TBlob &seq_lengths, - InferenceEngine::TBlob &dst, - int seq_axis, - int batch_axis -) { - size_t i, src_idx; - const float *src_data = src.data(); - InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims(); - InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides(); - const data_t *seq_lengths_data = seq_lengths.data(); - InferenceEngine::SizeVector seq_lengths_dims = seq_lengths.getTensorDesc().getDims(); - float* dst_data = dst.data(); - - if (seq_axis < 0) - seq_axis += src_dims.size(); - - if (seq_axis < 0 || seq_axis >= src_dims.size()) - FAIL() << "Incorrect 'seq_axis' parameters dimensions and axis number!"; - - if (batch_axis < 0) - batch_axis += src_dims.size(); - - if (batch_axis < 0 || batch_axis >= src_dims.size()) - FAIL() << "Incorrect 'batch_axis' parameters dimensions and axis number!"; - - for (i = 0; i < src_dims[batch_axis]; i++) { - if (static_cast(seq_lengths_data[i]) > src_dims[seq_axis]) - FAIL() << "Incorrect input 'seq_lengths' values!"; - } - - size_t work_amount_dst = srcStrides[0] * src_dims[0]; - InferenceEngine::SizeVector counters(src_dims.size(), 0); - for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) { - for (i = 0, src_idx = 0; i < src_dims.size(); ++i) { - size_t idx = counters[i]; - if (i == seq_axis && idx < static_cast(seq_lengths_data[counters[batch_axis]])) { - idx = static_cast(seq_lengths_data[counters[batch_axis]]) - idx - 1; - } - src_idx += idx * srcStrides[i]; - } - - dst_data[iwork] = src_data[src_idx]; - - for (int j = src_dims.size() - 1; j >= 0; j--) { - counters[j] = (counters[j] + 1) % src_dims[j]; - if (counters[j] != 0) break; - } - } -} - -class MKLDNNCPUExtReverseSequenceTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_OUT_ - - - - - - - _DIM_SIZE_ - - - - - - - - _IN_OUT_ - - - _DIM_SIZE_ - - - - - _IN_OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(reverse_sequence_test_params p) { - std::string model = model_t; - std::string in_out_shape; - for (size_t i = 0; i < p.in_out_shape.size(); i++) { - in_out_shape += ""; - in_out_shape += std::to_string(p.in_out_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision); - REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape); - REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.seq_lengths.size()); - REPLACE_WITH_NUM(model, "_SA_", p.seq_axis); - REPLACE_WITH_NUM(model, "_BA_", p.batch_axis); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - reverse_sequence_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Input Data - InferenceEngine::Blob::Ptr src; - src = 
InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.in_out_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) }); - src->allocate(); - fill_data_dbgval(src->buffer(), src->size()); - auto * srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", src)); - - InferenceEngine::Blob::Ptr seq_lengthsIdx; - InferenceEngine::SizeVector seq_lengths_dim(1, p.seq_lengths.size()); - if (p.inIdxPrecision == "I32") { - seq_lengthsIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) }); - seq_lengthsIdx->allocate(); - if (p.seq_lengths.size()) - memcpy(static_cast(seq_lengthsIdx->buffer()), &p.seq_lengths[0], sizeof(int32_t)*p.seq_lengths.size()); - auto * seq_lengthsIdxPtr = dynamic_cast*>(seq_lengthsIdx.get()); - if (seq_lengthsIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_reverse_sequence(*srcPtr, *seq_lengthsIdxPtr, dst_ref, p.seq_axis, p.batch_axis); - if (p.reference.size()) { - if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } - srcs.insert(std::pair("seq_lengths", seq_lengthsIdx)); - } else if (p.inIdxPrecision == "FP32") { - seq_lengthsIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) }); - seq_lengthsIdx->allocate(); - if (p.seq_lengths.size()) - for (size_t i = 0; i < p.seq_lengths.size(); i++) { - static_cast(seq_lengthsIdx->buffer())[i] = static_cast(p.seq_lengths[i]); - } - auto * seq_lengthsIdxPtr = dynamic_cast*>(seq_lengthsIdx.get()); - if (seq_lengthsIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Check results - ref_reverse_sequence(*srcPtr, *seq_lengthsIdxPtr, dst_ref, p.seq_axis, p.batch_axis); - if (p.reference.size()) { - if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } - srcs.insert(std::pair("seq_lengths", seq_lengthsIdx)); - } else { - return; - } - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -// Test data vectors -static std::vector test0 = { 9.f,10.f,11.f,12.f,13.f,14.f,15.f,16.f,17.f,0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,18.f,19.f,20.f,21.f,22.f,23.f,24.f,25.f,26.f }; -static std::vector test2 = { 3.f,4.f,5.f,0.f,1.f,2.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,21.f,22.f,23.f,18.f,19.f,20.f,24.f,25.f,26.f }; -static std::vector test4 = { 1.f,0.f,2.f,4.f,3.f,5.f,7.f,6.f,8.f,10.f,9.f,11.f,13.f,12.f,14.f,16.f,15.f,17.f,19.f,18.f,20.f,22.f,21.f,23.f,25.f,24.f,26.f }; -static std::vector test7 = { 0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,24.f,25.f,26.f,21.f,22.f,23.f,18.f,19.f,20.f }; -static std::vector test8 = { 0.f,4.f,8.f,3.f,1.f,5.f,6.f,7.f,2.f,9.f,13.f,17.f,12.f,10.f,14.f,15.f,16.f,11.f,18.f,22.f,26.f,21.f,19.f,23.f,24.f,25.f,20.f }; - -TEST_P(MKLDNNCPUExtReverseSequenceTests, TestsReverseSequence) {} -INSTANTIATE_TEST_CASE_P( - TestsReverseSequence, MKLDNNCPUExtReverseSequenceTests, - ::testing::Values( -// Params: in_out_shape, seq_lengths, seq_axis, batch_axis, reference -/* 0 */ 
reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, 0, 0, test0 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, -3, 0, test0 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, 1, 0, test2 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, -2, 0, test2 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, 2, 1, test4 }, -/* 5 */ reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 2, 2, 2 }, -1, 1, test4 }, - reverse_sequence_test_params{ "I32", { 2, 3 },{ 3, 2 }, 1, 0, {2,1,0,4,3,5} }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 0, test7 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 1, 2, 3 }, 1,-3, test7 }, - reverse_sequence_test_params{ "I32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 2, test8 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, 0, 0, test0 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, -3, 0, test0 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, 1, 0, test2 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, -2, 0, test2 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, 2, 1, test4 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 2, 2, 2 }, -1, 1, test4 }, -/* 15 */ reverse_sequence_test_params{"FP32", { 2, 3 },{ 3, 2 }, 1, 0, {2,1,0,4,3,5} }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 0, test7 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1,-3, test7 }, - reverse_sequence_test_params{"FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 2, test8 }, - reverse_sequence_test_params{"FP32", { 2, 2, 2, 3 },{ 1, 2 }, 3, 0, {0,1,2,3,4,5,6,7,8,9,10,11,13,12,14,16,15,17,19,18,20,22,21,23} }, - reverse_sequence_test_params{"FP32", { 2, 2, 2, 3 },{ 2, 2 }, 2, 0, {3,4,5,0,1,2,9,10,11,6,7,8,15,16,17,12,13,14,21,22,23,18,19,20} } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/scatter_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/scatter_tests.cpp deleted file mode 100644 index ef4644a4dcb1d8..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/scatter_tests.cpp +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct scatterTF_test_params { - std::string inIdxPrecision; - InferenceEngine::SizeVector inDataDim; - std::vector inData; - InferenceEngine::SizeVector inIdxDim; - std::vector inIdx; - std::vector inUpd; - int axis; - - std::vector reference; - - std::vector> comp; -}; - -class MKLDNNCPUExtScatterTFTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IDATA_ - - - - - - - _IIDX_ - - - - - - - _IIDX_ - - - - - - - - _IDATA_ - - - _IIDX_ - - - _IIDX_ - - - - - _IDATA_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(scatterTF_test_params p) { - std::string model = model_t; - std::string inIdx; - std::string inData; - - for (auto& idx : p.inIdxDim) { - inIdx += ""; - inIdx += std::to_string(idx) + "\n"; - } - - for (auto& dct : p.inDataDim) { - inData += ""; - inData += std::to_string(dct) + "\n"; - } - - REPLACE_WITH_STR(model, "_IIDX_", inIdx); - REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision); - 
REPLACE_WITH_STR(model, "_IDATA_", inData); - REPLACE_WITH_NUM(model, "_AX_", p.axis); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - scatterTF_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - //std::cout << model << std::endl; - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Data - InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.inDataDim, InferenceEngine::TensorDesc::getLayoutByDims(p.inDataDim) }); - srcData->allocate(); - memcpy(srcData->buffer(), &p.inData[0], sizeof(float)*p.inData.size()); - auto * srcDataPtr = dynamic_cast*>(srcData.get()); - if (srcDataPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Input Indexes - InferenceEngine::Blob::Ptr srcIdx; - if (p.inIdxPrecision == "I32") { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, p.inIdxDim, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdxDim) }); - srcIdx->allocate(); - memcpy(static_cast(srcIdx->buffer()), &p.inIdx[0], sizeof(int32_t)*p.inIdx.size()); - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - } else { - srcIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.inIdxDim, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdxDim) }); - srcIdx->allocate(); - for (size_t i = 0; i < p.inIdx.size(); i++) { - static_cast(srcIdx->buffer())[i] = static_cast(p.inIdx[i]); - } - auto * srcIdxPtr = dynamic_cast*>(srcIdx.get()); - if (srcIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - } - - // Input Updates - InferenceEngine::Blob::Ptr srcUpd; - srcUpd = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.inIdxDim, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdxDim) }); - srcUpd->allocate(); - memcpy(static_cast(srcUpd->buffer()), &p.inUpd[0], sizeof(float)*p.inUpd.size()); - auto * srcUpdPtr = dynamic_cast*>(srcUpd.get()); - if (srcUpdPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Infer - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("InputData", srcData)); - srcs.insert(std::pair("InputIndexes", srcIdx)); - srcs.insert(std::pair("InputUpdates", srcUpd)); - graph.Infer(srcs, outputBlobs); - - // Check results - if (memcmp((*output).data(), &p.reference[0], output->byteSize()) != 0) - FAIL() << "Wrong result with compare TF reference!"; - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -// Disabled these tests as they need to adjust with new specs: -// - new Scatter Update layer: like TF scatter_update -// - new Scatter Elements Update: like ONNX Scatter Elements -// See merge requests: -// DLDT #6005: Specification for the ScatterElementsUpdate layer -// DLDT #6091: Specification for ScatterUpdate operation -TEST_P(MKLDNNCPUExtScatterTFTests, DISABLED_TestsScatter) {} - -INSTANTIATE_TEST_CASE_P( - TestsScatter, 
MKLDNNCPUExtScatterTFTests,
-    ::testing::Values(
-// Params: inDataDim, inData, inIdxDim, inIdx, inUpd, axis, reference
-        scatterTF_test_params{ "I32", { 3,3 },{ 0,0,0,0,0,0,0,0,0 },{ 2,3 },{ 1,0,2,0,2,1 },{ 1.,1.1,1.2,2,2.1,2.2 }, 0,{ 2,1.1,0,1,0,2.2,0,2.1,1.2 }},
-        scatterTF_test_params{ "I32", { 3,3 },{ 0,0,0,0,0,0,0,0,0 },{ 2,3 },{ 1,0,2,0,2,1 },{ 1.,1.1,1.2,2,2.1,2.2 }, 1,{ 1.1,1,1.2,2,2.2,2.1,0,0,0 }},
-        scatterTF_test_params{ "I32", { 1,5 },{ 1,2,3,4,5 },{ 1,2 },{ 1,3 },{ 1.1,2.1 }, 1,{ 1,1.1,3,2.1,5 }},
-        scatterTF_test_params{"FP32", { 3,3 },{ 0,0,0,0,0,0,0,0,0 },{ 2,3 },{ 1,0,2,0,2,1 },{ 1.,1.1,1.2,2,2.1,2.2 }, 0,{ 2,1.1,0,1,0,2.2,0,2.1,1.2 }},
-        scatterTF_test_params{"FP32", { 3,3 },{ 0,0,0,0,0,0,0,0,0 },{ 2,3 },{ 1,0,2,0,2,1 },{ 1.,1.1,1.2,2,2.1,2.2 }, 1,{ 1.1,1,1.2,2,2.2,2.1,0,0,0 }},
-        scatterTF_test_params{"FP32", { 1,5 },{ 1,2,3,4,5 },{ 1,2 },{ 1,3 },{ 1.1,2.1 }, 1,{ 1,1.1,3,2.1,5 }}));
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/select_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/select_tests.cpp
deleted file mode 100644
index 8e5b2fcbe8561c..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/select_tests.cpp
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "test_graph.hpp"
-
-#include "single_layer_common.hpp"
-#include "tests_common.hpp"
-#include <string>
-
-
-using namespace ::testing;
-using namespace std;
-using namespace mkldnn;
-using namespace InferenceEngine;
-
-using select_test_params = std::tuple<
-    InferenceEngine::Precision,  // conditionType
-    InferenceEngine::SizeVector, // conditionShape
-    InferenceEngine::SizeVector  // inputShape
->;
-
-template <typename T>
-void ref_select(
-    InferenceEngine::TBlob<T> &condition,
-    InferenceEngine::TBlob<float> &then_,
-    InferenceEngine::TBlob<float> &else_,
-    InferenceEngine::TBlob<float> &dst
-) {
-    const T *conditionData = condition.buffer();
-
-    const float *thenData = then_.cbuffer().as<const float*>();
-
-    const float *elseData = else_.cbuffer().as<const float*>();
-
-    float* dstData = dst.cbuffer().as<float*>();
-    enum {N, C, H, W, Dims};
-    int dim[Dims] = {1, 1, 1, 1};
-    int cdim[Dims] = {1, 1, 1, 1};
-
-    InferenceEngine::SizeVector dims = then_.getTensorDesc().getDims();
-    std::copy(std::begin(dims), std::end(dims), std::begin(dim) + (Dims - dims.size()));
-
-    InferenceEngine::SizeVector cDims = condition.getTensorDesc().getDims();
-    std::copy(std::begin(cDims), std::end(cDims), std::begin(cdim) + (Dims - cDims.size()));
-
-    for (int b = 0; b < dim[N]; b++)
-    for (int c = 0; c < dim[C]; c++)
-    for (int h = 0; h < dim[H]; h++)
-    for (int w = 0; w < dim[W]; w++) {
-        dstData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w]
-                = conditionData[(b % cdim[N])*cdim[C]*cdim[H]*cdim[W] + (c % cdim[C])*cdim[H]*cdim[W] + (h % cdim[H])*cdim[W] + (w % cdim[W])]
-                ? 
thenData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w] - : elseData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w]; - } -} - -class MKLDNNCPUExtSelectTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _CONDITION_SHAPE_ - - - - - - - _INPUT_SHAPE_ - - - - - - - _INPUT_SHAPE_ - - - - - - - _CONDITION_SHAPE_ - - - _INPUT_SHAPE_ - - - _INPUT_SHAPE_ - - - - - _INPUT_SHAPE_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(InferenceEngine::Precision conditionType, - InferenceEngine::SizeVector conditionShape, - InferenceEngine::SizeVector inputShape) { - std::string model = model_t; - - { - std::string conditionTypeStr; - switch(conditionType) { - case InferenceEngine::Precision::FP32 : conditionTypeStr = "FP32"; break; - case InferenceEngine::Precision::I32 : conditionTypeStr = "I32" ; break; - default: EXPECT_FALSE("Unsuported pressision"); - } - REPLACE_WITH_STR(model, "_CONDITION_TYPE_", conditionTypeStr); - } - - { - std::string conditionShapeStr; - for (auto dim : conditionShape) { - conditionShapeStr += ""; - conditionShapeStr += std::to_string(dim) + "\n"; - } - conditionShapeStr.pop_back(); - REPLACE_WITH_STR(model, "_CONDITION_SHAPE_", conditionShapeStr); - } - - { - std::string inputShapeStr; - for (auto dim : inputShape) { - inputShapeStr += ""; - inputShapeStr += std::to_string(dim) + "\n"; - } - inputShapeStr.pop_back(); - REPLACE_WITH_STR(model, "_INPUT_SHAPE_", inputShapeStr); - } - - return model; - } - - static void fill_even(int32_t *data, size_t size) { - for (size_t i = 0; i < size; i++) - data[i] = i%2 ? 1 : 0; - } - - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - Precision conditionType; - SizeVector conditionShape; - SizeVector inputShape; - std::tie(conditionType, conditionShape, inputShape) = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(conditionType, conditionShape, inputShape); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Input Data - InferenceEngine::Blob::Ptr then_; - then_ = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - inputShape, InferenceEngine::TensorDesc::getLayoutByDims(inputShape) }); - then_->allocate(); - fill_data_dbgval(then_->buffer(), then_->size()); - auto * thenPtr = dynamic_cast*>(then_.get()); - if (thenPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Input Data - InferenceEngine::Blob::Ptr else_; - else_ = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - inputShape, InferenceEngine::TensorDesc::getLayoutByDims(inputShape) }); - else_->allocate(); - fill_data_dbgval(else_->buffer(), else_->size(), -1.0); - auto * elsePtr = dynamic_cast*>(else_.get()); - if (elsePtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::Blob::Ptr condition; - - switch (conditionType) { - case 
Precision::FP32 : - condition = make_shared_blob({ conditionType, conditionShape, - TensorDesc::getLayoutByDims(conditionShape) }); - condition->allocate(); - fill_data(condition->buffer(), condition->size()); - - break; - case Precision::I32 : - condition = make_shared_blob({ conditionType, conditionShape, - TensorDesc::getLayoutByDims(conditionShape) }); - break; - default: - FAIL(); - break; - } - - condition->allocate(); - fill_even(condition->buffer(), condition->size()); - - switch (conditionType) { - case InferenceEngine::Precision::FP32 : { - auto conditionPtr = std::dynamic_pointer_cast>(condition); - if (conditionPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - ref_select(*conditionPtr, *thenPtr, *elsePtr, dst_ref); - } - break; - case InferenceEngine::Precision::I32 : { - auto conditionPtr = std::dynamic_pointer_cast>(condition); - if (conditionPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - ref_select(*conditionPtr, *thenPtr, *elsePtr, dst_ref); - } - break; - default: - FAIL(); - } - - InferenceEngine::BlobMap srcs = { - {"condition", condition}, - {"then_", then_}, - {"else_", else_}, - }; - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtSelectTests, TestsSelect) {} -INSTANTIATE_TEST_CASE_P( - TestsSelect, MKLDNNCPUExtSelectTests, - Combine( - Values(Precision::I32), - Values( -// SizeVector {}, // TODO: scalars is not supported right now for CPU backend - SizeVector {1}, - SizeVector {1, 1}, - SizeVector {1, 16}, - SizeVector {3, 1, 16}, - SizeVector {1, 16, 1}, - SizeVector {3, 16, 16}), - Values(SizeVector {3, 16, 16}) - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/shuffle_channels_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/shuffle_channels_tests.cpp deleted file mode 100644 index 8a4c12bd422000..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/shuffle_channels_tests.cpp +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct shuffle_channels_test_params { - InferenceEngine::SizeVector in_out_shape; - int axis; - int group; - - std::vector reference; - std::vector> comp; -}; - -void ref_shuffle_channels( - InferenceEngine::TBlob &src, - InferenceEngine::TBlob &dst, - int axis, - int group -) { - size_t i; - const float *src_data = src.data(); - InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims(); - InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides(); - float* dst_data = dst.data(); - InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims(); - InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides(); - - if (axis < 0) - axis += dst_dims.size(); - - if (axis < 0 || axis >= dst_dims.size()) - FAIL() << "Incorrect input parameters dimensions and axis number!"; - - if (dst_dims[axis] % group) - FAIL() << "Group parameter must evenly divide the channel dimension!"; - - // Find number of dictionaries, index range and data length - size_t numDictionaries = 1; - for (i = 0; i <= axis; i++) - numDictionaries *= dst_dims[i]; - - 
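// Illustrative sketch (hypothetical standalone helper, not from the deleted file):
// ShuffleChannels reshapes the channel axis C into (group, C/group), transposes the
// pair, and flattens back, so output channel c*group + g reads input channel
// g*(C/group) + c. For C = 6, group = 2 this maps channels [0 1 2 3 4 5] to
// [0 3 1 4 2 5], which is exactly the 'test8' vector used below.
#include <cstddef>
#include <vector>

std::vector<float> shuffle_channels_sketch(const std::vector<float>& src,
                                           size_t outer, size_t channels,
                                           size_t inner, size_t group) {
    const size_t per_group = channels / group;  // assumes channels % group == 0
    std::vector<float> dst(src.size());
    for (size_t o = 0; o < outer; ++o)
        for (size_t g = 0; g < group; ++g)
            for (size_t c = 0; c < per_group; ++c)
                for (size_t i = 0; i < inner; ++i)
                    // output channel c*group + g takes input channel g*per_group + c
                    dst[(o * channels + c * group + g) * inner + i] =
                        src[(o * channels + g * per_group + c) * inner + i];
    return dst;
}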
size_t channelsNum = dst_dims[axis] / group;
-
-    size_t dataLength = 1;
-    for (i = axis + 1; i < dst_dims.size(); i++)
-        dataLength *= dst_dims[i];
-
-    if (dataLength == 0)
-        FAIL() << "Incorrect input parameters dimension!";
-
-    size_t j, k;
-    for (j = 0, k = 0; j < numDictionaries; j += dst_dims[axis]) {
-        for (i = 0; i < (dst_dims[axis] * channelsNum); i += channelsNum, k += dataLength) {
-            int idx = j + i / dst_dims[axis] + i % dst_dims[axis];
-            memcpy(&dst_data[k], &src_data[dataLength * idx], sizeof(float) * dataLength);
-        }
-    }
-}
-
-class MKLDNNCPUExtShuffleChannelsTests : public TestsCommon, public WithParamInterface<shuffle_channels_test_params> {
-    std::string model_t = R"V0G0N(
-
-
-
-
-
-                    _IN_OUT_
-
-
-
-
-
-
-
-                    _IN_OUT_
-
-
-
-
-                    _IN_OUT_
-
-
-
-
-
-
-
-
-)V0G0N";
-
-    std::string getModel(shuffle_channels_test_params p) {
-        std::string model = model_t;
-        std::string in_out_shape;
-
-        for (size_t i = 0; i < p.in_out_shape.size(); i++) {
-            in_out_shape += "<dim>";
-            in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
-        }
-        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
-        REPLACE_WITH_NUM(model, "_AX_", p.axis);
-        REPLACE_WITH_NUM(model, "_GR_", p.group);
-
-        return model;
-    }
-
-protected:
-    virtual void TearDown() {
-    }
-
-    virtual void SetUp() {
-        try {
-            TestsCommon::SetUp();
-            shuffle_channels_test_params p = ::testing::WithParamInterface<shuffle_channels_test_params>::GetParam();
-            std::string model = getModel(p);
-
-            InferenceEngine::Core core;
-            InferenceEngine::CNNNetwork network;
-            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
-
-            MKLDNNGraphTestClass graph;
-            graph.CreateGraph(network);
-
-            // Output Data
-            InferenceEngine::OutputsDataMap out;
-            out = network.getOutputsInfo();
-            InferenceEngine::BlobMap outputBlobs;
-
-            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
-
-            InferenceEngine::TBlob<float>::Ptr output;
-            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
-            output->allocate();
-            outputBlobs[item.first] = output;
-
-            // Output Reference
-            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
-            dst_ref.allocate();
-
-            // Input Data
-            InferenceEngine::Blob::Ptr src;
-            src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_out_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) });
-            src->allocate();
-            fill_data_dbgval(src->buffer(), src->size());
-            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
-            if (srcPtr == nullptr)
-                FAIL() << "Cannot cast blob to TBlob<float>.";
-
-            // Compute reference results
-            InferenceEngine::SizeVector out_dims;
-            ref_shuffle_channels(*srcPtr, dst_ref, p.axis, p.group);
-
-            // Check results
-            if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
-                FAIL() << "Wrong result compared with TF reference!";
-
-            InferenceEngine::BlobMap srcs;
-            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
-
-            // Infer
-            graph.Infer(srcs, outputBlobs);
-            compare(*output, dst_ref);
-        } catch (const InferenceEngine::Exception &e) {
-            FAIL() << e.what();
-        }
-    }
-};
-
-
-TEST_P(MKLDNNCPUExtShuffleChannelsTests, TestsShuffleChannels) {}
-
-// Test data vectors
-static std::vector<float> test0 = { 0.f, 1.f, 2.f, 3.f, 12.f, 13.f, 14.f, 15.f, 24.f, 25.f, 26.f, 27.f, 36.f, 37.f, 38.f, 39.f, 48.f, 49.f, 50.f, 51.f,
-                                    4.f, 5.f, 6.f, 7.f, 16.f, 17.f, 18.f, 19.f, 28.f, 29.f, 30.f, 31.f, 40.f, 41.f, 42.f, 43.f, 52.f, 53.f, 54.f, 55.f,
-                                    8.f, 9.f, 10.f, 11.f, 20.f, 21.f, 22.f, 23.f, 32.f, 33.f, 34.f, 35.f, 44.f, 45.f, 46.f, 47.f, 56.f, 57.f, 58.f, 59.f };
-static std::vector<float> test4 = { 0.f, 2.f, 4.f, 1.f, 3.f, 5.f, 6.f, 8.f, 10.f, 7.f, 9.f, 11.f,
12.f, 14.f, 16.f, 13.f, 15.f, 17.f, 18.f, 20.f, 22.f, 19.f, 21.f, 23.f }; -static std::vector test5 = { 0.f, 1.f, 4.f, 5.f, 8.f, 9.f, 2.f, 3.f, 6.f, 7.f, 10.f, 11.f, 12.f, 13.f, 16.f, 17.f, 20.f, 21.f, 14.f, 15.f, 18.f, 19.f, 22.f, 23.f }; -static std::vector test6 = { 0.f, 3.f, 1.f, 4.f, 2.f, 5.f, 6.f, 9.f, 7.f, 10.f, 8.f, 11.f, 12.f, 15.f, 13.f, 16.f, 14.f, 17.f, 18.f, 21.f, 19.f, 22.f, 20.f, 23.f }; -static std::vector test7 = { 0.f, 1.f, 6.f, 7.f, 2.f, 3.f, 8.f, 9.f, 4.f, 5.f, 10.f, 11.f, 12.f, 13.f, 18.f, 19.f, 14.f, 15.f, 20.f, 21.f, 16.f, 17.f, 22.f, 23.f }; -static std::vector test8 = { 0.f, 3.f, 1.f, 4.f, 2.f, 5.f }; - -INSTANTIATE_TEST_CASE_P( - TestsShuffleChannels, MKLDNNCPUExtShuffleChannelsTests, - ::testing::Values( -// Params: in_out_shape, axis, group, reference -/* 0 */ shuffle_channels_test_params{ { 1, 15, 2, 2 }, 1, 5, test0 }, - shuffle_channels_test_params{ { 1, 15, 2, 2 }, -3, 5, test0 }, - shuffle_channels_test_params{ { 15, 2, 2 }, 0, 5, test0 }, - shuffle_channels_test_params{ { 15, 2, 2 }, -3, 5, test0 }, - shuffle_channels_test_params{ { 2, 2, 6 }, -1, 3, test4 }, -/* 5 */ shuffle_channels_test_params{ { 2, 6, 2 }, -2, 3, test5 }, - shuffle_channels_test_params{ { 2, 2, 6 }, -1, 2, test6 }, - shuffle_channels_test_params{ { 2, 6, 2 }, -2, 2, test7 }, - shuffle_channels_test_params{ { 6 }, 0, 2, test8 } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_fill_empty_rows_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_fill_empty_rows_tests.cpp deleted file mode 100644 index a1cb52dd793006..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_fill_empty_rows_tests.cpp +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct sparse_fill_empty_rows_test_params { - std::string precision; - InferenceEngine::SizeVector input_indices_shape; - std::vector input_indices_value; - - InferenceEngine::SizeVector input_values_shape; - - InferenceEngine::SizeVector input_dense_shape_shape; - std::vector input_dense_shape_value; - - InferenceEngine::SizeVector input_default_value_shape; - std::vector input_default_value_value; - - InferenceEngine::SizeVector output_indices_shape; - InferenceEngine::SizeVector output_values_shape; - InferenceEngine::SizeVector output_empty_rows_indicator_shape; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -void ref_sparse_fill_empty_rows(InferenceEngine::TBlob &input_indices, - InferenceEngine::TBlob &input_values, - InferenceEngine::TBlob &dense_shape, - InferenceEngine::TBlob &default_value, - InferenceEngine::TBlob &output_indices, - InferenceEngine::TBlob &output_values, - InferenceEngine::TBlob &output_empty_rows_indicator) { - const float *input_indices_ptr = input_indices.data(); - const float *input_values_ptr = input_values.data(); - const float *dense_shape_ptr = dense_shape.data(); - const float *default_value_ptr = default_value.data(); - float dflt_value = default_value_ptr[0]; - - float num_rows = dense_shape_ptr[0]; - float num_cols = dense_shape_ptr[1]; - - std::vector dims = input_values.getTensorDesc().getDims(); - size_t inMaxNumValues = dims[0]; - std::vector 
out_dims = output_values.getTensorDesc().getDims();
-    size_t outMaxNumValues = out_dims[0];
-
-    // compute the actual number of values by searching for an out-of-range index that serves as a marker
-    size_t in_actual_num_values = 0;
-    for (in_actual_num_values = 0; in_actual_num_values < inMaxNumValues; in_actual_num_values++) {
-        float index_x = input_indices_ptr[2 * in_actual_num_values];
-        float index_y = input_indices_ptr[2 * in_actual_num_values + 1];
-        if (index_x < 0 || index_y < 0 || index_x >= num_rows || index_y >= num_cols) break;
-    }
-
-    // create auxiliary container for sorting
-    std::vector<std::array<float, 3>> indices_values(in_actual_num_values);  // {row, col, value}
-    for (size_t i = 0; i < in_actual_num_values; i++) {
-        float row = input_indices_ptr[2 * i];
-        float col = input_indices_ptr[2 * i + 1];
-        float value = input_values_ptr[i];
-        std::array<float, 3> elem = { row, col, value };
-        indices_values[i] = elem;
-    }
-
-    // sort values by row
-    std::sort(indices_values.begin(), indices_values.end(),
-        [](const std::array<float, 3>& first, const std::array<float, 3>& second) {
-            return first[0] < second[0];
-        });
-
-    // flatten the sorted indices and values back into separate containers
-    std::vector<float> indices_with_sorted_rows;
-    std::vector<float> values_for_sorted_rows;
-    for (auto const & elem : indices_values) {
-        indices_with_sorted_rows.push_back(elem[0]);
-        indices_with_sorted_rows.push_back(elem[1]);
-        values_for_sorted_rows.push_back(elem[2]);
-    }
-
-    // compute the start index for each row and the number of values at each row
-    std::vector<unsigned int> values_at_row(num_rows);
-    std::fill(values_at_row.begin(), values_at_row.end(), 0);
-    float prev_row_with_value = -1.0;
-    unsigned int total_num_values = 0;
-    std::vector<std::array<float, 3>>::iterator curr_it, prev_it;
-    for (float row_ind = 0.0; row_ind < num_rows; row_ind = row_ind + 1.0) {
-        curr_it = std::find_if(indices_values.begin(), indices_values.end(),
-            [row_ind](std::array<float, 3> elem) { return elem[0] == row_ind; });
-        if (curr_it != indices_values.end()) {
-            if (prev_row_with_value != -1.0) {
-                unsigned int num_values_at_prev_row = std::distance(prev_it, curr_it);
-                values_at_row[(int)prev_row_with_value] = num_values_at_prev_row;
-                total_num_values += num_values_at_prev_row;
-            }
-            prev_row_with_value = row_ind;
-            prev_it = curr_it;
-        }
-        else {
-            total_num_values++;
-        }
-    }
-    if (prev_row_with_value != -1.0) {
-        unsigned int num_values_at_prev_row = std::distance(prev_it, indices_values.end());
-        values_at_row[(int)prev_row_with_value] = num_values_at_prev_row;
-        total_num_values += num_values_at_prev_row;
-    }
-
-    // create output indices
-    float *output_indices_ptr = output_indices.data();
-    float *output_values_ptr = output_values.data();
-    float *output_empty_rows_indicator_ptr = output_empty_rows_indicator.data();
-
-    // zero output buffers
-    std::memset(output_indices_ptr, 0, outMaxNumValues * 2 * sizeof(float));
-    std::memset(output_values_ptr, 0, outMaxNumValues * sizeof(float));
-    std::memset(output_empty_rows_indicator_ptr, 0, num_rows * sizeof(float));
-
-    unsigned int curr_pos_from_copy = 0;
-    unsigned int curr_pos_to_copy = 0;
-    for (int row_ind = 0; row_ind < (int)num_rows; row_ind++) {
-        unsigned int num_values_at_row = values_at_row[row_ind];
-        if (num_values_at_row == 0) {
-            output_empty_rows_indicator_ptr[row_ind] = 1.0;
-            output_values_ptr[curr_pos_to_copy] = dflt_value;
-            output_indices_ptr[curr_pos_to_copy * 2] = (float)row_ind;
-            output_indices_ptr[curr_pos_to_copy * 2 + 1] = 0.0;
-            curr_pos_to_copy++;
-        }
-        else {
-            output_empty_rows_indicator_ptr[row_ind] = 0.0;
-            std::copy(values_for_sorted_rows.begin() + curr_pos_from_copy,
values_for_sorted_rows.begin() + curr_pos_from_copy + num_values_at_row, - output_values_ptr + curr_pos_to_copy); - std::copy(indices_with_sorted_rows.begin() + 2 * curr_pos_from_copy, - indices_with_sorted_rows.begin() + 2 * curr_pos_from_copy + 2 * num_values_at_row, output_indices_ptr + 2 * curr_pos_to_copy); - curr_pos_to_copy += num_values_at_row; - curr_pos_from_copy += num_values_at_row; - } - } - - // mark the end of output using (-1, -1) indice - if (total_num_values < outMaxNumValues) { - output_indices_ptr[total_num_values * 2] = -1.0; - output_indices_ptr[total_num_values * 2 + 1] = -1.0; - } -} - -class MKLDNNCPUExtSparseFillEmptyRowsTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IIN_ - - - - - - - _IVL_ - - - - - - - _IDS_ - - - - - - - _IDV_ - - - - - - - _IIN_ - - - _IVL_ - - - _IDS_ - - - _IDV_ - - - - - _OIN_ - - - _OVL_ - - - _ERI_ - - - - - - - - - - - -)V0G0N"; - - std::string getModel(sparse_fill_empty_rows_test_params p) { - std::string model = model_t; - std::string input_indices; - std::string input_values; - std::string dense_shape; - std::string default_value; - std::string output_indices; - std::string output_values; - std::string output_empty_rows_indicator; - - InferenceEngine::SizeVector input_dense_shape_shape = { 2 }; - - for (auto& shape : p.input_indices_shape) { - input_indices += ""; - input_indices += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.input_values_shape) { - input_values += ""; - input_values += std::to_string(shape) + "\n"; - } - - for (auto& shape : input_dense_shape_shape) { - dense_shape += ""; - dense_shape += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.input_default_value_shape) { - default_value += ""; - default_value += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_indices_shape) { - output_indices += ""; - output_indices += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_values_shape) { - output_values += ""; - output_values += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_empty_rows_indicator_shape) { - output_empty_rows_indicator += ""; - output_empty_rows_indicator += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_IIN_", input_indices); - REPLACE_WITH_STR(model, "_IVL_", input_values); - REPLACE_WITH_STR(model, "_IDS_", dense_shape); - REPLACE_WITH_STR(model, "_IDV_", default_value); - REPLACE_WITH_STR(model, "_OIN_", output_indices); - REPLACE_WITH_STR(model, "_OVL_", output_values); - REPLACE_WITH_STR(model, "_ERI_", output_empty_rows_indicator); - - return model; - } - - template - static void fill_data_dbgval(data_t *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = static_cast(i & (sizeof(data_t) * 8 - 1)); - } - } -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - sparse_fill_empty_rows_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "SparseFillEmptyRows") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - 
p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - // 4 inputs + 1 op + 3 outputs - ASSERT_EQ(8, nodes.size()); - - // Input Data - InferenceEngine::Blob::Ptr input_indices = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_indices_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_indices_shape) }); - input_indices->allocate(); - auto *input_indices_ptr = dynamic_cast*>(input_indices.get()); - std::copy(p.input_indices_value.begin(), p.input_indices_value.end(), (float *) input_indices_ptr->data()); - - InferenceEngine::Blob::Ptr input_values = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_values_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_values_shape) }); - input_values->allocate(); - fill_data(input_values->buffer(), input_values->size()); - - auto *input_values_ptr = dynamic_cast*>(input_values.get()); - InferenceEngine::Blob::Ptr input_dense_shape = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_dense_shape_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_dense_shape_shape) }); - input_dense_shape->allocate(); - auto *input_dense_shape_ptr = dynamic_cast*>(input_dense_shape.get()); - std::copy(p.input_dense_shape_value.begin(), p.input_dense_shape_value.end(), (float *) input_dense_shape_ptr->data()); - - InferenceEngine::Blob::Ptr input_default_value = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_default_value_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_default_value_shape) }); - input_default_value->allocate(); - auto *input_default_value_ptr = dynamic_cast*>(input_default_value.get()); - std::copy(p.input_default_value_value.begin(), p.input_default_value_value.end(), (float *) input_default_value_ptr->data()); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap output_blobs; - auto iter = out.begin(); - - std::pair item = *(iter++); - InferenceEngine::Blob::Ptr output_indices = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_indices->allocate(); - output_blobs[item.first] = output_indices; - InferenceEngine::TBlob output_indices_ref(item.second->getTensorDesc()); - output_indices_ref.allocate(); - - item = *(iter++); - InferenceEngine::Blob::Ptr output_values = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_values->allocate(); - output_blobs[item.first] = output_values; - InferenceEngine::TBlob output_values_ref(item.second->getTensorDesc()); - output_values_ref.allocate(); - - item = *(iter++); - InferenceEngine::Blob::Ptr output_empty_rows_indicator = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_empty_rows_indicator->allocate(); - output_blobs[item.first] = output_empty_rows_indicator; - InferenceEngine::TBlob output_empty_rows_indicator_ref(item.second->getTensorDesc()); - output_empty_rows_indicator_ref.allocate(); - - // Compute reference result - ref_sparse_fill_empty_rows(*input_indices_ptr, *input_values_ptr, *input_dense_shape_ptr, *input_default_value_ptr, - output_indices_ref, output_values_ref, output_empty_rows_indicator_ref); - - // Compute IE result - InferenceEngine::BlobMap inputs; - inputs.insert(std::pair("InputIndices", input_indices)); 
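// Illustrative sketch (hypothetical standalone snippet, not from the deleted file):
// the contract these cases exercise is that the sparse tensor arrives in COO form,
// any row of the dense shape with no entry receives one (row, 0) -> default_value
// entry with empty_rows_indicator[row] = 1, and a (-1, -1) index pair marks the
// end of the valid entries in the fixed-size output buffers. Assumes in-range rows.
#include <cstddef>
#include <vector>

std::vector<float> empty_rows_indicator_sketch(const std::vector<float>& indices_2d,
                                               size_t num_rows) {
    std::vector<float> indicator(num_rows, 1.f);   // start by assuming every row is empty
    for (size_t i = 0; i + 1 < indices_2d.size(); i += 2) {
        const float row = indices_2d[i];
        const float col = indices_2d[i + 1];
        if (row < 0 || col < 0) break;             // (-1, -1) end-of-data marker
        indicator[(size_t)row] = 0.f;              // row has at least one value
    }
    return indicator;
}
// E.g. for input_indices_value_case2 below and num_rows = 4 this yields
// {0, 0, 1, 0}: only row 2 is empty and gets the default value inserted.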
- inputs.insert(std::pair("InputValues", input_values)); - inputs.insert(std::pair("InputDenseShape", input_dense_shape)); - inputs.insert(std::pair("InputDefaultValue", input_default_value)); - - // Check the result - graph.Infer(inputs, output_blobs); - compare(*output_indices, output_indices_ref, 0.0f); - compare(*output_values, output_values_ref, 0.0f); - compare(*output_empty_rows_indicator, output_empty_rows_indicator_ref, 0.0f); - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtSparseFillEmptyRowsTests, TestsSparseFillEmptyRows) {} - - -// case 1 - empty sparse tensor with marker -InferenceEngine::SizeVector input_indices_shape_case1 = {2, 2}; -std::vector input_indices_value_case1 = {-1.f, -1.f}; -InferenceEngine::SizeVector input_values_shape_case1 = {2}; -InferenceEngine::SizeVector input_dense_shape_shape_case1 = {2}; -std::vector input_dense_shape_value_case1 = {3.f, 4.f}; -InferenceEngine::SizeVector input_default_value_shape_case1 = {1}; -std::vector input_default_value_case1 = {0.f}; -InferenceEngine::SizeVector output_indices_shape_case1 = {12, 2}; -InferenceEngine::SizeVector output_values_shape_case1 = {12}; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case1 = {3}; - -// case 2 - in one row all values absent without marker -InferenceEngine::SizeVector input_indices_shape_case2 = {6, 2}; -std::vector input_indices_value_case2 = {1.f, 0.f, 0.f, 0.f, 3.f, 1.f, 1.f, 2.f, 3.f, 4.f, 0.f, 1.f}; -InferenceEngine::SizeVector input_values_shape_case2 = {6}; -InferenceEngine::SizeVector input_dense_shape_shape_case2 = {2}; -std::vector input_dense_shape_value_case2 = {4.f, 5.f}; -InferenceEngine::SizeVector input_default_value_shape_case2 = {1}; -std::vector input_default_value_case2 = {0.f}; -InferenceEngine::SizeVector output_indices_shape_case2 = {20, 2}; -InferenceEngine::SizeVector output_values_shape_case2 = {20}; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case2 = {4}; - -// case 3 - in one row all values absent with marker -InferenceEngine::SizeVector input_indices_shape_case3 = { 6, 2 }; -std::vector input_indices_value_case3 = { 1.f, 0.f, 0.f, 0.f, 3.f, 1.f, 1.f, 2.f, 3.f, 4.f, -1.f, -1.f }; -InferenceEngine::SizeVector input_values_shape_case3 = { 6 }; -InferenceEngine::SizeVector input_dense_shape_shape_case3 = { 2 }; -std::vector input_dense_shape_value_case3 = { 4.f, 5.f }; -InferenceEngine::SizeVector input_default_value_shape_case3 = { 1 }; -std::vector input_default_value_case3 = { 0.f }; -InferenceEngine::SizeVector output_indices_shape_case3 = { 20, 2 }; -InferenceEngine::SizeVector output_values_shape_case3 = { 20 }; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case3 = { 4 }; - -// case 4 - in all rows at least one value presents without marker -InferenceEngine::SizeVector input_indices_shape_case4 = { 7, 2 }; -std::vector input_indices_value_case4 = { 1.f, 0.f, 0.f, 0.f, 3.f, 1.f, 1.f, 2.f, 3.f, 3.f, 2.f, 1.f, 4.f, 3.f }; -InferenceEngine::SizeVector input_values_shape_case4 = { 7 }; -InferenceEngine::SizeVector input_dense_shape_shape_case4 = { 2 }; -std::vector input_dense_shape_value_case4 = { 5.f, 4.f }; -InferenceEngine::SizeVector input_default_value_shape_case4 = { 1 }; -std::vector input_default_value_case4 = { 0.f }; -InferenceEngine::SizeVector output_indices_shape_case4 = { 20, 2 }; -InferenceEngine::SizeVector output_values_shape_case4 = { 20 }; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case4 = { 5 }; - -// case 5 - in 
all rows at least one value presents with marker -InferenceEngine::SizeVector input_indices_shape_case5 = { 8, 2 }; -std::vector input_indices_value_case5 = { 1.f, 0.f, 0.f, 0.f, 3.f, 1.f, 1.f, 2.f, 3.f, 3.f, 2.f, 1.f, 4.f, 3.f, -1.f, -1.f }; -InferenceEngine::SizeVector input_values_shape_case5 = { 8 }; -InferenceEngine::SizeVector input_dense_shape_shape_case5 = { 2 }; -std::vector input_dense_shape_value_case5 = { 5.f, 4.f }; -InferenceEngine::SizeVector input_default_value_shape_case5 = { 1 }; -std::vector input_default_value_case5 = { 0.f }; -InferenceEngine::SizeVector output_indices_shape_case5 = { 20, 2 }; -InferenceEngine::SizeVector output_values_shape_case5 = { 20 }; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case5 = { 5 }; - -// case 6 - big sparse tensor with many missed rows without marker -InferenceEngine::SizeVector input_indices_shape_case6 = { 7, 2 }; -std::vector input_indices_value_case6 = { 1.f, 0.f, 0.f, 0.f, 99.f, 19.f, 12.f, 2.f, 37.f, 13.f, 2.f, 1.f, 45.f, 3.f }; -InferenceEngine::SizeVector input_values_shape_case6 = { 7 }; -InferenceEngine::SizeVector input_dense_shape_shape_case6 = { 2 }; -std::vector input_dense_shape_value_case6 = { 100.f, 20.f }; -InferenceEngine::SizeVector input_default_value_shape_case6 = { 1 }; -std::vector input_default_value_case6 = { 0.f }; -InferenceEngine::SizeVector output_indices_shape_case6 = { 2000, 2 }; -InferenceEngine::SizeVector output_values_shape_case6 = { 2000 }; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case6 = { 100 }; - -// case 7 - big sparse tensor with many missed rows with marker -InferenceEngine::SizeVector input_indices_shape_case7 = { 8, 2 }; -std::vector input_indices_value_case7 = { 1.f, 0.f, 0.f, 0.f, 99.f, 19.f, 12.f, 2.f, 37.f, 13.f, 2.f, 1.f, 45.f, 3.f, -1.f, -1.f }; -InferenceEngine::SizeVector input_values_shape_case7 = { 8 }; -InferenceEngine::SizeVector input_dense_shape_shape_case7 = { 2 }; -std::vector input_dense_shape_value_case7 = { 100.f, 20.f }; -InferenceEngine::SizeVector input_default_value_shape_case7 = { 1 }; -std::vector input_default_value_case7 = { 0.f }; -InferenceEngine::SizeVector output_indices_shape_case7 = { 2000, 2 }; -InferenceEngine::SizeVector output_values_shape_case7 = { 2000 }; -InferenceEngine::SizeVector output_empty_rows_indicator_shape_case7 = { 100 }; - -INSTANTIATE_TEST_CASE_P( - TestsSparseFillEmptyRows, MKLDNNCPUExtSparseFillEmptyRowsTests, - ::testing::Values( - // case 1 - empty sparse tensor without marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case1, input_indices_value_case1, input_values_shape_case1, - input_dense_shape_shape_case1, input_dense_shape_value_case1, input_default_value_shape_case1, input_default_value_case1, - output_indices_shape_case1, output_values_shape_case1, output_empty_rows_indicator_shape_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 2 - in one row all values absent without marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case2, input_indices_value_case2, input_values_shape_case2, - input_dense_shape_shape_case2, input_dense_shape_value_case2, input_default_value_shape_case2, input_default_value_case2, - output_indices_shape_case2, output_values_shape_case2, output_empty_rows_indicator_shape_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 3 - in one row all values absent with marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case3, input_indices_value_case3, input_values_shape_case3, - 
input_dense_shape_shape_case3, input_dense_shape_value_case3, input_default_value_shape_case3, input_default_value_case3, - output_indices_shape_case3, output_values_shape_case3, output_empty_rows_indicator_shape_case3, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 4 - in all rows at least one value presents without marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case4, input_indices_value_case4, input_values_shape_case4, - input_dense_shape_shape_case4, input_dense_shape_value_case4, input_default_value_shape_case4, input_default_value_case4, - output_indices_shape_case4, output_values_shape_case4, output_empty_rows_indicator_shape_case4, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 5 - in all rows at least one value presents with marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case5, input_indices_value_case5, input_values_shape_case5, - input_dense_shape_shape_case5, input_dense_shape_value_case5, input_default_value_shape_case5, input_default_value_case5, - output_indices_shape_case5, output_values_shape_case5, output_empty_rows_indicator_shape_case5, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 6 - big sparse tensor with many missed rows without marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case6, input_indices_value_case6, input_values_shape_case6, - input_dense_shape_shape_case6, input_dense_shape_value_case6, input_default_value_shape_case6, input_default_value_case6, - output_indices_shape_case6, output_values_shape_case6, output_empty_rows_indicator_shape_case6, - 1, MKLDNNPlugin::impl_desc_type::unknown }, - - // case 7 - big sparse tensor with many missed rows with marker - sparse_fill_empty_rows_test_params{ "FP32", - input_indices_shape_case7, input_indices_value_case7, input_values_shape_case7, - input_dense_shape_shape_case7, input_dense_shape_value_case7, input_default_value_shape_case7, input_default_value_case7, - output_indices_shape_case7, output_values_shape_case7, output_empty_rows_indicator_shape_case7, - 1, MKLDNNPlugin::impl_desc_type::unknown } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_segment_reduce_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_segment_reduce_tests.cpp deleted file mode 100644 index 7bde79bf0298ff..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_segment_reduce_tests.cpp +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct sparse_segment_reduce_test_params { - std::string model; - - std::string precision; - - std::string reduce_op; - - InferenceEngine::SizeVector input_data_shape; - std::vector input_data_value; - InferenceEngine::SizeVector input_indices_shape; - std::vector input_indices_value; - InferenceEngine::SizeVector input_segment_ids_shape; - std::vector input_segment_ids_value; - - InferenceEngine::SizeVector output_shape; - - std::vector output_ref; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -class MKLDNNCPUExtSparseSegmentReduceTests : public TestsCommon, public WithParamInterface { - std::string 
getModel(sparse_segment_reduce_test_params p) { - std::string model = p.model; - - std::string input_data_shape; - std::string input_indices_shape; - std::string input_segment_ids_shape; - std::string output_shape; - - for (auto& shape : p.input_data_shape) { - input_data_shape += ""; - input_data_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_indices_shape) { - input_indices_shape += ""; - input_indices_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_segment_ids_shape) { - input_segment_ids_shape += ""; - input_segment_ids_shape += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_shape) { - output_shape += ""; - output_shape += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_REDUCE_OP_", p.reduce_op); - REPLACE_WITH_STR(model, "_INPUT_DATA_", input_data_shape); - REPLACE_WITH_STR(model, "_INPUT_INDICES_", input_indices_shape); - REPLACE_WITH_STR(model, "_INPUT_SEGMENT_IDS_", input_segment_ids_shape); - REPLACE_WITH_STR(model, "_OUTPUT_", output_shape); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - sparse_segment_reduce_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "SparseSegmentReduce") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // prepare input blobs and input blob map - InferenceEngine::BlobMap input_blob_map; - InferenceEngine::Blob::Ptr input_data = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_data_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_data_shape) }); - input_data->allocate(); - auto *input_data_ptr = dynamic_cast*>(input_data.get()); - std::copy(p.input_data_value.begin(), p.input_data_value.end(), (float *)input_data_ptr->data()); - input_blob_map["InputData"] = input_data; - InferenceEngine::Blob::Ptr input_indices = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_indices_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_indices_shape) }); - input_indices->allocate(); - auto *input_indices_ptr = dynamic_cast*>(input_indices.get()); - std::copy(p.input_indices_value.begin(), p.input_indices_value.end(), (float *)input_indices_ptr->data()); - input_blob_map["InputIndices"] = input_indices; - - InferenceEngine::Blob::Ptr input_segment_ids = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_segment_ids_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_segment_ids_shape) }); - input_segment_ids->allocate(); - auto *input_segment_ids_ptr = dynamic_cast*>(input_segment_ids.get()); - std::copy(p.input_segment_ids_value.begin(), p.input_segment_ids_value.end(), (float *)input_segment_ids_ptr->data()); - input_blob_map["InputSegmentIds"] = input_segment_ids; 
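// Illustrative sketch (hypothetical standalone helper, not the plugin
// implementation): the three flavours share one gather-and-accumulate loop and
// differ only in normalisation: SparseSegmentSum keeps the sum, SparseSegmentMean
// divides by the segment's index count, SparseSegmentSqrtN by its square root.
// Assumes float data, in-range indices, sorted segment ids, and 'out' sized
// [num_segments * row_len] and zero-initialised by the caller.
#include <cmath>
#include <cstddef>
#include <vector>

enum class Reduce { Sum, Mean, SqrtN };

void sparse_segment_reduce_sketch(const std::vector<float>& data, size_t row_len,
                                  const std::vector<int>& indices,
                                  const std::vector<int>& segment_ids,
                                  std::vector<float>& out, Reduce mode) {
    std::vector<int> seg_count(out.size() / row_len, 0);
    for (size_t i = 0; i < indices.size(); ++i) {
        const int seg = segment_ids[i];
        ++seg_count[seg];
        for (size_t c = 0; c < row_len; ++c)       // gather row indices[i] into its segment
            out[seg * row_len + c] += data[indices[i] * row_len + c];
    }
    if (mode == Reduce::Sum) return;
    for (size_t s = 0; s < seg_count.size(); ++s) {
        if (seg_count[s] == 0) continue;           // empty segments stay zero
        const float div = (mode == Reduce::Mean)
            ? (float)seg_count[s] : std::sqrt((float)seg_count[s]);
        for (size_t c = 0; c < row_len; ++c)
            out[s * row_len + c] /= div;
    }
}
// Applied to case 0 below (indices {3, 1, 1, 0, 2}, segment ids {0, 0, 2, 2, 4})
// this reproduces output_value_ref_case0: rows 3 and 1 are summed into segment 0,
// rows 1 and 0 into segment 2, row 2 into segment 4, and segments 1 and 3 stay zero.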
- - // prepare output blob map - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - InferenceEngine::BlobMap output_blob_map; - for (auto iter = out.begin(); iter != out.end(); iter++) { - std::pair item = *iter; - InferenceEngine::Blob::Ptr output_blob_ptr = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_blob_ptr->allocate(); - output_blob_map[item.first] = output_blob_ptr; - } - - // prepare blob with output reference data - InferenceEngine::Blob::Ptr output_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.output_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_shape) }); - output_ref->allocate(); - auto *output_ref_ptr = dynamic_cast*>(output_ref.get()); - std::copy(p.output_ref.begin(), p.output_ref.end(), (float *)output_ref_ptr->data()); - - // infer - graph.Infer(input_blob_map, output_blob_map); - - // check the result - auto iter = out.begin(); - compare(*output_blob_map[iter->first], *output_ref, 0.0f); - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtSparseSegmentReduceTests, TestsSparseSegmentReduce) {} - -// model that contains one SparseSegmentReduce layer -std::string model = R"V0G0N( - - - - - - _INPUT_DATA_ - - - - - - - _INPUT_INDICES_ - - - - - - - _INPUT_SEGMENT_IDS_ - - - - - - - _INPUT_DATA_ - - - _INPUT_INDICES_ - - - _INPUT_SEGMENT_IDS_ - - - - - _OUTPUT_ - - - - - - - - - - -)V0G0N"; - -// case 0 - reduce = "sum", 5 segments, where two segments are empty -std::string reduce_op_case0 = "SparseSegmentSum"; -InferenceEngine::SizeVector input_data_shape_case0 = { 4, 3 }; -std::vector input_data_value_case0 = { 0.f, 1.f, 2.f, - 3.f, 4.f, 5.f, - 6.f, 7.f, 8.f, - 9.f, 10.f, 11.f }; -InferenceEngine::SizeVector input_indices_shape_case0 = { 5 }; -std::vector input_indices_value_case0 = { 3.f, 1.f, 1.f, 0.f, 2.f}; -InferenceEngine::SizeVector input_segment_ids_shape_case0 = { 5 }; -std::vector input_segment_ids_value_case0 = { 0.f, 0.f, 2.f, 2.f, 4.f }; -InferenceEngine::SizeVector output_shape_case0 = { 5, 3 }; -std::vector output_value_ref_case0 = { 12.f, 14.f, 16.f, - 0.f, 0.f, 0.f, - 3.f, 5.f, 7.f, - 0.f, 0.f, 0.f, - 6.f, 7.f, 8.f }; - -// case 1 - reduce = "mean", 5 segments, where two segments are empty -std::string _reduce_op_case1 = "SparseSegmentMean"; -InferenceEngine::SizeVector _input_data_shape_case1 = { 4, 3 }; -std::vector _input_data_value_case1 = { 0.f, 1.f, 2.f, - 3.f, 4.f, 5.f, - 6.f, 7.f, 8.f, - 9.f, 10.f, 11.f }; -InferenceEngine::SizeVector _input_indices_shape_case1 = { 5 }; -std::vector _input_indices_value_case1 = { 3.f, 1.f, 1.f, 0.f, 2.f }; -InferenceEngine::SizeVector _input_segment_ids_shape_case1 = { 5 }; -std::vector _input_segment_ids_value_case1 = { 0.f, 0.f, 2.f, 2.f, 4.f }; -InferenceEngine::SizeVector _output_shape_case1 = { 5, 3 }; -std::vector _output_value_ref_case1 = { 6.f, 7.f, 8.f, - 0.f, 0.f, 0.f, - 1.5f, 2.5f, 3.5f, - 0.f, 0.f, 0.f, - 6.f, 7.f, 8.f }; - -// case 2 - reduce = "sqrtn", 5 segments, where two segments are empty -std::string _reduce_op_case2 = "SparseSegmentSqrtN"; -InferenceEngine::SizeVector _input_data_shape_case2 = { 4, 3 }; -std::vector _input_data_value_case2 = { 0.f, 1.f, 2.f, - 3.f, 4.f, 5.f, - 6.f, 7.f, 8.f, - 9.f, 10.f, 11.f }; -InferenceEngine::SizeVector _input_indices_shape_case2 = { 6 }; -std::vector _input_indices_value_case2 = { 0.f, 1.f, 2.f, 3.f, 1.f, 0.f}; -InferenceEngine::SizeVector _input_segment_ids_shape_case2 = { 6 }; -std::vector 
_input_segment_ids_value_case2 = { 0.f, 0.f, 0.f, 0.f, 2.f, 4.f }; -InferenceEngine::SizeVector _output_shape_case2 = { 6, 3 }; -std::vector _output_value_ref_case2 = { 9.f, 11.f, 13.f, - 0.f, 0.f, 0.f, - 3.f, 4.f, 5.f, - 0.f, 0.f, 0.f, - 0.f, 1.f, 2.f, - 0.f, 0.f, 0.f}; - -INSTANTIATE_TEST_CASE_P( - TestsSparseSegmentReduce, MKLDNNCPUExtSparseSegmentReduceTests, - ::testing::Values( - // case 0 - reduce with sum operation, 5 segments, where two segments are empty - sparse_segment_reduce_test_params{ - model, "FP32", reduce_op_case0, - input_data_shape_case0, input_data_value_case0, - input_indices_shape_case0, input_indices_value_case0, - input_segment_ids_shape_case0, input_segment_ids_value_case0, - output_shape_case0, output_value_ref_case0, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 1 - reduce with mean operation, 5 segments, where two segments are empty - sparse_segment_reduce_test_params{ - model, "FP32", _reduce_op_case1, - _input_data_shape_case1, _input_data_value_case1, - _input_indices_shape_case1, _input_indices_value_case1, - _input_segment_ids_shape_case1, _input_segment_ids_value_case1, - _output_shape_case1, _output_value_ref_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 2 - reduce with sqrtn operation, 5 segments, where two segments are empty - sparse_segment_reduce_test_params{ - model, "FP32", _reduce_op_case2, - _input_data_shape_case2, _input_data_value_case2, - _input_indices_shape_case2, _input_indices_value_case2, - _input_segment_ids_shape_case2, _input_segment_ids_value_case2, - _output_shape_case2, _output_value_ref_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown - } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_to_dense_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_to_dense_tests.cpp deleted file mode 100644 index be143c740e524e..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_to_dense_tests.cpp +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct sparse_to_dense_test_params { - std::string model; - std::string precision; - - InferenceEngine::SizeVector input_indices_shape; - std::vector input_indices; - InferenceEngine::SizeVector input_dense_shape_shape; - std::vector input_dense_shape; - InferenceEngine::SizeVector input_values_shape; - std::vector input_values; - int input_default_value; - - InferenceEngine::SizeVector output_shape; - std::vector output_value_ref; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -class MKLDNNCPUExtSparseToDenseTests : public TestsCommon, public WithParamInterface { - std::string getModel(sparse_to_dense_test_params p) { - std::string model = p.model; - - std::string input_indices_shape; - std::string input_dense_shape_shape; - std::string input_values_shape; - std::string output_shape; - - for (auto& shape : p.input_indices_shape) { - input_indices_shape += ""; - input_indices_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_dense_shape_shape) { - input_dense_shape_shape += ""; - input_dense_shape_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_values_shape) { - 
input_values_shape += ""; - input_values_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.output_shape) { - output_shape += ""; - output_shape += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_INPUT_INDICES_SHAPE_", input_indices_shape); - REPLACE_WITH_STR(model, "_INPUT_DENSE_SHAPE_SHAPE_", input_dense_shape_shape); - REPLACE_WITH_STR(model, "_INPUT_VALUES_SHAPE_", input_values_shape); - REPLACE_WITH_STR(model, "_OUTPUT_SHAPE_", output_shape); - - return model; - } - -protected: - static void compare_int( - InferenceEngine::Blob &res, - InferenceEngine::Blob &ref, - int max_diff = 0, - const std::string assertDetails = "") { - int *res_ptr = res.buffer().as(); - size_t res_size = res.size(); - - int *ref_ptr = ref.buffer().as(); - size_t ref_size = ref.size(); - - ASSERT_EQ(res_size, ref_size) << assertDetails; - - for (size_t i = 0; i < ref_size; i++) { - ASSERT_EQ(res_ptr[i], ref_ptr[i]) << assertDetails; - } - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - sparse_to_dense_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "SparseToDense") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // prepare input blob and input blob map - InferenceEngine::BlobMap input_blob_map; - InferenceEngine::Blob::Ptr input_indices = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_indices_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_indices_shape) }); - input_indices->allocate(); - auto *input_indices_ptr = dynamic_cast*>(input_indices.get()); - std::copy(p.input_indices.begin(), p.input_indices.end(), (int *)input_indices_ptr->data()); - input_blob_map["InputIndices"] = input_indices; - - InferenceEngine::Blob::Ptr input_dense_shape = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_dense_shape_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_dense_shape_shape) }); - input_dense_shape->allocate(); - auto *input_dense_shape_ptr = dynamic_cast*>(input_dense_shape.get()); - std::copy(p.input_dense_shape.begin(), p.input_dense_shape.end(), (int *)input_dense_shape_ptr->data()); - input_blob_map["InputDenseShape"] = input_dense_shape; - - InferenceEngine::Blob::Ptr input_values = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_values_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_values_shape) }); - input_values->allocate(); - auto *input_values_ptr = dynamic_cast*>(input_values.get()); - std::copy(p.input_values.begin(), p.input_values.end(), (int *)input_values_ptr->data()); - input_blob_map["InputValues"] = input_values; - - InferenceEngine::Blob::Ptr input_default_value = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - { }, 
InferenceEngine::TensorDesc::getLayoutByDims({ }) }); - input_default_value->allocate(); - auto *input_default_value_ptr = dynamic_cast*>(input_default_value.get()); - *((int *)input_default_value_ptr->data()) = p.input_default_value; - input_blob_map["InputDefaultValue"] = input_default_value; - - // prepare output blob map - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - InferenceEngine::BlobMap output_blob_map; - for (auto iter = out.begin(); iter != out.end(); iter++) { - std::pair item = *iter; - InferenceEngine::Blob::Ptr output_blob_ptr = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_blob_ptr->allocate(); - output_blob_map[item.first] = output_blob_ptr; - } - - // prepare blobs with reference data - InferenceEngine::Blob::Ptr output_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.output_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_shape) }); - output_blob_ref->allocate(); - auto *output_blob_ref_ptr = dynamic_cast*>(output_blob_ref.get()); - std::copy(p.output_value_ref.begin(), p.output_value_ref.end(), (int *)output_blob_ref_ptr->data()); - - // infer - graph.Infer(input_blob_map, output_blob_map); - - // check the result - auto iter = out.begin(); - compare_int(*output_blob_map[iter->first], *output_blob_ref, 0); - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtSparseToDenseTests, TestsSparseToDense) {} - -// model 1 that contains one SparseToDense layer -std::string sp2d_model1 = R"V0G0N( - - - - - - _INPUT_INDICES_SHAPE_ - - - - - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - - - - - _INPUT_VALUES_SHAPE_ - - - - - - - - - - - - _INPUT_INDICES_SHAPE_ - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - _INPUT_VALUES_SHAPE_ - - - - - - _OUTPUT_SHAPE_ - - - - - - - - - - - -)V0G0N"; - -// case 1 - it contains of the default value input -InferenceEngine::SizeVector sp2d_input_indices_shape_case1 = { 5, 2 }; -std::vector sp2d_input_indices_case1 = { 0, 1, - 1, 2, - 1, 3, - 3, 0, - 3, 4 }; -InferenceEngine::SizeVector sp2d_input_dense_shape_shape_case1 = { 2 }; -std::vector sp2d_input_dense_shape_case1 = { 4, 5}; -InferenceEngine::SizeVector sp2d_input_values_shape_case1 = { 5 }; -std::vector sp2d_input_values_case1 = { 8, - 1, - 2, - 1, - 8 }; -int sp2d_input_default_value_case1 = -1; -InferenceEngine::SizeVector sp2d_output_shape_case1 = { 4, 5}; -std::vector sp2d_output_value_ref_case1 = { -1, 8, -1, -1, -1, - -1, -1, 1, 2, -1, - -1, -1, -1, -1, -1, - 1, -1, -1, -1, 8}; - -INSTANTIATE_TEST_CASE_P( - TestsSparseToDense, MKLDNNCPUExtSparseToDenseTests, - ::testing::Values( - sparse_to_dense_test_params{ - sp2d_model1, "I32", - sp2d_input_indices_shape_case1, sp2d_input_indices_case1, - sp2d_input_dense_shape_shape_case1, sp2d_input_dense_shape_case1, - sp2d_input_values_shape_case1, sp2d_input_values_case1, - sp2d_input_default_value_case1, - sp2d_output_shape_case1, sp2d_output_value_ref_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown - } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_weighted_reduce_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_weighted_reduce_tests.cpp deleted file mode 100644 index 0917fd9c0d0510..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_weighted_reduce_tests.cpp +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct sparse_weighted_reduce_test_params { - std::string model; - std::string precision; - std::string reduce_operation; - bool with_weights; - - InferenceEngine::SizeVector input_indices_shape; - std::vector input_indices; - InferenceEngine::SizeVector input_values_shape; - std::vector input_values; - InferenceEngine::SizeVector input_dense_shape_shape; - std::vector input_dense_shape; - InferenceEngine::SizeVector input_params_table_shape; - std::vector input_params_table; - int input_default_value; - InferenceEngine::SizeVector input_weights_shape; - std::vector input_weights; - - InferenceEngine::SizeVector output_shape; - std::vector output_value_ref; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -class MKLDNNCPUExtExperimentalSparseWeightedReduceTests : public TestsCommon, public WithParamInterface { - std::string getModel(sparse_weighted_reduce_test_params p) { - std::string model = p.model; - - std::string input_indices_shape; - std::string input_values_shape; - std::string input_dense_shape_shape; - std::string input_params_table_shape; - std::string input_weights_shape; - std::string output_shape; - - for (auto& shape : p.input_indices_shape) { - input_indices_shape += ""; - input_indices_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_values_shape) { - input_values_shape += ""; - input_values_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_dense_shape_shape) { - input_dense_shape_shape += ""; - input_dense_shape_shape += std::to_string(shape) + "\n"; - } - for (auto& shape : p.input_params_table_shape) { - input_params_table_shape += ""; - input_params_table_shape += std::to_string(shape) + "\n"; - } - if (p.with_weights) { - for (auto& shape : p.input_weights_shape) { - input_weights_shape += ""; - input_weights_shape += std::to_string(shape) + "\n"; - } - } - - for (auto& shape : p.output_shape) { - output_shape += ""; - output_shape += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_PRECISION_", p.precision); - REPLACE_WITH_STR(model, "_REDUCE_OPERATION_", p.reduce_operation); - - REPLACE_WITH_STR(model, "_INPUT_INDICES_SHAPE_", input_indices_shape); - REPLACE_WITH_STR(model, "_INPUT_VALUES_SHAPE_", input_values_shape); - REPLACE_WITH_STR(model, "_INPUT_DENSE_SHAPE_SHAPE_", input_dense_shape_shape); - REPLACE_WITH_STR(model, "_INPUT_PARAMS_TABLE_SHAPE_", input_params_table_shape); - if (p.with_weights) { - REPLACE_WITH_STR(model, "_INPUT_WEIGHTS_SHAPE_", input_weights_shape); - } - REPLACE_WITH_STR(model, "_OUTPUT_SHAPE_", output_shape); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - sparse_weighted_reduce_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "ExperimentalSparseWeightedReduce") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t 
j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // prepare input blob and input blob map - InferenceEngine::BlobMap input_blob_map; - InferenceEngine::Blob::Ptr input_indices = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_indices_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_indices_shape) }); - input_indices->allocate(); - auto *input_indices_ptr = dynamic_cast*>(input_indices.get()); - std::vector input_indices_int(p.input_indices.begin(), p.input_indices.end()); - std::copy(input_indices_int.begin(), input_indices_int.end(), (int *)input_indices_ptr->data()); - input_blob_map["InputIndices"] = input_indices; - - InferenceEngine::Blob::Ptr input_values = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_values_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_values_shape) }); - input_values->allocate(); - auto *input_values_ptr = dynamic_cast*>(input_values.get()); - std::vector input_values_int(p.input_values.begin(), p.input_values.end()); - std::copy(input_values_int.begin(), input_values_int.end(), (int *)input_values_ptr->data()); - input_blob_map["InputValues"] = input_values; - - InferenceEngine::Blob::Ptr input_dense_shape = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - p.input_dense_shape_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_dense_shape_shape) }); - input_dense_shape->allocate(); - auto *input_dense_shape_ptr = dynamic_cast*>(input_dense_shape.get()); - std::vector input_dense_shape_int(p.input_dense_shape.begin(), p.input_dense_shape.end()); - std::copy(input_dense_shape_int.begin(), input_dense_shape_int.end(), (int *)input_dense_shape_ptr->data()); - input_blob_map["InputDenseShape"] = input_dense_shape; - - InferenceEngine::Blob::Ptr input_params_table = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_params_table_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_params_table_shape) }); - input_params_table->allocate(); - auto *input_params_table_ptr = dynamic_cast*>(input_params_table.get()); - std::copy(p.input_params_table.begin(), p.input_params_table.end(), (float *)input_params_table_ptr->data()); - input_blob_map["InputParamsTable"] = input_params_table; - - InferenceEngine::Blob::Ptr input_default_value = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, - { }, InferenceEngine::TensorDesc::getLayoutByDims({ }) }); - input_default_value->allocate(); - auto *input_default_value_ptr = dynamic_cast*>(input_default_value.get()); - *((int *)input_default_value_ptr->data()) = p.input_default_value; - input_blob_map["InputDefaultValue"] = input_default_value; - - if (p.with_weights) { - InferenceEngine::Blob::Ptr input_weights = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_weights_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_weights_shape) }); - input_weights->allocate(); - auto *input_weights_ptr = dynamic_cast*>(input_weights.get()); - std::copy(p.input_weights.begin(), p.input_weights.end(), (float *)input_weights_ptr->data()); - input_blob_map["InputWeights"] = input_weights; - } - - // prepare output blob map - InferenceEngine::OutputsDataMap out = 
network.getOutputsInfo(); - InferenceEngine::BlobMap output_blob_map; - for (auto iter = out.begin(); iter != out.end(); iter++) { - std::pair item = *iter; - InferenceEngine::Blob::Ptr output_blob_ptr = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_blob_ptr->allocate(); - output_blob_map[item.first] = output_blob_ptr; - } - - // prepare blobs with reference data - InferenceEngine::Blob::Ptr output_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.output_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_shape) }); - output_blob_ref->allocate(); - auto *output_blob_ref_ptr = dynamic_cast*>(output_blob_ref.get()); - std::copy(p.output_value_ref.begin(), p.output_value_ref.end(), (float *)output_blob_ref_ptr->data()); - - // infer - graph.Infer(input_blob_map, output_blob_map); - - // check the result - auto iter = out.begin(); - compare(*output_blob_map[iter->first], *output_blob_ref, 0.0f); - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtExperimentalSparseWeightedReduceTests, TestsExperimentalSparseWeightedReduce) {} - -// model 1 that contains one ExperimentalSparseWeightedReduce layer with the weights input -std::string swr_model1 = R"V0G0N( - - - - - - _INPUT_INDICES_SHAPE_ - - - - - - - _INPUT_VALUES_SHAPE_ - - - - - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - - - - - _INPUT_PARAMS_TABLE_SHAPE_ - - - - - - - - - - - - _INPUT_WEIGHTS_SHAPE_ - - - - - - - _INPUT_INDICES_SHAPE_ - - - _INPUT_VALUES_SHAPE_ - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - _INPUT_PARAMS_TABLE_SHAPE_ - - - - _INPUT_WEIGHTS_SHAPE_ - - - - - _OUTPUT_SHAPE_ - - - - - - - - - - - - - -)V0G0N"; - -// model 2 that contains one ExperimentalSparseWeightedReduce layer without the weights input -std::string swr_model2 = R"V0G0N( - - - - - - _INPUT_INDICES_SHAPE_ - - - - - - - _INPUT_VALUES_SHAPE_ - - - - - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - - - - - _INPUT_PARAMS_TABLE_SHAPE_ - - - - - - - - - - - - _INPUT_INDICES_SHAPE_ - - - _INPUT_VALUES_SHAPE_ - - - _INPUT_DENSE_SHAPE_SHAPE_ - - - _INPUT_PARAMS_TABLE_SHAPE_ - - - - - - _OUTPUT_SHAPE_ - - - - - - - - - - - - -)V0G0N"; - -// case 1 - ExperimentalSparseWeightedSum, I32, the model with weights input -std::string swr_precision_case2 = "I32"; -std::string swr_reduce_operation_case2 = "ExperimentalSparseWeightedSum"; -bool swr_with_weights_case2 = true; -InferenceEngine::SizeVector swr_input_indices_shape_case2 = { 5, 2 }; -std::vector swr_input_indices_case2 = { 0.0f, 1.0f, - 1.0f, 2.0f, - 1.0f, 3.0f, - 3.0f, 0.0f, - 3.0f, 4.0f }; -InferenceEngine::SizeVector swr_input_values_shape_case2 = { 5 }; -std::vector swr_input_values_case2 = { 3.0f, - 1.0f, - 2.0f, - 1.0f, - 4.0f }; -InferenceEngine::SizeVector swr_input_dense_shape_shape_case2 = { 2 }; -std::vector swr_input_dense_shape_case2 = { 4.0f, 5.0f }; -InferenceEngine::SizeVector swr_input_params_table_shape_case2 = { 5, 3 }; -std::vector swr_input_params_table_case2 = { 1.0f, 2.0f, 3.0f, - 4.0f, 5.0f, 6.0f, - 6.0f, 5.0f, 4.0f, - 3.0f, 2.0f, 1.0f, - 10.0f, 11.0f, 12.0f }; -int swr_input_default_value_case2 = 0; -InferenceEngine::SizeVector swr_input_weights_shape_case2 = { 5 }; -std::vector swr_input_weights_case2 = { 1.0f, - 2.0f, - 0.5f, - 1.0f, - 3.0f }; -InferenceEngine::SizeVector swr_output_shape_case2 = { 4, 3 }; -std::vector swr_output_value_ref_case2 = { 3.0f, 2.0f, 1.0f, - 11.0f, 12.5f, 14.0f, - 1.0f, 2.0f, 3.0f, - 34.0f, 38.0f, 42.0f }; - - -INSTANTIATE_TEST_CASE_P( - 
TestsExperimentalSparseWeightedReduce, MKLDNNCPUExtExperimentalSparseWeightedReduceTests, - ::testing::Values( - sparse_weighted_reduce_test_params{ - swr_model1, swr_precision_case2, swr_reduce_operation_case2, swr_with_weights_case2, - swr_input_indices_shape_case2, swr_input_indices_case2, - swr_input_values_shape_case2, swr_input_values_case2, - swr_input_dense_shape_shape_case2, swr_input_dense_shape_case2, - swr_input_params_table_shape_case2, swr_input_params_table_case2, - swr_input_default_value_case2, - swr_input_weights_shape_case2, swr_input_weights_case2, - swr_output_shape_case2, swr_output_value_ref_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown - } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/strided_slice_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/strided_slice_tests.cpp deleted file mode 100644 index ca8f1fc24e4acc..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/strided_slice_tests.cpp +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct strided_slice_test_params { - InferenceEngine::SizeVector in_shape; - size_t dim_size; - std::vector begin; - std::vector end; - std::vector stride; - - InferenceEngine::SizeVector begin_mask; - InferenceEngine::SizeVector end_mask; - InferenceEngine::SizeVector ellipsis_mask; - InferenceEngine::SizeVector new_axis_mask; - InferenceEngine::SizeVector shrink_axis_mask; - InferenceEngine::SizeVector out_shape; - std::vector reference; - - std::vector> comp; -}; - -inline void clipping(int *idx, const int min, const int max) { - (*idx) = ((*idx) > min) ? (*idx) : min; - (*idx) = ((*idx) < max) ? 
(*idx) : (max - 1); - return; -} - -void ref_strided_slice( - InferenceEngine::TBlob<float> &src, - InferenceEngine::TBlob<float> &dst, - InferenceEngine::SizeVector &out_dims, - std::vector<int32_t> begin, - std::vector<int32_t> end, - std::vector<int32_t> stride, - InferenceEngine::SizeVector begin_mask, - InferenceEngine::SizeVector end_mask, - InferenceEngine::SizeVector ellipsis_mask, - InferenceEngine::SizeVector new_axis_mask, - InferenceEngine::SizeVector shrink_axis_mask -) { - size_t i; - const float *src_data = src.data(); - InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims(); - InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides(); - float* dst_data = dst.data(); - InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims(); - InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides(); - - int new_axis = 0; - for (auto& na : new_axis_mask) - new_axis += na; - - int shrink_axis = 0; - for (auto& sa : shrink_axis_mask) - shrink_axis += sa; - int max_dims = src_dims.size() + new_axis; -// if ((max_dims - shrink_axis) != dst_dims.size()) -// FAIL() << "Destination dims should be equal to source dims + new_axis - shrink_axis"; - - // Check begin/end/stride vector sizes - int bounds_size = 0; - if (begin.size() && end.size() && begin.size() != end.size()) FAIL() << "Begin vector size should be equal to end vector size"; - if (begin.size() && stride.size() && stride.size() != begin.size()) FAIL() << "Stride vector size should be equal to begin vector size"; - if (end.size() && stride.size() && stride.size() != end.size()) FAIL() << "Stride vector size should be equal to end vector size"; - - if (begin.size()) bounds_size = begin.size(); - if (end.size()) bounds_size = end.size(); - if (stride.size()) bounds_size = stride.size(); - - // ellipsis_mask must be a power of two (only one ellipsis), so take the first set position - int ellipsis_pos1, ellipsis_pos2; - ellipsis_pos1 = ellipsis_pos2 = max_dims; - for (i = 0; i < ellipsis_mask.size(); i++) { - if (ellipsis_mask[i] > 0) { - ellipsis_pos1 = i; - break; - } - } - bounds_size -= ellipsis_pos1; - if (bounds_size > 0 && (max_dims - bounds_size) > ellipsis_pos1) - ellipsis_pos2 = max_dims - bounds_size; - - std::vector<int> begin_dms(max_dims, 0); - std::vector<int> end_dms(max_dims, -1); - std::vector<int> stride_dms(max_dims, 1); - - int j, k, bj, ej, sj; - InferenceEngine::SizeVector our_dims; - for (i = 0, j = 0, k = 0, bj = 0, ej = 0, sj = 0; i < max_dims; i++) { - if (i >= ellipsis_pos1 && i < ellipsis_pos2) { - if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) { - end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j++] + end_dms[i]; - } else { - //end_dms[i] = 0; - end_dms[i] = begin_dms[i]; - } - out_dims.push_back(static_cast<size_t>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i]))))); - our_dims.push_back(static_cast<size_t>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i]))))); - k = ellipsis_pos1; - continue; - } - stride_dms[i] = (stride.size() > sj && stride[sj] != 0) ? stride[sj++] : 1; - - if (!(begin_mask.size() > j && begin_mask[j] == 0)) - begin_dms[i] = begin.size() > bj ? begin[bj] : (stride_dms[i] > 0 ? 0 : -1); - else - begin_dms[i] = stride_dms[i] > 0 ? 0 : -1; - bj++; - begin_dms[i] = begin_dms[i] >= 0 ? begin_dms[i] : src_dims[j] + begin_dms[i]; - // Clipping 'begin' - clipping(&begin_dms[i], 0, src_dims[j]); - - if (!(end_mask.size() > j && end_mask[j] == 0)) { - int end_dms_tmp = end.size() > ej ? 
(stride_dms[i] > 0 ? end[ej] - 1 : end[ej] + 1) : end_dms[i]; - end_dms[i] = end.size() > ej ? end_dms_tmp : (stride_dms[i] > 0 ? -1 : 0); - } - else { - end_dms[i] = stride_dms[i] > 0 ? -1 : 0; - } - ej++; - end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j] + end_dms[i]; - // Clipping 'end' - clipping(&end_dms[i], 0, src_dims[j]); - - if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) - j++; - else - end_dms[i] = 0; - - if (shrink_axis_mask.size() > k && shrink_axis_mask[k] == 1) - end_dms[i] = begin_dms[i]; - else - out_dims.push_back(static_cast(ceil(static_cast(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast(abs(stride_dms[i]))))); - - our_dims.push_back(static_cast(ceil(static_cast(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast(abs(stride_dms[i]))))); - k++; - } - - size_t work_amount_dst = dstStrides[0] * dst_dims[0]; - InferenceEngine::SizeVector counters(max_dims, 0); - - for (size_t iwork = 0, dst_idx = 0; iwork < work_amount_dst; ++iwork) { - int src_idx = 0; - for (i = 0, j = 0; i < max_dims; ++i) { - src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[j]; - if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) j++; - } - - dst_data[dst_idx++] = src_data[src_idx]; - - for (j = max_dims - 1; j >= 0; j--) { - counters[j] = (counters[j] + 1) % our_dims[j]; - if (counters[j] != 0) break; - } - } -} - -class MKLDNNCPUExtStridedSliceTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - _DIM_SIZE_ - - - - - - - _DIM_SIZE_ - - - - - - - _DIM_SIZE_ - - - - - - - - _IN_ - - - _DIM_SIZE_ - - - _DIM_SIZE_ - - - _DIM_SIZE_ - - - - - _OUT_ - - - - - - - - - - - -)V0G0N"; - - std::string getModel(strided_slice_test_params p) { - std::string model = model_t; - std::string in_shape; - std::string out_shape; - std::string begin; - std::string end; - std::string ellipsis; - std::string new_axis; - std::string shrink_axis; - - for (size_t i = 0; i < p.in_shape.size(); i++) { - in_shape += ""; - in_shape += std::to_string(p.in_shape[i]) + "\n"; - } - in_shape.pop_back(); - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.dim_size); - - if (p.begin_mask.size()) { - begin = "begin_mask=\""; - for (auto& pb : p.begin_mask) - begin += std::to_string(pb) + ","; - begin.pop_back(); - begin += "\""; - } - REPLACE_WITH_STR(model, "_BEGIN_", begin); - - if (p.end_mask.size()) { - end = "end_mask=\""; - for (auto& pb : p.end_mask) - end += std::to_string(pb) + ","; - end.pop_back(); - end += "\""; - } - REPLACE_WITH_STR(model, "_END_", end); - - if (p.ellipsis_mask.size()) { - ellipsis = "ellipsis_mask=\""; - for (auto& pb : p.ellipsis_mask) - ellipsis += std::to_string(pb) + ","; - ellipsis.pop_back(); - ellipsis += "\""; - } - REPLACE_WITH_STR(model, "_ELLIPSIS_", ellipsis); - - if (p.new_axis_mask.size()) { - new_axis = "new_axis_mask=\""; - for (auto& pb : p.new_axis_mask) - new_axis += std::to_string(pb) + ","; - new_axis.pop_back(); - new_axis += "\""; - } - REPLACE_WITH_STR(model, "_NEW_AXIS_", new_axis); - - if (p.shrink_axis_mask.size()) { - shrink_axis = "shrink_axis_mask=\""; - for (auto& pb : p.shrink_axis_mask) - shrink_axis += std::to_string(pb) + ","; - shrink_axis.pop_back(); - shrink_axis += "\""; - } - REPLACE_WITH_STR(model, "_SHRINK_", shrink_axis); - - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - out_shape.pop_back(); - REPLACE_WITH_STR(model, "_OUT_", out_shape); - - 
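// Note on the mask semantics this model exercises (a summary inferred from
// ref_strided_slice above, not an authoritative spec): begin_mask[i] == 0
// ignores begin[i] and slices from the start of axis i; end_mask[i] == 0
// likewise ignores end[i]; ellipsis_mask marks one axis that absorbs all
// unspecified dimensions; new_axis_mask inserts a dimension of size 1;
// shrink_axis_mask drops a dimension. For example, in_shape={10},
// begin={0}, end={0}, stride={2} with end_mask={0} walks the whole axis
// with step 2 and yields {0,2,4,6,8} (case 11 in the table below).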
return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - strided_slice_test_params p = ::testing::WithParamInterface<strided_slice_test_params>::GetParam(); - std::string model = getModel(p); - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin(); - - InferenceEngine::TBlob<float>::Ptr output; - output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Output Reference - InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - // Input Data - InferenceEngine::Blob::Ptr src; - src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) }); - src->allocate(); - fill_data_dbgval(src->buffer(), src->size()); - auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob<float>."; - - // Input Begin - InferenceEngine::Blob::Ptr beginIdx; - InferenceEngine::SizeVector begin_dim(1, p.dim_size); - beginIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, begin_dim, InferenceEngine::TensorDesc::getLayoutByDims(begin_dim) }); - beginIdx->allocate(); - if (p.begin.size()) - memcpy(static_cast<int32_t*>(beginIdx->buffer()), &p.begin[0], sizeof(int32_t)*p.begin.size()); - else - memset(static_cast<int32_t*>(beginIdx->buffer()), 0, sizeof(int32_t)*p.begin.size()); - auto * beginIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(beginIdx.get()); - if (beginIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob<int32_t>."; - - // Input End - InferenceEngine::Blob::Ptr endIdx; - InferenceEngine::SizeVector end_dim(1, p.dim_size); - endIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, end_dim, InferenceEngine::TensorDesc::getLayoutByDims(end_dim) }); - endIdx->allocate(); - if (p.end.size()) - memcpy(static_cast<int32_t*>(endIdx->buffer()), &p.end[0], sizeof(int32_t)*p.end.size()); - else - memset(static_cast<int32_t*>(endIdx->buffer()), 0, sizeof(int32_t)*p.end.size()); - auto * endIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(endIdx.get()); - if (endIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob<int32_t>."; - - // Input Stride - InferenceEngine::Blob::Ptr stridesIdx; - InferenceEngine::SizeVector strides_dim(1, p.dim_size); - stridesIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, strides_dim, InferenceEngine::TensorDesc::getLayoutByDims(strides_dim) }); - stridesIdx->allocate(); - if (p.stride.size()) - memcpy(static_cast<int32_t*>(stridesIdx->buffer()), &p.stride[0], sizeof(int32_t)*p.stride.size()); - else - memset(static_cast<int32_t*>(stridesIdx->buffer()), 0, sizeof(int32_t)*p.stride.size()); - auto * stridesIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(stridesIdx.get()); - if (stridesIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob<int32_t>."; - - // Calculate reference results - InferenceEngine::SizeVector out_dims; - ref_strided_slice(*srcPtr, dst_ref, out_dims, p.begin, p.end, p.stride, p.begin_mask, p.end_mask, p.ellipsis_mask, p.new_axis_mask, p.shrink_axis_mask); - - // Check results - if (out_dims.size() != p.out_shape.size()) - FAIL() << "Wrong out_shape size!"; - for (size_t i = 0; i < p.out_shape.size(); i++) { - if (out_dims[i] != p.out_shape[i]) - 
FAIL() << "Wrong out_shape dimensions!"; - } - if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0) - FAIL() << "Wrong result compared with the TF reference!"; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src)); - srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("begin", beginIdx)); - srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("end", endIdx)); - srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("strides", stridesIdx)); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - - -// Test data vectors -std::vector<float> test0 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f }; -std::vector<float> test2 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }; -std::vector<float> test5 = { 5.f, 6.f, 7.f, 8.f, 9.f }; -std::vector<float> test6 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f }; -std::vector<float> test8 = { 5.f, 4.f, 3.f, 2.f, 1.f }; -std::vector<float> test9 = { 5.f, 4.f, 3.f, 2.f, 1.f, 0.f }; -std::vector<float> test10 = { 5.f, 4.f, 3.f }; -std::vector<float> test11 = { 0.f, 2.f, 4.f, 6.f, 8.f }; -std::vector<float> test12 = { 1.f, 3.f, 5.f, 7.f, 9.f }; -std::vector<float> test13 = { 9.f, 8.f, 7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f, 0.f }; -std::vector<float> test14 = { 9.f, 7.f, 5.f, 3.f, 1.f }; -std::vector<float> test16 = { 0.f, 1.f, 3.f, 4.f }; -std::vector<float> test17 = { 1.f, 4.f }; -std::vector<float> test19 = { 0.f, 1.f, 2.f, 3.f }; -std::vector<float> test20 = { 4.f, 5.f, 6.f, 7.f }; -/* -0. [0,1,2,3,4,5,6,7,8,9], shape=[10] -1. [0,1,2,3,4,5,6,7,8,9], shape=[10] -2. [0,1,2,3,4,5,6,7,8], shape=[9] -3. [0,1,2,3,4,5,6,7,8], shape=[9] -4. [0,1,2,3,4,5,6,7,8,9], shape=[10] -5. [5,6,7,8,9], shape=[5] -6. [0,1,2,3,4,5], shape=[6] -7. [5,6,7,8,9], shape=[5] -8. [5,4,3,2,1], shape=[5] -9. [5,4,3,2,1,0], shape=[6] -10. [5,4,3], shape=[3] -11. [0,2,4,6,8], shape=[5] -12. [1,3,5,7,9], shape=[5] -13. [9,8,7,6,5,4,3,2,1,0], shape=[10] -14. [9,7,5,3,1], shape=[5] -15. [[0,1,2,3,4,5,6,7,8,9]], shape=[1,10] -16. [[[0,1,2],[3,4,5]]], shape=[1,2,2] -17. [[[0,1,2],[3,4,5]]], shape=[1,2,1] -18. [[[0,1,2],[3,4,5]]], shape=[1,1,2,1] -19. [[[[0,1],[2,3]],[[4,5],[6,7]]]], shape=[1,2,2] -20. [[[[0,1],[2,3]],[[4,5],[6,7]]]], shape=[1,2,2] -21. 
[[[0,1,2],[3,4,5]]], shape=[1,1,2] -*/ - -TEST_P(MKLDNNCPUExtStridedSliceTests, DISABLED_TestsStridedSlice) {} -INSTANTIATE_TEST_CASE_P( - TestsStridedSlice, MKLDNNCPUExtStridedSliceTests, - ::testing::Values( -// Params: in_shape, dim_size, begin, end, stride, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, out_shape, reference -/* 0 */ strided_slice_test_params{ { 10 }, 1, {}, {}, {}, {}, {}, {}, {}, {}, { 10 }, test0 }, - strided_slice_test_params{ { 10 }, 1, {0}, {0}, {}, {}, {0}, {}, {}, {}, { 10 }, test0 }, - strided_slice_test_params{ { 10 }, 1,{ -1 },{ -1 },{},{ 0 },{},{},{},{},{ 9 }, test2 }, - strided_slice_test_params{ { 10 }, 1,{ 0 },{ -1 },{},{},{},{},{},{},{ 9 }, test2 }, - strided_slice_test_params{ { 10 }, 1,{ 0 },{ 10 },{},{},{},{},{},{},{ 10 }, test0 }, -/* 5 */ strided_slice_test_params{ { 10 }, 1,{ 5 },{ 10 },{},{},{},{},{},{},{ 5 }, test5 }, - strided_slice_test_params{ { 10 }, 1,{ 0 },{ 6 },{},{},{},{},{},{},{ 6 }, test6 }, - strided_slice_test_params{ { 10 }, 1,{ -5 },{ 10 },{},{},{},{},{},{},{ 5 }, test5 }, - strided_slice_test_params{ { 10 }, 1,{ -5 },{ 0 },{-1},{},{},{},{},{},{ 5 }, test8 }, - strided_slice_test_params{ { 10 }, 1,{ -5 },{ 0 },{ -1 },{},{0},{},{},{},{ 6 }, test9 }, -/* 10 */ strided_slice_test_params{ { 10 }, 1,{ -5 },{ 2 },{ -1 },{},{},{},{},{},{ 3 }, test10 }, - strided_slice_test_params{ { 10 }, 1,{ 0 },{ 0 },{ 2 },{},{0},{},{},{},{ 5 }, test11 }, - strided_slice_test_params{ { 10 }, 1,{ 1 },{ 0 },{ 2 },{},{ 0 },{},{},{},{ 5 }, test12 }, - strided_slice_test_params{ { 10 }, 1,{ -1 },{ 0 },{ -1 },{},{ 0 },{},{},{},{ 10 }, test13 }, - strided_slice_test_params{ { 10 }, 1,{ -1 },{ 0 },{ -2 },{},{ 0 },{},{},{},{ 5 }, test14 }, -/* 15 */ strided_slice_test_params{ { 10 }, 1,{ 0 },{ 10 },{},{},{},{},{1},{},{ 1, 10 }, test0 }, - strided_slice_test_params{ { 1, 2, 3 }, 2,{ 0, 0 },{ 1, 2 },{},{},{},{0, 1},{},{},{ 1, 2, 2 }, test16 }, - strided_slice_test_params{ { 1, 2, 3 }, 4,{ 0, 0, 0, 1 },{ 2, 3, 2, 2 },{},{},{},{},{ 0,0,1,0 },{ 0,0,0,1 },{ 1,2,1 }, test17 }, - strided_slice_test_params{ { 1, 2, 3 }, 3,{ 0, 0, 1 },{ 2, 2, 2 },{},{},{},{ 0, 1 },{ 1 },{},{ 1, 1, 2, 1 }, test17 }, - strided_slice_test_params{ { 1, 2, 2, 2 }, 4,{},{},{},{ 0,1,0,0 },{ 0,1,0,0 },{},{},{ 0,1 },{ 1,2,2 }, test19 }, -/* 20 */ strided_slice_test_params{ { 1, 2, 2, 2 }, 4,{ 0,1,0,0 },{ 1,2,2,2 },{},{ 0,1,0,0 },{ 0,1,0,0 },{},{},{ 0,1,0,0 },{ 1,2,2 }, test20 }, - strided_slice_test_params{ { 1, 2, 3 }, 3,{ 0, 0, 1 },{ 2, 2, 2 },{},{},{},{ 0, 1 },{ 1 },{ 0, 0, 1 },{ 1, 1, 2 }, test17 } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/topk_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/topk_tests.cpp deleted file mode 100644 index a72aa4ced8ad28..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/topk_tests.cpp +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct topk_test_params { - SizeVector in_shape; - std::vector input_tensor; - int axis; - std::vector src_k; - std::string sort; - std::string mode; - SizeVector out_shape; - 
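// reference_val / reference_idx below hold the expected top-k values and
// their indices along the reduced axis; both may be left empty when only
// output shapes are validated. A concrete example, taken from the first
// instantiation further down: for in_shape={3,4} filled row-major with
// 0..11 (fill_data_dbgval), axis=-1, k=1, mode="max" the reference values
// are {3,7,11} and the reference indices are {3,3,3} (last column of each row).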
std::vector reference_val; - std::vector reference_idx; - - std::vector> comp; -}; - -static inline int count(std::vector dims, size_t start_ind, size_t end_ind) { - size_t count = 1; - for (size_t i = start_ind; i < end_ind; i++) - count *= dims[i]; - return static_cast(count); -} - -static inline int count(std::vector dims, size_t start_ind = 0) { - return count(dims, start_ind, dims.size()); -} - -static void ref_topk(InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst_data, InferenceEngine::TBlob &dst_indx, topk_test_params p) { - float *src_data = src.data(); - float* dst_val = dst_data.data(); - int* dst_idx = dst_indx.data(); - - int dim, axis_dist; - int src_k = static_cast(p.src_k[0]); - - - InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();; - int axis_ = p.axis; - if (axis_ < 0) - axis_ += src_dims.size(); - - size_t axis = static_cast(axis_); - - if (src_dims.size() < (1 + axis)) - FAIL() << " Incorrect input parameters dimensions and axis number!"; - - bool mode_max; - if (p.mode == "max") - mode_max = true; - else - mode_max = false; - - bool sort_value; - if (p.sort == "value") - sort_value = true; - else - sort_value = false; - - int j; - for (j = src_dims.size() - 1; j >= 0; j--) { - if (src_dims[j] != 1) break; - } - if (static_cast(j) == axis) { - dim = count(src_dims, static_cast(j)); - axis_dist = 1; - } else { - int axis_ = (p.axis < 0) ? p.axis + static_cast(src_dims.size()) : p.axis; - dim = static_cast(src_dims[axis_]); - axis_dist = count(src_dims, axis_) / dim; - } - - int num = count(src_dims) / dim; - std::vector > src_vector(src_k); - - for (int i = 0; i < num; ++i) { - src_vector[0] = std::make_pair(src_data[(i / axis_dist * dim) * axis_dist + i % axis_dist], 0); - for (j = 1; j < src_k; ++j) { - src_vector[j] = std::make_pair(src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j); - if (mode_max) { - if (src_vector[j].first > src_vector[j - 1].first) - std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::greater >()); - } else { - if (src_vector[j].first < src_vector[0].first) - std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::less >()); - } - } - - for (; j < dim; ++j) { - float value = src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist]; - if (mode_max) { - if (value > src_vector[src_k - 1].first) { - src_vector[src_k - 1] = std::make_pair(value, j); - std::sort(src_vector.begin(), src_vector.end(), std::greater >()); - } - } else { - if (value < src_vector[0].first) { - src_vector[0] = std::make_pair(value, j); - std::sort(src_vector.begin(), src_vector.end(), std::less >()); - } - } - } - - if (!sort_value) - std::sort(src_vector.begin(), src_vector.begin() + src_k, [](const pair &a, const pair &b) - { return (a.second < b.second); }); - - for (int j = 0; j < src_k; ++j) { - if (axis_dist != 1) { - // Produces max_val per axis - dst_val[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].first; - dst_idx[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].second; - } else { - // Produces max_ind and max_val - dst_val[i * src_k + j] = src_vector[j].first; - dst_idx[i * src_k + j] = src_vector[j].second; - } - } - } -} - - -class MKLDNNCPUExtTopKTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - 1 - - - - - - - - _IN_ - - - 1 - - - - - _OUT_ - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(topk_test_params p) { - std::string model = model_t; - std::string 
in_shape; - std::string out_shape; - - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_OUT_", out_shape); - - for (auto& dct : p.in_shape) { - in_shape += ""; - in_shape += std::to_string(dct) + "\n"; - } - - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_STR(model, "_SORT_", p.sort); - REPLACE_WITH_STR(model, "_MODE_", p.mode); - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - topk_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core ie; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - auto it = out.begin(); - std::pair item0 = *it; - std::pair item1 = *(++it); - - InferenceEngine::TBlob::Ptr output0; - output0 = InferenceEngine::make_shared_blob(item0.second->getTensorDesc()); - output0->allocate(); - outputBlobs[item0.first] = output0; - - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, p.out_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.out_shape) }); - output1->allocate(); - outputBlobs[item1.first] = output1; - - // Input Data - InferenceEngine::Blob::Ptr src; - src = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) }); - src->allocate(); - if (p.input_tensor.size()) - memcpy(src->buffer(), &p.input_tensor[0], sizeof(float)*p.input_tensor.size()); - else - fill_data_dbgval(src->buffer(), src->size()); - auto * srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("value", src)); - - InferenceEngine::Blob::Ptr seq_lengthsIdx; - InferenceEngine::SizeVector seq_lengths_dim(1, 1); - seq_lengthsIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) }); - seq_lengthsIdx->allocate(); - memcpy(static_cast(seq_lengthsIdx->buffer()), &p.src_k[0], sizeof(int32_t)); - auto * seq_lengthsIdxPtr = dynamic_cast*>(seq_lengthsIdx.get()); - if (seq_lengthsIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("src_k", seq_lengthsIdx)); - - // Output Reference - InferenceEngine::TBlob dst_data_ref(item0.second->getTensorDesc()); - dst_data_ref.allocate(); - InferenceEngine::TBlob dst_indx_ref(item1.second->getTensorDesc()); - dst_indx_ref.allocate(); - ref_topk(*srcPtr, dst_data_ref, dst_indx_ref, p); - - // Infer - graph.Infer(srcs, outputBlobs); - compare(*output0, dst_data_ref); - for (int i = 0; i < dst_indx_ref.size(); i++) - if (dst_indx_ref.data()[i] != (*output1).data()[i]) - FAIL() << "The difference between res_idx[i] and reference_idx[i]"; - - for (int i = 0; i < p.reference_val.size(); i++) { - if(p.reference_val.data()[i] != (*output0).data()[i]) - FAIL() << "The difference between res_val[i] and reference_val[i]"; - } - - for (int i = 0; i < p.reference_idx.size(); i++) { - if (p.reference_idx.data()[i] != 
(*output1).data()[i]) - FAIL() << "The difference between res_idx[i] and reference_idx[i]"; - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtTopKTests, TestsTopK) {} - -INSTANTIATE_TEST_CASE_P( - TestsTopK, MKLDNNCPUExtTopKTests, - ::testing::Values( -// Params: in_shape, input_tensor, axis, src_k, sort, mode, out_shape, reference_val, reference_idx - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "max",{ 3, 1 },{ 3,7,11 },{ 3,3,3 } }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "max",{ 1, 4 },{ 8,9,10,11 },{ 2,2,2,2 } }, - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "min",{ 3, 1 },{ 0,4,8 },{ 0,0,0 } }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "min",{ 1, 4 },{ 0,1,2,3 },{ 0,0,0,0 } }, - topk_test_params{ { 2, 3, 128, 256 },{}, 1,{ 1 }, "value", "max",{ 2, 1, 128, 256 },{},{} }, - topk_test_params{ { 3, 5, 128, 256 },{}, 1,{ 1 }, "index", "max",{ 3, 1, 128, 256 },{},{} }, - topk_test_params{ { 1, 3, 129, 257 },{}, 1,{ 1 }, "value", "max",{ 1, 1, 129, 257 },{},{} }, - topk_test_params{ { 2, 5, 129, 257 },{}, 1,{ 1 }, "index", "max",{ 2, 1, 129, 257 },{},{} }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "max",{ 3, 3 },{ 3,2,1,7,6,5,11,10,9 },{ 3,2,1,3,2,1,3,2,1 } }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "min",{ 3, 3 },{ 0,1,2,4,5,6,8,9,10 },{ 0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 20, 128, 128 },{}, 1,{ 3 }, "value", "max",{ 1, 3, 128, 128 },{},{} }, - topk_test_params{ { 1, 20, 128, 128 },{}, 1,{ 3 }, "index", "min",{ 1, 3, 128, 128 },{},{} }, - topk_test_params{ { 1, 20, 128, 128 },{}, 1,{ 18 }, "value", "min",{ 1, 18, 128, 128 },{},{} }, - topk_test_params{ { 1, 20, 129, 129 },{}, 1,{ 3 }, "value", "max",{ 1, 3, 129, 129 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 1 }, "value", "max",{ 1, 2, 2, 1 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{}, 3,{ 1 }, "index", "max",{ 1, 2, 2, 1 },{},{} }, - topk_test_params{ { 1, 2, 4, 2 },{}, 2,{ 3 }, "value", "max",{ 1, 2, 3, 2 },{},{} }, - topk_test_params{ { 1, 2, 4, 2 },{}, 2,{ 3 }, "index", "max",{ 1, 2, 3, 2 },{},{} }, - topk_test_params{ { 1, 2, 4, 2 },{}, 2,{ 3 }, "value", "min",{ 1, 2, 3, 2 },{},{} }, - topk_test_params{ { 1, 2, 4, 2 },{}, 2,{ 3 }, "index", "min",{ 1, 2, 3, 2 },{},{} }, - topk_test_params{ { 1, 2, 2, 4 },{3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3}, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 },{3,3,3,3,3,3,3,3,3,3,3,3},{0,1,2,0,1,2,0,1,2,0,1,2} }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 20, 32, 32 },{}, 1,{ 18 }, "index", "max",{ 1, 18, 32, 32 },{},{} }, - topk_test_params{ { 1, 20, 129, 129 },{}, 1,{ 18 }, "index", "max",{ 1, 18, 129, 129 },{},{} }, - topk_test_params{ { 1, 20, 32, 32 
},{}, 1,{ 18 }, "index", "min",{ 1, 18, 32, 32 },{},{} }, - topk_test_params{ { 1, 20, 129, 129 },{}, 1,{ 18 }, "index", "min",{ 1, 18, 129, 129 },{},{} }, - topk_test_params{ { 1, 20, 129, 129 },{}, 1,{ 18 }, "none", "min",{ 1, 18, 129, 129 },{},{} } - )); - - -class MKLDNNCPUExtTopK1OutTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - - - - - - - _IN_ - - - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(topk_test_params p) { - std::string model = model_t; - std::string in_shape; - std::string out_shape; - - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_OUT_", out_shape); - - for (auto& dct : p.in_shape) { - in_shape += ""; - in_shape += std::to_string(dct) + "\n"; - } - - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_STR(model, "_SORT_", p.sort); - REPLACE_WITH_STR(model, "_MODE_", p.mode); - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - if (p.reference_val.size()) - REPLACE_WITH_STR(model, "_PRECISION_", "FP32"); - else - REPLACE_WITH_STR(model, "_PRECISION_", "I32"); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - topk_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - // Input Data - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, p.in_shape, - InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) }); - src->allocate(); - if (p.input_tensor.size()) - memcpy(src->buffer(), &p.input_tensor[0], sizeof(float)*p.input_tensor.size()); - else - fill_data_dbgval(src->buffer(), src->size()); - auto * srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("value", src)); - InferenceEngine::Blob::Ptr seq_lengthsIdx = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, {}, - InferenceEngine::TensorDesc::getLayoutByDims({})}); - seq_lengthsIdx->allocate(); - memcpy(static_cast(seq_lengthsIdx->buffer()), &p.src_k[0], sizeof(int32_t)); - auto * seq_lengthsIdxPtr = dynamic_cast*>(seq_lengthsIdx.get()); - if (seq_lengthsIdxPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - srcs.insert(std::pair("src_k", seq_lengthsIdx)); - - - // Output Data - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - auto it = out.begin(); - std::pair item = *it; - - if (p.reference_val.size()) { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - // Infer - graph.Infer(srcs, outputBlobs); - for (int i = 0; i < p.reference_val.size(); i++) { - if (p.reference_val.data()[i] != (*output).data()[i]) - FAIL() << "The difference between res_val[i] and reference_val[i]"; - } - } else { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I32, p.out_shape, - InferenceEngine::TensorDesc::getLayoutByDims(p.out_shape) }); - output->allocate(); - 
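// A short note on this branch (inferred from the surrounding test logic):
// when reference_val is empty the single network output is the TopK index
// tensor, so an I32 blob of the expected output shape is allocated here
// and compared against reference_idx after inference.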
outputBlobs[item.first] = output; - - // Infer - graph.Infer(srcs, outputBlobs); - for (int i = 0; i < p.reference_idx.size(); i++) { - if (p.reference_idx.data()[i] != (*output).data()[i]) - FAIL() << "The difference between res_val[i] and reference_idx[i]"; - } - } - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtTopK1OutTests, TestsTopK) {} - -INSTANTIATE_TEST_CASE_P( - TestsTopK1Out, MKLDNNCPUExtTopK1OutTests, - ::testing::Values( - // Params: in_shape, input_tensor, axis, src_k, sort, mode, out_shape, reference_val, reference_idx - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "max",{ 3, 1 },{ 3,7,11 },{} }, - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "max",{ 3, 1 },{},{ 3,3,3 } }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "max",{ 1, 4 },{ 8,9,10,11 },{} }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "max",{ 1, 4 },{},{ 2,2,2,2 } }, - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "min",{ 3, 1 },{ 0,4,8 },{} }, - topk_test_params{ { 3, 4 },{}, -1,{ 1 }, "value", "min",{ 3, 1 },{},{ 0,0,0 } }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "min",{ 1, 4 },{ 0,1,2,3 },{} }, - topk_test_params{ { 3, 4 },{}, 0,{ 1 }, "value", "min",{ 1, 4 },{},{ 0,0,0,0 } }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "max",{ 3, 3 },{ 3,2,1,7,6,5,11,10,9 },{} }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "max",{ 3, 3 },{},{ 3,2,1,3,2,1,3,2,1 } }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "min",{ 3, 3 },{ 0,1,2,4,5,6,8,9,10 },{} }, - topk_test_params{ { 3, 4 },{}, -1,{ 3 }, "value", "min",{ 3, 3 },{},{ 0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{} }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 },{},{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{} }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 },{},{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{} }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 },{},{ 0,1,2,0,1,2,0,1,2,0,1,2 } }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 },{ 3,3,3,3,3,3,3,3,3,3,3,3 },{} }, - topk_test_params{ { 1, 2, 2, 4 },{ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 },{},{ 0,1,2,0,1,2,0,1,2,0,1,2 } } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/unique_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/unique_tests.cpp deleted file mode 100644 index da25e3ee7c9d5a..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/unique_tests.cpp +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct unique_test_params { - 
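// One parameter set per Unique test run (field roles summarized here for
// readability, as implied by their uses below): the IR model template and
// its precision; the layer attributes sorted/return_inverse/return_counts;
// the input tensor; the shapes and reference contents of up to three
// outputs (unique elements, inverse indices, occurrence counts); and the
// expected number of primitive descriptors plus the implementation type
// that must be selected.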
std::string model; - - std::string precision; - - std::string sorted; - std::string return_inverse; - std::string return_counts; - - InferenceEngine::SizeVector input_shape; - std::vector input_value; - - InferenceEngine::SizeVector output_uniques_shape; - InferenceEngine::SizeVector output_indices_shape; - InferenceEngine::SizeVector output_counts_shape; - - std::vector output_uniques_value_ref; - std::vector output_indices_value_ref; - std::vector output_counts_value_ref; - - size_t num_prim_desc; - int selectedType; - - std::vector> comp; -}; - -class MKLDNNCPUExtUniqueTests : public TestsCommon, public WithParamInterface { - std::string getModel(unique_test_params p) { - std::string model = p.model; - - std::string input_shape; - std::string output_uniques_shape; - std::string output_indices_shape; - std::string output_counts_shape; - - for (auto& shape : p.input_shape) { - input_shape += ""; - input_shape += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_uniques_shape) { - output_uniques_shape += ""; - output_uniques_shape += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_indices_shape) { - output_indices_shape += ""; - output_indices_shape += std::to_string(shape) + "\n"; - } - - for (auto& shape : p.output_counts_shape) { - output_counts_shape += ""; - output_counts_shape += std::to_string(shape) + "\n"; - } - - REPLACE_WITH_STR(model, "_SORTED_", p.sorted); - REPLACE_WITH_STR(model, "_INPUT_SHAPE_", input_shape); - REPLACE_WITH_STR(model, "_OUTPUT_UNIQUES_SHAPE_", output_uniques_shape); - REPLACE_WITH_STR(model, "_OUTPUT_INDICES_SHAPE_", output_indices_shape); - REPLACE_WITH_STR(model, "_OUTPUT_COUNTS_SHAPE_", output_counts_shape); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - unique_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - - for (auto &node : nodes) { - if (node->getName() == "Unique") { - ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - // prepare input blob and input blob map - InferenceEngine::Blob::Ptr input = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.input_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.input_shape) }); - input->allocate(); - auto *input_ptr = dynamic_cast*>(input.get()); - std::copy(p.input_value.begin(), p.input_value.end(), (float *)input_ptr->data()); - InferenceEngine::BlobMap input_blob_map; - input_blob_map["InputValues"] = input; - - // prepare output blob map - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - InferenceEngine::BlobMap output_blob_map; - for (auto iter = out.begin(); iter != out.end(); iter++) { - std::pair item = *iter; - InferenceEngine::Blob::Ptr output_blob_ptr = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output_blob_ptr->allocate(); - 
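// Note (inferred from the cases below): how many outputs the network has
// depends on return_inverse/return_counts, and every output keeps the full
// input length, with unused trailing positions padded (see the zero-padded
// reference vectors defined after this test class).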
output_blob_map[item.first] = output_blob_ptr; - } - - // prepare blobs with reference data - InferenceEngine::Blob::Ptr output_uniques_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.output_uniques_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_uniques_shape) }); - output_uniques_blob_ref->allocate(); - auto *output_uniques_blob_ref_ptr = dynamic_cast*>(output_uniques_blob_ref.get()); - std::copy(p.output_uniques_value_ref.begin(), p.output_uniques_value_ref.end(), (float *)output_uniques_blob_ref_ptr->data()); - - InferenceEngine::Blob::Ptr output_indices_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.output_indices_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_indices_shape) }); - output_indices_blob_ref->allocate(); - auto *output_indices_blob_ref_ptr = dynamic_cast*>(output_indices_blob_ref.get()); - std::copy(p.output_indices_value_ref.begin(), p.output_indices_value_ref.end(), (float *)output_indices_blob_ref_ptr->data()); - - InferenceEngine::Blob::Ptr output_counts_blob_ref = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, - p.output_counts_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.output_counts_shape) }); - output_counts_blob_ref->allocate(); - auto *output_counts_blob_ref_ptr = dynamic_cast*>(output_counts_blob_ref.get()); - std::copy(p.output_counts_value_ref.begin(), p.output_counts_value_ref.end(), (float *)output_counts_blob_ref_ptr->data()); - - // infer - graph.Infer(input_blob_map, output_blob_map); - - // check the result - auto iter = out.begin(); - compare(*output_blob_map[iter->first], *output_uniques_blob_ref, 0.0f); - if (p.return_inverse == "true") { - iter++; - compare(*output_blob_map[iter->first], *output_indices_blob_ref, 0.0f); - } - if (p.return_counts == "true") { - iter++; - compare(*output_blob_map[iter->first], *output_counts_blob_ref, 0.0f); - } - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNCPUExtUniqueTests, TestsUnique) {} - -// model 1 that contains one Unique layer with two outputs: unique elements, indices -std::string model1 = R"V0G0N( - - - - - - _INPUT_SHAPE_ - - - - - - - - _INPUT_SHAPE_ - - - - - _OUTPUT_UNIQUES_SHAPE_ - - - _OUTPUT_INDICES_SHAPE_ - - - - - - - - -)V0G0N"; - -// model 2 that contains one Unique layer with three outputs: unique elements, indices, counts -std::string model2 = R"V0G0N( - - - - - - _INPUT_SHAPE_ - - - - - - - - _INPUT_SHAPE_ - - - - - _OUTPUT_UNIQUES_SHAPE_ - - - _OUTPUT_INDICES_SHAPE_ - - - _OUTPUT_COUNTS_SHAPE_ - - - - - - - - -)V0G0N"; - -// case 1 - input with 10 elements where some of them repeat, non-sorted -InferenceEngine::SizeVector input_shape_case1 = { 10 }; -std::vector input_value_case1 = { 8.f, 1.f, 2.f, 1.f, 8.f, 5.f, 1.f, 5.f, 0.f, 0.f }; -InferenceEngine::SizeVector output_uniques_shape_case1 = { 10 }; -InferenceEngine::SizeVector output_indicess_shape_case1 = { 10 }; -InferenceEngine::SizeVector output_counts_shape_case1 = { 10 }; -std::vector output_uniques_value_ref_case1 = { 8.f, 1.f, 2.f, 5.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }; -std::vector output_indices_value_ref_case1 = { 0.f, 1.f, 2.f, 1.f, 0.f, 3.f, 1.f, 3.f, 4.f, 4.f }; -std::vector output_counts_value_ref_case1 = { 2.f, 3.f, 1.f, 2.f, 2.f, 0.f, 0.f, 0.f, 0.f, 0.f }; - -// case 2 - input with 10 elements where all of them are unique, non-sorted -InferenceEngine::SizeVector input_shape_case2 = { 10 }; -std::vector input_value_case2 = { 8.f, 1.f, 2.f, 
3.f, 10.f, 5.f, 12.f, 15.f, 0.f, 100.f }; -InferenceEngine::SizeVector output_uniques_shape_case2 = { 10 }; -InferenceEngine::SizeVector output_indicess_shape_case2 = { 10 }; -InferenceEngine::SizeVector output_counts_shape_case2 = { 10 }; -std::vector output_uniques_value_ref_case2 = { 8.f, 1.f, 2.f, 3.f, 10.f, 5.f, 12.f, 15.f, 0.f, 100.f }; -std::vector output_indices_value_ref_case2 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f }; -std::vector output_counts_value_ref_case2 = { 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f }; - -// case 3 - input with 10 elements where all of them are the same, non-sorted -InferenceEngine::SizeVector input_shape_case3 = { 10 }; -std::vector input_value_case3 = { 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f }; -InferenceEngine::SizeVector output_uniques_shape_case3 = { 10 }; -InferenceEngine::SizeVector output_indicess_shape_case3 = { 10 }; -InferenceEngine::SizeVector output_counts_shape_case3 = { 10 }; -std::vector output_uniques_value_ref_case3 = { 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f }; -std::vector output_indices_value_ref_case3 = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }; -std::vector output_counts_value_ref_case3 = { 10.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }; - -// case 4 - input with 10 elements where some of them repeat, sorted -InferenceEngine::SizeVector input_shape_case4 = { 10 }; -std::vector input_value_case4 = { 8.f, 1.f, 2.f, 1.f, 8.f, 5.f, 1.f, 5.f, 0.f, 0.f }; -InferenceEngine::SizeVector output_uniques_shape_case4 = { 10 }; -InferenceEngine::SizeVector output_indicess_shape_case4 = { 10 }; -InferenceEngine::SizeVector output_counts_shape_case4 = { 10 }; -std::vector output_uniques_value_ref_case4 = { 0.f, 1.f, 2.f, 5.f, 8.f, 8.f, 8.f, 8.f, 8.f, 8.f }; -std::vector output_indices_value_ref_case4 = { 4.f, 1.f, 2.f, 1.f, 4.f, 3.f, 1.f, 3.f, 0.f, 0.f }; -std::vector output_counts_value_ref_case4 = { 2.f, 3.f, 1.f, 2.f, 2.f, 0.f, 0.f, 0.f, 0.f, 0.f }; - -// case 5 - input with 10 elements where all of them are unique, sorted -InferenceEngine::SizeVector input_shape_case5 = { 10 }; -std::vector input_value_case5 = { 8.f, 1.f, 2.f, 3.f, 10.f, 5.f, 12.f, 15.f, 0.f, 100.f }; -InferenceEngine::SizeVector output_uniques_shape_case5 = { 10 }; -InferenceEngine::SizeVector output_indicess_shape_case5 = { 10 }; -InferenceEngine::SizeVector output_counts_shape_case5 = { 10 }; -std::vector output_uniques_value_ref_case5 = { 0.f, 1.f, 2.f, 3.f, 5.f, 8.f, 10.f, 12.f, 15.f, 100.f }; -std::vector output_indices_value_ref_case5 = { 5.f, 1.f, 2.f, 3.f, 6.f, 4.f, 7.f, 8.f, 0.f, 9.f }; -std::vector output_counts_value_ref_case5 = { 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f }; - -INSTANTIATE_TEST_CASE_P( - TestsUnique, MKLDNNCPUExtUniqueTests, - ::testing::Values( - // case 0 - model1, sorted="false", input with 10 elements where some of them repeat - unique_test_params { - model1, "FP32", "false", "true", "false", input_shape_case1, input_value_case1, - output_uniques_shape_case1, output_indicess_shape_case1, output_counts_shape_case1, - output_uniques_value_ref_case1, output_indices_value_ref_case1, output_counts_value_ref_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 1 - model1, sorted="false", input with 10 elements where all of them are unique - unique_test_params{ - model1, "FP32", "false", "true", "false", input_shape_case2, input_value_case2, - output_uniques_shape_case2, output_indicess_shape_case2, output_counts_shape_case2, - output_uniques_value_ref_case2, 
output_indices_value_ref_case2, output_counts_value_ref_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 2 - model1, sorted="false", input with 10 elements where all of them are the same - unique_test_params{ - model1, "FP32", "false", "true", "false", input_shape_case3, input_value_case3, - output_uniques_shape_case3, output_indicess_shape_case3, output_counts_shape_case3, - output_uniques_value_ref_case3, output_indices_value_ref_case3, output_counts_value_ref_case3, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 3 - model1, sorted="true", input with 10 elements where some of them repeat - unique_test_params{ - model1, "FP32", "true", "true", "false", input_shape_case4, input_value_case4, - output_uniques_shape_case4, output_indicess_shape_case4, output_counts_shape_case4, - output_uniques_value_ref_case4, output_indices_value_ref_case4, output_counts_value_ref_case4, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 4 - model1, sorted="true", input with 10 elements where all of them are unique - unique_test_params{ - model1, "FP32", "true", "true", "false", input_shape_case5, input_value_case5, - output_uniques_shape_case5, output_indicess_shape_case5, output_counts_shape_case5, - output_uniques_value_ref_case5, output_indices_value_ref_case5, output_counts_value_ref_case5, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 5 - model2, sorted="false", input with 10 elements where some of them repeat - unique_test_params{ - model2, "FP32", "false", "true", "true", input_shape_case1, input_value_case1, - output_uniques_shape_case1, output_indicess_shape_case1, output_counts_shape_case1, - output_uniques_value_ref_case1, output_indices_value_ref_case1, output_counts_value_ref_case1, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 6 - model2, sorted="false", input with 10 elements where all of them are unique - unique_test_params{ - model2, "FP32", "false", "true", "true", input_shape_case2, input_value_case2, - output_uniques_shape_case2, output_indicess_shape_case2, output_counts_shape_case2, - output_uniques_value_ref_case2, output_indices_value_ref_case2, output_counts_value_ref_case2, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 7 - model2, sorted="false", input with 10 elements where all of them are the same - unique_test_params{ - model2, "FP32", "false", "true", "true", input_shape_case3, input_value_case3, - output_uniques_shape_case3, output_indicess_shape_case3, output_counts_shape_case3, - output_uniques_value_ref_case3, output_indices_value_ref_case3, output_counts_value_ref_case3, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 8 - model2, sorted="true", input with 10 elements where some of them repeat - unique_test_params{ - model2, "FP32", "true", "true", "true", input_shape_case4, input_value_case4, - output_uniques_shape_case4, output_indicess_shape_case4, output_counts_shape_case4, - output_uniques_value_ref_case4, output_indices_value_ref_case4, output_counts_value_ref_case4, - 1, MKLDNNPlugin::impl_desc_type::unknown - }, - // case 9 - model2, sorted="true", input with 10 elements where all of them are unique - unique_test_params{ - model2, "FP32", "true", "true", "true", input_shape_case5, input_value_case5, - output_uniques_shape_case5, output_indicess_shape_case5, output_counts_shape_case5, - output_uniques_value_ref_case5, output_indices_value_ref_case5, output_counts_value_ref_case5, - 1, MKLDNNPlugin::impl_desc_type::unknown - } -)); diff --git 
a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp deleted file mode 100644 index a2f9f7b8d8cab0..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -constexpr auto eltwise_relu = mkldnn::algorithm::eltwise_relu; -constexpr auto eltwise_elu = mkldnn::algorithm::eltwise_elu; -constexpr auto eltwise_logistic = mkldnn::algorithm::eltwise_logistic; -constexpr auto eltwise_bounded_relu = mkldnn::algorithm::eltwise_bounded_relu; -constexpr auto eltwise_tanh = mkldnn::algorithm::eltwise_tanh; - -struct activation_test_params { - mkldnn::algorithm alg; - float alpha; - float beta; - - // Formats: NCHW, NCDHW - vector dims; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template inline T relu_fwd(T s, A alpha) { - return s > 0 ? s : static_cast(s * alpha); -} - -template T elu_fwd(T s, A alpha) { - return s > 0 ? s : static_cast(alpha * (::expf(s) - 1)); -} - -template -T logistic_fwd(T s) { - T v = ::expf(s); - return v / (v + 1); -} - -template -T bounded_relu_fwd(T s, A alpha) { - s = s > 0 ? s : 0; - return s > alpha ? (T)(alpha) : s; -} - -template T tanh_fwd(T s) { - return static_cast(::tanhf((float)s)); -} - -template -void ref_activation(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, activation_test_params prm) { - InferenceEngine::SizeVector dims = src.getTensorDesc().getDims(); - auto dims_size = dims.size(); - - size_t IW = dims[dims_size - 1]; - size_t IH = dims[dims_size - 2]; - size_t ID = dims_size == 5 ? 
dims[dims_size - 3] : 1u; - size_t IC = dims[1]; - size_t MB = dims[0]; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for(int mb = 0; mb < MB; mb++) { - for(int c = 0; c < IC; c++) { - for(int d = 0; d < ID; d++) { - for(int h = 0; h < IH; h++) { - for(int w = 0; w < IW; w++) { - int idx = mb * IC * ID * IH * IW - + c * ID * IH * IW - + d * IH * IW - + h * IW - + w; - - switch (prm.alg) { - case eltwise_relu: dst_data[idx] = relu_fwd(src_data[idx], prm.alpha); break; - case eltwise_elu: dst_data[idx] = elu_fwd(src_data[idx], prm.alpha); break; - case eltwise_logistic: dst_data[idx] = logistic_fwd(src_data[idx]); break; - case eltwise_bounded_relu: dst_data[idx] = bounded_relu_fwd(src_data[idx], prm.alpha); break; - case eltwise_tanh: dst_data[idx] = tanh_fwd(src_data[idx]); break; - default: assert(!"unknown alg_kind"); - } - } - } - } - } - } -} - -class MKLDNNGraphActivationTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - -protected: - virtual void TearDown() { - } - - std::string getModel(activation_test_params p) { - std::string model = model_t; - auto dims_size = p.dims.size(); - - switch (dims_size) { - case 3: - REMOVE_LINE(model, "_IH_"); - case 4: - REMOVE_LINE(model, "_ID_"); - } - - switch (p.alg) { - case eltwise_relu: REPLACE_WITH_STR(model, "_LT_", "ReLU"); break; - case eltwise_elu: REPLACE_WITH_STR(model, "_LT_", "ELU"); break; - case eltwise_logistic: REPLACE_WITH_STR(model, "_LT_", "Sigmoid"); break; - case eltwise_bounded_relu: REPLACE_WITH_STR(model, "_LT_", "ReLU6"); break; - case eltwise_tanh: REPLACE_WITH_STR(model, "_LT_", "Activation"); break; - default: assert(!"unknown alg_kind"); - } - - string P1, P2; - if (p.alg == eltwise_relu) { - P1 = string("negative_slope=\"") + to_string_c_locale(p.alpha) + string("\""); - P2 = string("beta=\"") + to_string_c_locale(p.beta) + string("\""); - } else if (p.alg == eltwise_bounded_relu) { - P1 = string("n=\"") + to_string_c_locale(p.alpha) + string("\""); - P2 = string("beta=\"") + to_string_c_locale(p.beta) + string("\""); - } else if (p.alg == eltwise_tanh) { - P1 = string("type=\"tanh\""); - } else { - P1 = string("alpha=\"") + to_string_c_locale(p.alpha) + string("\""); - P2 = string("beta=\"") + to_string_c_locale(p.beta) + string("\""); - } - REPLACE_WITH_STR(model, "_P1_", P1); - REPLACE_WITH_STR(model, "_P2_", P2); - - REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - switch (dims_size) { - case 5: - REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]); - case 4: - REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]); - } - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - return model; - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - activation_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = 
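// (The loop below, per the surrounding test logic, checks that every
// Activation node advertises at least num_prim_desc supported primitive
// descriptors and that the selected implementation matches p.selectedType,
// i.e. jit or ref, before inference is run.)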
graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Activation) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_activation(*srcPtr, dst_ref, p); - - compare(*output, dst_ref, 0.0005f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphActivationTests, TestsActivation) {} - -INSTANTIATE_TEST_CASE_P( - TestsActivation, MKLDNNGraphActivationTests, - ::testing::Values( - activation_test_params{eltwise_relu, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.0f, 0.0f, {1, 32, 
128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // 5D - activation_test_params{eltwise_tanh, 0.f, 0.f, {1, 1, 64, 64, 64}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - )); - -class MKLDNNGraphDynBatchActivationTests: public MKLDNNGraphActivationTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - activation_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, 
layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkActivation = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Activation; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkActivation); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkActivation); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchActivationTests, TestsDynBatchActivation) {} - - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchActivation, MKLDNNGraphDynBatchActivationTests, - ::testing::Values( - activation_test_params{eltwise_relu, 0.0f, 0.0f, {2, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit}, - activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - // TODO: rewrite to ngraph to have reshape functionality - // activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, 
MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit}, - // activation_test_params{eltwise_relu, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp deleted file mode 100644 index c36beb6e6eb4c8..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include -#include - -using namespace ::testing; -using namespace mkldnn; - -struct batchnorm_scaleshift_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - // BatchNorm specific param - double epsilon; - // ScaleShift specific param - int broadcast; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void ref_batchnorm4DWithScale(const InferenceEngine::TBlob &src, const data_t *variance, const data_t *mean, const data_t *scaleShift, - InferenceEngine::TBlob &dst, double eps) { - size_t MB = src.getTensorDesc().getDims()[0]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IW = src.getTensorDesc().getDims()[3]; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - const data_t *scale_data = scaleShift; - const data_t *shift_data = scaleShift + IC; - - for (int c = 0; c < IC; ++c) { - data_t v_mean = mean[c]; - data_t v_variance = variance[c]; - data_t sqrt_variance = 0; - data_t scale = scale_data[c]; - data_t shift = shift_data[c]; - - sqrt_variance = 1. 
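// Despite its name, sqrt_variance holds the reciprocal 1 / sqrt(variance + eps),
// so the normalization below multiplies instead of dividing per element.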
/ sqrt(v_variance + eps); - - for (int n = 0; n < MB; ++n) - for (int h = 0; h < IH; ++h) - for (int w = 0; w < IW; ++w) { - size_t idx = n * IC * IH * IW - + c * IH * IW - + h * IW + w; - // BatchNorm - dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance; - // ScaleShift - dst_data[idx] = dst_data[idx] * scale + shift; - } - } -} - -class MKLDNNGraphBatchNormScaleShiftTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - -)V0G0N"; - -protected: - virtual void TearDown() { - } - - std::string getModel(batchnorm_scaleshift_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon); - REPLACE_WITH_NUM(model, "_BROADCAST_", p.broadcast); - - size_t w_data_size = p.in.c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", 2*w_data_size); - REPLACE_WITH_NUM(model, "_S3_", 3*w_data_size); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - return model; - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - batchnorm_scaleshift_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {p.in.c * 4 * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if ((nodes[i]->getType() == MKLDNNPlugin::Depthwise && nodes[i]->getCnnLayer()->type == "ScaleShift") - || nodes[i]->getType() == MKLDNNPlugin::BatchNormalization) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - 
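// N.B. the ASSERT_TRUE(getImplementationType() | p.selectedType) check above is
// vacuous: a bitwise OR with a nonzero expected type is always truthy. The
// activation test uses the meaningful masked form ASSERT_EQ(expected, impl & expected).
//
// Illustrative sketch (not part of the original test): the per-element math that
// ref_batchnorm4DWithScale verifies -- BatchNorm normalization followed by
// ScaleShift. The name bn_scaleshift_ref is ours; the real check below still
// uses ref_batchnorm4DWithScale.
auto bn_scaleshift_ref = [](float x, float mean, float variance, float scale, float shift, float eps) {
    return (x - mean) / sqrt(variance + eps) * scale + shift;  // normalize, then scale and shift
};
(void) bn_scaleshift_ref;  // illustration only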
srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_batchnorm4DWithScale(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), (const float*) weights->buffer() + p.in.c*2, dst_ref, p.epsilon); - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphBatchNormScaleShiftTests, TestsBatchNormWithScaleShift) {} - -using namespace MKLDNNPlugin; - -const size_t expect_num_impl = InferenceEngine::with_cpu_x86_avx2() ? 3 : 2; - -INSTANTIATE_TEST_CASE_P( - TestsBatchNormWithScaleShift, MKLDNNGraphBatchNormScaleShiftTests, - ::testing::Values( - batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, expect_num_impl, jit}, - batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, expect_num_impl, jit}, - batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, expect_num_impl, ref, {ref_any}}, - batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, expect_num_impl, ref, {ref_any}})); - - -class MKLDNNGraphDynBatchBatchNormScaleShiftTests: public MKLDNNGraphBatchNormScaleShiftTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - batchnorm_scaleshift_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in.n; - if (MB < 2) - MB = 2; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {p.in.c * 4 * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w}; - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr 
output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkScaleShift = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return (node->getType() == MKLDNNPlugin::Depthwise && node->getCnnLayer()->type == "ScaleShift") - || node->getType() == MKLDNNPlugin::BatchNormalization; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkScaleShift); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkScaleShift); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchBatchNormScaleShiftTests, TestsDynBatchBatchNormWithScaleShift) {} - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchBatchNormWithScaleShift, MKLDNNGraphDynBatchBatchNormScaleShiftTests, - ::testing::Values( - // TODO: rewrite to ngraph to have reshape functionality - // batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit}, - // batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit}, - batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp deleted file mode 100644 index 279218e57ed933..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include -#include -#include "tests_common.hpp" -#include "ie_system_conf.h" - -using namespace ::testing; -using namespace MKLDNNPlugin; -using namespace mkldnn; - -struct batchnorm4D_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - double epsilon; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void ref_batchnorm4D(const InferenceEngine::TBlob &src, const data_t *variance, const data_t *mean, - InferenceEngine::TBlob &dst, batchnorm4D_test_params prm) { - size_t MB = src.getTensorDesc().getDims()[0]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IW = src.getTensorDesc().getDims()[3]; - - const double eps = prm.epsilon; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int c = 0; c < IC; ++c) { - data_t v_mean = mean[c]; - data_t v_variance = variance[c]; - data_t sqrt_variance = 0; - - sqrt_variance = 1. 
/ sqrt(v_variance + eps); - - for (int n = 0; n < MB; ++n) - for (int h = 0; h < IH; ++h) - for (int w = 0; w < IW; ++w) { - size_t idx = n * IC * IH * IW - + c * IH * IW - + h * IW + w; - dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance; - } - } -} - -class MKLDNNGraphBatchNormTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(batchnorm4D_test_params p) { - std::string model = model_t; - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon); - - REPLACE_WITH_NUM(model, "_OW_", p.in.w); - REPLACE_WITH_NUM(model, "_OH_", p.in.h); - REPLACE_WITH_NUM(model, "_OC_", p.in.c); - - size_t w_data_size = p.in.c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - batchnorm4D_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {p.in.c * 2 * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data(weights->buffer(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::BatchNormalization) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType); - } - } - ASSERT_GE(5, nodes.size()); - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - 
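// Weights layout consumed by ref_batchnorm4D below: the U8 blob packs two float
// sections back to back -- variance in the first p.in.c floats and mean in the
// next p.in.c floats (the fill values are flipped non-negative above so that
// variance + eps stays valid under sqrt).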
output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_batchnorm4D(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphBatchNormTests, TestsBatchNorm) {} - -const size_t expect_num_impl = InferenceEngine::with_cpu_x86_avx2() ? 3 : 2; - -INSTANTIATE_TEST_CASE_P( - TestsBatchNorm, MKLDNNGraphBatchNormTests, - ::testing::Values( - batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, expect_num_impl, jit}, - batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, expect_num_impl, jit}, - batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, expect_num_impl, ref, {ref_any}}, - batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, expect_num_impl, ref, {ref_any}})); - -class MKLDNNGraphDynBatchBatchNormTests: public MKLDNNGraphBatchNormTests { -protected: - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - batchnorm4D_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in.n; - if (MB < 2) - MB = 2; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {p.in.c * 4 * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w}; - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - auto* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkScaleShift = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::BatchNormalization; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkScaleShift); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkScaleShift); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - 
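// Sketch for context (an assumption, not code from the original file): the dyn
// batch tests rely on setBatchSizeReshape(MB) rewriting only the outermost
// dimension of each tensor, conceptually:
static inline InferenceEngine::SizeVector withBatch(InferenceEngine::SizeVector dims, size_t mb) {
    dims[0] = mb;  // batch is the outermost dimension in NCHW/NCDHW
    return dims;
}
// checkDynBatch is then called twice, once with the full batch MB and once with
// batch 1, to verify the node under test honours the effective batch size.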
-TEST_P(MKLDNNGraphDynBatchBatchNormTests, TestsDynBatchBatchNorm) {} - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchBatchNorm, MKLDNNGraphDynBatchBatchNormTests, - ::testing::Values( - // TODO: rewrite to ngraph to have reshape functionality - // batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit}, - // batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit}, - batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp deleted file mode 100644 index 5ff03d84f8678c..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp +++ /dev/null @@ -1,1030 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include -#include -#include -#include -#include "tests_common.hpp" - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct concat_test_params { - // Formats: NCHW, NCDHW - vector in1; - vector in2; - - size_t axis; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -class MKLDNNGraphConcatTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS_1__ - - - - - - __SRC_DIMS_2__ - - - - - - - __SRC_DIMS_1__ - - __SRC_DIMS_2__ - - - - __DST_DIMS__ - - - - - - - - - -)V0G0N"; - - std::string getModel(concat_test_params p) { - std::string model = model_t; - std::string s_dims; - for (auto& dim : p.in1) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims); - - s_dims = ""; - for (auto& dim : p.in2) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims); - - s_dims = ""; - for (size_t i = 0; i < p.in1.size(); i++) { - size_t dim = p.axis == i ? 
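// Concat shape rule: along p.axis the output dimension is the sum of the two
// input dimensions; every other dimension must match between in1 and in2.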
p.in1[i] + p.in2[i] : p.in1[i]; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - concat_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Concatenation) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - ASSERT_LE(3, nodes.size()); - - InferenceEngine::SizeVector dims_src1 = p.in1; - InferenceEngine::SizeVector dims_src2 = p.in2; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in1.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - case 6: - layout = InferenceEngine::BLOCKED; - break; - } - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, layout}); - src1->allocate(); - - fill_data(src1->buffer(), src1->size()); - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, layout}); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - // Compare - float *src1_ptr = src1->buffer(); - size_t src1_size = src1->size(); - float *src2_ptr = src2->buffer(); - size_t src2_size = src2->size(); - float *dst_ptr = output->buffer(); - size_t dst_size = output->size(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = p.axis; dim < output->getTensorDesc().getDims().size(); dim++) { - len1 *= src1->getTensorDesc().getDims()[dim]; - len2 *= src2->getTensorDesc().getDims()[dim]; - } - cycles = p.axis; - - - int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (src1_ptr[index1] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (src2_ptr[index2] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphConcatTests, TestsConcat) {} - -INSTANTIATE_TEST_CASE_P( - 
TestsConcat, MKLDNNGraphConcatTests, - ::testing::Values( - concat_test_params { - {1, 3, 3, 5}, - {1, 3, 3, 5}, - 1, 2 - }, - concat_test_params { - {1, 7, 1, 5}, - {1, 7, 9, 5}, - 2, 1, MKLDNNPlugin::impl_desc_type::ref - }, - concat_test_params { - {1, 2, 3, 5, 3}, - {1, 5, 3, 5, 3}, - 1, 2 - }, - concat_test_params { - {1, 32, 3, 4, 5}, - {1, 32, 3, 4, 5}, - 1, 6, MKLDNNPlugin::impl_desc_type::unknown - }, - concat_test_params { - {1, 64, 16, 16, 16, 1}, - {1, 64, 16, 16, 16, 1}, - 5, 1, MKLDNNPlugin::impl_desc_type::ref - })); - -class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - 1__SRC_DIMS_1__ - - - - - - - 1__SRC_DIMS_2__ - - - - - - - - 1__SRC_DIMS_1__ - - - 1__SRC_DIMS_2__ - - - - - 1__DST_DIMS__ - - - - - - - - - -)V0G0N"; - - std::string getModel(concat_test_params p) { - std::string model = model_t; - std::string s_dims; - for (size_t i = 1; i < p.in1.size(); i++) { - s_dims += "\n "; - s_dims += std::to_string(p.in1[i]) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims); - - s_dims = ""; - for (size_t i = 1; i < p.in2.size(); i++) { - s_dims += "\n "; - s_dims += std::to_string(p.in2[i]) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims); - - s_dims = ""; - for (size_t i = 1; i < p.in1.size(); i++) { - size_t dim = p.axis == i ? p.in1[i] + p.in2[i] : p.in1[i]; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - concat_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in1[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src1 = p.in1; - InferenceEngine::SizeVector dims_src2 = p.in2; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in1.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - case 6: - layout = InferenceEngine::BLOCKED; - break; - } - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, layout}); - src1->allocate(); - - fill_data(src1->buffer(), src1->size()); - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, layout}); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = 
InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - - auto checkConcat = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Concatenation; - }; - - MKLDNNGraphTestClass::CheckDynBatchType checkType = MKLDNNGraphTestClass::CheckDynBatchType::Both; - if (p.selectedType == MKLDNNPlugin::impl_desc_type::unknown) - checkType = MKLDNNGraphTestClass::CheckDynBatchType::Child; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkConcat, checkType); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkConcat, checkType); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchConcatTests, TestsDynBatchConcat) {} - - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchConcat, MKLDNNGraphDynBatchConcatTests, - ::testing::Values( - concat_test_params { - {1, 7, 2, 5}, - {1, 7, 2, 5}, - 2, 1, MKLDNNPlugin::impl_desc_type::ref - }, - concat_test_params { - {1, 7, 2, 5}, - {1, 13, 2, 5}, - 1, 2, MKLDNNPlugin::impl_desc_type::unknown - }, - concat_test_params { - {1, 7, 2, 13}, - {1, 7, 2, 17}, - 3, 1, MKLDNNPlugin::impl_desc_type::ref - }, - concat_test_params { - {1, 8, 8, 16}, - {1, 16, 8, 16}, - 1, 4, MKLDNNPlugin::impl_desc_type::unknown - }, - concat_test_params { - {3, 7, 2, 5}, - {3, 13, 2, 5}, - 1, 2, MKLDNNPlugin::impl_desc_type::unknown - }, - concat_test_params { - {2, 2, 3, 3}, - {2, 3, 3, 3}, - 1, 2, MKLDNNPlugin::impl_desc_type::unknown - }, - concat_test_params { - {2, 2, 3, 3, 3}, - {2, 3, 3, 3, 3}, - 1, 2, MKLDNNPlugin::impl_desc_type::unknown - })); - -struct concat_param { - std::string name; - size_t axis; - size_t input1; - size_t input2; -}; - -struct two_concat_test_params { - // Formats: NCHW, NCDHW - vector in1; - vector in2; - vector in3; - - concat_param concat1; - concat_param concat2; -}; - -class MKLDNNGraphTwoConcatTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS_1__ - - - - - - __SRC_DIMS_2__ - - - - - - __SRC_DIMS_3__ - - - - - - - - _CI41N_ - _CI41C_ - _CI41D_ - _CI41H_ - _CI41W_ - - - _CI42N_ - _CI42C_ - _CI42D_ - _CI42H_ - _CI42W_ - - - - __CO_DIMS_1__ - - - - - - - - _CI51N_ - _CI51C_ - _CI51D_ - _CI51H_ - _CI51W_ - - - _CI52N_ - _CI52C_ - _CI52D_ - _CI52H_ - _CI52W_ - - - - __CO_DIMS_2__ - - - - - - - - - - - -)V0G0N"; - void changeEdgeToLayer(std::string& model, int f_l, int f_p, int t_l, int t_p, vector dims) { - std::string TL = "_FL" + std::to_string(f_l) + std::to_string(f_p) + "_"; - std::string TP = "_FP" + std::to_string(f_l) + std::to_string(f_p) + "_"; - if (!FIND_STR(model, TL) || !FIND_STR(model, TP)) { - if (!FIND_STR(model, "_FSL_") || !FIND_STR(model, "_FSP_") || - !FIND_STR(model, "_FSLTL_") || !FIND_STR(model, "_FSLTP_")) { - IE_THROW() << "Incorrect configuration!"; - } - REPLACE_WITH_NUM(model, "_FSL_", f_l); - REPLACE_WITH_NUM(model, "_FSP_", f_p); - REPLACE_WITH_NUM(model, "_FSLTL_", t_l); - REPLACE_WITH_NUM(model, "_FSLTP_", t_p); - } else { - REPLACE_WITH_NUM(model, TL, t_l); - REPLACE_WITH_NUM(model, TP, t_p); - } - - std::string CI = "_CI" + std::to_string(t_l) + std::to_string(t_p); - auto dims_size = dims.size(); - REPLACE_WITH_NUM(model, CI + "N_", dims[0]); - REPLACE_WITH_NUM(model, CI + "C_", dims[1]); - REPLACE_WITH_NUM(model, CI + "H_", dims[dims_size - 2]); - REPLACE_WITH_NUM(model, CI + "W_", dims[dims_size - 1]); - if (dims_size < 5) 
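// 4D inputs drop the depth placeholder from the XML template here; 5D inputs
// fill it with the real depth value in the else branch.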
REMOVE_LINE(model, std::string("") + CI + std::string("D_") + ""); - else REPLACE_WITH_NUM(model, CI + "D_", dims[dims_size - 3]); - } - - - std::string getModel(two_concat_test_params p) { - std::string model = model_t; - std::string s_dims; - for (size_t i = 0; i < p.in1.size(); i++) { - s_dims += "\n "; - s_dims += std::to_string(p.in1[i]) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims); - - s_dims = ""; - for (size_t i = 0; i < p.in2.size(); i++) { - s_dims += "\n "; - s_dims += std::to_string(p.in2[i]) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims); - - s_dims = ""; - for (size_t i = 0; i < p.in3.size(); i++) { - s_dims += "\n "; - s_dims += std::to_string(p.in3[i]) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS_3__", s_dims); - - vector concat11; - switch (p.concat1.input1) { - case 1: - changeEdgeToLayer(model, 2, 1, 4, 1, p.in2); - concat11 = p.in2; - break; - case 2: - changeEdgeToLayer(model, 3, 1, 4, 1, p.in3); - concat11 = p.in3; - break; - default: - changeEdgeToLayer(model, 1, 1, 4, 1, p.in1); - concat11 = p.in1; - } - - vector concat12; - switch (p.concat1.input2) { - case 1: - changeEdgeToLayer(model, 2, 1, 4, 2, p.in2); - concat12 = p.in2; - break; - case 2: - changeEdgeToLayer(model, 3, 1, 4, 2, p.in3); - concat12 = p.in3; - break; - default: - changeEdgeToLayer(model, 1, 1, 4, 2, p.in1); - concat12 = p.in1; - } - - vector concat21; - switch (p.concat2.input1) { - case 1: - changeEdgeToLayer(model, 2, 1, 5, 1, p.in2); - concat21 = p.in2; - break; - case 2: - changeEdgeToLayer(model, 3, 1, 5, 1, p.in3); - concat21 = p.in3; - break; - default: - changeEdgeToLayer(model, 1, 1, 5, 1, p.in1); - concat21 = p.in1; - } - - vector concat22; - switch (p.concat2.input2) { - case 1: - changeEdgeToLayer(model, 2, 1, 5, 2, p.in2); - concat22 = p.in2; - break; - case 2: - changeEdgeToLayer(model, 3, 1, 5, 2, p.in3); - concat22 = p.in3; - break; - default: - changeEdgeToLayer(model, 1, 1, 5, 2, p.in1); - concat22 = p.in1; - } - - s_dims = ""; - for (size_t i = 0; i < p.in2.size(); i++) { - size_t concat = p.concat1.axis == i ? concat11[i] + concat12[i] : concat21[i]; - s_dims += "\n "; - s_dims += std::to_string(concat) + ""; - } - REPLACE_WITH_STR(model, "__CO_DIMS_1__", s_dims); - - REPLACE_WITH_NUM(model, "_CONCAT1_AXIS_", p.concat1.axis); - REPLACE_WITH_STR(model, "_CONCAT1_NAME_", p.concat1.name); - - s_dims = ""; - for (size_t i = 0; i < p.in2.size(); i++) { - size_t concat = p.concat2.axis == i ? 
concat21[i] + concat22[i] : concat21[i]; - s_dims += "\n "; - s_dims += std::to_string(concat) + ""; - } - REPLACE_WITH_STR(model, "__CO_DIMS_2__", s_dims); - - REPLACE_WITH_NUM(model, "_CONCAT2_AXIS_", p.concat2.axis); - REPLACE_WITH_STR(model, "_CONCAT2_NAME_", p.concat2.name); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - two_concat_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src1 = p.in1; - InferenceEngine::SizeVector dims_src2 = p.in2; - InferenceEngine::SizeVector dims_src3 = p.in3; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in1.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, layout}); - src1->allocate(); - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, layout}); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src3, layout}); - src3->allocate(); - fill_data(src3->buffer(), src3->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - srcs.insert(std::pair("in3", src3)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - for (auto & it : out) { - std::pair item = it; - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - } - - graph.Infer(srcs, outputBlobs); - - for (const auto& concat : {p.concat1, p.concat2}) { - float *src1_ptr; - size_t src1_size; - float *src2_ptr; - size_t src2_size; - InferenceEngine::Blob::Ptr src1_c; - InferenceEngine::Blob::Ptr src2_c; - - switch (concat.input1) { - case 1: - src1_ptr = src2->buffer(); - src1_size = src2->size(); - src1_c = src2; - break; - case 2: - src1_ptr = src3->buffer(); - src1_size = src3->size(); - src1_c = src3; - break; - default: - src1_ptr = src1->buffer(); - src1_size = src1->size(); - src1_c = src1; - } - - switch (concat.input2) { - case 1: - src2_ptr = src2->buffer(); - src2_size = src2->size(); - src2_c = src2; - break; - case 2: - src2_ptr = src3->buffer(); - src2_size = src3->size(); - src2_c = src3; - break; - default: - src2_ptr = src1->buffer(); - src2_size = src1->size(); - src2_c = src1; - } - - float *dst_ptr = outputBlobs[concat.name]->buffer(); - size_t dst_size = outputBlobs[concat.name]->size(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = concat.axis; dim < outputBlobs[concat.name]->getTensorDesc().getDims().size(); dim++) { - len1 *= src1_c->getTensorDesc().getDims()[dim]; - len2 *= src2_c->getTensorDesc().getDims()[dim]; - } - cycles = concat.axis; - - int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (src1_ptr[index1] != dst_ptr[index]) - { - 
FAIL() << concat.name << " index: " << index << " src: " - << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (src2_ptr[index2] != dst_ptr[index]) - { - FAIL() << concat.name << " index: " << index << " src: " - << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphTwoConcatTests, TestsTwoConcat) {} - -INSTANTIATE_TEST_CASE_P( - TestsTwoConcat, MKLDNNGraphTwoConcatTests, - ::testing::Values( - two_concat_test_params { - {1, 5, 2, 5}, - {3, 5, 2, 5}, - {1, 5, 2, 5}, - {"concat1", 0, 0, 1}, - {"concat2", 0, 1, 2} - }, - two_concat_test_params { - {1, 2, 2, 5}, - {1, 5, 2, 5}, - {3, 5, 2, 5}, - {"concat1", 1, 0, 1}, - {"concat2", 0, 1, 2} - }, - two_concat_test_params { - {1, 2, 2, 2}, - {1, 1, 2, 2}, - {1, 3, 2, 2}, - {"concat1", 1, 0, 1}, - {"concat2", 1, 1, 2} - }, - two_concat_test_params { - {1, 5, 2, 5}, - {3, 5, 2, 5}, - {1, 5, 2, 5}, - {"concat1", 0, 0, 1}, - {"concat2", 0, 2, 1} - }, - two_concat_test_params { - {1, 2, 2, 5}, - {1, 5, 2, 5}, - {3, 5, 2, 5}, - {"concat1", 1, 0, 1}, - {"concat2", 0, 2, 1} - }, - two_concat_test_params { - {1, 2, 2, 2}, - {1, 1, 2, 2}, - {1, 3, 2, 2}, - {"concat1", 1, 0, 1}, - {"concat2", 1, 2, 1} - })); - - -class MKLDNNGraphTwoInputInConcatTests: public TestsCommon { - std::string model_t = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - 1 - 2 - 2 - 2 - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 2 - 2 - 2 - - - 1 - 3 - 2 - 2 - - - - - 1 - 5 - 2 - 2 - - - - - - - - - - - -)V0G0N"; - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - std::string model = model_t; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src1 = {1, 3, 2, 2}; - InferenceEngine::SizeVector dims_src2 = {1, 2, 2, 2}; - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - float *src1_data = src1->buffer(); - for (size_t i = 0; i < src1->size(); i++) { - src1_data[i] = i + 1; - } - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - for (auto & it : out) { - std::pair item = it; - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - } - - graph.Infer(srcs, outputBlobs); - - float *src1_ptr = src2->buffer(); - float *src2_ptr = src1->buffer(); - - float *dst_ptr = outputBlobs["o_concat"]->buffer(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = 1; dim < outputBlobs["o_concat"]->getTensorDesc().getDims().size(); dim++) { - len1 *= src2->getTensorDesc().getDims()[dim]; - len2 *= src1->getTensorDesc().getDims()[dim]; - } - cycles = 1; - - 
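// Deliberate swap: src1_ptr reads from src2 because the 2-channel "in2" tensor
// feeds the concat's first input port in the model above, while the "in1" branch
// arrives on the second port; the block comparison below follows that port order.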
int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (src1_ptr[index1] != dst_ptr[index]) - { - FAIL() << "concat index: " << index << " src: " - << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (src2_ptr[index2] != dst_ptr[index]) - { - FAIL() << "concat index: " << index << " src: " - << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_F(MKLDNNGraphTwoInputInConcatTests, TestSecondInputToConcat) {} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp deleted file mode 100644 index 199a92641db3b3..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include -#include "tests_common.hpp" -#include -#include - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct conv_test_params { - // Formats: NCHW, NCDHW - vector dims; - // Formats: WH, WHD - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - - size_t out_c; - size_t grp_c; - string auto_pad; - - size_t num_prim_desc; - - int selectedType; - bool defaultPrimitivesPriority; - vector preferTypes; - - vector> comp; -}; - -template -void ref_conv(const TBlob &src, const data_t *weights, const size_t weightsSize, - TBlob &dst, struct conv_test_params prm) { - SizeVector src_dims = src.getTensorDesc().getDims(); - auto dims_size = src_dims.size(); - - size_t KW = prm.kernel[X_AXIS]; - size_t KH = prm.kernel[Y_AXIS]; - size_t KD = dims_size == 5 ? prm.kernel[Z_AXIS] : 1u; - size_t GC = prm.grp_c; - - size_t IC = src_dims[1]; - size_t ID = dims_size == 5 ? src_dims[dims_size - 3] : 1u; - size_t IH = src_dims[dims_size - 2]; - size_t IW = src_dims[dims_size - 1]; - - size_t OW = (IW + prm.pads_end[X_AXIS] + prm.pads_begin[X_AXIS] - prm.kernel[X_AXIS]) / prm.strides[X_AXIS] + 1u; - size_t OH = (IH + prm.pads_end[Y_AXIS] + prm.pads_begin[Y_AXIS] - prm.kernel[Y_AXIS]) / prm.strides[Y_AXIS] + 1u; - size_t OD = dims_size == 5 ? 
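// Standard convolution output-size formula: O = (I + pad_begin + pad_end - K) / stride + 1.
// Note the depth path uses 2u * pads_begin, which is equivalent only for
// symmetric padding -- true of every 5D case instantiated below.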
(ID + 2u * prm.pads_begin[Z_AXIS] - prm.kernel[Z_AXIS]) / prm.strides[Z_AXIS] + 1u : 1u; - size_t OC = prm.out_c; - - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + KW * KH * KD * OC * IC / GC; - data_t *dst_data = dst.data(); - - IE_ASSERT(KW * KH * KD * OC * IC / GC + OC == weightsSize); - SizeVector dst_dims = dst.getTensorDesc().getDims(); - auto dst_dims_size = dst_dims.size(); - IE_ASSERT(OW == dst_dims[dst_dims_size - 1]); - IE_ASSERT(OH == dst_dims[dst_dims_size - 2]); - - size_t SC1 = OH * OW; - size_t SC2 = SC1 * OD; - size_t SC3 = OC / GC; - size_t SC4 = SC2 * SC3; - - size_t IC1 = IH * IW; - size_t IC2 = IC1 * ID; - size_t IC3 = IC / GC; - size_t IC4 = IC2 * IC3; - - size_t KC1 = KH * KW; - size_t KC2 = KC1 * KD; - size_t KC3 = IC3 * KC2; - size_t KC4 = SC3 * KC3; - - for (uint32_t g = 0; g < GC; g++) { - size_t gc = g * SC4; - size_t goc = g * SC3; - size_t gic = g * IC4; - size_t gkc = g * KC4; - for (uint32_t oc = 0; oc < OC / GC; oc++) { - size_t cc = gc + oc * SC2; - size_t gooc = goc + oc; - size_t gkoc = gkc + oc * KC3; - for (uint32_t od = 0; od < OD; od++) { - size_t dc = cc + od * SC1; - for (uint32_t oh = 0; oh < OH; oh++) { - size_t hc = dc + oh * OW; - for (uint32_t ow = 0; ow < OW; ow++) { - size_t oidx = hc + ow; - - dst_data[oidx] = bias_data[gooc]; - - for (size_t ic = 0; ic < IC / GC; ic++) { - size_t icc = gkoc + ic * KC2; - size_t kicc = gic + ic * IC2; - for (size_t kd = 0; kd < KD; kd++) { - int32_t id = dims_size == 5 ? od * prm.strides[Z_AXIS] - prm.pads_begin[Z_AXIS] + kd : 0; - if (id < 0 || id >= (int32_t)ID) continue; - size_t kidc = kicc + id * IC1; - size_t kdc = icc + kd * KC1; - for (size_t kh = 0; kh < KH; kh++) { - int32_t ih = oh * prm.strides[Y_AXIS] - prm.pads_begin[Y_AXIS] + kh; - if (ih < 0 || ih >= (int32_t)IH) continue; - size_t kihc = kidc + ih * IW; - size_t khc = kdc + kh * KW; - for (size_t kw = 0; kw < KW; kw++) { - int32_t iw = ow * prm.strides[X_AXIS] - prm.pads_begin[X_AXIS] + kw; - if (iw < 0 || iw >= (int32_t)IW) continue; - - size_t iidx = kihc + iw; - size_t widx = khc + kw; - - dst_data[oidx] += src_data[iidx] * weights_data[widx]; - } - } - } - } - } - } - } - } - } -} - -class MKLDNNGraphConvolutionTests: public TestsCommon, - public WithParamInterface { - std::string model_t_5D = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - - - - - - - __SRC_DIMS__ - - - - - _IN_ - _OC___DST_DIMS__ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(conv_test_params p) { - std::string model = model_t_5D; - std::string s_dims; - for (auto& dim : p.dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - s_dims = ""; - int k_len = p.kernel.size(); - for (size_t i = 2; i < p.dims.size(); i++) { - size_t inx = k_len - i + 1; - size_t dim = (p.dims[i] + p.pads_end[inx] + p.pads_begin[inx] - p.kernel[inx]) / p.strides[inx] + 1lu; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end); - string auto_pad; - if (!p.auto_pad.empty()) auto_pad = string("auto_pad=") + string("\"") + p.auto_pad + string("\""); - REPLACE_WITH_STR(model, "_AP_", auto_pad); 
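// Weights blob layout assumed by ref_conv: KW*KH*KD * OC * (IC/GC) filter floats
// followed by OC bias floats; _S1_ and _S2_ computed below are the matching byte sizes.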
- - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - - size_t w_data_size = 1; - for (auto ker : p.kernel) { - w_data_size *= ker; - } - - w_data_size = (w_data_size * p.out_c * p.dims[1] / p.grp_c) * sizeof(float); - size_t b_data_size = p.out_c * sizeof(float); - - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - std::string primitivesPriorityStr; - if (!p.defaultPrimitivesPriority) { - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - primitivesPriorityStr = "PrimitivesPriority=\"" + impls + "\""; - } - REPLACE_WITH_STR(model, "_PRIM_PRIORITY_", primitivesPriorityStr); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - conv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t blob_size = p.out_c * p.dims[1] / p.grp_c; - for (auto k : p.kernel) { - blob_size *= k; - } - blob_size = (blob_size + p.out_c) * sizeof(float); - TBlob *weights = new TBlob - ({ Precision::U8, {blob_size}, C }); - weights->allocate(); - - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - bool isWino = false; - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Convolution) { - ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (const auto prim : node->getSupportedPrimitiveDescriptors()) { - if (p.defaultPrimitivesPriority) { - if (prim.getImplementationType() & MKLDNNPlugin::impl_desc_type::gemm) - FAIL() << "There should be no gemm implementation in supportedPrimitiveDescriptors"; - } - std::cout << MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(prim.getImplementationType()) << " "; - } - std::cout << std::endl; - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - if (InferenceEngine::with_cpu_x86_avx512f() && - InferenceEngine::with_cpu_x86_avx512_core() - && !p.preferTypes.empty() - && p.preferTypes[0] == MKLDNNPlugin::impl_desc_type::jit_avx512_winograd) { - isWino = true; - ASSERT_EQ(p.preferTypes[0], node->getSelectedPrimitiveDescriptor()->getImplementationType()); - } else { - ASSERT_EQ(p.selectedType, - node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - } - - Layout layout = ANY; - switch (p.dims.size()) { - case 4: - layout = NCHW; - break; - case 5: - layout = NCDHW; - break; - } - - Blob::Ptr src = make_shared_blob - ({ Precision::FP32, p.dims, layout }); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - 
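// The compare() below uses a 0.0002f tolerance: jit kernels accumulate partial
// sums in a different order than the naive reference loop, so bit-exact FP32
// equality is not expected.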
output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - ref_conv(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p); - compare(*output, dst_ref, 0.0002f); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphConvolutionTests, TestsConvolution) {} - - -INSTANTIATE_TEST_CASE_P( - TestConvolution, MKLDNNGraphConvolutionTests, - ::testing::Values( - /*0*/ conv_test_params{{1, 9, 16, 32}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 6, - MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1, false }, - conv_test_params{{1, 9, 32, 16}, {2, 4}, {1, 1}, {1, 1}, {0, 2}, 17, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 9, 32, 16}, {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 3, 40, 40}, {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 1, 40, 40}, {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 1, 32, 16}, {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 9, 32, 16}, {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 4, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 4, 54, 96}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, 64, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, false }, - // 5D - /*8*/ conv_test_params{{1, 3, 15, 20, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 24, 15, 20, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 32, 15, 20, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 3, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 24, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - /*13*/ conv_test_params{{1, 32, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 16, 30, 30, 10}, {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false }, - conv_test_params{{1, 16, 30, 30, 10}, {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, false} )); - -#ifdef USE_MKL -INSTANTIATE_TEST_CASE_P( - MKLTestConvolution, MKLDNNGraphConvolutionTests, - ::testing::Values( - conv_test_params{{1, 9, 16, 32}, - {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 6, MKLDNNPlugin::impl_desc_type::gemm, false, - {MKLDNNPlugin::impl_desc_type::gemm_any, - MKLDNNPlugin::impl_desc_type::gemm_blas, - MKLDNNPlugin::impl_desc_type::gemm_avx512, - MKLDNNPlugin::impl_desc_type::gemm_avx2, - MKLDNNPlugin::impl_desc_type::gemm_sse42} }, - conv_test_params{{1, 5, 15, 20, 20}, - {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas, false, - {MKLDNNPlugin::impl_desc_type::gemm_blas} }, - conv_test_params{{1, 5, 15, 20, 20}, - {3, 3, 3}, {3, 2, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas, false, - 
{MKLDNNPlugin::impl_desc_type::gemm_blas} }, - // conv_test_params{{1, 5, 15, 20, 20}, - // {3, 3, 3}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas, false, - // {MKLDNNPlugin::impl_desc_type::gemm_blas} }, - conv_test_params{{1, 16, 30, 30, 10}, - {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas, false, - {MKLDNNPlugin::impl_desc_type::gemm_blas} }, - conv_test_params{{1, 4, 16, 16, 16}, - {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 8, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas, false, - {MKLDNNPlugin::impl_desc_type::gemm_blas} } )); -#endif - -INSTANTIATE_TEST_CASE_P( - TestConvolutionDefaultPrimitivesPriority, MKLDNNGraphConvolutionTests, - ::testing::Values( - /*0*/ conv_test_params{{1, 9, 16, 32}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 6, - MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1, true }, - conv_test_params{{1, 9, 32, 16}, {2, 4}, {1, 1}, {1, 1}, {0, 2}, 17, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 9, 32, 16}, {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 3, 40, 40}, {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 1, 40, 40}, {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 1, 32, 16}, {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 3, MKLDNNPlugin::impl_desc_type::jit, true }, - // 5D - /*6*/ conv_test_params{{1, 3, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 24, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 32, 15, 25, 20}, {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, true }, - conv_test_params{{1, 16, 30, 30, 10}, {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit, true } )); - - -class MKLDNNGraphDynBatchConvolutionTests: public MKLDNNGraphConvolutionTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - conv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - std::vector dims = p.dims; - if (dims[0] < 2) - dims[0] = 2; - - size_t blob_size = p.out_c * dims[1] / p.grp_c; - for (auto k : p.kernel) { - blob_size *= k; - } - blob_size = (blob_size + p.out_c) * sizeof(float); - TBlob *weights = new TBlob({ Precision::U8, {blob_size}, Layout::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - TBlob::Ptr weights_ptr = TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - ResponseDesc resp; - StatusCode sts = implNet->setBatchSizeReshape(dims[0], &resp); - ASSERT_EQ((int)StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - Layout layout = ANY; - switch (dims.size()) { - case 4: - layout = NCHW; - break; - case 5: - layout = NCDHW; - break; - } - - Blob::Ptr src = make_shared_blob({ Precision::FP32, dims, layout }); - src->allocate(); - fill_data(src->buffer(), 
src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - BlobMap outputBlobs; - - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkConvolution = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Convolution; - }; - - graph.checkDynBatch(srcs, outputBlobs, dims[0], dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child); - graph.checkDynBatch(srcs, outputBlobs, 1, dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchConvolutionTests, TestsDynBatchConvolution) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests, - ::testing::Values( - conv_test_params{{1, 8, 16, 32}, - {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 7, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}}, - conv_test_params{{1, 9, 32, 16}, - {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} }, - conv_test_params{{1, 9, 32, 16}, - {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} }, - conv_test_params{{1, 3, 40, 40}, - {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} }, - conv_test_params{{1, 1, 40, 40}, - {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} }, - conv_test_params{{1, 1, 32, 16}, - {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit, - false, {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} }, - conv_test_params{{1, 9, 32, 16}, - {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any, - false, {MKLDNNPlugin::impl_desc_type::ref_any} } )); - -#ifdef USE_MKL -INSTANTIATE_TEST_CASE_P( - MKLTestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests, - ::testing::Values( - conv_test_params{{1, 9, 16, 32}, - {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 7, MKLDNNPlugin::impl_desc_type::gemm, false, - {MKLDNNPlugin::impl_desc_type::gemm_any, - MKLDNNPlugin::impl_desc_type::gemm_blas, - MKLDNNPlugin::impl_desc_type::gemm_avx512, - MKLDNNPlugin::impl_desc_type::gemm_avx2, - MKLDNNPlugin::impl_desc_type::gemm_sse42} - })); -#endif - diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp deleted file mode 100644 index c95f0df5c3be64..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" 
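// For reference while reading the deleted ref_deconv below: output extents follow the usual transposed-convolution relation. A minimal, self-contained sketch (hypothetical helper name, not from this file):
#include <cstddef>
inline size_t deconv_out_dim(size_t in, size_t kernel, size_t stride, size_t pad) {
    return stride * (in - 1) + kernel - 2 * pad;
}
// e.g. in = 5, kernel = 4, stride = 2, pad = 1 gives 2 * 4 + 4 - 2 = 10.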
-#include "tests_common.hpp" - -#include -#include -#include - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace mkldnn; -using namespace single_layer_tests; - - -struct deconv_test_params { - // Formats: NCHW, NCDHW - vector dims; - // Formats: WH, WHD - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - - size_t out_c; - size_t grp_c; - - bool with_bias; - string auto_pad; - - size_t num_prim_desc; - - std::vector selectedTypes; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void ref_deconv(const InferenceEngine::TBlob &src, const InferenceEngine::Blob::Ptr &weights, const InferenceEngine::Blob::Ptr &bias, - InferenceEngine::TBlob &dst, struct deconv_test_params prm) { - auto dims_size = src.getTensorDesc().getDims().size(); - - size_t G = prm.grp_c; - size_t KW = prm.kernel[X_AXIS]; - size_t KH = prm.kernel[Y_AXIS]; - size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1u; - - size_t PW = prm.pads_begin[X_AXIS]; - size_t PH = prm.pads_begin[Y_AXIS]; - size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0u; - - size_t SW = prm.strides[X_AXIS]; - size_t SH = prm.strides[Y_AXIS]; - size_t SD = prm.strides.size() > Z_AXIS ? prm.strides[Z_AXIS] : 1u; - - size_t IW = src.getTensorDesc().getDims()[dims_size - 1]; - size_t IH = src.getTensorDesc().getDims()[dims_size - 2]; - size_t ID = dims_size == 5 ? src.getTensorDesc().getDims()[dims_size - 3] : 1u; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t MB = src.getTensorDesc().getDims()[0]; - - size_t OC = prm.out_c; - - size_t OW = SW * (IW - 1lu) + KW - 2lu * PW; - size_t OH = SH * (IH - 1lu) + KH - 2lu * PH; - size_t OD = dims_size == 5 ? (SD * (ID - 1) + KD - 2 * PD) : 1u; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights->buffer().as(); - const data_t *bias_data = bias->buffer().as(); - - data_t *dst_data = dst.data(); - - size_t CS1 = OH * OW; - size_t CS2 = CS1 * OD; - size_t CS3 = CS2 * OC; - - size_t CI1 = IH * IW; - size_t CI2 = CI1 * ID; - size_t CI3 = CI2 * IC; - - size_t OC_G = OC / G; - size_t IC_G = IC / G; - - size_t CK1 = KH * KW; - size_t CK2 = CK1 * KD; - size_t CK3 = CK2 * OC_G; - size_t CK4 = CK3 * IC_G; - - for (size_t g = 0lu; g < G; ++g) { - size_t g_OC_G = g * OC_G; - size_t g_IC_G = g * IC_G; - size_t g_CK4 = g * CK4; - for (size_t mb = 0lu; mb < MB; ++mb) { - size_t mb_CS3 = mb * CS3; - size_t mb_CI3 = mb * CI3; - for (size_t oc = 0lu; oc < OC_G; ++oc) { - size_t g_OC_G_oc = g_OC_G + oc; - size_t mb_CS3_g_OC_G_oc_CS2 = mb_CS3 + g_OC_G_oc * CS2; - size_t g_CK4_oc_CK2 = g_CK4 + oc * CK2; - for (size_t od = 0lu; od < OD; ++od) { - size_t mb_CS3_g_OC_G_oc_CS2_od_CS1 = mb_CS3_g_OC_G_oc_CS2 + od * CS1; - size_t od_PD = od + PD; - for (size_t oh = 0lu; oh < OH; ++oh) { - size_t mb_CS3_g_OC_G_oc_CS2_od_CS1_oh_OW = mb_CS3_g_OC_G_oc_CS2_od_CS1 + oh * OW; - size_t oh_PH = oh + PH; - for (size_t ow = 0lu; ow < OW; ++ow) { - size_t didx = mb_CS3_g_OC_G_oc_CS2_od_CS1_oh_OW + ow; - size_t ow_PW = ow + PW; - - dst_data[didx] = data_t(0); - if (prm.with_bias) dst_data[didx] += bias_data[g_OC_G_oc]; - - for (size_t ic = 0lu; ic < IC_G; ic++) { - size_t mb_CI3_g_IC_G_ic_CI2 = mb_CI3 + (g_IC_G + ic) * CI2; - size_t g_CK4_oc_CK2_ic_CK3 = g_CK4_oc_CK2 + ic * CK3; - for (int kd = 0lu; kd < KD; kd++) { - if (od_PD < kd) continue; - size_t id = od_PD - kd; - if (id % SD != 0) continue; - id /= SD; - if (id >= ID) continue; - size_t mb_CI3_g_IC_G_ic_CI2_id_CI1 = mb_CI3_g_IC_G_ic_CI2 
+ id * CI1; - size_t g_CK4_oc_CK2_ic_CK3_kd_CK1 = g_CK4_oc_CK2_ic_CK3 + kd * CK1; - for (size_t kh = 0lu; kh < KH; kh++) { - if (oh_PH < kh) continue; - size_t ih = oh_PH - kh; - if (ih % SH != 0) continue; - ih /= SH; - if (ih >= IH) continue; - size_t mb_CI3_g_IC_G_ic_CI2_id_CI1_ih_IW = mb_CI3_g_IC_G_ic_CI2_id_CI1 + ih * IW; - size_t g_CK4_oc_CK2_ic_CK3_kd_CK1_kh_KW = g_CK4_oc_CK2_ic_CK3_kd_CK1 + kh * KW; - for (size_t kw = 0lu; kw < KW; kw++) { - if (ow_PW < kw) continue; - size_t iw = ow_PW - kw; - if (iw % SW != 0) continue; - iw /= SW; - if (iw >= IW) continue; - - size_t sidx = mb_CI3_g_IC_G_ic_CI2_id_CI1_ih_IW + iw; - - size_t widx = g_CK4_oc_CK2_ic_CK3_kd_CK1_kh_KW + kw; - - dst_data[didx] += src_data[sidx] * weights_data[widx]; - } - } - } - } - } - } - } - } - } - } -} - -class MKLDNNGraphDeconvolutionalTests: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - __SRC_DIMS__ - - - - - _IN_ - _OC_ - __DST_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - -protected: - std::string getModel(deconv_test_params p) { - std::string model = layers_t; - - std::string s_dims; - for (auto& dim : p.dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - s_dims = ""; - int k_len = p.kernel.size(); - for (size_t i = 2; i < p.dims.size(); i++) { - size_t inx = k_len - i + 1; - size_t dim = p.strides[inx] * (p.dims[i] - 1) + p.kernel[inx] - 2 * p.pads_begin[inx]; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims); - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - - if (!p.with_bias) REMOVE_LINE(model, ""); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end); - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - string auto_pad; - if (!p.auto_pad.empty()) auto_pad = string("auto_pad=") + string("\"") + p.auto_pad + string("\""); - REPLACE_WITH_STR(model, "_AP_", auto_pad); - - size_t blob_size = p.out_c * (p.dims[1] / p.grp_c); - for (auto k : p.kernel) { - blob_size *= k; - } - size_t w_data_size = blob_size * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - model = IRTemplateGenerator::getIRTemplate("Deconvolution_Only", p.dims, "FP32", model, edges_t); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - deconv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t blob_size = p.out_c * (p.dims[1] / p.grp_c); - for (auto k : p.kernel) { - blob_size *= k; - } - InferenceEngine::SizeVector dims_weights = { blob_size }; - - std::vector blob_to_model; - InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, dims_weights, InferenceEngine::C }); - weights->allocate(); - fill_data(weights->buffer().as(), weights->size()); - blob_to_model.push_back(weights); - - 
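// ref_deconv above gathers rather than scatters: each output coordinate is mapped back through the forward stride, and a kernel tap contributes only when (out + pad - k) is non-negative, divisible by the stride, and lands inside the input. The per-axis test, sketched with a hypothetical helper:
//
//     bool deconv_src_index(size_t out, size_t k, size_t pad, size_t stride,
//                           size_t in_size, size_t &in_idx) {
//         if (out + pad < k) return false;      // tap precedes the input start
//         size_t t = out + pad - k;
//         if (t % stride != 0) return false;    // falls between strided positions
//         in_idx = t / stride;
//         return in_idx < in_size;              // clip at the input end
//     }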
InferenceEngine::Blob::Ptr bias = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {p.out_c}, InferenceEngine::C }); - bias->allocate(); - fill_data(bias->buffer().as(), bias->size()); - blob_to_model.push_back(bias); - - size_t total_size_in_bytes = 0; - for (InferenceEngine::Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize(); - - InferenceEngine::TBlob::Ptr model_blob = - InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U8, {total_size_in_bytes}, InferenceEngine::C }); - model_blob->allocate(); - uint8_t* model_blob_ptr = model_blob->buffer().as(); - for (InferenceEngine::Blob::Ptr blb : blob_to_model) { - memcpy(model_blob_ptr, blb->buffer().as(), blb->byteSize()); - model_blob_ptr += blb->byteSize(); - } - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Deconvolution) { - ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor()); - bool good_prim = false; - for (auto & selected : p.selectedTypes) - if (selected == (node->getSelectedPrimitiveDescriptor()->getImplementationType() & selected)) - good_prim = true; - ASSERT_TRUE(good_prim); - } - } - - InferenceEngine::SizeVector dims_src = p.dims; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob( - {InferenceEngine::Precision::FP32, dims_src, InferenceEngine::TensorDesc::getLayoutByDims(p.dims)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_deconv(*srcPtr, weights, bias, dst_ref, p); - - compare(*output, dst_ref, 0.0002f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDeconvolutionalTests, TestsDeconvolution) {} - -// deconv_test_params(dims, kernel, strides, pads_begin, pads_end, out_c, grp_c, with_bias, auto_pad, num_prim_desc, -// selectedTypes, preferTypes, comp) - -size_t expected_num_prim_desc = InferenceEngine::with_cpu_x86_avx2() ? 
3 : 2; - - -INSTANTIATE_TEST_CASE_P( - TestDeconvolution, MKLDNNGraphDeconvolutionalTests, - ::testing::Values( - /*0*/ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - /*5*/ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::ref_any}, - {MKLDNNPlugin::impl_desc_type::ref_any}} - )); - -INSTANTIATE_TEST_CASE_P( - TestDeconvolutionWithBias, MKLDNNGraphDeconvolutionalTests, - ::testing::Values( - /*0*/ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 128, 3, 3}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 128, 128, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - /*5*/ deconv_test_params{{2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 16, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 16, 16, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}, - deconv_test_params{{2, 8, 5, 5}, {1, 3}, {1, 1}, {0, 1}, {0, 1}, 8, 8, true, "", 2, - {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}}, - deconv_test_params{{1, 6, 6, 5}, {3, 1}, {1, 1}, {1, 0}, {1, 0}, 9, 3, true, "", 2, - {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}}, - deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "", - InferenceEngine::with_cpu_x86_avx2() ? 4ul : 3ul, - {MKLDNNPlugin::impl_desc_type::jit}}, - /*10*/ deconv_test_params{{2, 48, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 48, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{2, 48, 3, 3}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 192, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 1, true, "", 3, {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{2, 72, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "", - InferenceEngine::with_cpu_x86_avx2() ? 
4ul : 3ul, - {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{1, 12, 2, 2}, {4, 4}, {2, 2}, {1, 1}, {1, 1}, 12, 12, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}, -// In case of SSE or pure AVX there is no JIT implementation -// deconv_test_params{{1, 32, 5, 5}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 16, 1, true, "", -// 2, {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{1, 48, 3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 96, 3, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit}}, - // 5D - deconv_test_params{{1, 2, 8, 5, 5}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 4, 1, true, "", 4, - {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any} }, - deconv_test_params{{1, 6, 5, 5, 5}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 9, 3, true, "", 2, - {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any} }, - deconv_test_params{{2, 24, 5, 5, 5}, {4, 4, 4}, {2, 2, 1}, {1, 1, 1}, {0, 0, 0}, 24, 3, true, "", - InferenceEngine::with_cpu_x86_avx2() ? 4ul : 3ul, - {MKLDNNPlugin::impl_desc_type::jit}}, - deconv_test_params{{2, 48, 5, 5, 5}, {4, 4, 4}, {2, 2, 1}, {1, 1, 1}, {0, 0, 0}, 48, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}} - // Blocked, with biases - // TODO support on jit -// deconv_test_params{{2, 24, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}, -// deconv_test_params{{2, 24, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 1, true, "", 3, {MKLDNNPlugin::impl_desc_type::jit}}, -// deconv_test_params{{2, 72, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}} - )); - -#ifdef USE_MKL -INSTANTIATE_TEST_CASE_P( - MKLTestDeconvolution, MKLDNNGraphDeconvolutionalTests, - ::testing::Values( - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}}, - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, true, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}}, - deconv_test_params{{1, 6, 6, 5}, {3, 1}, {1, 1}, {1, 0}, {1, 0}, 9, 3, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - deconv_test_params{{1, 64, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 32, 1, true, "", 4, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 }}, - deconv_test_params{{1, 32, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 16, 1, true, "", 4, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 } }, - deconv_test_params{{1, 25, 
1, 1, 1}, {4, 4, 4}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, true, "valid", 3, - {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{1, 32, 16, 16, 16}, {4, 4, 4}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, 1, 1, true, "same_upper", 3, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 } }, - deconv_test_params{{1, 64, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 32, 1, true, "same_upper", 3, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 } }, - deconv_test_params{{1, 50, 1, 1, 1}, {4, 4, 4}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 128, 1, true, "", 3, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 }, - {MKLDNNPlugin::impl_desc_type::gemm_blas, MKLDNNPlugin::impl_desc_type::jit_avx512 }} )); -#endif - - -class MKLDNNGraphDynBatchDeconvolutionalTests: public MKLDNNGraphDeconvolutionalTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - deconv_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - - size_t blob_size = 1; - for (auto k : p.kernel) { - blob_size *= k; - } - InferenceEngine::SizeVector dims_weights = {blob_size * p.out_c * (p.dims[1] / p.grp_c)}; - - std::vector blob_to_model; - InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, dims_weights, InferenceEngine::C }); - weights->allocate(); - fill_data(weights->buffer().as(), weights->size()); - blob_to_model.push_back(weights); - - InferenceEngine::Blob::Ptr bias = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {p.out_c}, InferenceEngine::C }); - bias->allocate(); - fill_data(bias->buffer().as(), bias->size()); - blob_to_model.push_back(bias); - - size_t total_size_in_bytes = 0; - for (InferenceEngine::Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize(); - - InferenceEngine::TBlob::Ptr model_blob = - InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U8, {total_size_in_bytes}, InferenceEngine::C }); - model_blob->allocate(); - uint8_t* model_blob_ptr = model_blob->buffer().as(); - for (InferenceEngine::Blob::Ptr blb : blob_to_model) { - memcpy(model_blob_ptr, blb->buffer().as(), blb->byteSize()); - model_blob_ptr += blb->byteSize(); - } - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob( - {InferenceEngine::Precision::FP32, p.dims, InferenceEngine::TensorDesc::getLayoutByDims(p.dims)}); - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; 
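// The packing above concatenates the weights blob and then the bias blob into one U8 model blob before ReadNetwork. A sketch with the template arguments (lost from this listing) restored, assuming the InferenceEngine API of this snapshot:
//
//     size_t total = 0;
//     for (const auto &b : blob_to_model) total += b->byteSize();
//     auto model_blob = InferenceEngine::make_shared_blob<uint8_t>(
//         {InferenceEngine::Precision::U8, {total}, InferenceEngine::Layout::C});
//     model_blob->allocate();
//     uint8_t *dst = model_blob->buffer().as<uint8_t *>();
//     for (const auto &b : blob_to_model) {
//         memcpy(dst, b->buffer().as<uint8_t *>(), b->byteSize());  // weights first, then bias
//         dst += b->byteSize();
//     }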
- - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkDeconvolution = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Deconvolution; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchDeconvolutionalTests, TestsDynBatchDeconvolutional) {} - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests, - ::testing::Values( - // TODO: rewrite to ngraph to have reshape functionality - // deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{ {2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw} }, - deconv_test_params{ {2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw} }, - deconv_test_params{ {2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw} } )); - -#ifdef USE_MKL -INSTANTIATE_TEST_CASE_P( - MKLTestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests, - ::testing::Values( - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} }, - deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}} )); -#endif diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp deleted file mode 100644 index e00f6da874b5a8..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include -#include - -using namespace MKLDNNPlugin; -using namespace mkldnn; -using namespace ::testing; - -using std::vector; -using std::function; - -constexpr auto depthwise_scale_shift = mkldnn::algorithm::depthwise_scale_shift; -constexpr auto depthwise_prelu = mkldnn::algorithm::depthwise_prelu; - -struct depthwise_test_params { - algorithm alg; - - // Formats: NC, CHW (actually NCH), NCHW, NCDHW - vector dims; - - bool isBroadcast; - - size_t num_prim_desc; - - 
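// (Sketch note, not in the original struct:) `alg` selects the reference formula applied by ref_depthwise below -- depthwise_scale_shift computes dst = src * w[c] + b[c], while depthwise_prelu computes dst = src > 0 ? src : src * w[c]; with isBroadcast set, a single w/b pair is shared across all channels.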
MKLDNNPlugin::impl_desc_type selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void ref_depthwise(const InferenceEngine::TBlob &src, const data_t *weights, const size_t weightsSize, - InferenceEngine::TBlob &dst, depthwise_test_params prm) { - auto dims_size = src.getTensorDesc().getDims().size(); - - size_t MB = src.getTensorDesc().getDims()[0]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t ID = dims_size == 5 ? src.getTensorDesc().getDims()[2] : 1u; - size_t IH = dims_size < 3 ? 1 : dims_size == 3 ? src.getTensorDesc().getDims()[dims_size - 1] - : src.getTensorDesc().getDims()[dims_size - 2]; - size_t IW = dims_size < 4 ? 1 : src.getTensorDesc().getDims()[dims_size - 1]; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - size_t bias_offset = prm.isBroadcast ? 1 : IC; - const data_t *bias_data = weights_data + bias_offset; - data_t *dst_data = dst.data(); - - size_t c1 = IH * IW; - size_t c2 = ID * c1; - size_t c3 = IC * c2; - for (int mb = 0; mb < MB; mb++) { - size_t m1 = mb * c3; - for (int c = 0; c < IC; c++) { - size_t m2 = m1 + c * c2; - for (int d = 0; d < ID; d++) { - size_t m3 = m2 + d * c1; - for (int h = 0; h < IH; h++) { - size_t m4 = m3 + h * IW; - for (int w = 0; w < IW; w++) { - int idx = m4 + w; - - int widx = prm.isBroadcast ? 0 : c; - int bidx = prm.isBroadcast ? 0 : c; - - if (prm.alg == depthwise_scale_shift) - dst_data[idx] = src_data[idx] * weights_data[widx] + bias_data[bidx]; - else if (prm.alg == depthwise_prelu) - dst_data[idx] = src_data[idx] > 0 ? src_data[idx] : src_data[idx]*weights_data[widx]; - } - } - } - } - } -} - -class MKLDNNGraphDepthwiseTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(depthwise_test_params p) { - std::string model = model_t; - auto dims_size = p.dims.size(); - - if (dims_size < 5) - REMOVE_LINE(model, "_ID_"); - if (dims_size < 4) - REMOVE_LINE(model, "_IW_"); - if (dims_size < 3) - REMOVE_LINE(model, "_IH_"); - - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - - if (dims_size > 2) - REPLACE_WITH_NUM(model, "_IH_", dims_size == 3 ? p.dims[dims_size - 1] : p.dims[dims_size - 2]); - if (dims_size > 3) - REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]); - if (dims_size > 4) - REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]); - - if (p.alg == depthwise_scale_shift) { - REPLACE_WITH_STR(model, "_LT_", "ScaleShift"); - REPLACE_WITH_STR(model, "_P_NAME_", "broadcast"); - REPLACE_WITH_NUM(model, "_P_VAL_", p.isBroadcast ? 1 : 0); - } - else if (p.alg == depthwise_prelu) { - REPLACE_WITH_STR(model, "_LT_", "PReLU"); - REPLACE_WITH_STR(model, "_P_NAME_", "channel_shared"); - REPLACE_WITH_NUM(model, "_P_VAL_", p.isBroadcast ? 1 : 0); - } - - size_t array_size = p.isBroadcast ? 
1 : p.dims[1]; - size_t w_data_size = array_size * sizeof(float); - size_t b_data_size = array_size * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - return model; - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - depthwise_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t weightSize = 2 * p.dims[1] * sizeof(float); - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {weightSize}, InferenceEngine::C }); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Depthwise) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 2: layout = InferenceEngine::NC; break; - // InferenceEngine::Layout doesn't have alias for 3D NCH layout so we use CHW instead - case 3: layout = InferenceEngine::CHW; break; - case 4: layout = InferenceEngine::NCHW; break; - case 5: layout = InferenceEngine::NCDHW; break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_depthwise(*srcPtr, weights->readOnly().as(), weights->size() / sizeof(float), dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDepthwiseTests, TestsDepthwise) {} - -const size_t num_2d_impl = InferenceEngine::with_cpu_x86_avx2() ? 
3 : 2; - -INSTANTIATE_TEST_CASE_P( - TestsDepthwise, MKLDNNGraphDepthwiseTests, - ::testing::Values( - // 2D - depthwise_test_params{depthwise_scale_shift, {128, 32}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {4, 3 }, true, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {1, 1 }, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {37, 35}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {128, 32}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {4, 3 }, true, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {1, 1 }, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {37, 35}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {128, 32}, false, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 3 }, true, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_scale_shift, {1, 1 }, false, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_prelu, {128, 32}, false, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 3 }, true, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 1 }, false, num_2d_impl, ref, {ref_any}}, - // 4D - depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - 
depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // 5D - depthwise_test_params{depthwise_scale_shift, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - )); - -INSTANTIATE_TEST_CASE_P( - TestsDepthwise3D, MKLDNNGraphDepthwiseTests, - ::testing::Values( - depthwise_test_params{depthwise_scale_shift, {1, 32, 16}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {8, 32, 16}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 2}, true, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {37, 35, 17}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {128, 32, 19}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {4, 3, 2}, true, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {1, 1, 1}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_prelu, {37, 35, 17}, false, num_2d_impl, jit}, - depthwise_test_params{depthwise_scale_shift, {128, 32, 19}, false, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 2}, true, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_scale_shift, {1, 1, 1}, false, num_2d_impl, ref, {ref_any}}, - 
depthwise_test_params{depthwise_prelu, {128, 32, 17}, false, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 3, 19}, true, num_2d_impl, ref, {ref_any}}, - depthwise_test_params{depthwise_prelu, {1, 1, 1}, false, num_2d_impl, ref, {ref_any}} - )); - -class MKLDNNGraphDynBatchDepthwiseTests: public MKLDNNGraphDepthwiseTests { -protected: - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - depthwise_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {p.dims[1] * 4 * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data( weights->data().as(), weights->size() / sizeof(float)); - float * data = weights->buffer(); - for (size_t i = 0; i < weights->size() / sizeof(float); i++) { - if (data[i] < 0) { - data[i] *= -1; - } - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkDepthwise = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Depthwise; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkDepthwise); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkDepthwise); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchDepthwiseTests, TestsDynBatchDepthwise) {} - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchDepthwise, MKLDNNGraphDynBatchDepthwiseTests, - ::testing::Values( - depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_prelu, {4, 4, 10, 
10}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - // TODO: rewrite to ngraph to have reshape functionality - // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit}, - // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp deleted file mode 100644 index 6eb2b52ea88808..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NOMINMAX -#define NOMINMAX -#endif - -#include "test_graph.hpp" - -#include -#include "common_test_utils/data_utils.hpp" -#include "single_layer_common.hpp" -#include -#include -#include "tests_common.hpp" -#include - -using namespace ::testing; -using namespace 
std; -using namespace mkldnn; - -struct eltwise_test_params { - // Formats: NCHW, NCDHW - vector dims1; - vector dims2; - vector dims3; - - enum opType { - Sum = 0, Prod, Max, Min, Sub, Div, Squared_diff, Floor_mod, Pow, - Logical_AND, Logical_OR, Logical_XOR, - Less, Less_equal, Greater, Greater_equal, Equal, Not_equal - }; - - opType op; - - std::string scales; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -template -void ref_eltwise(const std::vector> &src, InferenceEngine::TBlob &dst, eltwise_test_params prm) { - std::vector scales; - if (prm.scales != "") { - std::istringstream stream(prm.scales); - stream.imbue(std::locale("C")); - std::string str; - while (getline(stream, str, ',')) { - float val = InferenceEngine::CNNLayer::ie_parse_float(str); - scales.push_back(val); - } - } else { - for (int i = 0; i < src.size(); i++) { - scales.push_back(1.0f); - } - } - - data_t *dst_data = dst.data(); - - const data_t *src_data = src[0].readOnly(); - auto& dims = dst.getTensorDesc().getDims(); - auto& dims0 = src[0].getTensorDesc().getDims(); - - int offset_in[5] = {1, 1, 1, 1, 1}; - int offset_out[5] = {1, 1, 1, 1, 1}; - - for (int i = 0; i < dims0.size(); i++) - offset_in[5 - dims0.size() + i] = dims0[i]; - for (int i = 0; i < dims.size(); i++) - offset_out[5 - dims.size() + i] = dims[i]; - - unsigned long j = 0, k = 0; - - for (int i0 = 0; i0 < offset_out[0]; i0++) { - if (i0 > offset_in[0] - 1) { - k -= offset_in[1]*offset_in[2]*offset_in[3]*offset_in[4]; - } - for (int i1 = 0; i1 < offset_out[1]; i1++) { - if (i1 > offset_in[1] - 1) { - k -= offset_in[2]*offset_in[3]*offset_in[4]; - } - for (int i2 = 0; i2 < offset_out[2]; i2++) { - if (i2 > offset_in[2] - 1) { - k -= offset_in[3]*offset_in[4]; - } - for (int i3 = 0; i3 < offset_out[3]; i3++) { - if (i3 > offset_in[3] - 1) { - k -= offset_in[4]; - } - for (int i4 = 0; i4 < offset_out[4]; i4++) { - if (i4 > offset_in[4] - 1) { - k -= 1; - } - if (prm.op == eltwise_test_params::Sum) { - dst_data[j++] = scales[0] * src_data[k++]; - } else { - dst_data[j++] = src_data[k++]; - } - } - } - } - } - } - - for (int n = 1; n < src.size(); n++) { - j = 0; - k = 0; - src_data = src[n].readOnly(); - auto& dims1 = src[n].getTensorDesc().getDims(); - int offset_in1[5] = {1, 1, 1, 1, 1}; - for (int i = 0; i < dims1.size(); i++) - offset_in1[5 - dims1.size() + i] = dims1[i]; - - for (int i0 = 0; i0 < offset_out[0]; i0++) { - if (i0 > offset_in1[0] - 1) { - k -= offset_in1[1]*offset_in1[2]*offset_in1[3]*offset_in1[4]; - } - for (int i1 = 0; i1 < offset_out[1]; i1++) { - if (i1 > offset_in1[1] - 1) { - k -= offset_in1[2]*offset_in1[3]*offset_in1[4]; - } - for (int i2 = 0; i2 < offset_out[2]; i2++) { - if (i2 > offset_in1[2] - 1) { - k -= offset_in1[3]*offset_in1[4]; - } - for (int i3 = 0; i3 < offset_out[3]; i3++) { - if (i3 > offset_in1[3] - 1) { - k -= offset_in1[4]; - } - for (int i4 = 0; i4 < offset_out[4]; i4++, j++, k++) { - if (i4 > offset_in1[4] - 1) { - k -= 1; - } - switch (prm.op) { - case eltwise_test_params::Sum: - dst_data[j] += scales[n] * src_data[k]; - break; - case eltwise_test_params::Sub: - dst_data[j] = dst_data[j] - src_data[k]; - break; - case eltwise_test_params::Min: - dst_data[j] = (std::min)(dst_data[j], src_data[k]); - break; - case eltwise_test_params::Max: - dst_data[j] = (std::max)(dst_data[j], src_data[k]); - break; - case eltwise_test_params::Prod: - dst_data[j] = dst_data[j] * src_data[k]; - break; - case eltwise_test_params::Div: - dst_data[j] = dst_data[j] / 
src_data[k]; - break; - case eltwise_test_params::Squared_diff: - dst_data[j] = (dst_data[j] - src_data[k]) * (dst_data[j] - src_data[k]); - break; - case eltwise_test_params::Logical_OR: - dst_data[j] = dst_data[j] || src_data[k]; - break; - case eltwise_test_params::Logical_AND: - dst_data[j] = dst_data[j] && src_data[k]; - break; - case eltwise_test_params::Logical_XOR: - dst_data[j] = (dst_data[j] || src_data[k]) - (dst_data[j] && src_data[k]); - break; - case eltwise_test_params::Less: - dst_data[j] = dst_data[j] < src_data[k]; - break; - case eltwise_test_params::Less_equal: - dst_data[j] = dst_data[j] <= src_data[k]; - break; - case eltwise_test_params::Greater: - dst_data[j] = dst_data[j] > src_data[k]; - break; - case eltwise_test_params::Greater_equal: - dst_data[j] = dst_data[j] >= src_data[k]; - break; - case eltwise_test_params::Equal: - dst_data[j] = dst_data[j] == src_data[k]; - break; - case eltwise_test_params::Not_equal: - dst_data[j] = dst_data[j] != src_data[k]; - break; - case eltwise_test_params::Pow: - dst_data[j] = std::pow(dst_data[j], src_data[k]); - break; - case eltwise_test_params::Floor_mod: - dst_data[j] = dst_data[j] - dst_data[j] / src_data[k] * src_data[k]; - break; - } - } - } - } - } - } - } -} - -std::string select_op(eltwise_test_params::opType op) { - std::string str_op; - switch(op){ - case eltwise_test_params::opType::Sum: - str_op = "sum"; - break; - case eltwise_test_params::opType::Prod: - str_op = "prod"; - break; - case eltwise_test_params::opType::Max: - str_op = "max"; - break; - case eltwise_test_params::opType::Min: - str_op = "min"; - break; - case eltwise_test_params::opType::Sub: - str_op = "sub"; - break; - case eltwise_test_params::opType::Div: - str_op = "div"; - break; - case eltwise_test_params::opType::Squared_diff: - str_op = "squared_diff"; - break; - case eltwise_test_params::opType::Logical_AND: - str_op = "logical_and"; - break; - case eltwise_test_params::opType::Logical_OR: - str_op = "logical_or"; - break; - case eltwise_test_params::opType::Logical_XOR: - str_op = "logical_xor"; - break; - case eltwise_test_params::opType ::Less: - str_op = "less"; - break; - case eltwise_test_params::opType::Less_equal: - str_op = "less_equal"; - break; - case eltwise_test_params::opType::Greater: - str_op = "greater"; - break; - case eltwise_test_params::opType::Greater_equal: - str_op = "greater_equal"; - break; - case eltwise_test_params::opType::Equal: - str_op = "equal"; - break; - case eltwise_test_params::opType::Not_equal: - str_op = "not_equal"; - break; - case eltwise_test_params::opType::Pow: - str_op = "pow"; - break; - case eltwise_test_params::opType::Floor_mod: - str_op = "floor_mod"; - break; - } - return str_op; -} - -struct precisions_test_2params { - struct { - std::string precision0; - std::string precision1; - } in; - - size_t num_nodes; - size_t num_reorder_nodes; -}; - -class MKLDNNGraphEltwise2PrecisionsTests : public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - 1 - 2 - 3 - - - - - - - 1 - 2 - 3 - - - - - - - - 1 - 2 - 3 - - - 1 - 2 - 3 - - - - - 1 - 2 - 3 - - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(precisions_test_2params p) { - std::string model = model_t; - - REPLACE_WITH_STR(model, "_IP0_", p.in.precision0); - REPLACE_WITH_STR(model, "_IP1_", p.in.precision1); - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - precisions_test_2params p = ::testing::WithParamInterface::GetParam(); - 
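// A minimal standalone sketch of the broadcasting scheme ref_eltwise relies on
// above: every shape is right-aligned into a fixed 5-D extent array, and the
// flat input index k is rewound whenever an output coordinate steps past the
// input's extent, so a size-1 input axis is reused across the whole output
// axis. pad5/broadcast_add are illustrative names, not helpers from this file.
#include <array>
#include <cstddef>
#include <vector>

static std::array<size_t, 5> pad5(const std::vector<size_t> &dims) {
    std::array<size_t, 5> ext{1, 1, 1, 1, 1};
    for (size_t i = 0; i < dims.size(); i++)
        ext[5 - dims.size() + i] = dims[i];  // right-align, pad leading axes with 1
    return ext;
}

static void broadcast_add(const std::vector<float> &in, const std::array<size_t, 5> &ie,
                          std::vector<float> &out, const std::array<size_t, 5> &oe) {
    size_t j = 0, k = 0;  // flat output / input indices
    for (size_t i0 = 0; i0 < oe[0]; i0++) {
        if (i0 >= ie[0]) k -= ie[1] * ie[2] * ie[3] * ie[4];  // rewind broadcast axis
        for (size_t i1 = 0; i1 < oe[1]; i1++) {
            if (i1 >= ie[1]) k -= ie[2] * ie[3] * ie[4];
            for (size_t i2 = 0; i2 < oe[2]; i2++) {
                if (i2 >= ie[2]) k -= ie[3] * ie[4];
                for (size_t i3 = 0; i3 < oe[3]; i3++) {
                    if (i3 >= ie[3]) k -= ie[4];
                    for (size_t i4 = 0; i4 < oe[4]; i4++) {
                        if (i4 >= ie[4]) k -= 1;  // size-1 axis: reuse the last element
                        out[j++] += in[k++];
                    }
                }
            }
        }
    }
}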
std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - ASSERT_NO_THROW(graph.CreateGraph(network)); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), p.num_nodes); - - size_t actual_reorder_nodes = 0; - for (size_t i = 0; i < nodes.size(); i++) { - if(nodes[i].get()->getType() == MKLDNNPlugin::Type::Reorder && - FIND_STR(nodes[i].get()->getName(), "_U8_FP32_")) - actual_reorder_nodes ++; - } - ASSERT_EQ(actual_reorder_nodes, p.num_reorder_nodes); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphEltwise2PrecisionsTests, TestsEltwise2Precisions) {} - -INSTANTIATE_TEST_CASE_P( - TestsEltwise2Precisions, MKLDNNGraphEltwise2PrecisionsTests, - ::testing::Values( - precisions_test_2params{ {"FP32", "FP32"}, 4, 0 }, - precisions_test_2params{ { "U8", "FP32"}, 4, 0 }, - precisions_test_2params{ {"FP32", "U8"}, 4, 0 }, - precisions_test_2params{ { "U8", "U8"}, 4, 0 } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp deleted file mode 100644 index 871e8f402d078e..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct fc_test_params { - // Formats: NCHW, NCDHW - vector in_dims; - - size_t out_c; - - size_t num_prim_desc; - - int selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - - -template -void ref_innerproduct(const InferenceEngine::TBlob &src, const data_t *weights, const size_t weightsSize, - InferenceEngine::TBlob &dst, fc_test_params prm) { - auto dims_size = src.getTensorDesc().getDims().size(); - - size_t IB = src.getTensorDesc().getDims()[0]; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t ID = dims_size == 5 ? 
src.getTensorDesc().getDims()[dims_size - 3] : 1u; - size_t IH = src.getTensorDesc().getDims()[dims_size - 2]; - size_t IW = src.getTensorDesc().getDims()[dims_size - 1]; - - size_t OC = prm.out_c; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + IW*IH*ID*IC*OC; - data_t *dst_data = dst.data(); - - IE_ASSERT( IW*IH*ID*IC*OC + OC == weightsSize ); - IE_ASSERT( OC == dst.getTensorDesc().getDims()[1] ); - - for (size_t n = 0; n < IB; n++) { - for (size_t oc = 0; oc < OC; oc++) { - dst_data[n*OC + oc] = bias_data[oc]; - for (size_t ic = 0; ic < IC; ic++) { - for (size_t kd = 0; kd < ID; kd++) { - for (size_t kh = 0; kh < IH; kh++) { - for (size_t kw = 0; kw < IW; kw++) { - size_t iidx = n * IC * ID * IH * IW - + ic * ID * IH * IW - + kd * IH * IW - + kh * IW - + kw; - size_t widx = oc * IC * ID * IH * IW - + ic * ID * IH * IW - + kd * IH * IW - + kh * IW - + kw; - - dst_data[n*OC + oc] += src_data[iidx] * weights_data[widx]; - } - } - } - } - } - } -} - -class MKLDNNGraphFullyConnectedTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - - - - - - - __SRC_DIMS__ - - - - - _IN_ - _OC_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(fc_test_params p) { - std::string model = model_t; - std::string s_dims; - for (auto& dim : p.in_dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - - size_t w_data_size = p.out_c * sizeof(float); - for (int i = 1; i < p.in_dims.size(); i++) - w_data_size *= p.in_dims[i]; - size_t b_data_size = p.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - REPLACE_WITH_NUM(model, "_S2_", b_data_size); - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - fc_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t weights_size = p.out_c; - for (int i = 1; i < p.in_dims.size(); i++) { - weights_size *= p.in_dims[i]; - } - weights_size = (weights_size + p.out_c) * sizeof(float); - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {weights_size}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::FullyConnected) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, 
nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = p.in_dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in_dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_innerproduct(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p); - - compare(*output, dst_ref, 0.9f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphFullyConnectedTests, TestsFullyConnected) {} - - -INSTANTIATE_TEST_CASE_P( - TestsFullyConnected, MKLDNNGraphFullyConnectedTests, - ::testing::Values( - fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - //5D - fc_test_params{{1, 4, 32, 32, 32}, 10, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 3, 32, 32, 32}, 96, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}})); - -class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTests { - virtual void SetUp() { - try { - TestsCommon::SetUp(); - fc_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in_dims[0]; - if (MB < 2) - MB = 2; - - size_t weights_size = p.out_c; - for (int i = 1; i < p.in_dims.size(); i++) { - weights_size *= p.in_dims[i]; - } - weights_size = (weights_size + p.out_c) * sizeof(float); - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {weights_size}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - 
ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = p.in_dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in_dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkFC = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::FullyConnected; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkFC); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkFC); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchFullyConnectedTests, TestsDynBatchFullyConnected) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchFullyConnected, MKLDNNGraphDynBatchFullyConnectedTests, - ::testing::Values( - fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::gemm }, - fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp deleted file mode 100644 index 7ec1753214df0a..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct gemm_test_params { - struct { - size_t MB1_A; - size_t MB2_A; - size_t MB1_B; - size_t MB2_B; - size_t MB1_C; - size_t MB2_C; - size_t MB1_D; - size_t MB2_D; - } batches; - - size_t M; - size_t N; - size_t K; - - float alpha; - float beta; - - bool transposeA; - bool transposeB; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -template -void ref_gemm(const std::vector> &src, InferenceEngine::TBlob 
&dst, - gemm_test_params prm) { - const data_t *src0_data = src[0].readOnly(); - const data_t *src1_data = src[1].readOnly(); - const data_t *src2_data = src.size() == 3 ? src[2].readOnly() : dst.readOnly(); - data_t *dst_data = dst.data(); - - size_t MB1 = prm.batches.MB1_D; - size_t MB2 = prm.batches.MB2_D; - size_t M = prm.M; - size_t N = prm.N; - size_t K = prm.K; - - for (int mb1 = 0; mb1 < MB1; mb1++) { - const data_t *a_data = src0_data; - const data_t *b_data = src1_data; - const data_t *c_data = src2_data; - data_t *d_data = dst_data; - - for (int mb2 = 0; mb2 < MB2; mb2++) { - for (int i = 0; i < M; i++) { - for (int j = 0; j < N; j++) { - d_data[i * N + j] = src.size() == 3 ? prm.beta * c_data[i * N + j] : 0; - - for (int k = 0; k < K; k++) { - size_t src0_off = prm.transposeA ? k * M + i : i * K + k; - size_t src1_off = prm.transposeB ? j * K + k : k * N + j; - d_data[i * N + j] += prm.alpha * a_data[src0_off] * b_data[src1_off]; - } - } - } - a_data += prm.batches.MB2_A == MB2 ? M*K : 0; - b_data += prm.batches.MB2_B == MB2 ? K*N : 0; - c_data += prm.batches.MB2_C == MB2 ? M*N : 0; - d_data += M*N; - } - - src0_data += prm.batches.MB1_A == MB1 ? prm.batches.MB2_A*M*K : 0; - src1_data += prm.batches.MB1_B == MB1 ? prm.batches.MB2_B*K*N : 0; - src2_data += prm.batches.MB1_C == MB1 ? prm.batches.MB2_C*M*N : 0; - dst_data += prm.batches.MB2_D*M*N; - } -} - -class MKLDNNGraphGemmTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _MB1_A_ - _MB2_A_ - _M_A_ - _N_A_ - - - - - - - _MB1_B_ - _MB2_B_ - _M_B_ - _N_B_ - - - - - - - _MB1_C_ - _MB2_C_ - _M_ - _N_ - - - - - - - - _MB1_A_ - _MB2_A_ - _M_A_ - _N_A_ - - - _MB1_B_ - _MB2_B_ - _M_B_ - _N_B_ - - - _MB1_C_ - _MB2_C_ - _M_ - _N_ - - - - - _MB1_D_ - _MB2_D_ - _M_ - _N_ - - - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(gemm_test_params p) { - std::string model = model_t; - std::string op; - - REPLACE_WITH_NUM(model, "_MB1_A_", p.batches.MB1_A); - REPLACE_WITH_NUM(model, "_MB2_A_", p.batches.MB2_A); - REPLACE_WITH_NUM(model, "_MB1_B_", p.batches.MB1_B); - REPLACE_WITH_NUM(model, "_MB2_B_", p.batches.MB2_B); - REPLACE_WITH_NUM(model, "_MB1_C_", p.batches.MB1_C); - REPLACE_WITH_NUM(model, "_MB2_C_", p.batches.MB2_C); - REPLACE_WITH_NUM(model, "_MB1_D_", p.batches.MB1_D); - REPLACE_WITH_NUM(model, "_MB2_D_", p.batches.MB2_D); - - auto m_A = p.transposeA ? p.K : p.M; - auto n_A = p.transposeA ? p.M : p.K; - auto m_B = p.transposeB ? p.N : p.K; - auto n_B = p.transposeB ? 
p.K : p.N; - - REPLACE_WITH_NUM(model, "_M_A_", m_A); - REPLACE_WITH_NUM(model, "_N_A_", n_A); - REPLACE_WITH_NUM(model, "_M_B_", m_B); - REPLACE_WITH_NUM(model, "_N_B_", n_B); - - REPLACE_WITH_NUM(model, "_M_", p.M); - REPLACE_WITH_NUM(model, "_N_", p.N); - REPLACE_WITH_NUM(model, "_K_", p.K); - - REPLACE_WITH_NUM(model, "_A_", p.alpha); - REPLACE_WITH_NUM(model, "_B_", p.beta); - REPLACE_WITH_NUM(model, "_TA_", p.transposeA); - REPLACE_WITH_NUM(model, "_TB_", p.transposeB); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gemm_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Gemm) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - - auto m_A = p.transposeA ? p.K : p.M; - auto n_A = p.transposeA ? p.M : p.K; - auto m_B = p.transposeB ? p.N : p.K; - auto n_B = p.transposeB ? p.K : p.N; - - InferenceEngine::SizeVector dims_src1 = {p.batches.MB1_A, p.batches.MB2_A, m_A, n_A}; - InferenceEngine::SizeVector dims_src2 = {p.batches.MB1_B, p.batches.MB2_B, m_B, n_B}; - InferenceEngine::SizeVector dims_src3 = {p.batches.MB1_C, p.batches.MB2_C, p.M, p.N}; - InferenceEngine::SizeVector dims_dst = {p.batches.MB1_D, p.batches.MB2_D, p.M, p.N}; - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - InferenceEngine::TBlob* srcPtr1 = dynamic_cast*>(src1.get()); - if (srcPtr1 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - InferenceEngine::TBlob* srcPtr2 = dynamic_cast*>(src2.get()); - if (srcPtr2 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src3, InferenceEngine::NCHW}); - src3->allocate(); - InferenceEngine::TBlob* srcPtr3 = dynamic_cast*>(src3.get()); - if (srcPtr3 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src3->buffer(), src3->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - srcs.insert(std::pair("in3", src3)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - 
dst_ref.allocate(); - - std::vector> src_vec = {*srcPtr1, *srcPtr2, *srcPtr3}; - - ref_gemm(src_vec, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphGemmTests, TestsGemm) {} - -INSTANTIATE_TEST_CASE_P( - TestsGemm, MKLDNNGraphGemmTests, - ::testing::Values( - gemm_test_params{{2, 1, 2, 1, 2, 1, 2, 1}, 3, 3, 2, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 8, 5, 4, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 16, 10, 12, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 11, 10, 20, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 13, 2, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, 
impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 15, 10, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - } }, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 6, 7, 2, 0, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 6, 7, 0, 2, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 2, 3, 2, 3, 2, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 2, 3, 1, 3, 2, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{2, 3, 1, 3, 1, 3, 2, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{5, 3, 5, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{5, 3, 5, 1, 5, 1, 5, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{5, 1, 5, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 1, 5, 3, 5, 3, 5, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 1, 1, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{5, 4, 1, 1, 1, 1, 5, 4}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any} - )); - -class 
MKLDNNGraphDynBatchGemmTests: public MKLDNNGraphGemmTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gemm_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.batches.MB1_D; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - auto m_A = p.transposeA ? p.K : p.M; - auto n_A = p.transposeA ? p.M : p.K; - auto m_B = p.transposeB ? p.N : p.K; - auto n_B = p.transposeB ? p.K : p.N; - - InferenceEngine::SizeVector dims_src1 = {MB, p.batches.MB2_A, m_A, n_A}; - InferenceEngine::SizeVector dims_src2 = {MB, p.batches.MB2_B, m_B, n_B}; - InferenceEngine::SizeVector dims_src3 = {MB, p.batches.MB2_C, p.M, p.N}; - InferenceEngine::SizeVector dims_dst = {MB, p.batches.MB2_D, p.M, p.N}; - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - InferenceEngine::TBlob* srcPtr1 = dynamic_cast*>(src1.get()); - if (srcPtr1 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - InferenceEngine::TBlob* srcPtr2 = dynamic_cast*>(src2.get()); - if (srcPtr2 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src3, InferenceEngine::NCHW}); - src3->allocate(); - InferenceEngine::TBlob* srcPtr3 = dynamic_cast*>(src3.get()); - if (srcPtr3 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src3->buffer(), src3->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - srcs.insert(std::pair("in3", src3)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto check = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Gemm; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, check); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, check); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchGemmTests, TestsDynBatchGemm) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchGemm, MKLDNNGraphDynBatchGemmTests, - ::testing::Values( - gemm_test_params{{1, 3, 1, 3, 1, 3, 1, 3}, 3, 3, 3, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - 
gemm_test_params{{1, 3, 1, 1, 1, 3, 1, 3}, 16, 15, 12, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any} -)); - -class MKLDNNGraphSingleBatchDimGemmTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _MB_A_ - _M_A_ - _N_A_ - - - - - - - _MB_B_ - _M_B_ - _N_B_ - - - - - - - - _MB_A_ - _M_A_ - _N_A_ - - - _MB_B_ - _M_B_ - _N_B_ - - - - - _MB_D_ - _M_ - _N_ - - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(gemm_test_params p) { - std::string model = model_t; - std::string op; - - REPLACE_WITH_NUM(model, "_MB_A_", p.batches.MB2_A); - REPLACE_WITH_NUM(model, "_MB_B_", p.batches.MB2_B); - REPLACE_WITH_NUM(model, "_MB_D_", p.batches.MB2_D); - - auto m_A = p.transposeA ? p.K : p.M; - auto n_A = p.transposeA ? p.M : p.K; - auto m_B = p.transposeB ? p.N : p.K; - auto n_B = p.transposeB ? p.K : p.N; - - REPLACE_WITH_NUM(model, "_M_A_", m_A); - REPLACE_WITH_NUM(model, "_N_A_", n_A); - REPLACE_WITH_NUM(model, "_M_B_", m_B); - REPLACE_WITH_NUM(model, "_N_B_", n_B); - - REPLACE_WITH_NUM(model, "_M_", p.M); - REPLACE_WITH_NUM(model, "_N_", p.N); - REPLACE_WITH_NUM(model, "_K_", p.K); - - REPLACE_WITH_NUM(model, "_A_", p.alpha); - REPLACE_WITH_NUM(model, "_B_", p.beta); - REPLACE_WITH_NUM(model, "_TA_", p.transposeA); - REPLACE_WITH_NUM(model, "_TB_", p.transposeB); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - gemm_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Gemm) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - - auto m_A = p.transposeA ? p.K : p.M; - auto n_A = p.transposeA ? p.M : p.K; - auto m_B = p.transposeB ? p.N : p.K; - auto n_B = p.transposeB ? 
p.K : p.N; - - InferenceEngine::SizeVector dims_src1 = {p.batches.MB2_A, m_A, n_A}; - InferenceEngine::SizeVector dims_src2 = {p.batches.MB2_B, m_B, n_B}; - InferenceEngine::SizeVector dims_dst = {p.batches.MB2_D, p.M, p.N}; - - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::CHW}); - src1->allocate(); - InferenceEngine::TBlob* srcPtr1 = dynamic_cast*>(src1.get()); - if (srcPtr1 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::CHW}); - src2->allocate(); - InferenceEngine::TBlob* srcPtr2 = dynamic_cast*>(src2.get()); - if (srcPtr2 == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src1)); - srcs.insert(std::pair("in2", src2)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - std::vector> src_vec = {*srcPtr1, *srcPtr2}; - - ref_gemm(src_vec, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphSingleBatchDimGemmTests, TestsGemm) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsGemm, MKLDNNGraphSingleBatchDimGemmTests, - ::testing::Values( - gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}, - gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp deleted file mode 100644 index 
14df8044d0fc87..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct input_test_params { - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -class MKLDNNGraphInputTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - 1 - 3 - 3 - 3 - - - - - - - 1 - 3 - 3 - 3 - - - - - - - 1 - 3 - - - - - - - - 1 - 3 - 3 - 3 - - - - - 1 - 3 - 3 - 3 - - - - - - - - 1 - 3 - 3 - 3 - - - - - 1 - 3 - 3 - 3 - - - - - - - - 1 - 3 - - - - - 1 - 3 - - - - - - - - - - -)V0G0N"; - - std::string getModel(input_test_params p) { - return model_t; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - input_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Input || nodes[i]->getType() == MKLDNNPlugin::Output) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - size_t count = (nodes[i]->getType() == MKLDNNPlugin::Input) ? 0 : 2; - if (nodes[i]->getName() == "in3") { - count = 1; - } - if (nodes[i]->getName() == "out_power3") { - count = 3; - } - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(count)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphInputTests, TestsInput) {} - - -INSTANTIATE_TEST_CASE_P( - TestsInput, MKLDNNGraphInputTests, - ::testing::Values( - input_test_params{1, MKLDNNPlugin::impl_desc_type::unknown, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(0, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(0, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(0, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - 
ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(0, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout()); - } - } } - )); - -class MKLDNNGraphConstInputTests: public TestsCommon { - std::string model_t = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - - - - 1 - 3 - 1 - 2 - - - - - - - - - - - 1 - 3 - 2 - 2 - - - 1 - 3 - 1 - 2 - - - - - 1 - 3 - 3 - 2 - - - - - - - - - -)V0G0N"; - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - std::string model = model_t; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {72}, InferenceEngine::C }); - weights->allocate(); - float * data = weights->buffer(); - - std::cout << weights->size() << std::endl; - - InferenceEngine::SizeVector dims_src1 = {1, 3, 2, 2}; - InferenceEngine::SizeVector dims_src2 = {1, 3, 1, 2}; - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src1, InferenceEngine::NCHW}); - src1->allocate(); - float *srcData = src1->buffer(); - for (size_t i = 0; i < 12; i++, data++, srcData++) { - *data = 1; - *srcData = 1; - } - - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src2, InferenceEngine::NCHW}); - src2->allocate(); - srcData = src2->buffer(); - for (size_t i = 0; i < 6; i++, data++, srcData++) { - *data = 2; - *srcData = 2; - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - ASSERT_LE(3, nodes.size()); - - InferenceEngine::BlobMap srcs; - srcs["in1"] = src1; - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - // Compare - float *src1_ptr = src1->buffer(); - size_t src1_size = src1->size(); - float *src2_ptr = src2->buffer(); - size_t src2_size = src2->size(); - float *dst_ptr = output->buffer(); - size_t dst_size = output->size(); - - int len1 = 1, len2 = 1, cycles; - for (int dim = 2; dim < output->getTensorDesc().getDims().size(); dim++) { - len1 *= src1->getTensorDesc().getDims()[dim]; - len2 *= src2->getTensorDesc().getDims()[dim]; - } - cycles = 2; - - int index1 = 0, index2 = 0, index = 0; - for (int cycle = 0; cycle < cycles; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (src1_ptr[index1] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (src2_ptr[index2] != dst_ptr[index]) - { - FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_F(MKLDNNGraphConstInputTests, TestsConstInput) {} - - -struct input_layout_test_params { - InferenceEngine::Layout layout; - std::vector reference; - MKLDNNPlugin::impl_desc_type selectedType; - std::vector> comp; -}; - 
-class MKLDNNGraphInputLayoutTest : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - - - - - - - - - - - - -)V0G0N"; - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - input_layout_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = model_t; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - network.getInputsInfo().begin()->second->setLayout(p.layout); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, { 1, 3, 2, 2 }, p.layout); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data_dbgval(src->buffer(), src->size()); - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - std::pair item = *out.begin(); - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - InferenceEngine::BlobMap outputBlobs; - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - // Check results - if (memcmp((*output).data(), &p.reference[0], output->byteSize()) != 0) - FAIL() << "Wrong result with compare reference!"; - } - catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphInputLayoutTest, TestsLayoutInput) {} - -INSTANTIATE_TEST_CASE_P( - TestsLayoutInput, MKLDNNGraphInputLayoutTest, - ::testing::Values( - input_layout_test_params{ InferenceEngine::NCHW, { 0,1,2,3,3,4,5,6,6,7,8,9 }, MKLDNNPlugin::impl_desc_type::unknown } -// input_layout_test_params{ InferenceEngine::NHWC, { 0,0,0,3,3,3,6,6,6,9,9,9 }, MKLDNNPlugin::impl_desc_type::unknown } -)); - diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp deleted file mode 100644 index 6612f88758542c..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct lrn_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - size_t local_size; - float alpha; - float beta; - size_t k; - - size_t num_prim_desc; - - int selectedType; - - std::vector> comp; -}; - -template -void ref_lrn(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, lrn_test_params prm) -{ - size_t IW = prm.in.w; - size_t IH = prm.in.h; - size_t IC = prm.in.c; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (uint32_t c = 0; c < IC; c++) { - for (uint32_t h = 0; h < IH; h++) { - for (uint32_t w = 0; w < IW; w++) { - uint32_t oidx = c * IH * IW - + h * IW + w; - - uint32_t sz = prm.local_size; - int32_t c_start = c - sz / 2; - int32_t c_end = c_start + sz; - if (c_start < 0) c_start = 0; - if (c_end > (int32_t)IC) c_end = IC; - 
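// Across-channel LRN: sum the squared activations in a local_size-wide channel
// window centered on c (clamped at the tensor borders), then scale the center
// value by (k + alpha * sum / local_size)^(-beta). Note the powf call below
// hardcodes 1. for the k term instead of using prm.k.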
data_t sum = 0.0; - for (int32_t c1 = c_start; c1 < c_end; c1++) { - uint32_t idx = c1 * IH * IW + h * IW + w; - data_t s = src_data[idx]; - - sum += s * s; - } - - data_t norm_coef = powf(1. + prm.alpha * sum / sz, -prm.beta); - dst_data[oidx] = norm_coef * src_data[oidx]; - } - } - } -} - -class MKLDNNGraphLrnTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(lrn_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_LS_", p.local_size); - REPLACE_WITH_NUM(model, "_A_", p.alpha); - REPLACE_WITH_NUM(model, "_B_", p.beta); - REPLACE_WITH_NUM(model, "_K_", p.k); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - lrn_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Lrn) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, - nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - if (nodes.size() != 3 && nodes.size() != 5) - FAIL() << "Nodes amount should be 3 or 5 (in reorder case)"; - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_lrn(*srcPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphLrnTests, TestsLrn) {} - -INSTANTIATE_TEST_CASE_P( - TestsLrn, MKLDNNGraphLrnTests, - ::testing::Values( - lrn_test_params{ - {1, 3, 228, 228}, - 5, 0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::ref_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref_any, impl.getImplementationType()); - ASSERT_EQ(1, 
impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref_any, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref_any, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }}, - lrn_test_params{{1, 16, 228, 228}, 5, 0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::jit})); - -class MKLDNNGraphDynBatchLrnTests: public MKLDNNGraphLrnTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - lrn_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in.n; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkLRN = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Lrn; - }; - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkLRN); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkLRN); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchLrnTests, TestsDynBatchLrn) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchLrn, MKLDNNGraphDynBatchLrnTests, - ::testing::Values( - lrn_test_params{{1, 3, 228, 228}, 5, 
0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::ref_any}, - lrn_test_params{{1, 16, 228, 228}, 5, 0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::jit})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp deleted file mode 100644 index f6ea2b49901a09..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; -using namespace InferenceEngine; -using namespace Extensions; -using namespace ::Cpu; - -namespace { - -OV_CC_DOMAINS(GraphPermuteTests); - -} // namespace - -struct permute_test_params { - Layout layout_in, layout_out; - Precision precision; - size_t num_prim_desc; - - SizeVector dims; - SizeVector permute_order; - SizeVector block_dims_in; - SizeVector block_order_in; - SizeVector block_dims_out; - SizeVector block_order_out; -}; - -class FakeLayerImpl_permute: public Cpu::ExtLayerBase, - public WithParamInterface { -public: - explicit FakeLayerImpl_permute(const CNNLayer* layer) { - try { - layout = static_cast(layer->GetParamAsUInt("layout")); - block_dims = layer->GetParamAsInts("block_dims"); - order = layer->GetParamAsInts("order"); - addConfig(layer); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - } - } - - Layout layout; - std::vector block_dims; - std::vector order; - - void addConfig(const CNNLayer* layer) { - LayerConfig config; - - // Fill tensor parameters into config - auto fill_port = [&] (std::vector& port, const DataPtr& data) { - if (!data) IE_THROW() << "Cannot get input data!"; - - DataConfig dataConfig; - dataConfig.inPlace = 0; - dataConfig.constant = false; - - const TensorDesc& data_desc = data->getTensorDesc(); - const SizeVector& data_dims = data_desc.getDims(); - - InferenceEngine::Precision precision = data_desc.getPrecision(); - if (block_dims.empty()) { - dataConfig.desc = TensorDesc(precision, data_dims, layout); - } else { - SizeVector tmp_block_dims(block_dims.size()); - SizeVector tmp_order(order.size()); - for (size_t i = 0; i < order.size(); i++) { - tmp_block_dims[i] = block_dims[i]; - tmp_order[i] = order[i]; - } - dataConfig.desc = TensorDesc(precision, data_dims, {tmp_block_dims, tmp_order}); - } - - port.push_back(dataConfig); - }; - - fill_port(config.inConfs, layer->insData[0].lock()); - fill_port(config.outConfs, layer->outData[0]); - config.outConfs[0].desc.setPrecision(config.inConfs[0].desc.getPrecision()); - confs.push_back(config); - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, - ResponseDesc *resp) noexcept override { - return OK; - } -}; - -static std::string precToStr (Precision prec) { - return prec == Precision::I8 ? 
"I8" : "FP32"; -} - -template -static void fill_int_data(data_t *data, size_t size) { - for (size_t i = 0 ; i < size; i++) { - data[i] = i * 13 % 21 - 10; - } -} - -template -static void ref_permute(const TBlob &src, TBlob &dst, permute_test_params prm) { - const data_t *src_data = src.readOnly(); - float *dst_data = dst.data(); - - SizeVector orderedDims; - for (auto ord : prm.permute_order) { - orderedDims.push_back(src.getTensorDesc().getDims()[ord]); - } - TensorDesc desc(Precision::FP32, src.getTensorDesc().getDims(), {orderedDims, prm.permute_order}); - - for (int i=0; i < src.size(); i++) { - dst_data[desc.offset(i)] = src_data[src.getTensorDesc().offset(i)]; - } -} - -typedef std::tuple test_params_t; - -template -class MKLDNNGraphPermuteTests: public TestsCommon, -public WithParamInterface { - std::string model_t = (std::string) R"V0G0N( - - - - - - __DIMS__ - - - - - - - - __DIMS__ - - - - - __DIMS__ - - - - - - - - __DIMS__ - - - - - __DST_DIMS__ - - - - - - - - __DST_DIMS__ - - - - - __DST_DIMS__ - - - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(permute_test_params p) { - std::string model = model_t; - std::string dims; - std::string dst_dims; - for (auto& dim : p.dims) { - dims += ""; - dims += std::to_string(dim) + "\n"; - } - - std::string order; - for (auto& ord : p.permute_order) { - if (!order.empty()) - order += ","; - order += std::to_string(ord); - dst_dims += ""; - dst_dims += std::to_string(p.dims[ord]) + "\n"; - } - - REPLACE_WITH_STR(model, "__DIMS__", dims); - REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims); - REPLACE_WITH_STR(model, "_PERMUTE_ORDER_", order); - REPLACE_WITH_STR(model, "_PREC_", precToStr(p.precision)); - REPLACE_WITH_NUM(model, "_LAYOUT_IN_", (unsigned int)p.layout_in); - REPLACE_WITH_NUM(model, "_LAYOUT_OUT_", (unsigned int)p.layout_out); - - REPLACE_WITH_NUM_VECTOR(model, "_BLOCK_DIMS_IN_", p.block_dims_in); - REPLACE_WITH_NUM_VECTOR(model, "_BLOCK_ORDER_IN_", p.block_order_in); - REPLACE_WITH_NUM_VECTOR(model, "_BLOCK_DIMS_OUT_", p.block_dims_out); - REPLACE_WITH_NUM_VECTOR(model, "_BLOCK_ORDER_OUT_", p.block_order_out); - - return model; - } - - virtual permute_test_params initialize_permute_test_params() { - auto test_params = GetParam(); - permute_test_params p; - - p.layout_in = std::get<0>(test_params); - p.layout_out = std::get<1>(test_params); - p.precision = std::get<2>(test_params); - p.num_prim_desc = std::get<3>(test_params); - p.dims = std::get<4>(test_params); - p.permute_order = std::get<5>(test_params); - p.block_dims_in = std::get<6>(test_params); - p.block_order_in = std::get<7>(test_params); - p.block_dims_out = std::get<8>(test_params); - p.block_order_out = std::get<9>(test_params); - - return p; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - permute_test_params p = initialize_permute_test_params(); - std::string model = getModel(p); - - Core core; - CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - auto manager = std::make_shared(); - { - auto defaultExt = std::make_shared(); - defaultExt->layersFactory.registerNodeIfRequired(GraphPermuteTests, FakeLayer_permute, "FakeLayer_permute", Cpu::ImplFactory); - manager->AddExtension(defaultExt); - } - graph.CreateGraph(network, manager); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Permute) { - ASSERT_EQ(p.num_prim_desc, 
nodes[i]->getSupportedPrimitiveDescriptors().size()); - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - } - } - - Blob::Ptr src = make_shared_blob({p.precision, p.dims, TensorDesc::getLayoutByDims(p.dims)}); - src->allocate(); - if (typeid(src_data_t) == typeid(int8_t)) { - fill_int_data(src->buffer().as(), src->size()); - } else { - fill_data(src->buffer(), src->size()); - } - - auto* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out; - out = network.getOutputsInfo(); - BlobMap outputBlobs; - - auto item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - TensorDesc td(Precision::FP32, p.dims, TensorDesc::getLayoutByDims(p.dims)); - TBlob dst_ref(td); - dst_ref.allocate(); - - ref_permute(*srcPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -using permute_f32 = MKLDNNGraphPermuteTests; -using permute_s8 = MKLDNNGraphPermuteTests; - -TEST_P(permute_f32, TestsPermute) {} -TEST_P(permute_s8, TestsPermute) {} - -#define test_cases_planar_4d(prec) ::testing::Combine( \ - ::testing::Values(Layout::NCHW, Layout::NHWC), \ - ::testing::Values(Layout::NCHW, Layout::NHWC), \ - ::testing::Values(prec), \ - ::testing::Values(2), \ - ::testing::Values(SizeVector({2, 3, 4, 5})), \ - ::testing::Values(SizeVector({0, 1, 2, 3}), SizeVector({0, 2, 3, 1}), \ - SizeVector({0, 2, 1, 3}), SizeVector({0, 1, 3, 2}), \ - SizeVector({1, 0, 2, 3})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})) \ -) - -#define test_cases_planar_5d(prec) ::testing::Combine( \ - ::testing::Values(Layout::NCDHW, Layout::NDHWC), \ - ::testing::Values(Layout::NCDHW, Layout::NDHWC), \ - ::testing::Values(prec), \ - ::testing::Values(2), \ - ::testing::Values(SizeVector({2, 3, 4, 5, 6})), \ - ::testing::Values(SizeVector({0, 1, 2, 3, 4}), SizeVector({0, 4, 2, 1, 3}), \ - SizeVector({0, 2, 4, 3, 1}), SizeVector({0, 3, 2, 4, 1}), \ - SizeVector({0, 3, 1, 4, 2}), SizeVector({1, 0, 2, 3, 4})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})), \ - ::testing::Values(SizeVector({})) \ -) - -#define case_planar_0(prec) test_params_t(Layout::NC, Layout::NC, prec, 1, {20, 3}, {0, 1}, {}, {}, {}, {}) -#define case_planar_1(prec) test_params_t(Layout::CHW, Layout::CHW, prec, 1, {20, 30, 4}, {0, 1, 2}, {}, {}, {}, {}) -#define case_planar_2(prec) test_params_t(Layout::CHW, Layout::CHW, prec, 1, {20, 30, 4}, {0, 2, 1}, {}, {}, {}, {}) -#define case_planar_3(prec) test_params_t(Layout::CHW, Layout::CHW, prec, 1, {2, 12, 9}, {0, 2, 1}, {}, {}, {}, {}) -#define case_planar_4(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 1, {2, 80, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}, {}, {}, {}, {}) -#define case_planar_5(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 1, {2, 8, 30, 3, 4, 5}, {0, 1, 4, 2, 5, 3}, {}, {}, {}, {}) -#define case_planar_6(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 1, {2, 8, 3, 30, 4, 5}, {0, 3, 4, 1, 5, 2}, {}, {}, {}, {}) - -#define case_blocked_0(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 1, 2, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 
1}, {2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}) -#define case_blocked_1(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 3, 1}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {2, 2, 20, 32, 8}, {0, 1, 2, 3, 1}) -#define case_blocked_2(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 1, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {2, 2, 32, 20, 8}, {0, 1, 2, 3, 1}) -#define case_blocked_3(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 1, 3, 2}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {2, 4, 20, 10, 8}, {0, 1, 2, 3, 1}) -#define case_blocked_4(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 3, {10, 24, 4, 5}, {1, 0, 2, 3}, \ -{10, 3, 4, 5, 8}, {0, 1, 2, 3, 1}, {24, 2, 4, 5, 8}, {0, 1, 2, 3, 1}) -#define case_blocked_5(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 5, 10, 20}, {0, 1, 2, 3, 4}, \ -{2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}, {2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}) -#define case_blocked_6(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 5, 10, 20}, {0, 4, 2, 1, 3}, \ -{2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}, {2, 3, 5, 32, 10, 8}, {0, 1, 2, 3, 4, 1}) -#define case_blocked_7(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 5, 10, 20}, {0, 2, 4, 3, 1}, \ -{2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}, {2, 1, 20, 10, 32, 8}, {0, 1, 2, 3, 4, 1}) -#define case_blocked_8(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 5, 10, 20}, {0, 3, 2, 4, 1}, \ -{2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}, {2, 2, 5, 20, 32, 8}, {0, 1, 2, 3, 4, 1}) -#define case_blocked_9(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 4, {2, 32, 5, 10, 20}, {0, 3, 1, 4, 2}, \ -{2, 4, 5, 10, 20, 8}, {0, 1, 2, 3, 4, 1}, {2, 2, 32, 20, 5, 8}, {0, 1, 2, 3, 4, 1}) -#define case_blocked_10(prec) test_params_t(Layout::BLOCKED, Layout::BLOCKED, prec, 3, {10, 24, 4, 5, 6}, {1, 0, 2, 3, 4}, \ -{10, 3, 4, 5, 6, 8}, {0, 1, 2, 3, 4, 1}, {24, 2, 4, 5, 6, 8}, {0, 1, 2, 3, 4, 1}) - -#define case_planar_to_blocked_0(prec) test_params_t(Layout::NCHW, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 1, 2, 3}, \ -{}, {}, {2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_1(prec) test_params_t(Layout::NCHW, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 3, 1}, \ -{}, {}, {2, 2, 20, 32, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_2(prec) test_params_t(Layout::NCHW, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 1, 3}, \ -{}, {}, {2, 2, 32, 20, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_3(prec) test_params_t(Layout::NCHW, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 1, 3, 2}, \ -{}, {}, {2, 4, 20, 10, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_4(prec) test_params_t(Layout::NCHW, Layout::BLOCKED, prec, 3, {10, 24, 4, 5}, {1, 0, 2, 3}, \ -{}, {}, {24, 2, 4, 5, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_5(prec) test_params_t(Layout::NHWC, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 1, 2, 3}, \ -{}, {}, {2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_6(prec) test_params_t(Layout::NHWC, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 3, 1}, \ -{}, {}, {2, 2, 20, 32, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_7(prec) test_params_t(Layout::NHWC, Layout::BLOCKED, prec, 4, {2, 32, 10, 20}, {0, 2, 1, 3}, \ -{}, {}, {2, 2, 32, 20, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_8(prec) test_params_t(Layout::NHWC, Layout::BLOCKED, prec, 4, {2, 32, 
10, 20}, {0, 1, 3, 2}, \ -{}, {}, {2, 4, 20, 10, 8}, {0, 1, 2, 3, 1}) -#define case_planar_to_blocked_9(prec) test_params_t(Layout::NHWC, Layout::BLOCKED, prec, 3, {10, 24, 4, 5}, {1, 0, 2, 3}, \ -{}, {}, {24, 2, 4, 5, 8}, {0, 1, 2, 3, 1}) - -#define case_blocked_to_planar_0(prec) test_params_t(Layout::BLOCKED, Layout::NCHW, prec, 4, {2, 32, 10, 20}, {0, 1, 2, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_1(prec) test_params_t(Layout::BLOCKED, Layout::NCHW, prec, 4, {2, 32, 10, 20}, {0, 2, 3, 1}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_2(prec) test_params_t(Layout::BLOCKED, Layout::NCHW, prec, 4, {2, 32, 10, 20}, {0, 2, 1, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_3(prec) test_params_t(Layout::BLOCKED, Layout::NCHW, prec, 4, {2, 32, 10, 20}, {0, 1, 3, 2}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_4(prec) test_params_t(Layout::BLOCKED, Layout::NCHW, prec, 3, {10, 24, 4, 5}, {1, 0, 2, 3}, \ -{10, 3, 4, 5, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_5(prec) test_params_t(Layout::BLOCKED, Layout::NHWC, prec, 4, {2, 32, 10, 20}, {0, 1, 2, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_6(prec) test_params_t(Layout::BLOCKED, Layout::NHWC, prec, 4, {2, 32, 10, 20}, {0, 2, 3, 1}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_7(prec) test_params_t(Layout::BLOCKED, Layout::NHWC, prec, 4, {2, 32, 10, 20}, {0, 2, 1, 3}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_8(prec) test_params_t(Layout::BLOCKED, Layout::NHWC, prec, 4, {2, 32, 10, 20}, {0, 1, 3, 2}, \ -{2, 4, 10, 20, 8}, {0, 1, 2, 3, 1}, {}, {}) -#define case_blocked_to_planar_9(prec) test_params_t(Layout::BLOCKED, Layout::NHWC, prec, 3, {10, 24, 4, 5}, {1, 0, 2, 3}, \ -{10, 3, 4, 5, 8}, {0, 1, 2, 3, 1}, {}, {}) - -test_params_t test_cases_fp32[] = { - case_planar_0(Precision::FP32), - case_planar_1(Precision::FP32), - case_planar_2(Precision::FP32), - case_planar_3(Precision::FP32), - case_planar_4(Precision::FP32), - case_planar_5(Precision::FP32), - case_planar_6(Precision::FP32), -}; - -test_params_t test_cases_s8[] = { - case_planar_0(Precision::I8), - case_planar_1(Precision::I8), - case_planar_2(Precision::I8), - case_planar_3(Precision::I8), - case_planar_4(Precision::I8), - case_planar_5(Precision::I8), - case_planar_6(Precision::I8), -}; - -test_params_t test_cases_blocked_fp32[] = { - case_blocked_0(Precision::FP32), - case_blocked_1(Precision::FP32), - case_blocked_2(Precision::FP32), - case_blocked_3(Precision::FP32), - case_blocked_4(Precision::FP32), - case_blocked_5(Precision::FP32), - case_blocked_6(Precision::FP32), - case_blocked_7(Precision::FP32), - case_blocked_8(Precision::FP32), - case_blocked_9(Precision::FP32), - case_blocked_10(Precision::FP32), -}; - -test_params_t test_cases_blocked_s8[] = { - case_blocked_0(Precision::I8), - case_blocked_1(Precision::I8), - case_blocked_2(Precision::I8), - case_blocked_3(Precision::I8), - case_blocked_4(Precision::I8), - case_blocked_5(Precision::I8), - case_blocked_6(Precision::I8), - case_blocked_7(Precision::I8), - case_blocked_8(Precision::I8), - case_blocked_9(Precision::I8), - case_blocked_10(Precision::I8), -}; - -test_params_t test_cases_planar_to_blocked_fp32[] = { - case_planar_to_blocked_0(Precision::FP32), - case_planar_to_blocked_1(Precision::FP32), - case_planar_to_blocked_2(Precision::FP32), - 
case_planar_to_blocked_3(Precision::FP32), - case_planar_to_blocked_4(Precision::FP32), - case_planar_to_blocked_5(Precision::FP32), - case_planar_to_blocked_6(Precision::FP32), - case_planar_to_blocked_7(Precision::FP32), - case_planar_to_blocked_8(Precision::FP32), - case_planar_to_blocked_9(Precision::FP32), -}; - -test_params_t test_cases_blocked_to_planar_fp32[] = { - case_blocked_to_planar_0(Precision::FP32), - case_blocked_to_planar_1(Precision::FP32), - case_blocked_to_planar_2(Precision::FP32), - case_blocked_to_planar_3(Precision::FP32), - case_blocked_to_planar_4(Precision::FP32), - case_blocked_to_planar_5(Precision::FP32), - case_blocked_to_planar_6(Precision::FP32), - case_blocked_to_planar_7(Precision::FP32), - case_blocked_to_planar_8(Precision::FP32), - case_blocked_to_planar_9(Precision::FP32), -}; - -test_params_t test_cases_planar_to_blocked_s8[] = { - case_planar_to_blocked_0(Precision::I8), - case_planar_to_blocked_1(Precision::I8), - case_planar_to_blocked_2(Precision::I8), - case_planar_to_blocked_3(Precision::I8), - case_planar_to_blocked_4(Precision::I8), - case_planar_to_blocked_5(Precision::I8), - case_planar_to_blocked_6(Precision::I8), - case_planar_to_blocked_7(Precision::I8), - case_planar_to_blocked_8(Precision::I8), - case_planar_to_blocked_9(Precision::I8), -}; - -test_params_t test_cases_blocked_to_planar_s8[] = { - case_blocked_to_planar_0(Precision::I8), - case_blocked_to_planar_1(Precision::I8), - case_blocked_to_planar_2(Precision::I8), - case_blocked_to_planar_3(Precision::I8), - case_blocked_to_planar_4(Precision::I8), - case_blocked_to_planar_5(Precision::I8), - case_blocked_to_planar_6(Precision::I8), - case_blocked_to_planar_7(Precision::I8), - case_blocked_to_planar_8(Precision::I8), - case_blocked_to_planar_9(Precision::I8), -}; - - -INSTANTIATE_TEST_CASE_P(TestsPermutePlanar4d, permute_f32, test_cases_planar_4d(Precision::FP32)); -INSTANTIATE_TEST_CASE_P(TestsPermutePlanar5d, permute_f32, test_cases_planar_5d(Precision::FP32)); -INSTANTIATE_TEST_CASE_P(TestsPermute, permute_f32, ::testing::ValuesIn(test_cases_fp32)); -INSTANTIATE_TEST_CASE_P(TestsPermuteBlocked, permute_f32, ::testing::ValuesIn(test_cases_blocked_fp32)); -INSTANTIATE_TEST_CASE_P(TestsPermutePlanarToBlocked, permute_f32, ::testing::ValuesIn(test_cases_planar_to_blocked_fp32)); -INSTANTIATE_TEST_CASE_P(TestsPermuteBlockedToPlanar, permute_f32, ::testing::ValuesIn(test_cases_blocked_to_planar_fp32)); - -INSTANTIATE_TEST_CASE_P(TestsPermutePlanar4d, permute_s8, test_cases_planar_4d(Precision::I8)); -INSTANTIATE_TEST_CASE_P(TestsPermutePlanar5d, permute_s8, test_cases_planar_5d(Precision::I8)); -INSTANTIATE_TEST_CASE_P(TestsPermute, permute_s8, ::testing::ValuesIn(test_cases_s8)); -INSTANTIATE_TEST_CASE_P(TestsPermuteBlocked, permute_s8, ::testing::ValuesIn(test_cases_blocked_s8)); -INSTANTIATE_TEST_CASE_P(TestsPermutePlanarToBlocked, permute_s8, ::testing::ValuesIn(test_cases_planar_to_blocked_s8)); -INSTANTIATE_TEST_CASE_P(TestsPermuteBlockedToPlanar, permute_s8, ::testing::ValuesIn(test_cases_blocked_to_planar_s8)); - -class MKLDNNGraphDynBatchPermuteTests: public permute_f32 { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - permute_test_params p = initialize_permute_test_params(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - p.dims[0] = MB; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - 
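        // Legacy dynamic-batch path: this IR is expected to parse without an
        // ngraph function (checked just below), the batch is then resized via
        // ICNNNetwork::setBatchSizeReshape, and KEY_DYN_BATCH_ENABLED allows
        // checkDynBatch() to re-infer with both the full batch MB and batch 1.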
ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - auto manager = std::make_shared(); - { - auto defaultExt = std::make_shared(); - defaultExt->layersFactory.registerNodeIfRequired(GraphPermuteTests, FakeLayer_permute, "FakeLayer_permute", Cpu::ImplFactory); - manager->AddExtension(defaultExt); - } - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network, manager); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.dims, InferenceEngine::TensorDesc::getLayoutByDims(p.dims)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkPermute = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Permute; - }; - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkPermute); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkPermute); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchPermuteTests, TestsDynBatchPermute) {} - -test_params_t test_cases_dyn_batch[] = { - test_params_t(Layout::NCHW, Layout::NCHW, Precision::FP32, 2, {2, 3, 4, 5}, {0, 1, 2, 3}, {}, {}, {}, {}), - test_params_t(Layout::NCHW, Layout::NCHW, Precision::FP32, 2, {2, 3, 4, 5}, {0, 2, 3, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCHW, Layout::NCHW, Precision::FP32, 2, {2, 3, 4, 5}, {0, 2, 1, 3}, {}, {}, {}, {}), - test_params_t(Layout::CHW, Layout::CHW, Precision::FP32, 2, {2, 3, 4}, {0, 1, 2}, {}, {}, {}, {}), - test_params_t(Layout::CHW, Layout::CHW, Precision::FP32, 2, {2, 3, 4}, {0, 2, 1}, {}, {}, {}, {}), - test_params_t(Layout::NC, Layout::NC, Precision::FP32, 2, {2, 3}, {0, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 6}, {0, 1, 2, 3, 4}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 6}, {0, 4, 2, 1, 3}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 6}, {0, 2, 4, 3, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 6}, {0, 3, 2, 4, 1}, {}, {}, {}, {}), - // FIXME: Plugin inserts reorder from blocked to goidhw format here - // test_params_t(Layout::BLOCKED, Layout::BLOCKED, Precision::FP32, 1, {2, 8, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}, {}, {}, {}, {}), - // test_params_t(Layout::BLOCKED, Layout::BLOCKED, Precision::FP32, 1, {2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}, {}, {}, {}, {}), - test_params_t(Layout::CHW, Layout::CHW, Precision::FP32, 2, {2, 12, 9}, {0, 2, 1}, {}, {}, {}, {}), - // test_params_t(Layout::BLOCKED, Layout::BLOCKED, Precision::FP32, 1, {2, 8, 3, 3, 4, 5}, 
{0, 3, 4, 1, 5, 2}, {}, {}, {}, {}), - test_params_t(Layout::NCHW, Layout::NCHW, Precision::FP32, 2, {2, 3, 4, 5}, {0, 1, 3, 2}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 7}, {0, 3, 1, 4, 2}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 7}, {0, 2, 1, 3, 4}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 7}, {0, 2, 4, 3, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {2, 3, 4, 5, 7}, {0, 4, 2, 3, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCHW, Layout::NCHW, Precision::FP32, 2, {2, 3, 4, 5}, {0, 3, 1, 2}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {3, 4, 7, 8, 4}, {0, 2, 3, 4, 1}, {}, {}, {}, {}), - test_params_t(Layout::NCDHW, Layout::NCDHW, Precision::FP32, 2, {3, 4, 7, 8, 4}, {0, 4, 1, 2, 3}, {}, {}, {}, {}), -}; - -INSTANTIATE_TEST_CASE_P(TestsDynBatchPermute, MKLDNNGraphDynBatchPermuteTests, ::testing::ValuesIn(test_cases_dyn_batch)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp deleted file mode 100644 index 6c248eac720917..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NOMINMAX -#define NOMINMAX -#endif - -#include "test_graph.hpp" - -#include -#include "single_layer_common.hpp" -#include -#include "tests_common.hpp" -#include "ir_gen_helper.hpp" -#include - -#include - -using namespace InferenceEngine; -using namespace ::testing; -using namespace std; -using namespace mkldnn; -using namespace single_layer_tests; - -struct pooling_test_params { - // Formats: NCHW, NCDHW - vector dims; - // Formats: WH, WHD - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - - PoolingLayer::PoolType _type; - bool _exclude_pad; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - vector preferTypes; - - vector> comp; -}; - -template -void ref_pool(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, pooling_test_params prm) -{ - int dims_size = prm.dims.size(); - - int KW = prm.kernel[X_AXIS]; - int KH = prm.kernel[Y_AXIS]; - int KD = dims_size == 5 ? prm.kernel[Z_AXIS] : 1; - - int SW = prm.strides[X_AXIS]; - int SH = prm.strides[Y_AXIS]; - int SD = prm.strides.size() > Z_AXIS ? prm.strides[Z_AXIS] : 1; - - int IW = prm.dims[dims_size - 1]; - int IH = prm.dims[dims_size - 2]; - int ID = dims_size == 5 ? prm.dims[dims_size - 3] : 1; - - int PWB = prm.pads_begin[X_AXIS]; - int PHB = prm.pads_begin[Y_AXIS]; - int PDB = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0; - int PWE = prm.pads_end[X_AXIS]; - int PHE = prm.pads_end[Y_AXIS]; - int PDE = prm.pads_end.size() > Z_AXIS ? prm.pads_end[Z_AXIS] : 0; - - int OW = (IW + PWB + PWE - KW) / SW + 1; - int OH = (IH + PHB + PHE - KH) / SH + 1; - int OD = dims_size == 5 ? 
(ID + PDB + PDE - KD) / SD + 1 : 1; - int OC = prm.dims[1]; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - InferenceEngine::SizeVector dims = dst.getTensorDesc().getDims(); - IE_ASSERT(OC == dims[1]); - - int k1 = OH * OW, - k2 = k1 * OD, - k3 = IH * IW, - k4 = k3 * ID; - - if (prm._type == PoolingLayer::MAX) { - for (int c = 0; c < OC; c++) { - int cc = c * k2; - for (int od = 0; od < OD; od++) { - int cd = cc + od * k1; - for (int oh = 0; oh < OH; oh++) { - int ch = cd + oh * OW; - for (int ow = 0; ow < OW; ow++) { - - int oidx = ch + ow; - data_t out_ref = data_t(0); - bool is_initialized = false; - - for (int kd = 0; kd < KD; kd++) { - int id = dims_size == 5 ? od * SD - PDB + kd : 0lu; - if (id < 0 || id >= ID) continue; - for (int kh = 0; kh < KH; kh++) { - int ih = oh * SH - PHB + kh; - if (ih < 0 || ih >= IH) continue; - for (int kw = 0; kw < KW; kw++) { - int iw = ow * SW - PWB + kw; - if (iw < 0 || iw >= IW) continue; - int iidx = c * k4 - + id * k3 - + ih * IW - + iw; - - data_t d = src_data[iidx]; - if (!is_initialized) { - out_ref = d; - is_initialized = true; - } else { - if (out_ref < d) - out_ref = d; - } - } - } - } - dst_data[oidx] = out_ref; - } - } - } - } - } else if (prm._type == PoolingLayer::AVG) { - - bool include_padding = false; - bool not_zero_l = false; - for (auto lr : prm.pads_begin) { - if (lr) { - not_zero_l = true; - break; - } - } - if (!prm._exclude_pad && not_zero_l) - include_padding = true; - - int PDBKD = KD - PDB, - PHBKH = KH - PHB, - PWBKW = KW - PWB, - IDPDE = ID + PDE, - IHPHE = IH + PHE, - IWPWE = IW + PWE; - - for (int c = 0; c < OC; c++) { - int cc = c * k2; - for (int od = 0; od < OD; od++) { - int cd = cc + od * k1; - int id_start = od * SD - PDB; - int id_end = std::min(od * SD + PDBKD, IDPDE); - for (int oh = 0; oh < OH; oh++) { - int ch = cd + oh * OW; - int ih_start = oh * SH - PHB; - int ih_end = std::min(oh * SH + PHBKH, IHPHE); - for (int ow = 0; ow < OW; ow++) { - size_t oidx = ch + ow; - dst_data[oidx] = (data_t)0; - int iw_start = ow * SW - PWB; - int iw_end = std::min(ow * SW + PWBKW, IWPWE); - - // include_padding - double num_summands = (ih_end - ih_start) * (iw_end - iw_start) * (id_end - id_start); - - id_start = std::max(id_start, 0); - ih_start = std::max(ih_start, 0); - iw_start = std::max(iw_start, 0); - id_end = std::min(id_end, ID); - ih_end = std::min(ih_end, IH); - iw_end = std::min(iw_end, IW); - - if (!include_padding) - num_summands = (id_end - id_start) * (ih_end - ih_start) * (iw_end - iw_start); - if (num_summands == 0.0) continue; - - double dst = 0.0; - for (int id = id_start; id < id_end; ++id) { - for (int ih = ih_start; ih < ih_end; ++ih) { - for (int iw = iw_start; iw < iw_end; ++iw) { - size_t iidx = c * k4 - + id * k3 - + ih * IW - + iw; - - dst += (double)src_data[iidx]; - } } } - - dst_data[oidx] = (data_t)(dst / num_summands); - } } } } } -} - -class MKLDNNGraphPoolingTests: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - __SRC_DIMS__ - - - - - _IN_ - _IC_ - __DST_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - -protected: - std::string getModel(pooling_test_params p) { - std::string model = layers_t; - - std::string s_dims; - for (auto& dim : p.dims) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims); - - s_dims = ""; - int k_len = p.kernel.size(); - for (size_t i = 2lu; i < p.dims.size(); i++) { - size_t inx = k_len - i + 1lu; - 
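            // Standard pooling output-size formula (floor division). For the
            // 228x228 inputs used below with a 2x2 kernel, stride 2 and no
            // padding: (228 + 0 + 0 - 2) / 2 + 1 = 114. Kernel/stride/pad
            // vectors are stored in XY(Z) order, hence the reversed index inx.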
size_t dim = (p.dims[i] + p.pads_begin[inx] + p.pads_end[inx] - p.kernel[inx]) / p.strides[inx] + 1lu; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims); - - std::string pool_method; - switch (p._type) { - case PoolingLayer::AVG: pool_method = "avg"; - break; - case PoolingLayer::ROI: pool_method = "roi"; - break; - default: pool_method = "max"; - } - REPLACE_WITH_STR(model, "_PM_", pool_method); - - std::string exclude_pad = "false"; - if (p._exclude_pad) exclude_pad = "true"; - REPLACE_WITH_STR(model, "_EP_", exclude_pad); - - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - model = IRTemplateGenerator::getIRTemplate("Pooling_Only", p.dims, "FP32", model, edges_t); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - pooling_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Pooling) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType); - } - } - - InferenceEngine::Layout layout = ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.dims, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_pool(*srcPtr, dst_ref, p); - - compare(*output, dst_ref, 0.0001f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphPoolingTests, TestsPooling) {} - -INSTANTIATE_TEST_CASE_P( - TestsPooling, MKLDNNGraphPoolingTests, - 
::testing::Values( - /*0*/ pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::jit}, - pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::ref, - {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::ref, - {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 3, MKLDNNPlugin::impl_desc_type::ref, - {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {1u, 0u}, {0u, 0u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {1u, 0u}, {0u, 0u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {0u, 0u}, {0u, 0u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - /*9*/ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {0u, 0u}, {0u, 0u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::MAX, false, 3u, -// MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// pooling_test_params{{1u, 1u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::MAX, false, 1, -// MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // TODO Fix jit implementation. 
End paddings -// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::AVG, true, 3u, -// MKLDNNPlugin::impl_desc_type::jit }, -// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::AVG, false, 3u, -// MKLDNNPlugin::impl_desc_type::jit }, -// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::MAX, false, 3u, -// MKLDNNPlugin::impl_desc_type::jit }, - - // 5D tensor - pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::MAX, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::MAX, false, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 1u, 1u}, {1u, 1u, 1u}, PoolingLayer::MAX, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 32u, 60u, 60u, 60u}, {2u, 3u, 4u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 2u, 3u}, PoolingLayer::MAX, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 2u, 3u}, {1u, 2u, 3u}, PoolingLayer::MAX, false, 1u, -// MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {1u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {1u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, true, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, false, 3u, - MKLDNNPlugin::impl_desc_type::jit }, - pooling_test_params{{1u, 1u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, false, 1u, - MKLDNNPlugin::impl_desc_type::ref })); - - -class MKLDNNGraphDynBatchPoolingTests: public MKLDNNGraphPoolingTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - pooling_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - 
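            // MB was clamped to at least 2 above so that the checkDynBatch
            // calls at the end compare a genuinely smaller batch (1) against
            // the full one (MB).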
InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - - InferenceEngine::Layout layout = ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - InferenceEngine::Blob::Ptr src = - InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.dims, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkPooling = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Pooling; - }; - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkPooling); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkPooling); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchPoolingTests, TestsDynBatchPooling) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchPooling, MKLDNNGraphDynBatchPoolingTests, - ::testing::Values( - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::jit}, - pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp deleted file mode 100644 index 0f7535a5a91f06..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct power_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - 
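    // Parameters of the Power layer as verified by ref_power below:
    // out = (scale * x + shift) ^ power.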
float power; - float scale; - float shift; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -template -void ref_power(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, power_test_params prm) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int i=0; i < src.size(); i++) - dst_data[i] = pow(src_data[i]*prm.scale + prm.shift, prm.power); -} - -class MKLDNNGraphPowerTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(power_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_POWER_", p.power); - REPLACE_WITH_NUM(model, "_SCALE_", p.scale); - REPLACE_WITH_NUM(model, "_SHIFT_", p.shift); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - power_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Eltwise) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - } - } - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_power(*srcPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphPowerTests, TestsPower) {} - - -INSTANTIATE_TEST_CASE_P( - TestsPower, MKLDNNGraphPowerTests, - ::testing::Values( - power_test_params{ - {1, 3, 13, 13}, 1, 2, 0.5f, 3, MKLDNNPlugin::impl_desc_type::unknown, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, 
impl.getConfig().outConfs.size()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - }}}, - power_test_params{{1, 1, 23, 23}, 3, 8, 2, 3 }, - power_test_params{{1, 8, 23, 23}, 8, 2, 1, 3 }, - power_test_params{{1, 8, 23, 23}, 2, 2, 4, 3 } - )); - -class MKLDNNGraphDynBatchPowerTests: public MKLDNNGraphPowerTests { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - - std::string getModel(power_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_POWER_", p.power); - REPLACE_WITH_NUM(model, "_SCALE_", p.scale); - REPLACE_WITH_NUM(model, "_SHIFT_", p.shift); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - power_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in.n; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkPower = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Eltwise; - }; - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkPower); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkPower); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchPowerTests, TestsDynBatchPower) {} - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchPower, MKLDNNGraphDynBatchPowerTests, - ::testing::Values( - power_test_params{ - {1, 3, 13, 13}, 1, 2, 0.5f, 3, MKLDNNPlugin::impl_desc_type::unknown, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - }, - 
[](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - }}}, - power_test_params{{1, 1, 23, 23}, 3, 8, 2, 3 }, - power_test_params{{1, 8, 23, 23}, 8, 2, 1, 3 }, - power_test_params{{1, 8, 23, 23}, 2, 2, 4, 3 } - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp deleted file mode 100644 index 9086e9a2c5dc25..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct relu_test_params { - // Formats: NCHW, NCDHW - vector dims; - - float n_clope; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -template -void ref_relu(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, relu_test_params prm) -{ - auto dims_size = src.getTensorDesc().getDims().size(); - - size_t IW = src.getTensorDesc().getDims()[dims_size - 1]; - size_t IH = src.getTensorDesc().getDims()[dims_size - 2]; - size_t ID = dims_size == 5 ? src.getTensorDesc().getDims()[dims_size - 3] : 1u; - size_t IC = src.getTensorDesc().getDims()[1]; - - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (uint32_t c = 0; c < IC; c++) { - for (uint32_t d = 0; d < ID; d++) { - for (uint32_t h = 0; h < IH; h++) { - for (uint32_t w = 0; w < IW; w++) { - uint32_t oidx = c * ID * IH * IW - + d * IH * IW - + h * IW - + w; - - dst_data[oidx] = src_data[oidx] >= 0.0 ? 
- src_data[oidx] : - src_data[oidx] * prm.n_clope; - } - } - } - } -} - -class MKLDNNGraphReluTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - - std::string getModel(relu_test_params p) { - std::string model = model_t; - auto dims_size = p.dims.size(); - - switch (dims_size) { - case 3: - REMOVE_LINE(model, "_IH_"); - case 4: - REMOVE_LINE(model, "_ID_"); - } - - REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - switch (dims_size) { - case 5: - REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]); - case 4: - REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]); - } - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - relu_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Activation) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_relu(*srcPtr, dst_ref, p); - - compare(*output, dst_ref, 0.0005f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphReluTests, TestsRelu) {} - - -INSTANTIATE_TEST_CASE_P( - TestsRelu, MKLDNNGraphReluTests, - ::testing::Values( - relu_test_params{ - {1, 3, 228, 228}, 0.0f, 5, MKLDNNPlugin::impl_desc_type::jit, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_TRUE(impl.getImplementationType() | MKLDNNPlugin::impl_desc_type::jit); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - 
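                    // 4D jit case: plain NCHW is expected on both ports; the
                    // 5D ref_any case below checks NCDHW instead.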
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_TRUE(impl.getImplementationType() | MKLDNNPlugin::impl_desc_type::jit); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }}, - relu_test_params{ - {1, 64, 32, 32, 32}, 0.0f, 3, MKLDNNPlugin::impl_desc_type::ref_any, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_TRUE(impl.getImplementationType() | MKLDNNPlugin::impl_desc_type::ref_any); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - }, - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_TRUE(impl.getImplementationType() | MKLDNNPlugin::impl_desc_type::ref_any); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp deleted file mode 100644 index 5f43097726b0c2..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "tests_common.hpp" -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -class MKLDNNGraphReorderTests: public TestsCommon { -protected: - virtual void SetUp() { - TestsCommon::SetUp(); - } -}; - -TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLayers) { - std::shared_ptr node; - mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)); - - InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32})); - MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache; - node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache)); - ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType()); - - ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::Exception); -} - -TEST_F(MKLDNNGraphReorderTests, CreateReorder) { - std::string model = R"V0G0N( - - - - - - 1 - 9 - 16 - 32 - - - - - - - - - - - - 1 - 9 - 16 - 32 - - - - - 1 - 17 - 16 - 32 - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {(1 * 1 * 17 * 9 / 1 + 17) - * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - 
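// The ref_relu helper removed above is a leaky ReLU reference: each element is
// kept when non-negative and scaled by the negative slope (the struct's
// n_clope field) otherwise. A minimal standalone sketch of the same reference
// over a flat buffer; the function name and vector-based signature here are
// illustrative, not from the deleted file:
#include <cstddef>
#include <vector>

std::vector<float> ref_leaky_relu(const std::vector<float>& src, float negative_slope) {
    std::vector<float> dst(src.size());
    for (std::size_t i = 0; i < src.size(); ++i)
        dst[i] = src[i] >= 0.0f ? src[i] : src[i] * negative_slope;  // y = x, or x * slope
    return dst;
}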
ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Reorder) { - ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors().size()); - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref_any, - nodes[i]->getSupportedPrimitiveDescriptors()[0].getImplementationType()); - ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs.size()); - if (i == 1) { - ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout()); - ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout()); - } else { - ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout()); - } - ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs.size()); - } - } -} - -TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) { - std::string model = R"V0G0N( - - - - - - 1 - 9 - 16 - 32 - - - - - - - 1 - 9 - 16 - 32 - - - - - 32 - 144 - - - - - - - - 32 - 144 - - - - - 1 - 3 - 48 - 32 - - - - - - - 1 - 3 - 48 - 32 - - - - - 1 - 3 - 48 - 32 - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {24}, InferenceEngine::C }); - weights->allocate(); - float *data = weights->buffer().as(); - size_t dataSize = weights->byteSize() / sizeof(float); - for (size_t i = 0; i < dataSize; i++) { - data[i] = 2; - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - network.addOutput("reshape1"); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {1, 9, 16, 32}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - data = src->buffer().as(); - dataSize = src->size(); - for (size_t i = 0; i < dataSize; i++) { - data[i] = 1; - } - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - auto it = out.begin(); - std::pair item = *it; - - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output1->allocate(); - outputBlobs[item.first] = output1; - - item = *(++it); - - InferenceEngine::TBlob::Ptr output2; - output2 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output2->allocate(); - outputBlobs[item.first] = output2; - - graph.Infer(srcs, outputBlobs); - - data = output1->data(); - for (size_t i = 0; i < output1->size(); i++) { - ASSERT_EQ(data[i], 1); - } - data = output2->data(); - for (size_t i = 0; i < output2->size(); i++) { - ASSERT_EQ(data[i], 4); - } -} diff --git 
a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp deleted file mode 100644 index 503db07e574f0a..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct reshape_test_params { - InferenceEngine::SizeVector in; - InferenceEngine::SizeVector out; - std::vector shape; - - int axis; - int num_axes; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -template -void ref_reshape(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - for (int i=0; i < src.size(); i++) - dst_data[i] = src_data[i]; -} - -class MKLDNNGraphReshapeTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - -__SRC_DIMS__ - - - - - - - - -__SRC_DIMS__ - - - - -__DST_DIMS__ - - - - - - - - -)V0G0N"; - - std::string getModel(reshape_test_params p) { - std::string model = model_t; - - std::string src_dims; - for (auto& dim : p.in) { - src_dims += " "; - src_dims += std::to_string(dim) + "\n"; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims); - - std::string dst_dims; - for (auto& dim : p.out) { - dst_dims += "\t\t"; - dst_dims += std::to_string(dim) + "\n"; - } - REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims); - - REPLACE_WITH_NUM(model, "_AX_", p.axis); - REPLACE_WITH_NUM(model, "_NAX_", p.num_axes); - - std::string shape_str; - for (auto& dim : p.shape) { - if (!shape_str.empty()) - shape_str += ","; - shape_str += std::to_string(dim); - } - REPLACE_WITH_STR(model, "_SHAPE_", shape_str); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - reshape_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Reshape) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.in, - InferenceEngine::TensorDesc::getLayoutByDims(p.in)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; 
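// The ref_reshape reference above is a plain element-for-element copy: for a
// dense row-major blob, reshaping only reinterprets the dimensions while the
// linear element order stays identical, so the test can validate Reshape by
// comparing raw buffers. A sketch of that invariant; the names and the
// pointer-based signature are illustrative:
#include <cassert>
#include <cstring>
#include <functional>
#include <numeric>
#include <vector>

void reshape_copy(const float* src, float* dst,
                  const std::vector<std::size_t>& in_dims,
                  const std::vector<std::size_t>& out_dims) {
    auto count = [](const std::vector<std::size_t>& d) {
        return std::accumulate(d.begin(), d.end(), std::size_t{1},
                               std::multiplies<std::size_t>());
    };
    assert(count(in_dims) == count(out_dims));  // reshape preserves the element count
    std::memcpy(dst, src, count(in_dims) * sizeof(float));  // data itself is untouched
}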
- out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_reshape(*srcPtr, dst_ref); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphReshapeTests, TestsReshape) {} - - -INSTANTIATE_TEST_CASE_P( - TestsReshape, MKLDNNGraphReshapeTests, - ::testing::Values( - reshape_test_params{ {1, 3, 228, 228}, {1, 24, 2, 3249}, {1, 24, 2, 3249}, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4 },{ 2, 2 },{ 2, 2 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4 },{ 1, 2, 2 },{ 1, 2, 2 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4 },{ 1, 4, 1, 1 },{ 1, 4, 1, 1 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4, 4 },{ 1, 4, 4 },{ 1, 4, 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4, 4 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, 
impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4, 2, 2 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 2, 2 },{ 4 },{ 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 4, 2, 2 },{ 4, 2, 2 },{ 4, 2, 2 }, 0, -1, 1, - 
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 4, 2, 2 }, { 4, 2, 2, 1, 1 }, { 4, 2, 2, 1, 1 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 4, 2, 2, 1, 1 }, { 1, 4, 2, 2 }, { 1, 4, 2, 2 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } }, - reshape_test_params{ { 1, 200 }, { 1, 200, 1, 1, 1 }, { 1, 200, 1, 1, 1 }, 0, -1, 1, - MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } } } -)); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp deleted file mode 100644 index 0ffdfed272a6f6..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct roi_pooling_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in1; - - struct { - size_t n; - size_t c; - } in2; - - size_t pooled_h; - size_t pooled_w; - float spatial_scale; - - size_t num_prim_desc; - - int selectedType; - - std::vector> comp; -}; - -template -void ref_roipooling(const InferenceEngine::TBlob &src, const InferenceEngine::TBlob &roi, - InferenceEngine::TBlob &dst_blob, roi_pooling_test_params& params) { - data_t* dst = dst_blob.data(); - const data_t* src_data = src.readOnly(); - const data_t* src_roi = roi.readOnly(); - - int C = src.getTensorDesc().getDims()[1]; - int H = src.getTensorDesc().getDims()[2]; - int W = src.getTensorDesc().getDims()[3]; - - int ROIS = 
roi.getTensorDesc().getDims()[0]; - - double spatial_scale = params.spatial_scale; - int pooled_h = params.pooled_h; - int pooled_w = params.pooled_w; - - auto *arg_max_ = new data_t[dst_blob.size()]; - - for (size_t i = 0; i < dst_blob.size(); i++) { - arg_max_[i] = -1; - dst[i] = -FLT_MAX; - } - - int roi_off; - - for (int n = 0; n < ROIS; ++n) { - if(roi.getTensorDesc().getDims().size() == 4) { - roi_off = n*roi.getTensorDesc().getDims()[1]*roi.getTensorDesc().getDims()[2]*roi.getTensorDesc().getDims()[3]; - } - else { - roi_off = n*roi.getTensorDesc().getDims()[1]; - } - - const data_t* src_roi_ptr = &src_roi[roi_off]; - - int roi_batch_ind = src_roi_ptr[0]; - int roi_start_w = round(src_roi_ptr[1] * spatial_scale); - int roi_start_h = round(src_roi_ptr[2] * spatial_scale); - int roi_end_w = round(src_roi_ptr[3] * spatial_scale); - int roi_end_h = round(src_roi_ptr[4] * spatial_scale); - - int roi_height = (std::max)(roi_end_h - roi_start_h + 1, 1); - int roi_width = (std::max)(roi_end_w - roi_start_w + 1, 1); - - for (int c = 0; c < C; ++c) { - - for (int ph = 0; ph < pooled_h; ++ph) { - for (int pw = 0; pw < pooled_w; ++pw) { - int hstart = (ph * roi_height) / pooled_h; - if ( (hstart * pooled_h) > (ph * roi_height) ) { - --hstart; - } - - int wstart = (pw * roi_width) / pooled_w; - if ( (wstart * pooled_w) > (pw * roi_width) ) { - --wstart; - } - - int hend = ((ph + 1) * roi_height) / pooled_h; - if ( (hend * pooled_h) < ((ph + 1) * roi_height) ) { - ++hend; - } - - int wend = ((pw + 1) * roi_width) / pooled_w; - if ( (wend * pooled_w) < ((pw + 1) * roi_width) ) { - ++wend; - } - - hstart = (std::min)((std::max)(hstart + roi_start_h, 0), H); - hend = (std::min)((std::max)(hend + roi_start_h, 0), H); - wstart = (std::min)((std::max)(wstart + roi_start_w, 0), W); - wend = (std::min)((std::max)(wend + roi_start_w, 0), W); - - bool is_empty = (hend <= hstart) || (wend <= wstart); - - const int pool_index = n*dst_blob.getTensorDesc().getDims()[3]*dst_blob.getTensorDesc().getDims()[2]*dst_blob.getTensorDesc().getDims()[1] + - c*dst_blob.getTensorDesc().getDims()[3]*dst_blob.getTensorDesc().getDims()[2] + ph*dst_blob.getTensorDesc().getDims()[3] + pw; - - if (is_empty) { - dst[pool_index] = 0; - arg_max_[pool_index] = -1; - } - - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - int src_index_data = roi_batch_ind*src.getTensorDesc().getDims()[1]*src.getTensorDesc().getDims()[2]*src.getTensorDesc().getDims()[3] + - c*src.getTensorDesc().getDims()[2]*src.getTensorDesc().getDims()[3] + h*src.getTensorDesc().getDims()[3] + w; - data_t batch_data = src_data[src_index_data]; - - if (batch_data > dst[pool_index]) { - dst[pool_index] = batch_data; - arg_max_[pool_index] = batch_data; - } - } - } - } - } - } - } - delete[] arg_max_; -} - -class MKLDNNGraphRoiPoolingTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN1_ - _IC1_ - _IH1_ - _IW1_ - - - - - - - _IN2_ - _IC2_ - - - - - - - - _IN1_ - _IC1_ - _IH1_ - _IW1_ - - - _IN2_ - _IC2_ - - - - - _ON_ - _OC_ - _OH_ - _OW_ - - - - - - - - - -)V0G0N"; - - std::string getModel(roi_pooling_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW1_", p.in1.w); - REPLACE_WITH_NUM(model, "_IH1_", p.in1.h); - REPLACE_WITH_NUM(model, "_IC1_", p.in1.c); - REPLACE_WITH_NUM(model, "_IN1_", p.in1.n); - - REPLACE_WITH_NUM(model, "_IC2_", p.in2.c); - REPLACE_WITH_NUM(model, "_IN2_", p.in2.n); - - REPLACE_WITH_NUM(model, "_OW_", p.pooled_w); - 
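// The ref_roipooling loop above derives each output bin from integer division
// with floor/ceil corrections, clamps the window into the feature map, and then
// max-pools it. The bin arithmetic in isolation, as a sketch; the helper and
// struct names are illustrative, but the arithmetic mirrors the deleted reference:
#include <algorithm>

struct PoolBin { int start; int end; };  // half-open [start, end); empty if end <= start

PoolBin roi_pool_bin(int p, int pooled_size, int roi_size, int roi_offset, int limit) {
    int start = (p * roi_size) / pooled_size;
    if (start * pooled_size > p * roi_size) --start;           // floor correction
    int end = ((p + 1) * roi_size) / pooled_size;
    if (end * pooled_size < (p + 1) * roi_size) ++end;         // ceil correction
    start = std::min(std::max(start + roi_offset, 0), limit);  // clamp into the map
    end = std::min(std::max(end + roi_offset, 0), limit);
    return {start, end};
}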
REPLACE_WITH_NUM(model, "_OH_", p.pooled_h); - REPLACE_WITH_NUM(model, "_OC_", (std::max)(p.in1.c, p.in2.c)); - REPLACE_WITH_NUM(model, "_ON_", (std::max)(p.in1.n, p.in2.n)); - - REPLACE_WITH_NUM(model, "_PH_", p.pooled_h); - REPLACE_WITH_NUM(model, "_PW_", p.pooled_w); - REPLACE_WITH_NUM(model, "_SS_", p.spatial_scale); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - roi_pooling_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::ROIPooling) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - InferenceEngine::SizeVector dims_src = {p.in1.n, p.in1.c, p.in1.h, p.in1.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::SizeVector dims_roi = {p.in2.n, p.in2.c}; - - InferenceEngine::Blob::Ptr roi = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_roi, InferenceEngine::NC}); - roi->allocate(); - fill_data(roi->buffer(), roi->size()); - - InferenceEngine::TBlob* roiPtr = dynamic_cast*>(roi.get()); - - if (roiPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - srcs.insert(std::pair("in2", roi)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_roipooling(*srcPtr, *roiPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphRoiPoolingTests, TestsRoiPooling) {} - -const size_t expect_num_impl = 1; - -INSTANTIATE_TEST_CASE_P( - TestsRoiPooling, MKLDNNGraphRoiPoolingTests, - ::testing::Values( - roi_pooling_test_params{ - {1, 256, 39, 64}, // in1 - {150, 5}, // in2 - 6, 6, // pool H and W - 0.0625f, // spatial_scale - expect_num_impl, // num_prim_desc (platform dependent) - MKLDNNPlugin::impl_desc_type::jit - })); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp deleted file mode 100644 index ca9bef048a6e5e..00000000000000 --- 
a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" -#include - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct simplernms_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in_cls; - - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in_delta; - - struct { - size_t n; - size_t c; - } in_info; - - struct { - size_t n; - size_t c; - } out; - - size_t minBoxSize; - size_t featStride; - size_t preNmsTopn; - size_t postNmsTopn; - float iouThreshold; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - -struct anchor { float start_x; float start_y; float end_x; float end_y; }; - -template -struct simpler_nms_roi_t -{ - data_t x0, y0, x1, y1; - - constexpr static inline const data_t clamp_v(const data_t v, const data_t v_min, const data_t v_max) - { - return (std::max)(v_min, (std::min)(v, v_max)); - } - - data_t area() const { return std::max(0, y1 - y0 + 1) * std::max(0, x1 - x0 + 1); } - - simpler_nms_roi_t intersect (simpler_nms_roi_t other) const - { - return - { - (std::max)(x0, other.x0), - (std::max)(y0, other.y0), - (std::min)(x1, other.x1), - (std::min)(y1, other.y1) - }; - } - simpler_nms_roi_t clamp (simpler_nms_roi_t other) const - { - return - { - clamp_v(x0, other.x0, other.x1), - clamp_v(y0, other.y0, other.y1), - clamp_v(x1, other.x0, other.x1), - clamp_v(y1, other.y0, other.y1) - }; - } -}; - -template -struct simpler_nms_proposal_t { simpler_nms_roi_t roi; data_t confidence; size_t ord; }; -template -struct simpler_nms_delta_t { data_t shift_x, shift_y, log_w, log_h; }; - -template -inline simpler_nms_roi_t simpler_nms_gen_bbox( - const anchor& box, - const simpler_nms_delta_t& delta, - int anchor_shift_x, - int anchor_shift_y) -{ - auto anchor_w = box.end_x - box.start_x + 1; - auto anchor_h = box.end_y - box.start_y + 1; - auto center_x = box.start_x + anchor_w * .5f; - auto center_y = box.start_y + anchor_h *.5f; - - data_t pred_center_x = delta.shift_x * anchor_w + center_x + anchor_shift_x; - data_t pred_center_y = delta.shift_y * anchor_h + center_y + anchor_shift_y; - data_t half_pred_w = exp(delta.log_w) * anchor_w * .5f; - data_t half_pred_h = exp(delta.log_h) * anchor_h * .5f; - - return { pred_center_x - half_pred_w, - pred_center_y - half_pred_h, - pred_center_x + half_pred_w, - pred_center_y + half_pred_h }; -} -template -inline void sort_and_keep_at_most_top_n(std::vector>& proposals, size_t top_n) -{ - const auto cmp_fn = [](const simpler_nms_proposal_t& a, - const simpler_nms_proposal_t& b) - { - return a.confidence > b.confidence || (a.confidence == b.confidence && a.ord > b.ord); - }; - - if (proposals.size() > top_n) { - std::partial_sort(proposals.begin(), proposals.begin() + top_n, proposals.end(), cmp_fn); - proposals.resize(top_n); - } - else { - std::sort(proposals.begin(), proposals.end(), cmp_fn); - } -} - -template -std::vector> simpler_nms_perform_nms(const std::vector>& proposals, - float iou_threshold, size_t top_n) { - //TODO(ruv): can I mark the 1st arg, proposals as const? ifndef DONT_PRECALC_AREA, i can - //TODO(ruv): is it better to do the precalc or not? 
since we need to fetch the floats from memory anyway for - - // intersect calc, it's only a question of whether it's faster to do (f-f)*(f-f) or fetch another val -#define DONT_PRECALC_AREA - -#ifndef DONT_PRECALC_AREA - std::vector areas; - areas.reserve(proposals.size()); - std::transform(proposals.begin(), proposals.end(), areas.begin(), [](const simpler_nms_proposals_t>& v) - { - return v.roi.area(); - }); -#endif - - std::vector> res; - res.reserve(top_n); -#ifdef DONT_PRECALC_AREA - for (const auto & prop : proposals) { - const auto bbox = prop.roi; - const data_t area = bbox.area(); -#else - size_t proposal_count = proposals.size(); - for (size_t proposalIndex = 0; proposalIndex < proposal_count; ++proposalIndex) { - const auto & bbox = proposals[proposalIndex].roi; -#endif - - // For any realistic WL, this condition is true for all top_n values anyway - if (prop.confidence > 0) { - bool overlaps = std::any_of(res.begin(), res.end(), [&](const simpler_nms_roi_t& res_bbox) - { - data_t interArea = bbox.intersect(res_bbox).area(); -#ifdef DONT_PRECALC_AREA - data_t unionArea = res_bbox.area() + area - interArea; -#else - data_t unionArea = res_bbox.area() + areas[proposalIndex] - interArea; -#endif - return interArea > iou_threshold * unionArea; - }); - - if (! overlaps) { - res.push_back(bbox); - if (res.size() == top_n) break; - } - } - } - - return res; -} - -template -void ref_simplernms(const InferenceEngine::TBlob &src_cls, const InferenceEngine::TBlob &src_delta, const InferenceEngine::TBlob &src_info, InferenceEngine::TBlob &dst_blob, simplernms_test_params prm) { - int anchors_num = 3 * 3; - data_t *anchors_ = new data_t[anchors_num * sizeof(anchor) / sizeof(float)]; - const anchor* anchors = (anchor*)anchors_; - - IE_ASSERT(src_cls.getTensorDesc().getDims().size() == 4); - int H = src_cls.getTensorDesc().getDims()[2]; - int W = src_cls.getTensorDesc().getDims()[3]; - - int SZ = H * W; - - data_t* dst = dst_blob.data(); - - const data_t* cls_scores = src_cls.readOnly(); - const data_t* delta_pred = src_delta.readOnly(); - const data_t* im_info = src_info.readOnly(); - - int IW = im_info[0]; - int IH = im_info[1]; - int IS = im_info[2]; - - int scaled_min_bbox_size = prm.minBoxSize * IS; - - std::vector> sorted_proposals_confidence; - - for (auto y = 0; y < H; ++y) - { - int anchor_shift_y = y * prm.featStride; - - for (auto x = 0; x < W; ++x) { - int anchor_shift_x = x * prm.featStride; - int location_index = y * W + x; - - // we assume proposals are grouped by window location - for (int anchor_index = 0; anchor_index < anchors_num ; anchor_index++) { - data_t dx0 = delta_pred[location_index + SZ * (anchor_index * 4 + 0)]; - data_t dy0 = delta_pred[location_index + SZ * (anchor_index * 4 + 1)]; - data_t dx1 = delta_pred[location_index + SZ * (anchor_index * 4 + 2)]; - data_t dy1 = delta_pred[location_index + SZ * (anchor_index * 4 + 3)]; - - simpler_nms_delta_t bbox_delta { dx0, dy0, dx1, dy1 }; - - data_t proposal_confidence = cls_scores[location_index + SZ * (anchor_index + anchors_num * 1)]; - - simpler_nms_roi_t tmp_roi = simpler_nms_gen_bbox(anchors[anchor_index], bbox_delta, anchor_shift_x, anchor_shift_y); - simpler_nms_roi_t roi = tmp_roi.clamp({ 0, 0, data_t(IW - 1), data_t(IH - 1) }); - - int bbox_w = roi.x1 - roi.x0 + 1; - int bbox_h = roi.y1 - roi.y0 + 1; - - if (bbox_w >= scaled_min_bbox_size && bbox_h >= scaled_min_bbox_size) { - simpler_nms_proposal_t proposal { roi, proposal_confidence, sorted_proposals_confidence.size() }; - 
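// The suppression test in simpler_nms_perform_nms above accepts a proposal only
// if its overlap with every already-kept box satisfies
// interArea <= iou_threshold * unionArea. The same predicate on axis-aligned
// boxes with inclusive end coordinates (matching the deleted roi struct), as a
// standalone sketch; the Box type and function names are illustrative:
#include <algorithm>

struct Box { float x0, y0, x1, y1; };

float box_area(const Box& b) {
    // +1 because coordinates are inclusive pixel indices in this reference
    return std::max(0.0f, b.x1 - b.x0 + 1.0f) * std::max(0.0f, b.y1 - b.y0 + 1.0f);
}

bool suppressed_by(const Box& a, const Box& b, float iou_threshold) {
    Box inter{std::max(a.x0, b.x0), std::max(a.y0, b.y0),
              std::min(a.x1, b.x1), std::min(a.y1, b.y1)};
    float inter_area = box_area(inter);
    float union_area = box_area(a) + box_area(b) - inter_area;
    return inter_area > iou_threshold * union_area;  // true -> drop this box
}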
sorted_proposals_confidence.push_back(proposal); - } - } - } - } - - sort_and_keep_at_most_top_n(sorted_proposals_confidence, prm.preNmsTopn); - auto res = simpler_nms_perform_nms(sorted_proposals_confidence, prm.iouThreshold, prm.postNmsTopn); - - size_t res_num_rois = res.size(); - - for (size_t i = 0; i < res_num_rois; ++i) { - dst[5 * i + 0] = 0; // roi_batch_ind, always zero on test time - dst[5 * i + 1] = res[i].x0; - dst[5 * i + 2] = res[i].y0; - dst[5 * i + 3] = res[i].x1; - dst[5 * i + 4] = res[i].y1; - } - - delete[] anchors_; -} - -class MKLDNNGraphSimplerNMSTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _INC_ - _ICC_ - _IHC_ - _IWC_ - - - - - - - _IND_ - _ICD_ - _IHD_ - _IWD_ - - - - - - - _INI_ - _ICI_ - - - - - - - - - _INC_ - _ICC_ - _IHC_ - _IWC_ - - - _IND_ - _ICD_ - _IHD_ - _IWD_ - - - _INI_ - _ICI_ - - - - - _ON_ - _OC_ - - - - - - - - - - -)V0G0N"; - - std::string getModel(simplernms_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IWC_", p.in_cls.w); - REPLACE_WITH_NUM(model, "_IHC_", p.in_cls.h); - REPLACE_WITH_NUM(model, "_ICC_", p.in_cls.c); - REPLACE_WITH_NUM(model, "_INC_", p.in_cls.n); - - REPLACE_WITH_NUM(model, "_IWD_", p.in_delta.w); - REPLACE_WITH_NUM(model, "_IHD_", p.in_delta.h); - REPLACE_WITH_NUM(model, "_ICD_", p.in_delta.c); - REPLACE_WITH_NUM(model, "_IND_", p.in_delta.n); - - REPLACE_WITH_NUM(model, "_ICI_", p.in_info.c); - REPLACE_WITH_NUM(model, "_INI_", p.in_info.n); - - REPLACE_WITH_NUM(model, "_OC_", p.out.c); - REPLACE_WITH_NUM(model, "_ON_", p.out.n); - - REPLACE_WITH_NUM(model, "_MIN_BOX_SIZE_", p.minBoxSize); - REPLACE_WITH_NUM(model, "_FSRD_", p.featStride); - REPLACE_WITH_NUM(model, "_PRENT_", p.preNmsTopn); - REPLACE_WITH_NUM(model, "_POSTNT_", p.postNmsTopn); - REPLACE_WITH_NUM(model, "_IOU_THRESHOLD_", p.iouThreshold); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - simplernms_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::SimplerNMS) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - InferenceEngine::SizeVector dims_src_cls = {p.in_cls.n, p.in_cls.c, p.in_cls.h, p.in_cls.w}; - - InferenceEngine::Blob::Ptr src_cls = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src_cls, InferenceEngine::NCHW}); - src_cls->allocate(); - fill_data(src_cls->buffer(), src_cls->size()); - - InferenceEngine::TBlob* srcClsPtr = dynamic_cast*>(src_cls.get()); - - if (srcClsPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::SizeVector dims_delta = {p.in_delta.n, p.in_delta.c, p.in_delta.h, p.in_delta.w}; - - InferenceEngine::Blob::Ptr src_delta = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_delta, 
InferenceEngine::NCHW}); - src_delta->allocate(); - fill_data(src_delta->buffer(), src_delta->size()); - - InferenceEngine::TBlob* srcDeltaPtr = dynamic_cast*>(src_delta.get()); - - if (srcDeltaPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::SizeVector dims_info = {p.in_info.n, p.in_info.c}; - - InferenceEngine::Blob::Ptr src_info = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_info, InferenceEngine::NC}); - src_info->allocate(); - fill_data(src_info->buffer(), src_info->size()); - float * data_info = src_info->buffer(); - data_info[0] = 20; - data_info[1] = 20; - data_info[2] = 3; - - InferenceEngine::TBlob* srcInfoPtr = dynamic_cast*>(src_info.get()); - - if (srcInfoPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src_cls)); - srcs.insert(std::pair("in2", src_delta)); - srcs.insert(std::pair("in3", src_info)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_simplernms(*srcClsPtr, *srcDeltaPtr, *srcInfoPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphSimplerNMSTests, TestsSimplerNMS) {} - - -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsSimplerNMS, MKLDNNGraphSimplerNMSTests, - ::testing::Values( - simplernms_test_params{{1, 18, 39, 64}, {1, 36, 39, 64}, {1, 3}, {150, 5}, 16, 16, 6000, 150, 0.7f, 1, - MKLDNNPlugin::impl_desc_type::ref, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType()); - ASSERT_EQ(3, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(1).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(2).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp deleted file mode 100644 index c7c941bacf6df9..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct softmax_test_params { - // Formats: NCHW, NCDHW - vector dims; - - int axis; - - size_t num_prim_desc; - - int selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void check_softmax_fwd(const InferenceEngine::TBlob &src, softmax_test_params prm) -{ - const data_t *src_data = src.readOnly(); - - auto dims_size = 
prm.dims.size(); - - int axis = prm.axis; - if (dims_size == 4 && axis > 1) - axis++; - - size_t W = prm.dims[dims_size - 1]; - size_t H = prm.dims[dims_size - 2]; - size_t D = dims_size == 5 ? prm.dims[dims_size - 3] : 1u; - size_t C = prm.dims[1]; - size_t MB = prm.dims[0]; - - auto off = [=](int n, int c, int d, int h, int w) - { - return (n * W * H * D * C + c * W * H * D + d * W * H + h * W + w); - }; - - auto check_norm = [=](double res) { - if(res < 0.999f || res > 1.001) { - ASSERT_TRUE(res > 0.99f && res < 1.01); - } - }; - - if(axis == 0) { - for (int c = 0; c < C; ++c) { - for (int d = 0; d < D; ++d) { - for (int h = 0; h < H; ++h) { - for (int w = 0; w < W; ++w) { - double result = 0.0f; - - for (int n = 0; n < MB; ++n) { - result += src_data[off(n, c, d, h, w)]; - } - check_norm(result); - } - } - } - } - } - else if(axis == 1) { - for (int n = 0; n < MB; ++n) { - for (int d = 0; d < D; ++d) { - for (int h = 0; h < H; ++h) { - for (int w = 0; w < W; ++w) { - double result = 0.0f; - - for (int c = 0; c < C; ++c) { - result += src_data[off(n, c, d, h, w)];//dst_ptr[map_index(dst_pd, off(n, c, h, w))]; - } - - check_norm(result); - } - } - } - } - } - else if(axis == 2) { - for (int n = 0; n < MB; ++n) { - for (int c = 0; c < C; ++c) { - for (int h = 0; h < H; ++h) { - for (int w = 0; w < W; ++w) { - double result = 0.0f; - - for (int d = 0; d < D; ++d) { - result += src_data[off(n, c, d, h, w)];//dst_ptr[map_index(dst_pd, off(n, c, h, w))]; - } - - check_norm(result); - } - } - } - } - } - else if(axis == 3) { - for (int n = 0; n < MB; ++n) { - for (int c = 0; c < C; ++c) { - for (int d = 0; d < D; ++d) { - for (int w = 0; w < W; ++w) { - double result = 0.0f; - - for (int h = 0; h < H; ++h) { - result += src_data[off(n, c, d, h, w)];//dst_ptr[map_index(dst_pd, off(n, c, h, w))]; - } - - check_norm(result); - } - } - } - } - } - else if(axis == 4) { - for (int n = 0; n < MB; ++n) { - for (int c = 0; c < C; ++c) { - for (int d = 0; d < D; ++d) { - for (int h = 0; h < H; ++h) { - double result = 0.0f; - - for (int w = 0; w < W; ++w) { - result += src_data[off(n, c, d, h, w)];//dst_ptr[map_index(dst_pd, off(n, c, h, w))]; - } - - check_norm(result); - } - } - } - } - } -} - -class MKLDNNGraphSoftMaxTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(softmax_test_params p) { - std::string model = model_t; - - auto dims_size = p.dims.size(); - switch (dims_size) { - case 3: - REMOVE_LINE(model, "_IH_"); - case 4: - REMOVE_LINE(model, "_ID_"); - } - - REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - switch (dims_size) { - case 5: - REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]); - case 4: - REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]); - } - - REPLACE_WITH_NUM(model, "_AX_", p.axis); - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - softmax_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = 
getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::SoftMax) { - ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType); - } - } - - InferenceEngine::SizeVector dims_src = p.dims; - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - check_softmax_fwd(*output, p); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphSoftMaxTests, TestsSoftMax) {} - - -INSTANTIATE_TEST_CASE_P( - TestsSoftMax, MKLDNNGraphSoftMaxTests, - ::testing::Values( - softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{1, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{8, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// softmax_test_params{{8, 100, 81, 1}, 2, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{8, 100, 81, 1}, 2, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{1, 1, 1, 1}, 3, 1, MKLDNNPlugin::impl_desc_type::jit}, -// softmax_test_params{{1, 1, 1, 33}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{1, 1, 1, 33}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// softmax_test_params{{8, 1, 10, 81}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{8, 1, 10, 81}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{2, 5, 9, 10, 11}, 0, 2, 
MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 2, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 4, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - )); - -class MKLDNNGraphDynBatchSoftMaxTests: public MKLDNNGraphSoftMaxTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - softmax_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - InferenceEngine::SizeVector dims_src = p.dims; - size_t MB = dims_src[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.dims.size()) { - case 4: - layout = InferenceEngine::NCHW; - break; - case 5: - layout = InferenceEngine::NCDHW; - break; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkSoftmax = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::SoftMax; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkSoftmax); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkSoftmax); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchSoftMaxTests, TestsDynBatchSoftMax) {} - - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchSoftMax, MKLDNNGraphDynBatchSoftMaxTests, - ::testing::Values( - // TODO: rewrite to ngraph to have reshape functionality - // softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - // softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - // softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // softmax_test_params{{1, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::ref}, - // softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - // softmax_test_params{{1, 19, 128, 128}, 1, 2, 
MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - // softmax_test_params{{1, 1, 1, 1}, 3, 1, MKLDNNPlugin::impl_desc_type::ref}, - // softmax_test_params{{1, 1, 1, 33}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{8, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::ref}, -// softmax_test_params{{8, 100, 81, 1}, 2, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{8, 100, 81, 1}, 2, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, -// softmax_test_params{{1, 1, 1, 33}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, -// softmax_test_params{{8, 1, 10, 81}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{8, 1, 10, 81}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}, - softmax_test_params{{2, 5, 9, 10, 11}, 1, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 2, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 3, 2, MKLDNNPlugin::impl_desc_type::jit}, - softmax_test_params{{2, 5, 9, 10, 11}, 4, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp deleted file mode 100644 index 3a3f69ab56357f..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct split_test_params { - // Formats: NCHW, NCDHW - vector dims; - std::vector> outs; - - int axis; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - std::vector preferTypes; - - std::vector> comp; -}; - -template -void ref_split(InferenceEngine::TBlob &src, std::vector>& dsts, split_test_params& prm) { - const float * srcData = src.readOnly(); - - int outerSize = 1; - for (int i = 0; i < prm.axis; i++) - outerSize *= src.getTensorDesc().getDims()[i]; - - for (size_t osIdx = 0; osIdx < outerSize; osIdx++) { - for (size_t dstIdx = 0; dstIdx < dsts.size(); dstIdx++) { - float* dstData = dsts[dstIdx].data(); - int innerSize = dsts[dstIdx].size() / outerSize; - - for (size_t j = 0; j < innerSize; j++, srcData++) { - dstData[osIdx*innerSize + j] = *srcData; - } - } - } -} - -class MKLDNNGraphSplitTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - _OP_ - - - - - - - -)V0G0N"; - - std::string port_t = R"V0G0N( - - _N_ - _C_ - _D_ - _H_ - _W_ - -)V0G0N"; - -protected: - std::string getModel(split_test_params p) { - std::string model = model_t; - auto dims_size = p.dims.size(); - - switch (dims_size) { - case 3: - REMOVE_LINE(model, "_IH_"); - case 4: - REMOVE_LINE(model, "_ID_"); - } - REPLACE_WITH_NUM(model, "_IN_", p.dims[0]); - REPLACE_WITH_NUM(model, "_IC_", p.dims[1]); - REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]); - switch (dims_size) { - case 5: - REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]); - case 4: - 
REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]); - } - - std::string outPorts; - for (int idx = 0; idx < p.outs.size(); idx++) { - std::string outPort = port_t; - switch (dims_size) { - case 3: - REMOVE_LINE(outPort, "_H_"); - case 4: - REMOVE_LINE(outPort, "_D_"); - } - REPLACE_WITH_NUM(outPort, "_ID_", idx); - REPLACE_WITH_NUM(outPort, "_N_", p.outs[idx][0]); - REPLACE_WITH_NUM(outPort, "_C_", p.outs[idx][1]); - REPLACE_WITH_NUM(outPort, "_W_", p.outs[idx][dims_size - 1]); - switch (dims_size) { - case 5: - REPLACE_WITH_NUM(outPort, "_D_", p.outs[idx][dims_size - 3]); - case 4: - REPLACE_WITH_NUM(outPort, "_H_", p.outs[idx][dims_size - 2]); - } - - outPorts += outPort; - } - REPLACE_WITH_STR(model, "_OP_", outPorts); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - split_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Split) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - ASSERT_LE(3, nodes.size()); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(network.getInputsInfo().begin()->second->getTensorDesc()); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - auto srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - std::vector> dst_refs; - for (auto& item : out) { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - dst_refs.push_back(dst_ref); - } - - graph.Infer(srcs, outputBlobs); - - ref_split(*srcPtr, dst_refs, p); - - int ref_idx = 0; - for (auto& output : outputBlobs) { - compare(*output.second, dst_refs[ref_idx++], 0.0005f); - } - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphSplitTests, TestsSplit) {} - -INSTANTIATE_TEST_CASE_P( - TestsSplit, MKLDNNGraphSplitTests, - ::testing::Values( - split_test_params { - {1, 24, 2, 5}, - {{1, 16, 2, 5}, {1, 8, 2, 5}}, - 1, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {1, 20, 2, 5}, - {{1, 13, 2, 5}, {1, 7, 2, 5}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - 
split_test_params { - {1, 20, 2, 5}, - {{1, 10, 2, 5}, {1, 10, 2, 5}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {2, 20, 2, 5}, - {{2, 10, 2, 5}, {2, 10, 2, 5}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {2, 20, 2, 5}, - {{2, 15, 2, 5}, {2, 5, 2, 5}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {9, 11, 7, 5}, - {{3, 11, 7, 5}, {6, 11, 7, 5}}, - 0, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {3, 11, 7, 5}, - {{3, 11, 4, 5}, {3, 11, 3, 5}}, - 2, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {3, 11, 7, 5}, - {{3, 11, 7, 1}, {3, 11, 7, 4}}, - 3, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{1, 6, 7, 15}, {2, 6, 7, 15}, {1, 6, 7, 15}, {1, 6, 7, 15}}, - 0, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 1, 7, 15}, {5, 2, 7, 15}, {5, 1, 7, 15}, {5, 2, 7, 15}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 6, 3, 15}, {5, 6, 1, 15}, {5, 6, 2, 15}, {5, 6, 1, 15}}, - 2, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 6, 7, 5}, {5, 6, 7, 3}, {5, 6, 7, 4}, {5, 6, 7, 3}}, - 3, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 6, 7, 15}}, - 1, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}}, - split_test_params { - {1, 32, 16, 16, 16}, - {{1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}}, - 1, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}}, - split_test_params { - {1, 32, 16, 16, 16}, - {{1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}}, - 1, 6, MKLDNNPlugin::impl_desc_type::unknown, {}})); - -class MKLDNNGraphDynBatchSplitTests: public MKLDNNGraphSplitTests { -protected: - virtual void SetUp() { - try { - split_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.dims[0]; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(network.getInputsInfo().begin()->second->getTensorDesc()); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::OutputsDataMap out; - out 
= network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - auto it = out.begin(); - - std::pair item = *it; - - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output1->allocate(); - outputBlobs[item.first] = output1; - - item = *(++it); - InferenceEngine::TBlob::Ptr output2; - output2 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output2->allocate(); - outputBlobs[item.first] = output2; - - auto checkSplit = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Split; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkSplit); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkSplit); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchSplitTests, TestsDynBatchSplit) {} - -INSTANTIATE_TEST_CASE_P( - TestsDynBatchSplit, MKLDNNGraphDynBatchSplitTests, - ::testing::Values( - // TODO: rewrite to ngraph to have reshape functionality - // split_test_params { - // {1, 24, 2, 5}, - // {{1, 16, 2, 5}, {1, 8, 2, 5}}, - // 1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}, { - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout()); - // }, - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout()); - // }, - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(1).desc.getLayout()); - // } - // } - // }, - // TODO: rewrite to ngraph to have reshape functionality - // split_test_params { - // {1, 20, 2, 5}, - // {{1, 13, 2, 5}, {1, 7, 2, 5}}, - // 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, { - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout()); - // }, - // 
[](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout()); - // } - // } - // }, - // TODO: rewrite to ngraph to have reshape functionality - // split_test_params { - // {1, 20, 2, 5}, - // {{1, 10, 2, 5}, {1, 10, 2, 5}}, - // 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, { - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout()); - // }, - // [](MKLDNNPlugin::PrimitiveDescInfo impl) { - // ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - // ASSERT_EQ(1, impl.getConfig().inConfs.size()); - // ASSERT_EQ(2, impl.getConfig().outConfs.size()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - // ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout()); - // } - // } - // }, - split_test_params { - {2, 24, 2, 5}, - {{2, 16, 2, 5}, {2, 8, 2, 5}}, - 1, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - // TODO: rewrite to ngraph to have reshape functionality - // split_test_params { - // {1, 20, 2, 5}, - // {{1, 13, 2, 5}, {1, 7, 2, 5}}, - // 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - // }, - // TODO: rewrite to ngraph to have reshape functionality - // split_test_params { - // {1, 20, 2, 5}, - // {{1, 10, 2, 5}, {1, 10, 2, 5}}, - // 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - // }, - split_test_params { - {2, 20, 2, 5}, - {{2, 10, 2, 5}, {2, 10, 2, 5}}, - 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {2, 20, 2, 5}, - {{2, 15, 2, 5}, {2, 5, 2, 5}}, - 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {3, 11, 7, 5}, - {{3, 11, 4, 5}, {3, 11, 3, 5}}, - 2, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {3, 11, 7, 5}, - {{3, 11, 7, 1}, {3, 11, 7, 4}}, - 3, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 1, 7, 15}, {5, 2, 7, 15}, {5, 1, 7, 15}, {5, 2, 7, 15}}, - 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 6, 3, 15}, {5, 6, 1, 15}, {5, 6, 2, 15}, {5, 6, 1, 15}}, - 2, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref} - }, - split_test_params { - {5, 6, 7, 15}, - {{5, 6, 7, 5}, {5, 6, 7, 3}, {5, 6, 7, 4}, {5, 6, 7, 3}}, - 3, 3, 
MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp deleted file mode 100644 index 6e5bafe8787051..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - - -struct tile_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - size_t axis; - size_t tiles; - - size_t num_prim_desc; - - MKLDNNPlugin::impl_desc_type selectedType; - - std::vector> comp; -}; - - -template -void ref_tile(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst_blob, tile_test_params prm) { - const float* m_src = src.readOnly(); - int m_outer_dim = 1; - int m_inner_dim = 1; - - for (int i=0; i < prm.axis; i++ ) - m_outer_dim *= src.getTensorDesc().getDims()[i]; - for (int i=prm.axis; i < src.getTensorDesc().getDims().size(); i++ ) - m_inner_dim *= src.getTensorDesc().getDims()[i]; - - float* dst = dst_blob.data(); - - for (int i = 0; i < m_outer_dim; ++i) { - for (int t = 0; t < prm.tiles; ++t) { - memcpy(dst, m_src, m_inner_dim* sizeof(float)); - dst += m_inner_dim; - } - m_src += m_inner_dim; - } -} - -class MKLDNNGraphTileTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _ON_ - _OC_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(tile_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_OW_", (p.axis == 3) ? p.in.w*p.tiles : p.in.w); - REPLACE_WITH_NUM(model, "_OH_", (p.axis == 2) ? p.in.h*p.tiles : p.in.h); - REPLACE_WITH_NUM(model, "_OC_", (p.axis == 1) ? p.in.c*p.tiles : p.in.c); - REPLACE_WITH_NUM(model, "_ON_", (p.axis == 0) ? 
p.in.n*p.tiles : p.in.n); - - REPLACE_WITH_NUM(model, "_AX_", p.axis); - REPLACE_WITH_NUM(model, "_TL_", p.tiles); - - return model; - } - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - tile_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - auto& nodes = graph.getNodes(); - for (int i = 0; i < nodes.size(); i++) { - if (nodes[i]->getType() == MKLDNNPlugin::Tile) { - ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size()); - for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) { - p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j)); - } - ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor()); - ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType()); - } - } - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_tile(*srcPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphTileTests, TestsTile) {} - - -INSTANTIATE_TEST_CASE_P( - TestsTile, MKLDNNGraphTileTests, - ::testing::Values( - tile_test_params{ - {1, 128, 1, 1}, 3, 24, 1, MKLDNNPlugin::impl_desc_type::unknown, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }})); - -class MKLDNNGraphDynBatchTileTests: public MKLDNNGraphTileTests { -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - tile_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - size_t MB = p.in.n; - if (MB < 2) - MB = 2; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - ASSERT_EQ(nullptr, network.getFunction()); - auto implNet = static_cast(&((InferenceEngine::ICNNNetwork&)network)); - InferenceEngine::ResponseDesc resp; - InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp); - ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg; - - 
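- // setBatchSizeReshape(MB) above only fixes the largest batch the graph will be built for;
- // the KEY_DYN_BATCH_ENABLED property set just below is what allows checkDynBatch() at the
- // end of the test to infer with any effective batch from 1 up to MB on the same compiled
- // graph, without rebuilding it.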
MKLDNNGraphTestClass graph; - graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}}); - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - auto checkTile = [](const MKLDNNPlugin::MKLDNNNodePtr& node) { - return node->getType() == MKLDNNPlugin::Tile; - }; - - graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkTile); - graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkTile); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphDynBatchTileTests, TestsDynBatchTile) {} - - -// TODO: rewrite to ngraph to have reshape functionality -INSTANTIATE_TEST_CASE_P( - DISABLED_TestsDynBatchTile, MKLDNNGraphDynBatchTileTests, - ::testing::Values( - tile_test_params{ - {1, 128, 1, 1}, 3, 24, 1, MKLDNNPlugin::impl_desc_type::unknown, { - [](MKLDNNPlugin::PrimitiveDescInfo impl) { - ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType()); - ASSERT_EQ(1, impl.getConfig().inConfs.size()); - ASSERT_EQ(1, impl.getConfig().outConfs.size()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout()); - } - }})); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_concat_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_concat_tests.cpp deleted file mode 100644 index a62cb9cf0b8d9c..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_concat_tests.cpp +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include "ir_gen_helper.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; -using namespace single_layer_tests; -using namespace InferenceEngine; - -struct concat_params { - size_t axis; -}; - -struct conv_concat_params { - // Formats: NCHW, NCDHW - std::vector in; - - CommonTestUtils::conv_common_params conv; - concat_params concat; - - std::vector preferTypes; -}; - -class MKLDNNConvConcatTests: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - __INP_DIMS__ - - - - - _IN_ - _OC_ - __CONV_OUT_DIMS__ - - - - - - - - - - - _IN_ - _OC_ - __CONV_OUT_DIMS__ - - - __INP_DIMS__ - - - - - __CONCAT_OUT_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - - -)V0G0N"; - - std::string getModel(conv_concat_params p) { - std::string model = layers_t; - - std::string s_dims; - for (auto& dim : 
p.in) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__INP_DIMS__", s_dims); - - s_dims = ""; - size_t conv_axis_val = p.in[p.concat.axis]; - int k_len = p.conv.kernel.size(); - for (size_t i = 2lu; i < p.in.size(); i++) { - size_t inx = k_len - i + 1; - size_t dim = (p.in[i] + 2lu * p.conv.pads_begin[inx] - p.conv.kernel[inx]) / p.conv.stride[inx] + 1lu; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - if (i == p.concat.axis) { - conv_axis_val = dim; - } - } - REPLACE_WITH_STR(model, "__CONV_OUT_DIMS__", s_dims); - - s_dims = ""; - for (size_t i = 0lu; i < p.in.size(); i++) { - size_t val = p.in[i]; - if (i == p.concat.axis) { - val += conv_axis_val; - } - s_dims += "\n "; - s_dims += std::to_string(val) + ""; - } - REPLACE_WITH_STR(model, "__CONCAT_OUT_DIMS__", s_dims); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.conv.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.conv.stride); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.conv.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.conv.pads_end); - REPLACE_WITH_NUM(model, "_GC_", p.conv.group); - REPLACE_WITH_NUM(model, "_OC_", p.conv.out_c); - REPLACE_WITH_NUM(model, "_IN_", p.in[0]); - REPLACE_WITH_NUM(model, "__AXIS__", p.concat.axis); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - model = IRTemplateGenerator::getIRTemplate("convolution_Concat", p.in, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - conv_concat_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t blob_size = p.conv.out_c * p.in[1] / p.conv.group; - for (size_t i = 0; i < p.conv.kernel.size(); i++) { - blob_size *= p.conv.kernel[i]; - } - blob_size = (blob_size + p.conv.out_c); - InferenceEngine::SizeVector dims_weights = { blob_size }; - - std::vector blob_to_model; - InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, dims_weights, InferenceEngine::C }); - weights->allocate(); - fill_data(weights->buffer().as(), weights->size()); - blob_to_model.push_back(weights); - - InferenceEngine::Blob::Ptr bias = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {p.conv.out_c}, InferenceEngine::C }); - bias->allocate(); - fill_data(bias->buffer().as(), bias->size()); - blob_to_model.push_back(bias); - - size_t total_size_in_bytes = 0; - for (InferenceEngine::Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize(); - - InferenceEngine::TBlob::Ptr model_blob = - InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U8, {total_size_in_bytes}, InferenceEngine::C }); - model_blob->allocate(); - uint8_t* model_blob_ptr = model_blob->buffer().as(); - for (InferenceEngine::Blob::Ptr blb : blob_to_model) { - memcpy(model_blob_ptr, blb->buffer().as(), blb->byteSize()); - model_blob_ptr += blb->byteSize(); - } - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = p.in; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob( - 
{InferenceEngine::Precision::FP32, dims_src, InferenceEngine::TensorDesc::getLayoutByDims(p.in)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - details::CNNNetworkIterator l(network), end; - for ( ; l != end; ++l) { - (*l)->params["PrimitivesPriority"] = "cpu:ref,cpu:ref_any"; - } - MKLDNNGraphTestClass graph2; - graph2.CreateGraph(network); - - InferenceEngine::BlobMap outputBlobs2; - - InferenceEngine::TBlob::Ptr output2; - output2 = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output2->allocate(); - outputBlobs2[item.first] = output2; - - // The second run must use graph2 (the reference-priority build); inferring on the first - // graph again would compare an output with itself and make the check vacuous. - graph2.Infer(srcs, outputBlobs2); - - compare(*output, *output2, 0.0005f); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNConvConcatTests, TestsConvConcat) {} - -INSTANTIATE_TEST_CASE_P( - TestsConvConcat, MKLDNNConvConcatTests, - ::testing::Values( - conv_concat_params{{1, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}}, - conv_concat_params{{2, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}}, - conv_concat_params{{1, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}}, - conv_concat_params{{2, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}}, - conv_concat_params{{1, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - conv_concat_params{{2, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - conv_concat_params{{1, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - conv_concat_params{{2, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}} - )); - diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp deleted file mode 100644 index 19b7e52d0394f8..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "common_test_utils/data_utils.hpp" - #include "single_layer_common.hpp" - #include "tests_common.hpp" - #include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -constexpr auto depthwise_scale_shift = mkldnn::algorithm::depthwise_scale_shift; -constexpr auto depthwise_prelu = mkldnn::algorithm::depthwise_prelu; - -struct conv_params { - size_t krn_w; - size_t krn_h; - 
size_t str_w; - size_t str_h; - size_t pad_w; - size_t pad_h; - size_t out_c; - size_t grp_c; -}; - -struct conv_depthwise_fusing_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - conv_params conv; - algorithm depthwise_alg; - bool isBroadcast; -}; - -template -void ref_conv_depthwise(const InferenceEngine::TBlob &src, const data_t *weights, - InferenceEngine::TBlob &dst, conv_depthwise_fusing_test_params& prm) { - size_t KW = prm.conv.krn_w; - size_t KH = prm.conv.krn_h; - size_t GC = prm.conv.grp_c; - - size_t IC = src.getTensorDesc().getDims()[1]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IW = src.getTensorDesc().getDims()[3]; - - size_t OW = (IW + 2 * prm.conv.pad_w - prm.conv.krn_w) / prm.conv.str_w + 1; - size_t OH = (IH + 2 * prm.conv.pad_h - prm.conv.krn_h) / prm.conv.str_h + 1; - size_t OC = prm.conv.out_c; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + KW * KH * OC * IC / GC; - data_t *dst_data = dst.data(); - - const data_t *d_weights_data = bias_data + OC; - const data_t *d_bias_data = (prm.isBroadcast) ? d_weights_data + 1 : d_weights_data + OC; - - for (uint32_t g = 0; g < GC; g++) { - for (uint32_t oc = 0; oc < OC / GC; oc++) { - for (uint32_t oh = 0; oh < OH; oh++) { - for (uint32_t ow = 0; ow < OW; ow++) { - size_t bidx = g * OC / GC + oc; - size_t oidx = g * OC / GC * OH * OW - + oc * OH * OW + oh * OW + ow; - dst_data[oidx] = bias_data[bidx]; - - for (size_t ic = 0; ic < IC / GC; ic++) { - for (size_t kh = 0; kh < KH; kh++) { - for (size_t kw = 0; kw < KW; kw++) { - int32_t iw = ow * prm.conv.str_w - prm.conv.pad_w + kw; - int32_t ih = oh * prm.conv.str_h - prm.conv.pad_h + kh; - if (iw < 0 || iw >= (int32_t)IW || ih < 0 - || ih >= (int32_t)IH) - continue; - size_t iidx = g * IC / GC * IH * IW - + ic * IH * IW + ih * IW + iw; - size_t widx = g * OC / GC * IC / GC * KH * KW - + oc * IC / GC * KH * KW - + ic * KH * KW + kh * KW + kw; - - dst_data[oidx] += src_data[iidx] * weights_data[widx]; - } - } - } - - - switch(prm.depthwise_alg) { - case depthwise_scale_shift: - dst_data[oidx] = d_weights_data[prm.isBroadcast ? 0 : bidx] * dst_data[oidx] + d_bias_data[prm.isBroadcast ? 0 : bidx]; - break; - case depthwise_prelu: - dst_data[oidx] = dst_data[oidx] >= 0 ? dst_data[oidx] : d_weights_data[prm.isBroadcast ? 
0 : bidx] * dst_data[oidx]; - break; - default: - assert("Unsupported depthwise algorithm"); - } - } - } - } - } -} - -class MKLDNNGraphConvDepthwiseFusingTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _C_OC_ - _C_OH_ - _C_OW_ - - - - - - - - - - - _IN_ - _C_OC_ - _C_OH_ - _C_OW_ - - - - - _IN_ - _C_OC_ - _C_OH_ - _C_OW_ - - - - - - - - - -)V0G0N"; - - std::string getModel(conv_depthwise_fusing_test_params p) { - std::string model = model_t; - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_C_KW_", p.conv.krn_w); - REPLACE_WITH_NUM(model, "_C_KH_", p.conv.krn_h); - REPLACE_WITH_NUM(model, "_C_SW_", p.conv.str_w); - REPLACE_WITH_NUM(model, "_C_SH_", p.conv.str_h); - REPLACE_WITH_NUM(model, "_C_PW_", p.conv.pad_w); - REPLACE_WITH_NUM(model, "_C_PH_", p.conv.pad_h); - REPLACE_WITH_NUM(model, "_C_GC_", p.conv.grp_c); - REPLACE_WITH_NUM(model, "_C_OC_", p.conv.out_c); - size_t c_oh = (p.in.h + 2 * p.conv.pad_h - p.conv.krn_h) / p.conv.str_h + 1; - size_t c_ow = (p.in.w + 2 * p.conv.pad_w - p.conv.krn_w) / p.conv.str_w + 1; - REPLACE_WITH_NUM(model, "_C_OH_", c_oh); - REPLACE_WITH_NUM(model, "_C_OW_", c_ow); - - size_t conv_w_data_size = (p.conv.krn_w * p.conv.krn_h * p.conv.out_c * p.in.c / p.conv.grp_c) * sizeof(float); - size_t conv_b_data_size = p.conv.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_C_S1_", conv_w_data_size); - REPLACE_WITH_NUM(model, "_C_S2_", conv_b_data_size); - - if (p.depthwise_alg == depthwise_scale_shift) { - REPLACE_WITH_STR(model, "_LT_", "ScaleShift"); - REPLACE_WITH_STR(model, "_P_NAME_", "broadcast"); - REPLACE_WITH_NUM(model, "_P_VAL_", p.isBroadcast ? 1 : 0); - - } - else if (p.depthwise_alg == depthwise_prelu) { - REPLACE_WITH_STR(model, "_LT_", "PReLU"); - REPLACE_WITH_STR(model, "_P_NAME_", "channel_shared"); - REPLACE_WITH_NUM(model, "_P_VAL_", p.isBroadcast ? 1 : 0); - } - - size_t array_size = p.isBroadcast ? 1 : p.conv.out_c; - size_t depthwise_w_data_size = array_size * sizeof(float); - size_t depthwise_b_data_size = array_size * sizeof(float); - REPLACE_WITH_NUM(model, "_D_S0_", conv_w_data_size + conv_b_data_size); - REPLACE_WITH_NUM(model, "_D_S1_", depthwise_w_data_size); - REPLACE_WITH_NUM(model, "_D_S2_", conv_w_data_size + conv_b_data_size + depthwise_w_data_size); - REPLACE_WITH_NUM(model, "_D_S3_", depthwise_b_data_size); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - conv_depthwise_fusing_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t conv_w_size = p.conv.krn_w * p.conv.krn_h * p.conv.out_c * p.in.c / p.conv.grp_c + p.conv.out_c; // conv weights + biases - - size_t array_size = p.isBroadcast ? 
1 : p.conv.out_c; - size_t depthwise_w_size = array_size + array_size; // depthwise weights + biases - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {(conv_w_size+depthwise_w_size) * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - CommonTestUtils::fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 5, 10, 0.5); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - auto& nodes = graph.getNodes(); - nodes = graph.getNodes(); - if (p.in.c == 3) { - ASSERT_EQ(nodes.size(), 3); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Convolution); - ASSERT_TRUE(nodes[1].get()->isFusedWith(MKLDNNPlugin::Type::Eltwise)); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Output); - } else { - ASSERT_EQ(nodes.size(), 5); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Convolution); - ASSERT_TRUE(nodes[2].get()->isFusedWith(MKLDNNPlugin::Type::Eltwise)); - ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output); - } - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - size_t c1_oh = (p.in.h + 2 * p.conv.pad_h - p.conv.krn_h) / p.conv.str_h + 1; - size_t c1_ow = (p.in.w + 2 * p.conv.pad_w - p.conv.krn_w) / p.conv.str_w + 1; - InferenceEngine::TBlob dst_ref(InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {c1_ow, c1_oh, p.conv.out_c, p.in.n}, InferenceEngine::NCHW)); - dst_ref.allocate(); - - ref_conv_depthwise(*srcPtr, (const float *)weights->buffer(), dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNGraphConvDepthwiseFusingTests, TestsConvDepthwiseFusing) {} - -INSTANTIATE_TEST_CASE_P( - TestsConvDepthwiseFusing, MKLDNNGraphConvDepthwiseFusingTests, - ::testing::Values( - conv_depthwise_fusing_test_params{{1, 64, 5, 5}, {1, 1, 1, 1, 0, 0, 48, 1}, depthwise_scale_shift, false}, - conv_depthwise_fusing_test_params{{1, 64, 5, 5}, {1, 1, 1, 1, 0, 0, 48, 1}, depthwise_prelu, false}, - conv_depthwise_fusing_test_params{{1, 64, 5, 5}, {1, 1, 1, 1, 0, 0, 48, 1}, depthwise_scale_shift, true}, - conv_depthwise_fusing_test_params{{1, 64, 5, 5}, {1, 1, 1, 1, 0, 0, 48, 1}, depthwise_prelu, true}, - 
conv_depthwise_fusing_test_params{{1, 48, 9, 9}, {3, 3, 1, 1, 1, 1, 64, 1}, depthwise_scale_shift, false}, - conv_depthwise_fusing_test_params{{1, 48, 9, 9}, {3, 3, 1, 1, 1, 1, 64, 1}, depthwise_prelu, false}, - conv_depthwise_fusing_test_params{{1, 48, 9, 9}, {3, 3, 1, 1, 1, 1, 64, 1}, depthwise_scale_shift, true}, - conv_depthwise_fusing_test_params{{1, 48, 9, 9}, {3, 3, 1, 1, 1, 1, 64, 1}, depthwise_prelu, true}, - conv_depthwise_fusing_test_params{{1, 48, 11, 11}, {3, 3, 1, 1, 1, 1, 48, 48}, depthwise_scale_shift, false}, - conv_depthwise_fusing_test_params{{1, 48, 11, 11}, {3, 3, 1, 1, 1, 1, 48, 48}, depthwise_prelu, false}, - conv_depthwise_fusing_test_params{{1, 48, 11, 11}, {3, 3, 1, 1, 1, 1, 48, 48}, depthwise_scale_shift, true}, - conv_depthwise_fusing_test_params{{1, 48, 11, 11}, {3, 3, 1, 1, 1, 1, 48, 48}, depthwise_prelu, true}, - conv_depthwise_fusing_test_params{{1, 3, 11, 11}, {3, 3, 1, 1, 1, 1, 3, 3}, depthwise_scale_shift, false}, - conv_depthwise_fusing_test_params{{1, 3, 11, 11}, {3, 3, 1, 1, 1, 1, 3, 3}, depthwise_prelu, false}, - conv_depthwise_fusing_test_params{{1, 3, 11, 11}, {3, 3, 1, 1, 1, 1, 3, 3}, depthwise_scale_shift, true}, - conv_depthwise_fusing_test_params{{1, 3, 11, 11}, {3, 3, 1, 1, 1, 1, 3, 3}, depthwise_prelu, true} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_deconv_concat_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_deconv_concat_tests.cpp deleted file mode 100644 index 90892d6028969d..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_deconv_concat_tests.cpp +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include "ir_gen_helper.hpp" -#include -#include "common_test_utils/common_layers_params.hpp" -#include "common_test_utils/common_utils.hpp" - -using namespace ::testing; -using namespace std; -using namespace mkldnn; -using namespace single_layer_tests; - -struct concat_params { - size_t axis; -}; - -struct deconv_concat_params { - // Formats: NCHW, NCDHW - std::vector in; - - CommonTestUtils::conv_common_params deconv; - concat_params concat; - - std::vector preferTypes; -}; - -void ref_deconv_common(const InferenceEngine::Blob &src, - InferenceEngine::Blob &dst, - const float *weights_data, - size_t weights_size, - const float *bias_data, - size_t bias_size, - const CommonTestUtils::conv_common_params &prm) { - auto dims_size = src.getTensorDesc().getDims().size(); - - size_t G = prm.group; - size_t KW = prm.kernel[InferenceEngine::X_AXIS]; - size_t KH = prm.kernel[InferenceEngine::Y_AXIS]; - size_t KD = prm.kernel.size() > InferenceEngine::Z_AXIS ? prm.kernel[InferenceEngine::Z_AXIS] : 1u; - - size_t PW = prm.pads_begin[InferenceEngine::X_AXIS]; - size_t PH = prm.pads_begin[InferenceEngine::Y_AXIS]; - size_t PD = prm.pads_begin.size() > InferenceEngine::Z_AXIS ? prm.pads_begin[InferenceEngine::Z_AXIS] : 0u; - - size_t SW = prm.stride[InferenceEngine::X_AXIS]; - size_t SH = prm.stride[InferenceEngine::Y_AXIS]; - size_t SD = prm.stride.size() > InferenceEngine::Z_AXIS ? prm.stride[InferenceEngine::Z_AXIS] : 1u; - - size_t IW = src.getTensorDesc().getDims()[dims_size - 1]; - size_t IH = src.getTensorDesc().getDims()[dims_size - 2]; - size_t ID = dims_size == 5 ? 
src.getTensorDesc().getDims()[dims_size - 3] : 1u; - size_t IC = src.getTensorDesc().getDims()[1]; - size_t MB = src.getTensorDesc().getDims()[0]; - - size_t OC = prm.out_c; - - size_t OW = SW * (IW - 1lu) + KW - 2lu * PW; - size_t OH = SH * (IH - 1lu) + KH - 2lu * PH; - size_t OD = dims_size == 5 ? (SD * (ID - 1) + KD - 2 * PD) : 1u; - - const float *src_data = src.cbuffer().as(); - float *dst_data = dst.buffer().as(); - - size_t CS1 = OH * OW; - size_t CS2 = CS1 * OD; - size_t CS3 = CS2 * OC; - - size_t CI1 = IH * IW; - size_t CI2 = CI1 * ID; - size_t CI3 = CI2 * IC; - - size_t OC_G = OC / G; - size_t IC_G = IC / G; - - size_t CK1 = KH * KW; - size_t CK2 = CK1 * KD; - size_t CK3 = CK2 * OC_G; - size_t CK4 = CK3 * IC_G; - - for (size_t g = 0lu; g < G; ++g) { - size_t g_OC_G = g * OC_G; - size_t g_IC_G = g * IC_G; - size_t g_CK4 = g * CK4; - for (size_t mb = 0lu; mb < MB; ++mb) { - size_t mb_CS3 = mb * CS3; - size_t mb_CI3 = mb * CI3; - for (size_t oc = 0lu; oc < OC_G; ++oc) { - size_t g_OC_G_oc = g_OC_G + oc; - size_t mb_CS3_g_OC_G_oc_CS2 = mb_CS3 + g_OC_G_oc * CS2; - size_t g_CK4_oc_CK2 = g_CK4 + oc * CK2; - for (size_t od = 0lu; od < OD; ++od) { - size_t mb_CS3_g_OC_G_oc_CS2_od_CS1 = mb_CS3_g_OC_G_oc_CS2 + od * CS1; - size_t od_PD = od + PD; - for (size_t oh = 0lu; oh < OH; ++oh) { - size_t mb_CS3_g_OC_G_oc_CS2_od_CS1_oh_OW = mb_CS3_g_OC_G_oc_CS2_od_CS1 + oh * OW; - size_t oh_PH = oh + PH; - for (size_t ow = 0lu; ow < OW; ++ow) { - size_t didx = mb_CS3_g_OC_G_oc_CS2_od_CS1_oh_OW + ow; - size_t ow_PW = ow + PW; - - dst_data[didx] = float(0); - if (prm.with_bias) dst_data[didx] += bias_data[g_OC_G_oc]; - - for (size_t ic = 0lu; ic < IC_G; ic++) { - size_t mb_CI3_g_IC_G_ic_CI2 = mb_CI3 + (g_IC_G + ic) * CI2; - size_t g_CK4_oc_CK2_ic_CK3 = g_CK4_oc_CK2 + ic * CK3; - for (int kd = 0lu; kd < KD; kd++) { - if (od_PD < kd) continue; - size_t id = od_PD - kd; - if (id % SD != 0) continue; - id /= SD; - if (id >= ID) continue; - size_t mb_CI3_g_IC_G_ic_CI2_id_CI1 = mb_CI3_g_IC_G_ic_CI2 + id * CI1; - size_t g_CK4_oc_CK2_ic_CK3_kd_CK1 = g_CK4_oc_CK2_ic_CK3 + kd * CK1; - for (size_t kh = 0lu; kh < KH; kh++) { - if (oh_PH < kh) continue; - size_t ih = oh_PH - kh; - if (ih % SH != 0) continue; - ih /= SH; - if (ih >= IH) continue; - size_t mb_CI3_g_IC_G_ic_CI2_id_CI1_ih_IW = mb_CI3_g_IC_G_ic_CI2_id_CI1 + ih * IW; - size_t g_CK4_oc_CK2_ic_CK3_kd_CK1_kh_KW = g_CK4_oc_CK2_ic_CK3_kd_CK1 + kh * KW; - for (size_t kw = 0lu; kw < KW; kw++) { - if (ow_PW < kw) continue; - size_t iw = ow_PW - kw; - if (iw % SW != 0) continue; - iw /= SW; - if (iw >= IW) continue; - - size_t sidx = mb_CI3_g_IC_G_ic_CI2_id_CI1_ih_IW + iw; - - size_t widx = g_CK4_oc_CK2_ic_CK3_kd_CK1_kh_KW + kw; - - dst_data[didx] += src_data[sidx] * weights_data[widx]; - } - } - } - } - } - } - } - } - } - } -} - -class MKLDNNDeconvConcatTests: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - __INP_DIMS__ - - - - - _IN_ - _OC_ - __DECONV_OUT_DIMS__ - - - - - - - - - - - _IN_ - _OC_ - __DECONV_OUT_DIMS__ - - - __INP_DIMS__ - - - - - __CONCAT_OUT_DIMS__ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - - - -)V0G0N"; - - std::string getModel(deconv_concat_params p) { - std::string model = layers_t; - - std::string s_dims; - for (auto& dim : p.in) { - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__INP_DIMS__", s_dims); - - s_dims = ""; - size_t deconv_axis_val = p.in[p.concat.axis]; - int k_len = p.deconv.kernel.size(); - for (size_t i = 2lu; i < p.in.size(); 
i++) { - size_t inx = k_len - i + 1; - size_t dim = p.deconv.stride[inx] * (p.in[i] - 1) + p.deconv.kernel[inx] - 2 * p.deconv.pads_begin[inx]; - s_dims += "\n "; - s_dims += std::to_string(dim) + ""; - if (i == p.concat.axis) { - deconv_axis_val = dim; - } - } - REPLACE_WITH_STR(model, "__DECONV_OUT_DIMS__", s_dims); - - s_dims = ""; - for (size_t i = 0lu; i < p.in.size(); i++) { - size_t val = p.in[i]; - if (i == p.concat.axis) { - val += deconv_axis_val; - } - s_dims += "\n "; - s_dims += std::to_string(val) + ""; - } - REPLACE_WITH_STR(model, "__CONCAT_OUT_DIMS__", s_dims); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.deconv.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.deconv.stride); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.deconv.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.deconv.pads_end); - REPLACE_WITH_NUM(model, "_GC_", p.deconv.group); - REPLACE_WITH_NUM(model, "_OC_", p.deconv.out_c); - REPLACE_WITH_NUM(model, "_IN_", p.in[0]); - REPLACE_WITH_NUM(model, "__AXIS__", p.concat.axis); - - std::string impls; - for (const auto& preferType : p.preferTypes) { - if (!impls.empty()) - impls += ","; - impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType); - } - REPLACE_WITH_STR(model, "_IMPLS_", impls); - - model = IRTemplateGenerator::getIRTemplate("Deconvolution_Concat", p.in, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - deconv_concat_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t blob_size = p.deconv.out_c * (p.in[1] / p.deconv.group); - for (int i = 0 ; i < p.deconv.kernel.size(); i++) { - blob_size *= p.deconv.kernel[i]; - } - InferenceEngine::SizeVector dims_weights = { blob_size }; - - std::vector blob_to_model; - InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, dims_weights, InferenceEngine::C }); - weights->allocate(); - fill_data(weights->buffer().as(), weights->size()); - blob_to_model.push_back(weights); - - InferenceEngine::Blob::Ptr bias = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, {p.deconv.out_c}, InferenceEngine::C }); - bias->allocate(); - fill_data(bias->buffer().as(), bias->size()); - blob_to_model.push_back(bias); - - size_t total_size_in_bytes = 0; - for (InferenceEngine::Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize(); - - InferenceEngine::TBlob::Ptr model_blob = - InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U8, {total_size_in_bytes}, InferenceEngine::C }); - model_blob->allocate(); - uint8_t* model_blob_ptr = model_blob->buffer().as(); - for (InferenceEngine::Blob::Ptr blb : blob_to_model) { - memcpy(model_blob_ptr, blb->buffer().as(), blb->byteSize()); - model_blob_ptr += blb->byteSize(); - } - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = p.in; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob( - {InferenceEngine::Precision::FP32, dims_src, InferenceEngine::TensorDesc::getLayoutByDims(p.in)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::TBlob* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - 
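- // Both getModel() above and ref_deconv_common() compute spatial output dims from the
- // deconvolution relation O = S * (I - 1) + K - 2 * P per axis; e.g. for the
- // {1, 256, 4, 4} case below (1x1 kernel, stride 1, no padding) each axis keeps
- // O = 1 * (4 - 1) + 1 - 0 = 4, so only the concat axis grows in the final output.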
InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - // Compare with reference - - auto deconv = CommonTestUtils::getLayerByName(network, "Deconvolution_1"); - InferenceEngine::TBlob deconv_ref(deconv->outData[0]->getTensorDesc()); - deconv_ref.allocate(); - - ref_deconv_common(*srcPtr, deconv_ref, weights->buffer().as(), weights->size(), - bias->buffer().as(), bias->size(), p.deconv); - - float *src1_ptr = deconv_ref.buffer(); - size_t src1_size = deconv_ref.size(); - float *src2_ptr = src->buffer(); - size_t src2_size = src->size(); - float *dst_ptr = output->buffer(); - size_t dst_size = output->size(); - - int len1 = 1, len2 = 1; - for (int dim = p.concat.axis; dim < output->getTensorDesc().getDims().size(); dim++) { - len1 *= deconv->outData[0]->getTensorDesc().getDims()[dim]; - len2 *= src->getTensorDesc().getDims()[dim]; - } - - size_t index1 = 0, index2 = 0, index = 0; - float max_diff = 0.0001f; - for (size_t cycle = 0lu; cycle < p.concat.axis; cycle ++) { - for (int i1 = 0; i1 < len1; i1++) { - if (fabs(src1_ptr[index1] - dst_ptr[index]) > max_diff) - { - FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index]; - } - index1++; index++; - } - for (int i2 = 0; i2 < len2; i2++) { - if (fabs(src2_ptr[index2] - dst_ptr[index]) > max_diff) - { - FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index]; - } - index2++; index++; - } - } - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(MKLDNNDeconvConcatTests, TestsDwConvFusing) {} - -INSTANTIATE_TEST_CASE_P( - TestsDwConvFusing, MKLDNNDeconvConcatTests, - ::testing::Values( - deconv_concat_params{{1, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - deconv_concat_params{{2, 256, 4, 4}, - { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - deconv_concat_params{{1, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}}, - deconv_concat_params{{2, 256, 4, 4, 4}, - { {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, "", 1, 256, false }, - {1}, {MKLDNNPlugin::impl_desc_type::gemm_blas}} - )); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp deleted file mode 100644 index ff3706c1d6c241..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_graph.hpp" - -#include "single_layer_common.hpp" -#include "tests_common.hpp" -#include -#include - -using namespace ::testing; -using namespace std; -using namespace mkldnn; - -struct conv_params { - size_t krn_w; - size_t krn_h; - size_t str_w; - size_t str_h; - size_t pad_w; - size_t pad_h; - size_t out_c; - size_t 
grp_c; -}; - -struct dw_conv_fusing_test_params { - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - conv_params conv1; - conv_params conv2; -}; - -template -void ref_conv(const InferenceEngine::TBlob &src, const data_t *weights, const size_t weightsSize, - InferenceEngine::TBlob &dst, conv_params prm, float negative_slope) { - size_t KW = prm.krn_w; - size_t KH = prm.krn_h; - size_t GC = prm.grp_c; - - size_t IC = src.getTensorDesc().getDims()[1]; - size_t IH = src.getTensorDesc().getDims()[2]; - size_t IW = src.getTensorDesc().getDims()[3]; - - size_t OW = (IW + 2 * prm.pad_w - prm.krn_w) / prm.str_w + 1; - size_t OH = (IH + 2 * prm.pad_h - prm.krn_h) / prm.str_h + 1; - size_t OC = prm.out_c; - - const data_t *src_data = src.readOnly(); - const data_t *weights_data = weights; - const data_t *bias_data = weights_data + KW * KH * OC * IC / GC; - data_t *dst_data = dst.data(); - - IE_ASSERT(KW * KH * OC * IC / GC + OC == weightsSize); - - for (uint32_t g = 0; g < GC; g++) { - for (uint32_t oc = 0; oc < OC / GC; oc++) { - for (uint32_t oh = 0; oh < OH; oh++) { - for (uint32_t ow = 0; ow < OW; ow++) { - size_t oidx = g * OC / GC * OH * OW - + oc * OH * OW + oh * OW + ow; - dst_data[oidx] = bias_data[g * OC / GC + oc]; - - for (size_t ic = 0; ic < IC / GC; ic++) { - for (size_t kh = 0; kh < KH; kh++) { - for (size_t kw = 0; kw < KW; kw++) { - int32_t iw = ow * prm.str_w - prm.pad_w + kw; - int32_t ih = oh * prm.str_h - prm.pad_h + kh; - if (iw < 0 || iw >= (int32_t)IW || ih < 0 - || ih >= (int32_t)IH) - continue; - size_t iidx = g * IC / GC * IH * IW - + ic * IH * IW + ih * IW + iw; - size_t widx = g * OC / GC * IC / GC * KH * KW - + oc * IC / GC * KH * KW - + ic * KH * KW + kh * KW + kw; - - dst_data[oidx] += src_data[iidx] * weights_data[widx]; - } - } - } - - if (dst_data[oidx] < 0) - dst_data[oidx] *= negative_slope; - } - } - } - } -} - -class MKLDNNGraphDWConvFusingTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _C1_OC_ - _C1_OH_ - _C1_OW_ - - - - - - - - _IN_ - _C1_OC_ - _C1_OH_ - _C1_OW_ - - - - - _IN_ - _C1_OC_ - _C1_OH_ - _C1_OW_ - - - - - - - - - - - _IN_ - _C1_OC_ - _C1_OH_ - _C1_OW_ - - - - - _IN_ - _C2_OC_ - _C2_OH_ - _C2_OW_ - - - - - - - - _IN_ - _C2_OC_ - _C2_OH_ - _C2_OW_ - - - - - _IN_ - _C2_OC_ - _C2_OH_ - _C2_OW_ - - - - - - - - - - - -)V0G0N"; - - std::string getModel(dw_conv_fusing_test_params p) { - std::string model = model_t; - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - - REPLACE_WITH_NUM(model, "_C1_KW_", p.conv1.krn_w); - REPLACE_WITH_NUM(model, "_C1_KH_", p.conv1.krn_h); - REPLACE_WITH_NUM(model, "_C1_SW_", p.conv1.str_w); - REPLACE_WITH_NUM(model, "_C1_SH_", p.conv1.str_h); - REPLACE_WITH_NUM(model, "_C1_PW_", p.conv1.pad_w); - REPLACE_WITH_NUM(model, "_C1_PH_", p.conv1.pad_h); - REPLACE_WITH_NUM(model, "_C1_GC_", p.conv1.grp_c); - REPLACE_WITH_NUM(model, "_C1_OC_", p.conv1.out_c); - size_t c1_oh = (p.in.h + 2 * p.conv1.pad_h - p.conv1.krn_h) / p.conv1.str_h + 1; - size_t c1_ow = (p.in.w + 2 * p.conv1.pad_w - p.conv1.krn_w) / p.conv1.str_w + 1; - REPLACE_WITH_NUM(model, "_C1_OH_", c1_oh); - REPLACE_WITH_NUM(model, "_C1_OW_", c1_ow); - - size_t conv1_w_data_size = (p.conv1.krn_w * p.conv1.krn_h * p.conv1.out_c * p.in.c / p.conv1.grp_c) * sizeof(float); - size_t 
conv1_b_data_size = p.conv1.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_C1_S1_", conv1_w_data_size); - REPLACE_WITH_NUM(model, "_C1_S2_", conv1_b_data_size); - - REPLACE_WITH_NUM(model, "_C2_KW_", p.conv2.krn_w); - REPLACE_WITH_NUM(model, "_C2_KH_", p.conv2.krn_h); - REPLACE_WITH_NUM(model, "_C2_SW_", p.conv2.str_w); - REPLACE_WITH_NUM(model, "_C2_SH_", p.conv2.str_h); - REPLACE_WITH_NUM(model, "_C2_PW_", p.conv2.pad_w); - REPLACE_WITH_NUM(model, "_C2_PH_", p.conv2.pad_h); - REPLACE_WITH_NUM(model, "_C2_GC_", p.conv2.grp_c); - REPLACE_WITH_NUM(model, "_C2_OC_", p.conv2.out_c); - REPLACE_WITH_NUM(model, "_C2_OH_", (c1_oh + 2 * p.conv2.pad_h - p.conv2.krn_h) / p.conv2.str_h + 1); - REPLACE_WITH_NUM(model, "_C2_OW_", (c1_ow + 2 * p.conv2.pad_w - p.conv2.krn_w) / p.conv2.str_w + 1); - - size_t conv2_w_data_size = (p.conv2.krn_w * p.conv2.krn_h * p.conv2.out_c * p.conv1.out_c / p.conv2.grp_c) * sizeof(float); - size_t conv2_b_data_size = p.conv2.out_c * sizeof(float); - REPLACE_WITH_NUM(model, "_C2_S0_", conv1_w_data_size + conv1_b_data_size); - REPLACE_WITH_NUM(model, "_C2_S1_", conv2_w_data_size); - REPLACE_WITH_NUM(model, "_C2_S2_", conv1_w_data_size + conv1_b_data_size + conv2_w_data_size); - REPLACE_WITH_NUM(model, "_C2_S3_", conv2_b_data_size); - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - dw_conv_fusing_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - size_t conv1_w_size = p.conv1.krn_w * p.conv1.krn_h * p.conv1.out_c * p.in.c / p.conv1.grp_c + p.conv1.out_c; // conv1 weights + biases - size_t conv2_w_size = p.conv2.krn_w * p.conv2.krn_h * p.conv2.out_c * p.conv1.out_c / p.conv2.grp_c + p.conv2.out_c; // conv2 weights + biases - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, - {(conv1_w_size+conv2_w_size) * sizeof(float)}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float), 1); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w}; - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - InferenceEngine::OutputsDataMap out; - out = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - size_t c1_oh = (p.in.h + 2 * p.conv1.pad_h - p.conv1.krn_h) / p.conv1.str_h + 1; - size_t c1_ow = (p.in.w + 2 * p.conv1.pad_w - p.conv1.krn_w) / p.conv1.str_w + 1; - InferenceEngine::TBlob conv1_dst_ref(InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {p.in.n, p.conv1.out_c, c1_oh, c1_ow}, InferenceEngine::NCHW)); - conv1_dst_ref.allocate(); - - size_t c2_oh = 
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
deleted file mode 100644
index bd8b05458f551d..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "../test_graph.hpp"
-
-#include "single_layer_common.hpp"
-#include <...>  // include target lost in extraction
-#include "tests_common.hpp"
-#include <...>  // include target lost in extraction
-
-using namespace ::testing;
-using namespace std;
-using namespace mkldnn;
-
-class MKLDNNGraphOptimizationTests: public TestsCommon {};
-
-TEST_F(MKLDNNGraphOptimizationTests, TestNoFuseConvSumWithOneInput) {
-    std::string model = R"V0G0N(
-        [IR XML elided: the markup was stripped during extraction and only port dims survive. They describe a
-         1x3x5x5 Input feeding a Convolution and an Eltwise sum that shares a single input, per the test name.]
-)V0G0N";
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {48}, InferenceEngine::C });
-    weights->allocate();
-    float * data = weights->buffer();
-
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    InferenceEngine::Core ie;
-    InferenceEngine::CNNNetwork network;
-    ASSERT_NO_THROW(network = ie.ReadNetwork(model, weights_ptr));
-
-    MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(network));
-
-    bool fused = true;
-    auto& nodes = graph.getNodes();
-    for (auto &node : nodes) {
-        if (node->getType() == MKLDNNPlugin::Convolution) {
-            fused = false;
-        }
-    }
-    ASSERT_FALSE(fused);
-}
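// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name countNodes is hypothetical.] Each fusion check in this file
// scans the compiled graph for surviving node types. Factored out, the
// pattern is:
static size_t countNodes(MKLDNNGraphTestClass &graph, MKLDNNPlugin::Type type) {
    size_t count = 0;
    for (auto &node : graph.getNodes()) {
        if (node->getType() == type)
            count++;  // the primitive survived optimization as a standalone node
    }
    return count;
}
// TestNoFuseConvSumWithOneInput above then reduces to:
//     ASSERT_NE(countNodes(graph, MKLDNNPlugin::Convolution), 0);  // conv must NOT be fused away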
-
-TEST_F(MKLDNNGraphOptimizationTests, DISABLED_TestNoCrashForFuseConvSumAndInput) {
-    std::string model = R"V0G0N(
-        [IR XML elided: markup stripped during extraction; the surviving 1x3x5x5 port dims describe an Input,
-         a Convolution, and an Eltwise sum taking the network input directly as an operand, per the test name.]
-)V0G0N";
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {48}, InferenceEngine::C });
-    weights->allocate();
-    float * data = weights->buffer();
-
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    InferenceEngine::Core ie;
-    InferenceEngine::CNNNetwork network;
-    ASSERT_NO_THROW(network = ie.ReadNetwork(model, weights_ptr));  // "network =" restored: the original discarded the result, leaving CreateGraph below with an empty network
-
-    MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(network));
-
-    bool fused = false;
-    auto& nodes = graph.getNodes();
-    for (auto &node : nodes) {
-        if (node->isFusedWith(MKLDNNPlugin::Eltwise)) {
-            fused = true;
-        }
-    }
-    ASSERT_TRUE(fused);
-}
-
-namespace GraphOptimizationUtils {
-
-using fake_ext_factory = std::function<InferenceEngine::ILayerImplFactory *(const InferenceEngine::CNNLayer *)>;
-
-class FakeReLUImpl : public InferenceEngine::ILayerExecImpl {
-public:
-    FakeReLUImpl(const InferenceEngine::CNNLayer *layer) {
-        cnnLayer = const_cast<InferenceEngine::CNNLayer *>(layer);
-    }
-    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc *resp) noexcept override {
-        InferenceEngine::LayerConfig config;
-        config.dynBatchSupport = 0;
-        if (cnnLayer->outData.size() != 1 && cnnLayer->insData.size() != 1)
-            return InferenceEngine::GENERAL_ERROR;
-        InferenceEngine::DataConfig cfg;
-        cfg.constant = false;
-        cfg.inPlace = 0;
-        InferenceEngine::SizeVector order;
-        for (size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) {
-            order.push_back(i);
-        }
-        cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(),
-                                               cnnLayer->outData[0]->getTensorDesc().getDims(),
-                                               {cnnLayer->outData[0]->getTensorDesc().getDims(), order});
-        config.outConfs.push_back(cfg);
-        config.inConfs.push_back(cfg);
-        conf.push_back(config);
-        return InferenceEngine::OK;
-    }
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override {
-        if (config.dynBatchSupport)
-            return InferenceEngine::NOT_IMPLEMENTED;
-        for (auto input : config.inConfs) {
-            if (input.constant)
-                return InferenceEngine::GENERAL_ERROR;
-        }
-        for (auto output : config.outConfs) {
-            if (output.constant)
-                return InferenceEngine::GENERAL_ERROR;
-        }
-        return InferenceEngine::OK;
-    }
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs, InferenceEngine::ResponseDesc *resp) noexcept override {
-        const float *src_data = inputs[0]->buffer();
-        float *dst_data = outputs[0]->buffer();
-        if (src_data != dst_data)
-            return InferenceEngine::GENERAL_ERROR;
-        return InferenceEngine::OK;
-    }
-
-private:
-    InferenceEngine::CNNLayer* cnnLayer;
-};
-
-class FakeReLUFactory : public InferenceEngine::ILayerImplFactory {
-public:
-    FakeReLUFactory(const InferenceEngine::CNNLayer *layer) {
-        cnnLayer = const_cast<InferenceEngine::CNNLayer *>(layer);
-    }
-    // First implementation has more priority than next
-    InferenceEngine::StatusCode getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls, InferenceEngine::ResponseDesc *resp) noexcept override {
-        impls.push_back(InferenceEngine::ILayerImpl::Ptr(new FakeReLUImpl(cnnLayer)));
-        return InferenceEngine::OK;
-    }
-
-private:
-    InferenceEngine::CNNLayer * cnnLayer;
-};
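// [Editor's note: illustrative sketch, not part of the original patch; the
// function name makeFactoryFor is hypothetical, and <map>/<functional> are
// assumed available via the test headers.] FakeFabric (below) keys its
// factories by the layer's type string; the dispatch it implements boils
// down to:
inline InferenceEngine::ILayerImplFactory *makeFactoryFor(const InferenceEngine::CNNLayer *layer) {
    static std::map<std::string, fake_ext_factory> factories = {
        {"ReLU", [](const InferenceEngine::CNNLayer *l) -> InferenceEngine::ILayerImplFactory * {
             return new FakeReLUFactory(l);  // raw pointer: this extension API hands ownership to the caller
         }},
    };
    auto it = factories.find(layer->type);
    return it == factories.end() ? nullptr : it->second(layer);
}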
-
-class FakeFabric : public InferenceEngine::Extensions::Cpu::MKLDNNExtensions {
-public:
-    FakeFabric() {
-        factories["ReLU"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new FakeReLUFactory(cnnLayer); };
-    }
-
-    virtual ~FakeFabric() {
-        factories.clear();
-    }
-
-    void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {}
-    void Unload() noexcept override {}
-    InferenceEngine::StatusCode getPrimitiveTypes(char**& types, unsigned int& size, InferenceEngine::ResponseDesc* resp) noexcept override {
-        types = new char *[factories.size()];
-        size_t count = 0;
-        for (auto it = factories.begin(); it != factories.end(); it++, count++) {
-            types[count] = new char[it->first.size() + 1];
-            std::copy(it->first.begin(), it->first.end(), types[count]);
-            types[count][it->first.size()] = '\0';
-        }
-        return InferenceEngine::OK;
-    };
-    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory *&factory,
-                                              const InferenceEngine::CNNLayer *cnnLayer,
-                                              InferenceEngine::ResponseDesc *resp) noexcept override {
-        if (factories.find(cnnLayer->type) == factories.end()) {
-            std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!";
-            errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
-            return InferenceEngine::NOT_FOUND;
-        }
-        factory = factories[cnnLayer->type](cnnLayer);
-        return InferenceEngine::OK;
-    }
-
-private:
-    std::map<std::string, fake_ext_factory> factories;
-};
-}
-
-TEST_F(MKLDNNGraphOptimizationTests, TestNoFuseCustomActivation) {
-    std::string model = R"V0G0N(
-        [IR XML elided: markup stripped during extraction. The surviving dims describe a 1x3x227x227 Input, a
-         Convolution producing 1x96x55x55, and a ReLU consuming it; the ReLU is served by the fake extension above.]
-)V0G0N";
-
-    std::shared_ptr<InferenceEngine::IExtension> extension;
-    extension.reset(new GraphOptimizationUtils::FakeFabric());
-    MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
-    extMgr->AddExtension(extension);
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {139776}, InferenceEngine::C });
-    weights->allocate();
-    float * data = weights->buffer();
-
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    InferenceEngine::Core core;
-    InferenceEngine::CNNNetwork network;
-    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
-
-    MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(network, extMgr));
-
-    bool fused = true;
-    auto& nodes = graph.getNodes();
-    for (auto &node : nodes) {
-        if (node->getType() == MKLDNNPlugin::Convolution) {
-            fused = false;
-        }
-    }
-    ASSERT_FALSE(fused);
-}
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
deleted file mode 100644
index 80f894d0657ff4..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
+++ /dev/null
@@ -1,6671 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "../test_graph.hpp"
-#include "mkldnn_exec_network.h"
-
-#include "tests_common.hpp"
-#include <...>  // include target lost in extraction
-#include <...>  // include target lost in extraction
-
-#include <...>  // include target lost in extraction
-
-using namespace ::testing;
-using namespace std;
-using namespace mkldnn;
-
-class MKLDNNGraphStructureTests: public TestsCommon {
-protected:
-    MKLDNNPlugin::NumaNodesWeights cache;
-};
-
-TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReorders) {
-    std::string model = R"V0G0N(
-        [IR XML elided: markup stripped during extraction. The surviving dims sketch a 1x3x544x992 Input, a strided
-         Convolution to 1x16x272x496, an activation, a concatenation up to 1x32x272x496, further conv/activation
-         stages, and a final Pooling down to 1x32x136x248.]
-)V0G0N";
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {9728}, InferenceEngine::C });
-    weights->allocate();
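// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name makeFilledWeights is hypothetical.] Every test in this file
// allocates its weights as a U8 byte blob and fills the payload through a
// float view; condensed into a file-scope helper it would read:
static InferenceEngine::TBlob<uint8_t>::Ptr makeFilledWeights(size_t byteSize) {
    auto blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {byteSize}, InferenceEngine::C));
    blob->allocate();
    fill_data(blob->buffer().as<float *>(), byteSize / sizeof(float));  // fill_data is the tests_common.hpp helper
    return blob;
}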
fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - } - } - ASSERT_EQ(reorders_num, 3); -} - -TEST_F(MKLDNNGraphStructureTests, TestRedundantReorderBeforeConvWithC_3) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 320 - 544 - - - - - - - - 1 - 3 - 320 - 544 - - - - - 1 - 3 - 320 - 544 - - - - - - - - - 1 - 3 - 320 - 544 - - - - - 1 - 3 - 320 - 544 - - - - - - - - - - 1 - 3 - 320 - 544 - - - - - 1 - 64 - 160 - 272 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {37936}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - if (node->getChildEdgeAt(0)->getChild()->getName() == "init_conv"){ - ASSERT_EQ(MKLDNNPlugin::Convolution, node->getChildEdgeAt(0)->getChild()->getType()); - ASSERT_EQ(InferenceEngine::Layout::NCHW, - node->getChildEdgeAt(0)->getBlob()->getTensorDesc().getLayout()); - } - } - } - size_t expected = 1; - ASSERT_EQ(reorders_num, expected); -} - -TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 7 - 7 - - - - - - - - 1 - 3 - 7 - 7 - - - - - 1 - 4 - 4 - 4 - - - - - - - - - - 1 - 4 - 4 - 4 - - - - - 1 - 4 - 4 - 4 - - - - - - - - 1 - 4 - 4 - 4 - - - 1 - 4 - 4 - 4 - - - - - 1 - 8 - 4 - 4 - - - - - - - 1 - 8 - 4 - 4 - - - - - 1 - 8 - 4 - 4 - - - - - - - - - 1 - 8 - 4 - 4 - - - - - 1 - 8 - 4 - 4 - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {2432}, InferenceEngine::C }); - weights->allocate(); - float * data = weights->buffer(); - - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - size_t idx = 592; // Convolution weights - size_t size = 8; // Scale and shift sizes - for (size_t i = 0; i < size; i++, idx++) { - data[idx] = 1.f; - } - for (size_t i = 0; i < size; i++, idx++) { - data[idx] = 0.f; - } - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder && node->getChildEdgeAt(0)->getChild()->getType() != MKLDNNPlugin::Output) { - reorders_num++; - } - } - ASSERT_EQ(reorders_num, 2); - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 7, 7}, InferenceEngine::NCHW); - 
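// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name inferFirstOutput is hypothetical.] The infer sequence repeated
// throughout this file (allocate an FP32 blob for the first network output,
// run the test graph) can be expressed once:
static InferenceEngine::TBlob<float>::Ptr inferFirstOutput(MKLDNNGraphTestClass &graph,
                                                           InferenceEngine::CNNNetwork &network,
                                                           const InferenceEngine::BlobMap &srcs) {
    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
    auto output = InferenceEngine::make_shared_blob<float>(out.begin()->second->getTensorDesc());
    output->allocate();
    InferenceEngine::BlobMap outputBlobs{{out.begin()->first, output}};
    graph.Infer(srcs, outputBlobs);
    return output;
}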
InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - data = src->buffer().as(); - for (size_t i = 0; i < src->size(); i++) { - data[i] = (i % 2) ? 1 : -1; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst = {0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.040f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.119f, 0.000f, 0.000f, 1.889f, 0.000f, 0.000f, 0.000f, 1.138f, 0.647f, 0.000f, 0.348f, - 0.000f, 1.711f, 1.311f, 0.000f, 0.000f, 3.045f, 1.203f, 0.000f, 0.927f, 2.041f, 0.000f, - 0.564f, 1.415f, 1.524f, 0.000f, 1.812f, 0.486f, 0.103f, 1.606f, 0.999f, 0.000f, 1.145f, - 2.158f, 0.712f, 0.000f, 0.009f, 0.756f, 0.000f, 0.000f, 0.008f, 0.243f, - - 0.381f, 0.363f, 1.846f, 0.804f, 1.372f, 1.113f, 2.453f, 1.609f, 0.557f, 0.000f, 3.020f, - 1.422f, 0.481f, 0.221f, 1.137f, 0.401f, 1.475f, 0.301f, 0.862f, 2.052f, 2.680f, 0.284f, - 0.000f, 2.389f, 0.917f, 0.000f, 0.358f, 1.989f, 0.355f, 0.000f, 0.000f, 0.570f, 0.000f, - 0.761f, 0.000f, 0.000f, 0.652f, 0.910f, 0.000f, 0.000f, 0.226f, 0.000f, 0.000f, 0.323f, - 0.000f, 0.000f, 0.000f, 0.108f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.866f, 0.000f, - 0.000f, 0.000f, 0.759f, 0.000f, 0.000f, 0.029f, 1.186f, 0.000f, 0.000f}; - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); - - // Compare for batch2 - network.setBatchSize(2); - graph.CreateGraph(network); - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 3, 7, 7}, InferenceEngine::NCHW); - - InferenceEngine::Blob::Ptr srcBatch = InferenceEngine::make_shared_blob(desc); - srcBatch->allocate(); - data = srcBatch->buffer().as(); - float *originData = src->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < src->size(); i++) { - data[srcBatch->getTensorDesc().offset(b*src->size() + i)] = originData[src->getTensorDesc().offset(i)]; - } - } - - srcs.clear(); - srcs.insert(std::pair("data", srcBatch)); - out = network.getOutputsInfo(); - - outputBlobs.clear(); - item = *out.begin(); - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - dstOut->allocate(); - data = dstOut->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < refDst.size(); i++) { - data[dstOut->getTensorDesc().offset(b*refDst.size() + i)] = refDst[i]; - } - } - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWConvolution) { - std::string model = R"V0G0N( - - - - - - 2 - 3 - 5 - 5 - - - - - - - - 2 - 3 - 5 - 5 - - - - - 2 - 4 - 5 - 5 - - - - - - - - - - 2 - 4 - 5 - 5 - - - - - 2 - 4 - 5 - 5 - - - - - - - - 2 - 4 - 5 - 5 - - - - - 2 - 4 - 5 - 5 - - - - - - - - - - 2 - 4 - 5 - 5 - - - - - 2 - 4 - 5 - 5 - - - - - - - - 2 - 4 - 5 - 5 - - - 2 - 4 - 5 - 5 - - - - - 2 - 8 - 5 - 5 - - - - - - - 2 - 8 - 5 - 5 - - - - - 2 - 8 - 5 - 5 - - 
- - - - - - - - 2 - 8 - 5 - 5 - - - - - 2 - 8 - 5 - 5 - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {288}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - } - } - size_t expected = InferenceEngine::with_cpu_x86_avx2() ? 2 : 3; - ASSERT_EQ(reorders_num, expected); - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {2, 3, 5, 5}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - auto *data = src->buffer().as(); - size_t sizeB1 = src->size() / 2; - fill_data(data, sizeB1); - for (size_t i = 0; i < sizeB1; i++) { - data[sizeB1 + i] = data[i]; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst = {0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, - 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, - 0.920f, 0.920f, 0.920f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, - 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, - 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.185f, 0.176f, 0.000f, 0.000f, 0.000f, 0.215f, 0.000f, 0.957f, 1.092f, 0.000f, - 0.000f, 0.213f, 0.020f, 1.391f, 2.359f, 0.583f, 0.000f, 0.000f, 0.138f, 0.043f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.720f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.069f, 0.188f, 0.046f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.045f, - 0.041f, 0.000f, 0.000f, 0.056f, 0.000f, 0.000f, 0.086f, 0.025f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.012f, 0.056f, 0.000f, 0.060f, 0.055f, 0.000f, 0.000f, 0.037f, 0.000f, 0.000f, - 0.000f, 0.000f, - - 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, - 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, 0.920f, - 0.920f, 0.920f, 0.920f, 0.827f, 
0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, - 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, - 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.827f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.185f, 0.176f, 0.000f, 0.000f, 0.000f, 0.215f, 0.000f, 0.957f, 1.092f, 0.000f, - 0.000f, 0.213f, 0.020f, 1.391f, 2.359f, 0.583f, 0.000f, 0.000f, 0.138f, 0.043f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.720f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.069f, 0.188f, 0.046f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.045f, - 0.041f, 0.000f, 0.000f, 0.056f, 0.000f, 0.000f, 0.086f, 0.025f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.012f, 0.056f, 0.000f, 0.060f, 0.055f, 0.000f, 0.000f, 0.037f, 0.000f, 0.000f, - 0.000f, 0.000f}; - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -// TODO change hardcoded reference to dynamically generated -TEST_F(MKLDNNGraphStructureTests, DISABLED_TestNoRedundantReordersBeforeDWDeconvolution) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 12 - 2 - 2 - - - - - - - - - 1 - 12 - 2 - 2 - - - - - 1 - 12 - 4 - 4 - - - - - - - - - - 1 - 12 - 2 - 2 - - - - - 1 - 24 - 1 - 1 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {5664}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - ASSERT_EQ(MKLDNNPlugin::Output, node->getChildEdgeAt(0)->getChild()->getType()); - } - } - ASSERT_EQ(reorders_num, 2); - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - InferenceEngine::DataPtr item = out["deconv1"]; - InferenceEngine::TBlob::Ptr output1; - output1 = InferenceEngine::make_shared_blob(item->getTensorDesc()); - output1->allocate(); - outputBlobs["deconv1"] = output1; - - item = out["deconv2"]; - InferenceEngine::TBlob::Ptr output2; - output2 = 
InferenceEngine::make_shared_blob(item->getTensorDesc()); - output2->allocate(); - outputBlobs["deconv2"] = output2; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst1 = {-0.042f, -0.563f, -0.150f, 0.396f, 0.224f, 0.229f, -0.335f, -0.390f, -0.213f, 0.959f, 0.520f, -0.507f, - -0.200f, -0.202f, 0.441f, 0.499f, 0.000f, 0.000f, 0.000f, 0.000f, 0.363f, 0.141f, -0.497f, -0.332f, -0.311f, - 0.423f, 0.693f, -0.012f, -0.328f, -0.106f, 0.518f, 0.353f, 0.000f, 0.000f, 0.000f, 0.000f, 0.050f, -0.352f, - -0.045f, 0.000f, -0.303f, 0.605f, 0.754f, -0.143f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.012f, 0.298f, 0.000f, - -0.066f, -0.303f, -0.318f, -0.054f, 0.322f, 0.002f, 0.050f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.328f, -0.162f, -0.765f, -0.221f, 0.422f, 0.715f, 0.726f, 0.375f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, -0.744f, -0.038f, -0.109f, 0.000f, 0.583f, 0.892f, - 0.039f, -0.356f, 0.000f, 0.000f, 0.000f, 0.000f, -0.514f, 0.320f, 0.193f, 0.000f, -0.785f, -0.508f, 0.160f, -0.104f, - 0.473f, 0.214f, 0.129f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, -0.299f, 0.784f, 0.953f, -0.163f, -1.160f, -0.547f, - 0.401f, -0.066f, 0.275f, -0.172f, -0.683f, -0.188f, 0.384f, -0.149f, 0.151f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, - 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f, 0.000f}; - InferenceEngine::TBlob::Ptr dstOut1 = InferenceEngine::make_shared_blob(out["deconv1"]->getTensorDesc(), refDst1.data()); - - std::vector refDst2 = {-0.814f, -0.337f, -1.081f, 1.139f, -0.197f, 1.547f, -0.778f, -2.467f, 1.409f, -1.472f, 2.827f, 0.663f, - -0.645f, 0.105f, -1.873f, -0.272f, 1.071f, 2.706f, -1.705f, 0.602f, -1.956f, 0.734f, 2.325f, -2.147f}; - InferenceEngine::TBlob::Ptr dstOut2 = InferenceEngine::make_shared_blob(out["deconv2"]->getTensorDesc(), refDst2.data()); - - compare(*output1, *dstOut1); - compare(*output2, *dstOut2); -} - -TEST_F(MKLDNNGraphStructureTests, TestSeveralOutputToNextLayer) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - - - - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - } - } - ASSERT_EQ(reorders_num, 3); - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = 
*out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - compare(*output, *src); -} - - -TEST_F(MKLDNNGraphStructureTests, TestOutputAfterInplacePlusConcat) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - - - - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - - - 1 - 3 - 2 - 2 - - - - - - - - - 1 - 3 - 2 - 2 - - - - - 1 - 12 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::ResponseDesc resp; - - ASSERT_NO_THROW(inferRequest->SetBlob("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - - ASSERT_NO_THROW(inferRequest->SetBlob(item.first, output)); - ASSERT_NO_THROW(inferRequest->Infer()); - - compare(*output, *src); -} - -TEST_F(MKLDNNGraphStructureTests, TestResnetPart) { - std::string modelB = R"V0G0N( - - - - - - 1 - 3 - 224 - 224 - - - - - - - - 1 - 3 - 224 - 224 - - - - - 1 - 64 - 112 - 112 - - - - - - - - - 1 - 64 - 112 - 112 - - - - - 1 - 64 - 112 - 112 - - - - - - - - 1 - 64 - 112 - 112 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - - - - 1 - 256 - 56 - 56 - - - 1 - 256 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - 1 - 256 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - - 1 - 256 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - )V0G0N"; - std::string modelE =R"V0G0N( - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - - - - 1 - 256 - 56 - 56 - - - 1 - 256 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - 1 - 256 - 56 - 56 - - - - - 1 - 256 - 56 - 56 - - - - - - - - 1 - 256 - 56 - 56 - - - - - 1 - 256 - 1 - 1 - - - - - - - - 1 - 256 - 1 - 1 - - - - - 1 - 1000 - - - - - - - - - 1 - 1000 - - - - - 1 - 1000 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - -)V0G0N"; - - std::string model = modelB + modelE; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {1643424}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 224, 224}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data(src->buffer(), src->size()); - - InferenceEngine::ResponseDesc resp; - - ASSERT_NO_THROW(inferRequest->SetBlob("input", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - - ASSERT_NO_THROW(inferRequest->SetBlob(item.first.c_str(), output)); - - ASSERT_NO_THROW(inferRequest->Infer()); -} - -TEST_F(MKLDNNGraphStructureTests, TestConcatAfterConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 20 - 20 - - - - - - - 1 - 4 - 20 - 20 - - - - - - - 1 - 2 - 20 - 20 - - - - - - - - 1 - 3 - 20 - 20 - - - 1 - 2 - 20 - 20 - - - - - 1 - 5 - 20 - 20 - - - - - - - - 1 - 4 - 20 - 20 - - - 1 - 5 - 20 - 20 - - - - - 1 - 9 - 20 - 20 - - - - - - - - 1 - 9 - 20 - 20 - - - - - 1 - 9 - 1 - 1 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc1); - src1->allocate(); - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::TensorDesc desc2(InferenceEngine::Precision::FP32, {1, 4, 20, 20}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob(desc2); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::TensorDesc desc3(InferenceEngine::Precision::FP32, {1, 2, 20, 20}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob(desc3); - src3->allocate(); - fill_data(src3->buffer(), src3->size()); - - InferenceEngine::ResponseDesc resp; - - 
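// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name setInputsAndInfer is hypothetical.] The request-based tests in
// this file bind each network input by its IR name before running; the
// recurring sequence is:
static void setInputsAndInfer(const InferenceEngine::IInferRequestInternal::Ptr &request,
                              const InferenceEngine::BlobMap &inputs) {
    for (const auto &input : inputs)
        request->SetBlob(input.first, input.second);  // throws if the name or TensorDesc mismatches
    request->Infer();
}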
ASSERT_NO_THROW(inferRequest->SetBlob("data1", src1)); - ASSERT_NO_THROW(inferRequest->SetBlob("data2", src2)); - ASSERT_NO_THROW(inferRequest->SetBlob("data3", src3)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - - ASSERT_NO_THROW(inferRequest->SetBlob(item.first, output)); - - ASSERT_NO_THROW(inferRequest->Infer()); - -// compare(*output, *src); -} - -TEST_F(MKLDNNGraphStructureTests, Test2ConcatFromConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 2 - 2 - - - - - - - 1 - 4 - 2 - 2 - - - - - - - 1 - 2 - 2 - 2 - - - - - - - 1 - 1 - 2 - 2 - - - - - - - - 1 - 3 - 2 - 2 - - - 1 - 2 - 2 - 2 - - - - - 1 - 5 - 2 - 2 - - - - - - - - 1 - 5 - 2 - 2 - - - 1 - 4 - 2 - 2 - - - - - 1 - 9 - 2 - 2 - - - - - - - - 1 - 5 - 2 - 2 - - - 1 - 1 - 2 - 2 - - - - - 1 - 6 - 2 - 2 - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc1); - src1->allocate(); - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::TensorDesc desc2(InferenceEngine::Precision::FP32, {1, 4, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob(desc2); - src2->allocate(); - fill_data(src2->buffer(), src2->size()); - - InferenceEngine::TensorDesc desc3(InferenceEngine::Precision::FP32, {1, 2, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob(desc3); - src3->allocate(); - fill_data(src3->buffer(), src3->size()); - - InferenceEngine::TensorDesc desc4(InferenceEngine::Precision::FP32, {1, 1, 2, 2}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src4 = InferenceEngine::make_shared_blob(desc4); - src4->allocate(); - fill_data(src4->buffer(), src4->size()); - - InferenceEngine::ResponseDesc resp; - - ASSERT_NO_THROW(inferRequest->SetBlob("data1", src1)); - ASSERT_NO_THROW(inferRequest->SetBlob("data2", src2)); - ASSERT_NO_THROW(inferRequest->SetBlob("data3", src3)); - ASSERT_NO_THROW(inferRequest->SetBlob("data4", src4)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::vector::Ptr> outputs; - std::vector::Ptr> refOutputs; - for (const auto& it : out) { - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(it.second->getTensorDesc()); - output->allocate(); - outputs.push_back(output); - - InferenceEngine::TBlob::Ptr refOutput; - refOutput = InferenceEngine::make_shared_blob(it.second->getTensorDesc()); - refOutput->allocate(); - - float * refData = refOutput->buffer().as(); - size_t ref_idx = 0; - if (it.first == "Concat1") { - float *srcData = src1->buffer().as(); - for (size_t i = 0; i < src1->size(); 
i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - srcData = src3->buffer().as(); - for (size_t i = 0; i < src3->size(); i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - srcData = src2->buffer().as(); - for (size_t i = 0; i < src2->size(); i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - - - } else if (it.first == "Concat2") { - float *srcData = src1->buffer().as(); - for (size_t i = 0; i < src1->size(); i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - srcData = src3->buffer().as(); - for (size_t i = 0; i < src3->size(); i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - srcData = src4->buffer().as(); - for (size_t i = 0; i < src4->size(); i++, ref_idx++) { - refData[ref_idx] = srcData[i]; - } - - } - refOutputs.push_back(refOutput); - - ASSERT_NO_THROW(inferRequest->SetBlob(it.first, output)); - } - - ASSERT_NO_THROW(inferRequest->Infer()); - - for (size_t i = 0; i < outputs.size(); i++) { - compare(*outputs[i], *refOutputs[i]); - } -} - -TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) { - std::string model = R"V0G0N( - - - - - - 1 - 24 - 80 - 80 - - - - - - - - 1 - 24 - 80 - 80 - - - - - 1 - 24 - 80 - 80 - - - - - - - - - - 1 - 24 - 80 - 80 - - - - - 1 - 24 - 80 - 80 - - - - - - - - 1 - 24 - 80 - 80 - - - 1 - 24 - 80 - 80 - - - - - 1 - 48 - 80 - 80 - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {3552}, InferenceEngine::C }); - weights->allocate(); - float * data = weights->buffer(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 24, 80, 80}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - fill_data((float *) src->buffer(), src->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr refOutput; - refOutput = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - refOutput->allocate(); - outputBlobs[item.first] = refOutput; - - graph.Infer(srcs, outputBlobs); - - // Compare for batch2 - network.setBatchSize(2); - graph.CreateGraph(network); - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 24, 80, 80}, InferenceEngine::NCHW); - - InferenceEngine::Blob::Ptr srcBatch = InferenceEngine::make_shared_blob(desc); - srcBatch->allocate(); - data = srcBatch->buffer().as(); - float *originData = src->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < src->size(); i++) { - data[srcBatch->getTensorDesc().offset(b*src->size() + i)] = originData[src->getTensorDesc().offset(i)]; - } - } - - srcs.clear(); - srcs.insert(std::pair("data", srcBatch)); - out = network.getOutputsInfo(); - - outputBlobs.clear(); - item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - 
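// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name replicateBatch is hypothetical.] The batch-2 checks below copy
// the batch-1 reference through TensorDesc::offset so that indexing stays
// valid even when the plugin picks a non-trivial memory layout:
static void replicateBatch(const InferenceEngine::Blob::Ptr &one,
                           const InferenceEngine::Blob::Ptr &batched,
                           size_t batches) {
    const float *src = one->buffer().as<const float *>();
    float *dst = batched->buffer().as<float *>();
    for (size_t b = 0; b < batches; b++)
        for (size_t i = 0; i < one->size(); i++)
            dst[batched->getTensorDesc().offset(b * one->size() + i)] =
                src[one->getTensorDesc().offset(i)];
}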
InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - dstOut->allocate(); - data = dstOut->buffer().as(); - originData = refOutput->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < refOutput->size(); i++) { - data[dstOut->getTensorDesc().offset(b*refOutput->size() + i)] = originData[refOutput->getTensorDesc().offset(i)]; - } - } - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithConstLayer) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 20 - 20 - - - - - - - 1 - 4 - 20 - 20 - - - - - - - - - - - 1 - 3 - 20 - 20 - - - 1 - 4 - 20 - 20 - - - - - 1 - 7 - 20 - 20 - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {6400}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc1); - src1->allocate(); - fill_data(src1->buffer(), src1->size()); - - InferenceEngine::ResponseDesc resp; - - ASSERT_NO_THROW(inferRequest->SetBlob("data", src1)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - - ASSERT_NO_THROW(inferRequest->SetBlob(item.first.c_str(), output)); - - ASSERT_NO_THROW(inferRequest->Infer()); -} - -TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithEltwiseBeforeConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 20 - 20 - - - - - - - 1 - 3 - 20 - 20 - - - - - - - - - - 1 - 1 - 20 - 20 - - - - - - - - - - - 1 - 3 - 20 - 20 - - - 1 - 3 - 20 - 20 - - - - - 1 - 3 - 20 - 20 - - - - - - - - 1 - 1 - 20 - 20 - - - 1 - 3 - 20 - 20 - - - - - 1 - 4 - 20 - 20 - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {6400}, InferenceEngine::C }); - weights->allocate(); - float * data = weights->buffer(); - for (size_t i = 0; i < 1200; i++) { - data[i] = 3; - } - for (size_t i = 1200; i < 1600; i++) { - data[i] = 4; - } - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache)); - InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo(); - 
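// [Editor's note: illustrative sketch, not part of the original patch; the
// helper name makeInferRequest is hypothetical, and the two {} arguments are
// the same empty config/extension-manager defaults used throughout this
// file.] Because these tests construct MKLDNNExecNetwork directly instead of
// going through Core::LoadNetwork, the network I/O maps must be wired by hand
// before a request can be created:
static InferenceEngine::IInferRequestInternal::Ptr makeInferRequest(
        InferenceEngine::CNNNetwork &network, MKLDNNPlugin::NumaNodesWeights &cache) {
    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(
        new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}, cache));
    execNetwork->setNetworkInputs(network.getInputsInfo());
    execNetwork->setNetworkOutputs(network.getOutputsInfo());
    return execNetwork->CreateInferRequest();
}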
execNetwork->setNetworkInputs(_networkInputs); - execNetwork->setNetworkOutputs(_networkOutputs); - InferenceEngine::IInferRequestInternal::Ptr inferRequest = execNetwork->CreateInferRequest(); - - InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc1); - src1->allocate(); - data = src1->buffer(); - for (size_t i = 0; i < 1200; i++) { - data[i] = 1; - } - - InferenceEngine::ResponseDesc resp; - - ASSERT_NO_THROW(inferRequest->SetBlob("data", src1)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - - ASSERT_NO_THROW(inferRequest->SetBlob(item.first.c_str(), output)); - - ASSERT_NO_THROW(inferRequest->Infer()); - - auto *res_ptr = output->buffer().as(); - size_t res_size = output->size(); - - for (size_t i = 0; i < res_size; i++) { - ASSERT_NEAR(res_ptr[i], 4, 0.01f); - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - ASSERT_EQ(MKLDNNPlugin::Input, node->getParentEdgeAt(0)->getParent()->getType()); - ASSERT_EQ(MKLDNNPlugin::Eltwise, node->getChildEdgeAt(0)->getChild()->getType()); - } - } - ASSERT_EQ(reorders_num, 0); -} -TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersRmnet_SSSSD) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 320 - 544 - - - - - - - 1 - 3 - 320 - 544 - - - - - 1 - 3 - 320 - 544 - - - - - - - - - - - - 1 - 3 - 320 - 544 - - - - - 1 - 32 - 160 - 272 - - - - - - - - - - - - 1 - 32 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - 1 - 32 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - - - - - 1 - 32 - 160 - 272 - - - 1 - 32 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - 1 - 32 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - 1 - 32 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 8 - 160 - 272 - - - - - - - - 1 - 8 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - - - - - 1 - 32 - 160 - 272 - - - 1 - 32 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - 1 - 32 - 160 - 272 - - - - - 1 - 32 - 160 - 272 - - - - - - - - - - - - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {8664}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == 
MKLDNNPlugin::Reorder) { - reorders_num++; - ASSERT_EQ(MKLDNNPlugin::Output, node->getChildEdgeAt(0)->getChild()->getType()); - } - } - - ASSERT_EQ(reorders_num, 1); -} - -TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) { - std::string model = R"V0G0N( - - - - - - 1 - 32 - 14 - 14 - - - - - - - 1 - 64 - 28 - 28 - - - - - - - - 1 - 64 - 28 - 28 - - - - - 1 - 96 - 14 - 14 - - - - - - - - - - - 1 - 96 - 14 - 14 - - - - - 1 - 64 - 14 - 14 - - - 1 - 32 - 14 - 14 - - - - - - - - 1 - 32 - 14 - 14 - - - - - 1 - 72 - 14 - 14 - - - - - - - - - - - 1 - 72 - 14 - 14 - - - - - 1 - 64 - 14 - 14 - - - 1 - 8 - 14 - 14 - - - - - - - - 1 - 64 - 14 - 14 - - - 1 - 64 - 14 - 14 - - - - - 1 - 64 - 14 - 14 - - - - - - - - 1 - 32 - 14 - 14 - - - 1 - 8 - 14 - 14 - - - - - 1 - 40 - 14 - 14 - - - - - - - - 1 - 64 - 14 - 14 - - - 1 - 40 - 14 - 14 - - - - - 1 - 104 - 14 - 14 - - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {33792}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 32, 14, 14}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc); - src1->allocate(); - fill_data((float *) src1->buffer(), src1->size()); - - - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 64, 28, 28}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob(desc); - src2->allocate(); - fill_data((float *) src2->buffer(), src2->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src1)); - srcs.insert(std::pair("data2", src2)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst(output->size()); - auto *data = output->buffer().as(); - for (size_t i = 0; i < output->size(); i++) { - refDst[i] = data[output->getTensorDesc().offset(i)]; - } - - // Compare for batch2 - network.setBatchSize(2); - graph.CreateGraph(network); - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 32, 14, 14}, InferenceEngine::NCHW); - - InferenceEngine::Blob::Ptr src1Batch = InferenceEngine::make_shared_blob(desc); - src1Batch->allocate(); - data = src1Batch->buffer().as(); - auto *originData = src1->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < src1->size(); i++) { - data[src1Batch->getTensorDesc().offset(b*src1->size() + i)] = originData[src1->getTensorDesc().offset(i)]; - } - } - - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 64, 28, 28}, InferenceEngine::NCHW); - - InferenceEngine::Blob::Ptr src2Batch = InferenceEngine::make_shared_blob(desc); - src2Batch->allocate(); - data = src2Batch->buffer().as(); - originData = src2->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < src2->size(); i++) { 
- data[src2Batch->getTensorDesc().offset(b*src2->size() + i)] = originData[src2->getTensorDesc().offset(i)]; - } - } - - srcs.clear(); - srcs.insert(std::pair("data", src1Batch)); - srcs.insert(std::pair("data2", src2Batch)); - out = network.getOutputsInfo(); - - outputBlobs.clear(); - item = *out.begin(); - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - dstOut->allocate(); - data = dstOut->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < refDst.size(); i++) { - data[dstOut->getTensorDesc().offset(b*refDst.size() + i)] = refDst[i]; - } - } - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersForXceptionTopology) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 299 - 299 - - - - - - - - 1 - 3 - 299 - 299 - - - - - 1 - 32 - 149 - 149 - - - - - - - - - 1 - 32 - 149 - 149 - - - - - 1 - 32 - 149 - 149 - - - - - - - - 1 - 32 - 149 - 149 - - - - - 1 - 64 - 147 - 147 - - - - - - - - - 1 - 64 - 147 - 147 - - - - - 1 - 64 - 147 - 147 - - - - - - - - 1 - 64 - 147 - 147 - - - - - 1 - 64 - 147 - 147 - - - - - - - - - 1 - 64 - 147 - 147 - - - - - 1 - 128 - 147 - 147 - - - - - - - - - 1 - 128 - 147 - 147 - - - - - 1 - 128 - 147 - 147 - - - - - - - - 1 - 128 - 147 - 147 - - - - - 1 - 128 - 147 - 147 - - - - - - - - - 1 - 128 - 147 - 147 - - - - - 1 - 128 - 147 - 147 - - - - - - - - - - 1 - 64 - 147 - 147 - - - - - 1 - 128 - 74 - 74 - - - - - - - - - - 1 - 128 - 147 - 147 - - - - - 1 - 128 - 74 - 74 - - - - - - - 1 - 128 - 74 - 74 - - - 1 - 128 - 74 - 74 - - - - - 1 - 128 - 74 - 74 - - - - - - - 1 - 128 - 74 - 74 - - - - - 1 - 128 - 74 - 74 - - - - - - - - 1 - 128 - 74 - 74 - - - - - 1 - 128 - 74 - 74 - - - - - - - - - 1 - 128 - 74 - 74 - - - - - 1 - 256 - 74 - 74 - - - - - - - - - 1 - 256 - 74 - 74 - - - - - 1 - 256 - 74 - 74 - - - - - - - - 1 - 256 - 74 - 74 - - - - - 1 - 256 - 74 - 74 - - - - - - - - - 1 - 256 - 74 - 74 - - - - - 1 - 256 - 74 - 74 - - - - - - - - - - 1 - 128 - 74 - 74 - - - - - 1 - 256 - 37 - 37 - - - - - - - - - - 1 - 256 - 74 - 74 - - - - - 1 - 256 - 37 - 37 - - - - - - - 1 - 256 - 37 - 37 - - - 1 - 256 - 37 - 37 - - - - - 1 - 256 - 37 - 37 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {758272}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - ASSERT_EQ(MKLDNNPlugin::Output, node->getChildEdgeAt(0)->getChild()->getType()); - } - } - ASSERT_EQ(reorders_num, 1); -} - -TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersForGrayscaleInput) { - std::string model = R"V0G0N( - - - - - - 1 - 1 - 40 - 40 - - - - - - - - 1 - 1 - 40 - 40 - - - - - 1 - 32 - 40 - 40 - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ 
InferenceEngine::Precision::U8, {1280}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t reorders_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if (node->getType() == MKLDNNPlugin::Reorder) { - reorders_num++; - ASSERT_EQ(MKLDNNPlugin::Output, node->getChildEdgeAt(0)->getChild()->getType()); - } - } - ASSERT_EQ(reorders_num, 1); -} - -TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) { - std::string model = R"V0G0N( - - - - - - 1 - 128 - 1 - 88 - - - - - - - - 1 - 128 - 1 - 88 - - - - - 1 - 71 - 1 - 88 - - - - - - - - - - - - 1 - 71 - 1 - 88 - - - - - 1 - 71 - 1 - 88 - - - - - - - - 1 - 71 - 1 - 88 - - - - - 1 - 128 - - - - - - - - - - - - 1 - 128 - - - - - 1 - 128 - 1 - 1 - - - - - - - - 1 - 128 - 1 - 1 - - - - - 1 - 128 - 1 - 88 - - - - - - - - 1 - 71 - 1 - 88 - - - 1 - 128 - 1 - 88 - - - - - 1 - 199 - 1 - 88 - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {3672348}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 128, 1, 88}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(desc); - src1->allocate(); - fill_data((float *) src1->buffer(), src1->size()); - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src1)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst(output->size()); - auto *data = output->buffer().as(); - for (size_t i = 0; i < output->size(); i++) { - refDst[i] = data[output->getTensorDesc().offset(i)]; - } - - // Compare for batch2 - network.setBatchSize(2); - graph.CreateGraph(network); - desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 128, 1, 88}, InferenceEngine::NCHW); - - InferenceEngine::Blob::Ptr src1Batch = InferenceEngine::make_shared_blob(desc); - src1Batch->allocate(); - data = src1Batch->buffer().as(); - auto *originData = src1->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < src1->size(); i++) { - data[src1Batch->getTensorDesc().offset(b*src1->size() + i)] = originData[src1->getTensorDesc().offset(i)]; - } - } - - srcs.clear(); - srcs.insert(std::pair("data", src1Batch)); - out = network.getOutputsInfo(); - - outputBlobs.clear(); - item = *out.begin(); - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, 
outputBlobs); - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - dstOut->allocate(); - data = dstOut->buffer().as(); - for(size_t b = 0; b < 2; b++) { - for (size_t i = 0; i < refDst.size(); i++) { - data[dstOut->getTensorDesc().offset(b*refDst.size() + i)] = refDst[i]; - } - } - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0001) { - std::string model = R"V0G0N( - - - - - - 1 - 256 - 23 - 23 - - - - - - - - 1 - 256 - 23 - 23 - - - - - 1 - 63 - 46 - 46 - - - - - - - - - - - 1 - 63 - 46 - 46 - - - 1 - 63 - 46 - 46 - - - - - 1 - 63 - 46 - 46 - - - - - - - - 1 - 63 - 46 - 46 - - - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, { 1032192 }, InferenceEngine::C }); - weights->allocate(); - fill_data((float *)weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - ASSERT_NO_THROW(graph.CreateGraph(network)); -} - -TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0002) { - std::string model = R"V0G0N( - - - - - - 1 - 128 - 46 - 46 - - - - - - - - 1 - 128 - 46 - 46 - - - - - 1 - 84 - 46 - 46 - - - - - - - - - - - 1 - 84 - 46 - 46 - - - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, { 43008 }, InferenceEngine::C }); - weights->allocate(); - fill_data((float *)weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - size_t outputs_num = 0; - auto& nodes = graph.getNodes(); - for (auto &node : nodes) { - if ( node->getType() == MKLDNNPlugin::Output && - (node->getName() == "out_slice_heatmaps.0" || - node->getName() == "out_slice_heatmaps.1" || - node->getName() == "out_slice_heatmaps.2" || - node->getName() == "out_slice_heatmaps.3" ) ) { - outputs_num++; - } - } - ASSERT_EQ(outputs_num, 4); -} - - -TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0003) { - std::string model = R"V0G0N( - - - - - - 1 - 128 - 46 - 46 - - - - - - - 1 - 63 - 46 - 46 - - - - - - - 1 - 21 - 46 - 46 - - - - - - - - 1 - 128 - 46 - 46 - - - - - 1 - 128 - 46 - 46 - - - - - - - - 1 - 63 - 46 - 46 - - - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - - - - - - 1 - 21 - 46 - 46 - - - - - 1 - 21 - 46 - 46 - - - - - - - - 1 - 128 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - 1 - 21 - 46 - 46 - - - - - 1 - 212 - 46 - 46 - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr())); - - MKLDNNGraphTestClass graph; - ASSERT_NO_THROW(graph.CreateGraph(network)); -} - -TEST_F(MKLDNNGraphStructureTests, TestConvolutionDWConvolutionSumFusing) { - std::string model = R"V0G0N( - - - - - - 1 - 32 - 300 - 600 - - - - - - 
- 1 - 48 - 150 - 300 - - - - - - - - 1 - 32 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - - - 1 - 48 - 300 - 600 - - - - - 1 - 48 - 150 - 300 - - - - - - - - - - 1 - 48 - 150 - 300 - - - 1 - 48 - 150 - 300 - - - - - 1 - 48 - 150 - 300 - - - - - - - 1 - 48 - 150 - 300 - - - - - 1 - 48 - 150 - 300 - - - - - - - - 1 - 48 - 150 - 300 - - - - - 1 - 48 - 150 - 300 - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {8064}, InferenceEngine::C }); - weights->allocate(); - float * data = weights->buffer(); - memset((float *) weights->buffer(), 0, weights->size()); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - network = core.ReadNetwork(model, weights_ptr); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc src0_desc(InferenceEngine::Precision::FP32, {1, 32, 300, 600}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src0 = InferenceEngine::make_shared_blob(src0_desc); - src0->allocate(); - data = src0->buffer().as(); - for (size_t i = 0; i < src0->size(); i++) { - data[i] = 0; - } - - InferenceEngine::TensorDesc src1_desc(InferenceEngine::Precision::FP32, {1, 48, 150, 300}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob(src1_desc); - src1->allocate(); - data = src1->buffer().as(); - for (size_t i = 0; i < src1->size(); i++) { - data[i] = i % 10; - } - - std::vector refDst(src1->size()); - for (size_t i = 0; i < refDst.size(); i++) { - refDst[i] = -1 * data[i]; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data0", src0)); - srcs.insert(std::pair("data1", src1)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestConstantLayerAsOutput) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 10 - 10 - - - - - - - 1 - 3 - 10 - 10 - - - - - 1 - 3 - 10 - 10 - - - - - - - - - - - - 1 - 3 - 10 - 10 - - - - - 1 - 64 - 5 - 5 - - - - - - - - - - - - 1 - 64 - 5 - 5 - - - 1 - 3 - 10 - 10 - - - - - 1 - 2 - 600 - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {37912}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 10, 10}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(desc); - src->allocate(); - auto *data = src->buffer().as(); - size_t sizeB1 = src->size() / 2; - fill_data(data, sizeB1); - for (size_t i = 0; i < sizeB1; i++) { - data[sizeB1 + i] = data[i]; - } - 
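// Note: the reference values built below follow a PriorBox-style layout (box
// coordinates followed by 0.1f/0.2f variances), which presumably depends only
// on the input and feature-map shapes rather than on the input values;
// mirroring the first half of the input buffer into the second half just
// keeps the test data deterministic.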
- InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - std::vector refDst = {-3.603f,-4.313f,6.803f,7.513f,-4.918f,-3.661f,8.118f,6.861f,-5.243f,-5.458f,8.443f,8.658f,-7.395f,-4.832f,10.595f,8.032f, - -7.459f,-7.113f,10.659f,10.313f,-10.814f,-7.249f,14.014f,10.449f,-0.403f,-4.313f,10.003f,7.513f,-1.718f,-3.661f,11.318f,6.861f, - -2.043f,-5.458f,11.643f,8.658f,-4.195f,-4.832f,13.795f,8.032f,-4.259f,-7.113f,13.859f,10.313f,-7.614f,-7.249f,17.214f,10.449f, - 2.797f,-4.313f,13.203f,7.513f,1.482f,-3.661f,14.518f,6.861f,1.157f,-5.458f,14.843f,8.658f,-0.995f,-4.832f,16.995f,8.032f, - -1.059f,-7.113f,17.059f,10.313f,-4.414f,-7.249f,20.414f,10.449f,5.997f,-4.313f,16.403f,7.513f,4.682f,-3.661f,17.718f,6.861f, - 4.357f,-5.458f,18.043f,8.658f,2.205f,-4.832f,20.195f,8.032f,2.141f,-7.113f,20.259f,10.313f,-1.214f,-7.249f,23.614f,10.449f, - 9.197f,-4.313f,19.603f,7.513f,7.882f,-3.661f,20.918f,6.861f,7.557f,-5.458f,21.243f,8.658f,5.405f,-4.832f,23.395f,8.032f,5.341f, - -7.113f,23.459f,10.313f,1.986f,-7.249f,26.814f,10.449f,-3.603f,-1.113f,6.803f,10.713f,-4.918f,-0.461f,8.118f,10.061f,-5.243f,-2.258f, - 8.443f,11.858f,-7.395f,-1.632f,10.595f,11.232f,-7.459f,-3.913f,10.659f,13.513f,-10.814f,-4.049f,14.014f,13.649f,-0.403f,-1.113f, - 10.003f,10.713f,-1.718f,-0.461f,11.318f,10.061f,-2.043f,-2.258f,11.643f,11.858f,-4.195f,-1.632f,13.795f,11.232f,-4.259f,-3.913f, - 13.859f,13.513f,-7.614f,-4.049f,17.214f,13.649f,2.797f,-1.113f,13.203f,10.713f,1.482f,-0.461f,14.518f,10.061f,1.157f,-2.258f,14.843f, - 11.858f,-0.995f,-1.632f,16.995f,11.232f,-1.059f,-3.913f,17.059f,13.513f,-4.414f,-4.049f,20.414f,13.649f,5.997f,-1.113f,16.403f,10.713f, - 4.682f,-0.461f,17.718f,10.061f,4.357f,-2.258f,18.043f,11.858f,2.205f,-1.632f,20.195f,11.232f,2.141f,-3.913f,20.259f,13.513f,-1.214f, - -4.049f,23.614f,13.649f,9.197f,-1.113f,19.603f,10.713f,7.882f,-0.461f,20.918f,10.061f,7.557f,-2.258f,21.243f,11.858f,5.405f,-1.632f, - 23.395f,11.232f,5.341f,-3.913f,23.459f,13.513f,1.986f,-4.049f,26.814f,13.649f,-3.603f,2.087f,6.803f,13.913f,-4.918f,2.739f,8.118f, - 13.261f,-5.243f,0.942f,8.443f,15.058f,-7.395f,1.568f,10.595f,14.432f,-7.459f,-0.713f,10.659f,16.713f,-10.814f,-0.849f,14.014f,16.849f, - -0.403f,2.087f,10.003f,13.913f,-1.718f,2.739f,11.318f,13.261f,-2.043f,0.942f,11.643f,15.058f,-4.195f,1.568f,13.795f,14.432f,-4.259f, - -0.713f,13.859f,16.713f,-7.614f,-0.849f,17.214f,16.849f,2.797f,2.087f,13.203f,13.913f,1.482f,2.739f,14.518f,13.261f,1.157f,0.942f,14.843f, - 15.058f,-0.995f,1.568f,16.995f,14.432f,-1.059f,-0.713f,17.059f,16.713f,-4.414f,-0.849f,20.414f,16.849f,5.997f,2.087f,16.403f,13.913f, - 4.682f,2.739f,17.718f,13.261f,4.357f,0.942f,18.043f,15.058f,2.205f,1.568f,20.195f,14.432f,2.141f,-0.713f,20.259f,16.713f,-1.214f,-0.849f, - 23.614f,16.849f,9.197f,2.087f,19.603f,13.913f,7.882f,2.739f,20.918f,13.261f,7.557f,0.942f,21.243f,15.058f,5.405f,1.568f,23.395f,14.432f, - 5.341f,-0.713f,23.459f,16.713f,1.986f,-0.849f,26.814f,16.849f,-3.603f,5.287f,6.803f,17.113f,-4.918f,5.939f,8.118f,16.461f,-5.243f,4.142f, - 8.443f,18.258f,-7.395f,4.768f,10.595f,17.632f,-7.459f,2.487f,10.659f,19.913f,-10.814f,2.351f,14.014f,20.049f,-0.403f,5.287f,10.003f, - 
17.113f,-1.718f,5.939f,11.318f,16.461f,-2.043f,4.142f,11.643f,18.258f,-4.195f,4.768f,13.795f,17.632f,-4.259f,2.487f,13.859f,19.913f, - -7.614f,2.351f,17.214f,20.049f,2.797f,5.287f,13.203f,17.113f,1.482f,5.939f,14.518f,16.461f,1.157f,4.142f,14.843f,18.258f,-0.995f,4.768f, - 16.995f,17.632f,-1.059f,2.487f,17.059f,19.913f,-4.414f,2.351f,20.414f,20.049f,5.997f,5.287f,16.403f,17.113f,4.682f,5.939f,17.718f,16.461f, - 4.357f,4.142f,18.043f,18.258f,2.205f,4.768f,20.195f,17.632f,2.141f,2.487f,20.259f,19.913f,-1.214f,2.351f,23.614f,20.049f,9.197f,5.287f, - 19.603f,17.113f,7.882f,5.939f,20.918f,16.461f,7.557f,4.142f,21.243f,18.258f,5.405f,4.768f,23.395f,17.632f,5.341f,2.487f,23.459f,19.913f, - 1.986f,2.351f,26.814f,20.049f,-3.603f,8.487f,6.803f,20.313f,-4.918f,9.139f,8.118f,19.661f,-5.243f,7.342f,8.443f,21.458f,-7.395f,7.968f, - 10.595f,20.832f,-7.459f,5.687f,10.659f,23.113f,-10.814f,5.551f,14.014f,23.249f,-0.403f,8.487f,10.003f,20.313f,-1.718f,9.139f,11.318f, - 19.661f,-2.043f,7.342f,11.643f,21.458f,-4.195f,7.968f,13.795f,20.832f,-4.259f,5.687f,13.859f,23.113f,-7.614f,5.551f,17.214f,23.249f,2.797f, - 8.487f,13.203f,20.313f,1.482f,9.139f,14.518f,19.661f,1.157f,7.342f,14.843f,21.458f,-0.995f,7.968f,16.995f,20.832f,-1.059f,5.687f,17.059f, - 23.113f,-4.414f,5.551f,20.414f,23.249f,5.997f,8.487f,16.403f,20.313f,4.682f,9.139f,17.718f,19.661f,4.357f,7.342f,18.043f,21.458f,2.205f, - 7.968f,20.195f,20.832f,2.141f,5.687f,20.259f,23.113f,-1.214f,5.551f,23.614f,23.249f,9.197f,8.487f,19.603f,20.313f,7.882f,9.139f,20.918f, - 19.661f,7.557f,7.342f,21.243f,21.458f,5.405f,7.968f,23.395f,20.832f,5.341f,5.687f,23.459f,23.113f,1.986f,5.551f,26.814f,23.249f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 
0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f, - 0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f,0.100f,0.100f,0.200f,0.200f}; - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWithConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 227 - 227 - - - - - - - - 1 - 3 - 227 - 227 - - - - - 1 - 64 - 113 - 113 - - - - - - - - - - - - 1 - 64 - 113 - 113 - - - - - 1 - 64 - 113 - 113 - - - - - - - - 1 - 64 - 113 - 113 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 16 - 56 - 56 - - - - - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 16 - 56 - 56 - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - 1 - 64 - 56 - 56 - - - - - 1 - 128 - 56 - 56 - - - - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {52800}, InferenceEngine::C }); - 
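// The 52800-byte U8 blob above is raw backing storage for all Convolution
// weights and biases referenced by the IR; fill_data() below reinterprets it
// as floats, so the exact values are arbitrary but reproducible.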
weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - auto graphInfer = [](InferenceEngine::CNNNetwork network, InferenceEngine::BlobMap& inBlobs, - InferenceEngine::BlobMap& outBlobs, std::string primitivesPriority) { - for (auto it = InferenceEngine::details::CNNNetworkIterator(network); !primitivesPriority.empty() && - it != InferenceEngine::details::CNNNetworkIterator(); it++) { - (*it)->params["PrimitivesPriority"] = primitivesPriority; - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - graph.Infer(inBlobs, outBlobs); - }; - - InferenceEngine::InputsDataMap inputsMap = network.getInputsInfo(); - InferenceEngine::BlobMap inputBlobs; - - for (const auto& input : inputsMap) { - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(input.second->getTensorDesc()); - src->allocate(); - fill_data((float *) src->buffer(), src->size()); - inputBlobs[input.first] = src; - } - - InferenceEngine::OutputsDataMap outsMap = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs1; - InferenceEngine::BlobMap outputBlobs2; - for (const auto& output : outsMap) { - InferenceEngine::TBlob::Ptr dst1, dst2; - dst1 = InferenceEngine::make_shared_blob(output.second->getTensorDesc()); - dst1->allocate(); - outputBlobs1[output.first] = dst1; - dst2 = InferenceEngine::make_shared_blob(output.second->getTensorDesc()); - dst2->allocate(); - outputBlobs2[output.first] = dst2; - } - - graphInfer(network, inputBlobs, outputBlobs1, ""); - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_blas"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); - - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_avx512"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); - - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_avx2"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); - - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_sse42"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); - - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_any"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); -} - - -TEST_F(MKLDNNGraphStructureTests, TestRefPoolingWithConcat) { - std::string model = R"V0G0N( - - - - - - 1 - 3 - 227 - 227 - - - - - - - - 1 - 3 - 227 - 227 - - - - - 1 - 64 - 113 - 113 - - - - - - - - - - - - 1 - 64 - 113 - 113 - - - - - 1 - 64 - 113 - 113 - - - - - - - - 1 - 64 - 113 - 113 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 16 - 56 - 56 - - - - - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 16 - 56 - 56 - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - - - - - 1 - 64 - 56 - 56 - - - - - 1 - 64 - 56 - 56 - - - - - - - - 1 - 16 - 56 - 56 - - - - - 1 - 16 - 56 - 56 - - - - - - - - - - - - 1 - 64 - 56 - 56 - - - 1 - 16 - 56 - 56 - - - - - 1 - 80 - 56 - 56 - - - - - - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {52800}, InferenceEngine::C }); - weights->allocate(); - fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - 
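// The graphInfer helper defined below stamps a "PrimitivesPriority" param onto
// every layer before building the graph, forcing the plugin to select a given
// implementation (e.g. cpu:ref_any); its output is then compared against the
// result of the default implementation choice.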
InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - auto graphInfer = [](InferenceEngine::CNNNetwork network, InferenceEngine::BlobMap& inBlobs, - InferenceEngine::BlobMap& outBlobs, std::string primitivesPriority) { - for (auto it = InferenceEngine::details::CNNNetworkIterator(network); !primitivesPriority.empty() && - it != InferenceEngine::details::CNNNetworkIterator(); it++) { - (*it)->params["PrimitivesPriority"] = primitivesPriority; - } - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - graph.Infer(inBlobs, outBlobs); - }; - - InferenceEngine::InputsDataMap inputsMap = network.getInputsInfo(); - InferenceEngine::BlobMap inputBlobs; - - for (const auto& input : inputsMap) { - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(input.second->getTensorDesc()); - src->allocate(); - fill_data((float *) src->buffer(), src->size()); - inputBlobs[input.first] = src; - } - - InferenceEngine::OutputsDataMap outsMap = network.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs1; - InferenceEngine::BlobMap outputBlobs2; - for (const auto& output : outsMap) { - InferenceEngine::TBlob::Ptr dst1, dst2; - dst1 = InferenceEngine::make_shared_blob(output.second->getTensorDesc()); - dst1->allocate(); - outputBlobs1[output.first] = dst1; - dst2 = InferenceEngine::make_shared_blob(output.second->getTensorDesc()); - dst2->allocate(); - outputBlobs2[output.first] = dst2; - } - - graphInfer(network, inputBlobs, outputBlobs1, ""); - graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_blas,cpu:ref_any"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); - - graphInfer(network, inputBlobs, outputBlobs2, "cpu:ref_any"); - compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second); -} - -TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2DepthwiseOpFusing) { - std::string model = R"V0G0N( - - - - - - 1 - 32 - 300 - 600 - - - - - - - - 1 - 32 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - - - 1 - 48 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - 1 - 48 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {6724}, InferenceEngine::C }); - weights->allocate(); - float* wdata = weights->buffer(); - - for (int i = 0; i < weights->size() / sizeof(float); i++) - wdata[i] = 1; - wdata[1584] = 2; // 2 for prelu weights - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - const auto& nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), 5); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Convolution); - ASSERT_TRUE(nodes[2].get()->isFusedWith(MKLDNNPlugin::Type::Eltwise)); - ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output); - - InferenceEngine::TensorDesc src_desc(InferenceEngine::Precision::FP32, {1, 32, 300, 600}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(src_desc); - src->allocate(); - float* sdata = 
src->buffer().as(); - for (size_t i = 0; i < src->size(); i++) { - sdata[i] = -1; - } - - std::vector refDst(1 * 48 * 300 * 600); - for (size_t i = 0; i < refDst.size(); i++) { - refDst[i] = -61; // (-32 + 1) * 2 * 1 + 1 - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2EltwiseOpFusing) { - std::string model = R"V0G0N( - - - - - - 1 - 1 - 300 - 600 - - - - - - - - 1 - 1 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - - 1 - 48 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - 1 - 48 - 300 - 600 - - - - - 1 - 48 - 300 - 600 - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {384}, InferenceEngine::C }); - weights->allocate(); - float* wdata = weights->buffer(); - - for (int i = 0; i < weights->size() / sizeof(float); i++) - wdata[i] = 1; - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - const auto& nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), 4); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Convolution); - ASSERT_TRUE(nodes[1].get()->isFusedWith(MKLDNNPlugin::Type::Eltwise)); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Output); - - InferenceEngine::TensorDesc src_desc(InferenceEngine::Precision::FP32, {1, 1, 300, 600}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(src_desc); - src->allocate(); - float* sdata = src->buffer().as(); - for (size_t i = 0; i < src->size(); i++) { - sdata[i] = i % 2 == 0 ? 2 : -2; - } - - std::vector refDst(1 * 48 * 300 * 600); - for (size_t i = 0; i < refDst.size(); i++) { - refDst[i] = i % 2 == 0 ? 
0.952574127f : 0.3f; - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWith2DepthwiseOpFusing) { - std::string model = R"V0G0N( - - - - - - 1 - 8 - 300 - 600 - - - - - - - - 1 - 8 - 300 - 600 - - - - - 1 - 8 - 300 - 600 - - - - - - - - - - 1 - 8 - 300 - 600 - - - - - 1 - 8 - 300 - 600 - - - - - - - - 1 - 8 - 300 - 600 - - - - - 1 - 8 - 300 - 600 - - - - - - - - - - - - -)V0G0N"; - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {228}, InferenceEngine::C }); - weights->allocate(); - float* wdata = weights->buffer(); - - for (int i = 0; i < weights->size() / sizeof(float); i++) - wdata[i] = 1; - wdata[40] = 2; // 2 for prelu weights - - InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - const auto& nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), 3); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Convolution); - ASSERT_TRUE(nodes[1].get()->isFusedWith(MKLDNNPlugin::Type::Eltwise)); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Output); - - InferenceEngine::TensorDesc src_desc(InferenceEngine::Precision::FP32, {1, 8, 300, 600}, InferenceEngine::NCHW); - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(src_desc); - src->allocate(); - float* sdata = src->buffer().as(); - for (size_t i = 0; i < src->size(); i++) { - sdata[i] = -1; - } - - std::vector refDst(1 * 8 * 300 * 600); - for (size_t i = 0; i < refDst.size(); i++) { - refDst[i] = -5; // (-4 + 1) * 2 * 1 + 1 - } - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("data", src)); - - InferenceEngine::OutputsDataMap out = network.getOutputsInfo(); - - InferenceEngine::BlobMap outputBlobs; - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - graph.Infer(srcs, outputBlobs); - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(item.second->getTensorDesc(), refDst.data()); - - compare(*output, *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithSplit) { - std::string model = R"V0G0N( - - - - - - 1 - 2 - 8 - 8 - - - - - - - - 1 - 2 - 8 - 8 - - - - - 1 - 1 - 8 - 8 - - - 1 - 1 - 8 - 8 - - - - - - - - -)V0G0N"; - - const size_t batchHeight = 8; - const size_t batchWidth = 8; - const InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::FP32, { 1, 2, batchHeight, batchWidth }, InferenceEngine::NCHW); - const size_t batchSize = batchHeight * batchWidth; - const float channel1Value = 1.0; - const float channel2Value = 2.0; - - InferenceEngine::Blob::Ptr inputBlob = 
InferenceEngine::make_shared_blob(tensorDesc); - inputBlob->allocate(); - float* inputData = inputBlob->buffer().as(); - for (size_t i = 0; i < inputBlob->size(); i++) { - inputData[i] = (i < batchSize) ? channel1Value : channel2Value; - } - - InferenceEngine::TBlob* weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C }); - weights->allocate(); - float* weightsData = weights->buffer(); - for (size_t i = 0ULL; i < weights->size() / sizeof(float); i++) { - weightsData[i] = 1.0; - } - - const InferenceEngine::TBlob::Ptr weightsPtr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weightsPtr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - const auto& nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), 5); - ASSERT_EQ(nodes[0].get()->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1].get()->getType(), MKLDNNPlugin::Type::Split); - ASSERT_EQ(nodes[2].get()->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Output); - ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output); - - InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo(); - const std::pair splitOutputItem1 {"Split.0", outputs["Split.0"]}; - const std::pair splitOutputItem2 {"Split.1", outputs["Split.1"]}; - - std::vector splitExpectedOutputData1(batchSize); - std::vector splitExpectedOutputData2(batchSize); - for (size_t i = 0; i < splitExpectedOutputData1.size(); i++) { - splitExpectedOutputData1[i] = 1.0; - splitExpectedOutputData2[i] = 2.0; - } - const InferenceEngine::TBlob::Ptr splitExpectedOutputBlob1 = InferenceEngine::make_shared_blob( - splitOutputItem1.second->getTensorDesc(), - splitExpectedOutputData1.data()); - const InferenceEngine::TBlob::Ptr splitExpectedOutputBlob2 = InferenceEngine::make_shared_blob( - splitOutputItem2.second->getTensorDesc(), - splitExpectedOutputData2.data()); - - InferenceEngine::BlobMap outputBlobs; - - // Reshape - InferenceEngine::TBlob::Ptr splitOutputBlob1 = InferenceEngine::make_shared_blob(splitOutputItem1.second->getTensorDesc()); - splitOutputBlob1->allocate(); - outputBlobs[splitOutputItem1.first] = splitOutputBlob1; - - // Split - InferenceEngine::TBlob::Ptr splitOutputBlob2 = InferenceEngine::make_shared_blob(splitOutputItem2.second->getTensorDesc()); - splitOutputBlob2->allocate(); - outputBlobs[splitOutputItem2.first] = splitOutputBlob2; - - const InferenceEngine::BlobMap inputsBlobMap = { std::pair("data", inputBlob) }; - graph.Infer(inputsBlobMap, outputBlobs); - - compare(*splitOutputBlob1, *splitExpectedOutputBlob1); - compare(*splitOutputBlob2, *splitExpectedOutputBlob2); -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithFakeOutput) { - std::string modelTemplate = R"V0G0N( - - - - - - 1 - 2 - 8 - 8 - - - - - - - - 1 - 2 - 8 - 8 - - - - - 1 - 1 - 8 - 8 - - - 1 - 1 - 8 - 8 - - - - - - - - 1 - 1 - 8 - 8 - - - - - 1 - 8 - 8 - - - - - - - - - -)V0G0N"; - - const size_t bufferForValues = 1024; - std::vector model(modelTemplate.size() + bufferForValues); - - const size_t batchHeight = 8; - const size_t batchWidth = 8; - const InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::FP32, { 1, 2, batchHeight, batchWidth }, InferenceEngine::NCHW); - const size_t batchSize = batchHeight * batchWidth; - const float channel1Value = 1.0; - const float channel2Value = 2.0; - - InferenceEngine::Blob::Ptr inputBlob = 
InferenceEngine::make_shared_blob(tensorDesc); - inputBlob->allocate(); - float* inputData = inputBlob->buffer().as(); - for (size_t i = 0; i < inputBlob->size(); i++) { - inputData[i] = (i < batchSize) ? channel1Value : channel2Value; - } - - for (int splitFromPortNumber = 1; splitFromPortNumber <= 2; ++splitFromPortNumber) { - sprintf(model.data(), modelTemplate.c_str(), splitFromPortNumber); - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C }); - weights->allocate(); - float* weightsData = weights->buffer(); - for (size_t i = 0ULL; i < weights->size() / sizeof(float); i++) { - weightsData[i] = 1.0; - } - - const InferenceEngine::TBlob::Ptr weightsPtr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(&model[0], weightsPtr)); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo(); - const std::pair reshapeOutputItem = std::make_pair("Reshape", outputs["Reshape"]); - const std::string splitOutputName = std::string("Split.") + (splitFromPortNumber == 1 ? "1" : "0"); - const std::pair splitOutputItem = std::make_pair(splitOutputName, outputs[splitOutputName]); - - std::vector reshapeExpectedOutputData(batchSize); - std::vector splitExpectedOutputData(batchSize); - for (size_t i = 0; i < reshapeExpectedOutputData.size(); i++) { - reshapeExpectedOutputData[i] = (splitFromPortNumber == 1) ? 1.0 : 2.0; - splitExpectedOutputData[i] = (splitFromPortNumber == 1) ? 2.0 : 1.0; - } - const InferenceEngine::TBlob::Ptr reshapeExpectedOutputBlob = InferenceEngine::make_shared_blob( - reshapeOutputItem.second->getTensorDesc(), - reshapeExpectedOutputData.data()); - const InferenceEngine::TBlob::Ptr splitExpectedOutputBlob = InferenceEngine::make_shared_blob( - splitOutputItem.second->getTensorDesc(), - splitExpectedOutputData.data()); - - InferenceEngine::BlobMap outputBlobs; - - // Reshape - InferenceEngine::TBlob::Ptr reshapeOutputBlob = InferenceEngine::make_shared_blob(reshapeOutputItem.second->getTensorDesc()); - reshapeOutputBlob->allocate(); - outputBlobs[reshapeOutputItem.first] = reshapeOutputBlob; - - // Split - InferenceEngine::TBlob::Ptr splitOutputBlob = InferenceEngine::make_shared_blob(splitOutputItem.second->getTensorDesc()); - splitOutputBlob->allocate(); - outputBlobs[splitOutputItem.first] = splitOutputBlob; - - const InferenceEngine::BlobMap inputsBlobMap = { std::pair("data", inputBlob) }; - graph.Infer(inputsBlobMap, outputBlobs); - - compare(*reshapeOutputBlob, *reshapeExpectedOutputBlob); - compare(*splitOutputBlob, *splitExpectedOutputBlob); - } -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) { - std::string model = R"V0G0N( - - - - - - 1 - 2 - 8 - 8 - - - - - - - - 1 - 2 - 8 - 8 - - - - - 1 - 1 - 8 - 8 - - - 1 - 1 - 8 - 8 - - - - - - - - 1 - 1 - 8 - 8 - - - - - 1 - 8 - 8 - - - - - - - - 1 - 1 - 8 - 8 - - - - - 1 - 8 - 8 - - - - - - - - 1 - 1 - 8 - 8 - - - - - 1 - 8 - 8 - - - - - - - - - - - -)V0G0N"; - - const size_t batchHeight = 8; - const size_t batchWidth = 8; - const InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::FP32, { 1, 2, batchHeight, batchWidth }, InferenceEngine::NCHW); - const size_t batchSize = batchHeight * batchWidth; - const float channel1Value = 1.0; - const float channel2Value = 2.0; - - InferenceEngine::Blob::Ptr inputBlob = 
InferenceEngine::make_shared_blob(tensorDesc); - inputBlob->allocate(); - float* inputData = inputBlob->buffer().as(); - for (size_t i = 0; i < inputBlob->size(); i++) { - inputData[i] = (i < batchSize) ? channel1Value : channel2Value; - } - - InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C }); - weights->allocate(); - float* weightsData = weights->buffer(); - for (size_t i = 0ULL; i < weights->size() / sizeof(float); i++) { - weightsData[i] = 1.0; - } - - const InferenceEngine::TBlob::Ptr weightsPtr = InferenceEngine::TBlob::Ptr(weights); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weightsPtr)); - - network.addOutput("split"); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(network); - - const auto& nodes = graph.getNodes(); - ASSERT_EQ(nodes.size(), 12); - ASSERT_EQ(nodes[0]->getType(), MKLDNNPlugin::Type::Input); - ASSERT_EQ(nodes[1]->getType(), MKLDNNPlugin::Type::Split); - ASSERT_EQ(nodes[2]->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[3]->getType(), MKLDNNPlugin::Type::Reshape); - ASSERT_EQ(nodes[4]->getType(), MKLDNNPlugin::Type::Output); - ASSERT_EQ(nodes[5]->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[6]->getType(), MKLDNNPlugin::Type::Reshape); - ASSERT_EQ(nodes[7]->getType(), MKLDNNPlugin::Type::Output); - ASSERT_EQ(nodes[8]->getType(), MKLDNNPlugin::Type::Reorder); - ASSERT_EQ(nodes[9]->getType(), MKLDNNPlugin::Type::Reshape); - ASSERT_EQ(nodes[10]->getType(), MKLDNNPlugin::Type::Output); - ASSERT_EQ(nodes[11]->getType(), MKLDNNPlugin::Type::Output); - - InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo(); - std::vector> outputItems = { - std::make_pair("reshape1", outputs.find("reshape1")->second), - std::make_pair("reshape2", outputs.find("reshape2")->second), - std::make_pair("reshape3", outputs.find("reshape3")->second), - std::make_pair("split.0", outputs.find("split.0")->second) - }; - - std::vector> expectedOutputData = { - std::vector(batchSize), - std::vector(batchSize), - std::vector(batchSize), - std::vector(batchSize) - }; - for (size_t i = 0; i < batchSize; i++) { - expectedOutputData[0][i] = channel1Value; - expectedOutputData[1][i] = channel1Value; - expectedOutputData[2][i] = channel2Value; - - expectedOutputData[3][i] = channel1Value; - } - - std::vector::Ptr> expectedOutputBlobs(outputs.size()); - for (size_t i = 0; i < outputs.size(); i++) { - expectedOutputBlobs[i] = InferenceEngine::make_shared_blob( - outputItems[i].second->getTensorDesc(), - expectedOutputData[i].data()); - } - - std::vector::Ptr> outputBlobs; - outputBlobs.reserve(outputItems.size()); - - InferenceEngine::BlobMap outputBlobsMap; - for(const std::pair& item : outputItems) { - InferenceEngine::TBlob::Ptr blob = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - outputBlobs.push_back(blob); - blob->allocate(); - outputBlobsMap[item.first] = blob; - } - - const InferenceEngine::BlobMap inputsBlobMap = { std::pair("data", inputBlob) }; - graph.Infer(inputsBlobMap, outputBlobsMap); - - for(size_t i = 0; i < 3; i++) { - compare(*outputBlobs[i], *expectedOutputBlobs[i]); - } -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData_2) { - std::string model = R"V0G0N( - - - - - - 1 - 2 - 8 - 8 - - - - - - - - 1 - 2 - 8 - 8 - - - - - 1 - 1 - 8 - 8 - - - 1 - 1 - 8 - 8 - - - - - - - - 1 - 1 - 8 - 8 - - - - - 1 - 1 - 8 - 8 - - - - - - - - - -)V0G0N"; - using namespace 
InferenceEngine; - - const size_t H = 8; - const size_t W = 8; - const size_t imgSz = H * W; - const float channel1Value = 1.0; - const float channel2Value = 2.0; - - const auto weights = make_shared_blob(TensorDesc(Precision::U8, SizeVector{0}, Layout::C)); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork net; - ASSERT_NO_THROW(net = core.ReadNetwork(model, weights)); - - net.addOutput("split", 0); - - MKLDNNGraphTestClass graph; - graph.CreateGraph(net); - - auto inBlob = make_shared_blob({ Precision::FP32, SizeVector{1, 2, H, W}, Layout::NCHW }); - auto outBlob1 = make_shared_blob({ Precision::FP32, SizeVector{1, 1, H, W}, Layout::NCHW }); - auto outBlob2 = make_shared_blob({ Precision::FP32, SizeVector{1, 1, H, W}, Layout::NCHW }); - auto outBlob3 = make_shared_blob({ Precision::FP32, SizeVector{1, 1, H, W}, Layout::NCHW }); - - inBlob->allocate(); - outBlob1->allocate(); - outBlob2->allocate(); - outBlob3->allocate(); - - auto in_ptr = inBlob->buffer().as(); - for (int i = 0; i < imgSz; i++) { - in_ptr[i] = channel1Value; - in_ptr[i + imgSz] = channel2Value; - } - - BlobMap inputBlobMap = { {"data" , inBlob } }, - outputBlobMap = { {"split.0", outBlob1}, - {"split.1", outBlob2}, - {"power" , outBlob3} }; - - graph.Infer(inputBlobMap, outputBlobMap); - - auto out_check = [] ( Blob::Ptr blob, float val) { - auto size = blob->size(); - auto ptr = blob->buffer().as(); - bool res = true; - for (int i = 0; i < size; i++) - res &= ( std::abs( ptr[i] - val ) < 0.00001f ); - return res; - }; - - EXPECT_TRUE(out_check(outBlob1, 1)); - EXPECT_TRUE(out_check(outBlob2, 2)); - EXPECT_TRUE(out_check(outBlob3, -1)); -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataToConcat) { - std::shared_ptr function; - { - ngraph::element::Type elementType = ngraph::element::Type_t::f32; - ngraph::Shape shape { 1, 1, 4, 5 }; - auto input = std::make_shared(elementType, shape); - input->set_friendly_name("input"); - - auto weights1 = std::make_shared( - elementType, ngraph::Shape{1, 1, 1, 1}, std::vector(1, 2.0f)); - auto conv1 = std::make_shared( - input, weights1, ngraph::Strides { 1, 1 }, - ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 }); - - auto weights2 = std::make_shared( - elementType, ngraph::Shape{1, 1, 1, 1}, std::vector(1, 3.0f)); - auto conv2 = std::make_shared( - input, weights2, ngraph::Strides { 1, 1 }, - ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 }); - - auto concat = std::make_shared(ngraph::OutputVector { input, conv1, conv2 }, 1); - concat->set_friendly_name("concat"); - auto result = std::make_shared(concat); - - ngraph::ResultVector results { result }; - ngraph::ParameterVector params { input }; - function = std::make_shared(results, params); - } - - auto cnn = InferenceEngine::CNNNetwork(function); - - // Load the network - std::vector inpSize = {1, 1, 4, 5}; - std::vector outSize = {1, 3, 4, 5}; - - InferenceEngine::BlobMap inputBlobs; - InferenceEngine::BlobMap outputBlobs; - - std::vector inpData(4*5, 1); - std::vector outData(3*4*5, 1); - for (int i = 0; i < 4*5; ++i) { - inpData[i] = i; - } - - inputBlobs["input"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, inpSize, - InferenceEngine::TensorDesc::getLayoutByDims(inpSize) }, &inpData[0]); - outputBlobs["concat"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, outSize, - InferenceEngine::TensorDesc::getLayoutByDims(outSize) }, &outData[0]); - - MKLDNNGraphTestClass graph; - 
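// With the two 1x1 convolution kernels fixed at 2.0f and 3.0f, the three
// concat branches should yield the input itself, the input doubled and the
// input tripled along the channel axis; that is exactly the refDst pattern
// checked below.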
graph.CreateGraph(cnn); - graph.Infer(inputBlobs, outputBlobs); - - std::vector refDst = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, - 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57}; - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(outputBlobs["concat"]->getTensorDesc(), refDst.data()); - - compare(*outputBlobs["concat"], *dstOut); -} - -TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataFromInputToConcat) { - std::shared_ptr function; - { - ngraph::element::Type elementType = ngraph::element::Type_t::f32; - auto input = std::make_shared(elementType, ngraph::Shape { 1, 1, 4, 5 }); - input->set_friendly_name("input"); - auto concat = std::make_shared(ngraph::OutputVector { input, input, input }, 1); - concat->set_friendly_name("concat"); - auto result = std::make_shared(concat); - - ngraph::ResultVector results { result }; - ngraph::ParameterVector params { input }; - function = std::make_shared(results, params); - } - - auto cnn = InferenceEngine::CNNNetwork(function); - - // Load the network - std::vector inpSize = {1, 1, 4, 5}; - std::vector outSize = {1, 3, 4, 5}; - - InferenceEngine::BlobMap inputBlobs; - InferenceEngine::BlobMap outputBlobs; - - std::vector inpData(4*5, 1); - std::vector outData(3*4*5, 1); - for (int i = 0; i < 4*5; ++i) - { - inpData[i] = i; - } - - inputBlobs["input"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, inpSize, - InferenceEngine::TensorDesc::getLayoutByDims(inpSize) }, &inpData[0]); - outputBlobs["concat"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, outSize, - InferenceEngine::TensorDesc::getLayoutByDims(outSize) }, &outData[0]); - - - MKLDNNGraphTestClass graph; - graph.CreateGraph(cnn); - graph.Infer(inputBlobs, outputBlobs); - - std::vector refDst = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,}; - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(outputBlobs["concat"]->getTensorDesc(), refDst.data()); - - compare(*outputBlobs["concat"], *dstOut); -} - - -TEST_F(MKLDNNGraphStructureTests, TestCheckIncorrectScaleShift) { - std::string model = R"V0G0N( - - - - - - 1 - 1000 - 16 - - - - - - - 1 - 1000 - 16 - - - - - 1 - 100 - 16 - - - - - - - - - - - - -)V0G0N"; - using namespace InferenceEngine; - auto weights = make_shared_blob(TensorDesc(Precision::U8, SizeVector{64}, Layout::C)); - weights->allocate(); - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network; - ASSERT_NO_THROW(network = core.ReadNetwork(model, weights)); - - MKLDNNGraphTestClass graph; - ASSERT_THROW(graph.CreateGraph(network), InferenceEngine::Exception); -} - -TEST_F(MKLDNNGraphStructureTests, TestConcatWithFourInputs) { - std::shared_ptr function; - { - ngraph::element::Type elementType = ngraph::element::Type_t::f32; - ngraph::Shape shape { 1, 1, 4, 5 }; - auto input = std::make_shared(elementType, shape); - input->set_friendly_name("input"); - - auto weights1 = std::make_shared( - elementType, ngraph::Shape{1, 1, 1, 1}, std::vector(1, 2.0f)); - auto conv1 = std::make_shared( - input, weights1, ngraph::Strides { 1, 1 }, - ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 }); - - auto weights2 = std::make_shared( - elementType, 
ngraph::Shape{1, 1, 1, 1}, std::vector(1, 3.0f)); - auto conv2 = std::make_shared( - input, weights2, ngraph::Strides { 1, 1 }, - ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 }); - - auto weights3 = std::make_shared( - elementType, ngraph::Shape{1, 1, 1, 1}, std::vector(1, -1.0f)); - auto conv3 = std::make_shared( - input, weights3, ngraph::Strides { 1, 1 }, - ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 }); - - auto concat = std::make_shared(ngraph::OutputVector { input, conv1, conv2, conv3 }, 1); - concat->set_friendly_name("concat"); - auto result = std::make_shared(concat); - - ngraph::ResultVector results { result }; - ngraph::ParameterVector params { input }; - function = std::make_shared(results, params); - } - - auto cnn = InferenceEngine::CNNNetwork(function); - - // Load the network - std::vector inpSize = {1, 1, 4, 5}; - std::vector outSize = {1, 4, 4, 5}; - - InferenceEngine::BlobMap inputBlobs; - InferenceEngine::BlobMap outputBlobs; - - std::vector inpData(4*5, 1); - std::vector outData(4*4*5, 1); - for (int i = 0; i < 4*5; ++i) { - inpData[i] = i; - } - - inputBlobs["input"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, inpSize, - InferenceEngine::TensorDesc::getLayoutByDims(inpSize) }, &inpData[0]); - outputBlobs["concat"] = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, outSize, - InferenceEngine::TensorDesc::getLayoutByDims(outSize) }, &outData[0]); - - - MKLDNNGraphTestClass graph; - graph.CreateGraph(cnn); - graph.Infer(inputBlobs, outputBlobs); - - std::vector refDst = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, - 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, - 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19}; - - InferenceEngine::TBlob::Ptr dstOut = InferenceEngine::make_shared_blob(outputBlobs["concat"]->getTensorDesc(), refDst.data()); - - compare(*outputBlobs["concat"], *dstOut); -} diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/test_graph.hpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/test_graph.hpp deleted file mode 100644 index f6d0de68f0ccdc..00000000000000 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/test_graph.hpp +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -// WA for windows.h -#ifdef _WIN32 -# ifndef NOMINMAX -# define NOMINMAX -# endif -# ifndef _WINSOCKAPI_ -# define _WINSOCKAPI_ -# endif -# ifndef _WINSOCK2API_ -# define _WINSOCK2API_ -# endif -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define GARB_VAL(x) ((x + 100.0f + sin(x)) / (x + 150.f)) - -class MKLDNNGraphTestClass: public MKLDNNPlugin::MKLDNNGraph { -private: - MKLDNNPlugin::MKLDNNExtensionManager::Ptr extensionManager = std::make_shared(); - -public: - enum class CheckDynBatchType { - Both, - Parent, - Child - }; - MKLDNNGraphTestClass(): MKLDNNPlugin::MKLDNNGraph() { - auto defaultExtensions = std::make_shared(); - extensionManager->AddExtension(defaultExtensions); - - } - virtual ~MKLDNNGraphTestClass() = default; - - static std::string getStrPrimitiveDescriptorType(MKLDNNPlugin::impl_desc_type type) { - std::string str_type; - - auto add_type = 
[&](std::string t) { - if (!str_type.empty() && t.c_str()[0] != '_') - str_type += "_"; - str_type += t; - }; - -#define SEARCH_TYPE(_type) \ - if ((type & MKLDNNPlugin::impl_desc_type::_type) == MKLDNNPlugin::impl_desc_type::_type) \ - add_type(#_type) - - SEARCH_TYPE(undef); - SEARCH_TYPE(reorder); - SEARCH_TYPE(jit); - SEARCH_TYPE(gemm); - SEARCH_TYPE(ref); - - SEARCH_TYPE(avx512); - SEARCH_TYPE(avx2); - SEARCH_TYPE(sse42); - SEARCH_TYPE(blas); - SEARCH_TYPE(any); - - SEARCH_TYPE(winograd); - SEARCH_TYPE(_dw); - SEARCH_TYPE(_1x1); - - if (type == MKLDNNPlugin::impl_desc_type::unknown) - str_type = "unknown"; - else if (str_type.empty()) - str_type = "undef"; - return str_type; - } - - void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in, int batch) { - if (!IsReady()) IE_THROW()<< "Wrong state. Topology not ready."; - - auto input = inputNodesMap.find(name); - if (input != inputNodesMap.end()) { - MKLDNNPlugin::MKLDNNDims outDims; - if(input->second->getChildEdgeAt(0)->getDims().ndims() == 0 ) - outDims = MKLDNNPlugin::MKLDNNDims(InferenceEngine::SizeVector(1,1)); - else - outDims = input->second->getChildEdgeAt(0)->getDims(); - if (batch < 1) - batch = outDims[0]; - - const void *ext_data_ptr = in->cbuffer(); - void *inter_data_ptr = input->second->getChildEdgeAt(0)->getMemory().GetData(); - - if (ext_data_ptr != inter_data_ptr) { - MKLDNNPlugin::MKLDNNMemoryDesc ext_tdesc(in->getTensorDesc()); - - if (ext_tdesc.getDims().ndims() == 0) { - ext_tdesc = MKLDNNPlugin::MKLDNNMemoryDesc{ {1}, ext_tdesc.getDataType(), mkldnn::memory::format_tag::a}; - } - - MKLDNNPlugin::MKLDNNMemory ext_mem(eng); - ext_mem.Create(ext_tdesc, ext_data_ptr, false); - - input->second->getChildEdgeAt(0)->getMemory().SetData(ext_mem, in->byteSize() / outDims[0] * batch, false); - } - - // todo: make sure 'name' exists in this map... - if (_meanImages.find(name) != _meanImages.end()) { - if (in->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) { - _meanImages[name].Subtract(outDims, reinterpret_cast(inter_data_ptr), in->getTensorDesc().getLayout()); - } else { - IE_THROW() << "Mean image of type " << in->getTensorDesc().getPrecision().name() << " is unsupported"; - } - } - } else { - IE_THROW() << "Input blob for infer '" << name << "' doesn't correspond to input in network"; - } - } - - void Infer(const InferenceEngine::BlobMap& inputs, InferenceEngine::BlobMap& result, int batch = -1) { - try { - // need to retain converted blobs until infer finish - std::vector convertedInputs; - for (auto input : inputs) { - switch (input.second->getTensorDesc().getPrecision()) { - case InferenceEngine::Precision::FP32: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - case InferenceEngine::Precision::I32: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - case InferenceEngine::Precision::U16: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. 
Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - case InferenceEngine::Precision::I16: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - case InferenceEngine::Precision::U8: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - case InferenceEngine::Precision::I8: { - InferenceEngine::TBlob *in_f = nullptr; - in_f = dynamic_cast *>(input.second.get()); - if (in_f == nullptr) { - FAIL() << "Input data precision not supported. Expected float."; - } - - if (in_f->readOnly() == nullptr) { - IE_THROW() << "Input data was not allocated."; - } - } - break; - default: - IE_THROW() << "Unsupported input precision " << input.second->getTensorDesc().getPrecision(); - } - - PushInputData(input.first, input.second, batch); - } - MKLDNNPlugin::MKLDNNGraph::Infer(nullptr, batch); - } catch (const std::exception &e) { - FAIL() << e.what(); - } - - PullOutputData(result); - } - - std::vector& getNodes() { - return graphNodes; - } - - void MoveInternalBlobsToConstLayers(InferenceEngine::details::CNNNetworkImpl* netImpl) { - auto createConstInputTo = [&](InferenceEngine::CNNLayerPtr layer, InferenceEngine::Blob::Ptr blob, std::string name) { - InferenceEngine::LayerParams attrs = {layer.get()->name + "_const_" + name, "Const", InferenceEngine::Precision::FP32}; - auto constLayer = std::make_shared(attrs); - constLayer->blobs["custom"] = blob; - - std::vector constDims(layer->insData[0].lock()->getDims().size(), 1); - if (constDims.size() > 1) - constDims[1] = blob.get()->size(); - else - constDims[0] = blob.get()->size(); - const InferenceEngine::TensorDesc& td = {InferenceEngine::Precision::FP32, constDims, InferenceEngine::TensorDesc::getLayoutByDims(constDims)}; - - InferenceEngine::DataPtr newEdgeAfterLayer(new InferenceEngine::Data(constLayer->name, td)); - newEdgeAfterLayer->setName(constLayer->name); - getCreatorLayer(newEdgeAfterLayer) = constLayer; - getInputTo(newEdgeAfterLayer).clear(); - - - netImpl->addData(constLayer->name.c_str(), newEdgeAfterLayer); - IE_SUPPRESS_DEPRECATED_START - netImpl->addLayer(constLayer); - IE_SUPPRESS_DEPRECATED_END - - constLayer->outData.push_back(newEdgeAfterLayer); - getInputTo(newEdgeAfterLayer)[layer->name] = layer; - layer->insData.push_back(newEdgeAfterLayer); - }; - - auto all_layers = InferenceEngine::details::CNNNetSortTopologically( - InferenceEngine::CNNNetwork(netImpl->shared_from_this())); - for (auto &layer : all_layers) { - if (layer->type == "ScaleShift" && layer->insData.size() == 1) { - InferenceEngine::Blob::Ptr scalesBlob = layer->blobs["weights"]; - if (scalesBlob != nullptr) - createConstInputTo(layer, scalesBlob, "weights"); - - InferenceEngine::Blob::Ptr shiftBlob = layer->blobs["biases"]; - if (shiftBlob != nullptr) - createConstInputTo(layer, shiftBlob, "biases"); - } else if (layer->type == "PReLU" && layer->insData.size() == 1) { - InferenceEngine::Blob::Ptr scalesBlob = layer->blobs["weights"]; - if (scalesBlob != nullptr) - createConstInputTo(layer, scalesBlob, "weights"); - } - } - } 
-
-    void CreateGraph(InferenceEngine::CNNNetwork &network, const MKLDNNPlugin::MKLDNNExtensionManager::Ptr& extMgr,
-            MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache = {}) {
-        if (network.getFunction()) {
-            auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
-            MoveInternalBlobsToConstLayers(convertedNetwork.get());
-            MKLDNNGraph::CreateGraph(InferenceEngine::CNNNetwork(convertedNetwork), extMgr, cache);
-        } else {
-            auto & icnnnet = static_cast<InferenceEngine::ICNNNetwork &>(network);
-            InferenceEngine::details::CNNNetworkImpl* netImpl = static_cast<InferenceEngine::details::CNNNetworkImpl*>(&icnnnet);
-            MoveInternalBlobsToConstLayers(netImpl);
-            MKLDNNGraph::CreateGraph(network, extMgr, cache);
-        }
-    }
-
-    void CreateGraph(InferenceEngine::CNNNetwork &network) {
-        MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-        if (network.getFunction()) {
-            auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
-            MoveInternalBlobsToConstLayers(convertedNetwork.get());
-            MKLDNNGraph::CreateGraph(InferenceEngine::CNNNetwork(convertedNetwork), extensionManager, cache);
-        } else {
-            auto & icnnnet = static_cast<InferenceEngine::ICNNNetwork &>(network);
-            InferenceEngine::details::CNNNetworkImpl* netImpl = static_cast<InferenceEngine::details::CNNNetworkImpl*>(&icnnnet);
-            MoveInternalBlobsToConstLayers(netImpl);
-            MKLDNNGraph::CreateGraph(network, extensionManager, cache);
-        }
-    }
-
-    void checkDynBatch(InferenceEngine::BlobMap& srcs, InferenceEngine::BlobMap& outputBlobs, int batch, size_t MB,
-                       const std::function<bool (MKLDNNPlugin::MKLDNNNodePtr)>& comp, CheckDynBatchType type = CheckDynBatchType::Both) {
-        for (auto &node : getNodes()) {
-            if (comp(node)) {
-                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
-                auto *data = inputBlob->buffer().as<float *>();
-                size_t dataSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
-                for (size_t j = 0; j < dataSize; j++) {
-                    data[j] = GARB_VAL(j);
-                }
-
-                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
-                data = outputBlob->buffer().as<float *>();
-                dataSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
-                for (size_t j = 0; j < dataSize; j++) {
-                    data[j] = GARB_VAL(j);
-                }
-            }
-        }
-
-        Infer(srcs, outputBlobs, batch);
-
-        for (auto &node : getNodes()) {
-            if (comp(node)) {
-                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
-                auto *data = inputBlob->buffer().as<float *>();
-                auto inputNoBatchSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
-                for (size_t i = 0; i < batch; i++) {
-                    for (size_t j = 0; j < inputNoBatchSize; j++) {
-                        ASSERT_NE(data[i*inputNoBatchSize + j], GARB_VAL(i*inputNoBatchSize + j));
-                    }
-                }
-
-                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Parent) {
-                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
-                        for (size_t j = 0; j < inputNoBatchSize; j++) {
-                            ASSERT_NEAR(data[i * inputNoBatchSize + j],
-                                        GARB_VAL(i * inputNoBatchSize + j), 0.001f);
-                        }
-                    }
-                }
-
-                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
-                data = outputBlob->buffer().as<float *>();
-                auto outputNoBatchSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
-                for (size_t i = 0; i < batch; i++) {
-                    for (size_t j = 0; j < outputNoBatchSize; j++) {
-                        ASSERT_NE(data[i*outputNoBatchSize + j], GARB_VAL(i*outputNoBatchSize + j));
-                    }
-                }
-                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Child) {
-                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
-                        for (size_t j = 0; j < outputNoBatchSize; j++) {
-                            ASSERT_NEAR(data[i * outputNoBatchSize + j],
-                                        GARB_VAL(i * outputNoBatchSize + j), 0.001f);
-                        }
-                    }
-                }
-            }
-        }
-    }
-};
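Aside: checkDynBatch above poisons the full-batch input and output buffers with GARB_VAL sentinels, runs inference on a smaller batch, and then verifies that only the active slices were rewritten while the tail still holds the sentinel. A minimal standalone sketch of that idea; garbVal, checkOnlyActiveBatchTouched, and the infer callback are illustrative, not from the deleted code:

    // Sketch only: the caller supplies the actual inference run as a callback.
    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <vector>

    static float garbVal(size_t i) { return 100.0f + static_cast<float>(i); }

    void checkOnlyActiveBatchTouched(std::vector<float>& buf, size_t strideN,
                                     size_t batch, size_t MB,
                                     const std::function<void()>& infer) {
        for (size_t j = 0; j < strideN * MB; ++j)
            buf[j] = garbVal(j);                    // poison all MB slices
        infer();                                    // run on the smaller batch
        for (size_t i = 0; i < batch; ++i)          // active slices: rewritten
            for (size_t j = 0; j < strideN; ++j)
                assert(buf[i * strideN + j] != garbVal(i * strideN + j));
        for (size_t i = batch; i < MB; ++i)         // tail slices: untouched
            for (size_t j = 0; j < strideN; ++j)
                assert(buf[i * strideN + j] == garbVal(i * strideN + j));
    }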
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp
deleted file mode 100644
index 743f433131cee5..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock.h>
-#include "mkldnn_memory.h"
-#include "mkldnn_graph.h"
-
-using namespace std;
-using namespace MKLDNNPlugin;
-using namespace mkldnn;
-using namespace ::testing;
-
-class MKLDNNPrimitiveTest : public ::testing::Test {
-protected:
-    virtual void TearDown() override{
-    }
-
-    virtual void SetUp() override{
-    }
-};
-
-//class ChildConv : public MKLDNNConvolution {
-// public:
-//    explicit ChildConv(const engine& eng) : MKLDNNConvolution(eng) {}
-//    // Add the following two lines to the mock class.
-//    MOCK_METHOD0(die, void());
-//    ~ChildConv () { die(); }
-//};
-
-
-TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) {
-    //simulate how convlayer gets created
-    engine e(engine::kind::cpu, 0);
-    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
-//    ChildConv *conv = new ChildConv(e);
-//    EXPECT_CALL(*conv, die()).Times(1);
-
-    std::vector<float> weights = {1,2,3,4};
-    std::vector<void *> weightsData = {(void*)&*weights.begin()};
-    std::vector<size_t> weightsSize = {weights.size() * sizeof(float)};
-
-    memory::dims dims(4);
-    dims[0] = weights.size();
-
-//    conv->CreateWeightsMemory(dims, memory::f32, memory::nchw);
-//    conv->SetWeights(weightsData, weightsSize);
-    FAIL() << "Should change the test";
-//    node->SetPrimitive(conv);
-//    node.reset();
-
-//    Mock::VerifyAndClear(conv);
-}
\ No newline at end of file
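Aside: the commented-out ChildConv mock in the deleted test follows the standard gMock recipe for asserting that an object is destroyed, namely a mock method called from the destructor. A minimal self-contained sketch; Widget and MockWidget are illustrative stand-ins, not MKLDNN types:

    // Sketch only: Widget/MockWidget are hypothetical, not from the patch.
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <memory>

    struct Widget {
        virtual ~Widget() = default;
    };

    struct MockWidget : Widget {
        MOCK_METHOD0(die, void());          // legacy macro, matching the deleted code's style
        ~MockWidget() override { die(); }   // destructor reports through the mock
    };

    TEST(DestructionTest, ownerDeletesTheObject) {
        auto* w = new MockWidget();
        EXPECT_CALL(*w, die()).Times(1);    // expect exactly one destruction
        std::unique_ptr<Widget> owner(w);
        owner.reset();                      // ~MockWidget runs and calls die()
    }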
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/test_layers.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/test_layers.cpp
deleted file mode 100644
index e72cab66708339..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/test_layers.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-//#include <gtest/gtest.h>
-//#include "mkldnn_layers.h"
-//
-//using namespace std;
-//
-//class MKLDNNLayersTests : public ::testing::Test {
-//protected:
-//    virtual void TearDown() override{
-//    }
-//
-//    virtual void SetUp() override{
-//    }
-//
-//};
-//
-//TEST_F(MKLDNNLayersTests, canCreateContext) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr dl ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateConvLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    InferenceEngine::TBlob::Ptr blobPtr(new InferenceEngine::TBlob());
-//    unique_ptr ctx ( new MKLDNNPlugin::Context(blobPtr, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::ConvolutionLayer convLayer({});
-//    InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData"));
-//    dPtr->dims = {0, 0, 0, 0};
-//
-//    convLayer.insData.push_back(dPtr);
-//    convLayer.outData.push_back(dPtr);
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&convLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateLRNLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::NormLayer normLayer({});
-//    InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData"));
-//    dPtr->dims = {1, 1, 27, 27};
-//
-//    normLayer.insData.push_back(dPtr);
-//    normLayer.outData.push_back(dPtr);
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&normLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreatePoolingLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::PoolingLayer poolingLayer({});
-//    InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData"));
-//    dPtr->dims = {1, 1, 27, 27};
-//
-//    poolingLayer.insData.push_back(dPtr);
-//    poolingLayer.outData.push_back(dPtr);
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&poolingLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateSplitLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::SplitLayer splitLayer({});
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&splitLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateConcatLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::ConcatLayer concatLayer({});
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&concatLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateFullyConnectedLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    InferenceEngine::TBlob::Ptr blobPtr(new InferenceEngine::TBlob());
-//    unique_ptr ctx ( new MKLDNNPlugin::Context(blobPtr, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::FullyConnectedLayer fcLayer({});
-//    InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData"));
-//    dPtr->dims = {0, 0, 0, 0};
-//    InferenceEngine::DataPtr dPtr2(new InferenceEngine::Data("testData2"));
-//    dPtr2->dims = {0, 0};
-//
-//    fcLayer.insData.push_back(dPtr);
-//    fcLayer.outData.push_back(dPtr2);
-//    unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&fcLayer, nullptr, dynamic_cast(ctx.get())));
-//
-//    ASSERT_NE(nullptr, dynamic_cast(dl.get()));
-//}
-//
-//TEST_F(MKLDNNLayersTests, canCreateSoftMaxLayer) {
-//    std::vector sd;
-//    std::vector dd;
-//    std::vector ds;
-//    unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds));
-//    ASSERT_NE(nullptr, dynamic_cast(ctx.get()));
-//
-//    InferenceEngine::SoftMaxLayer softmaxLayer({});
-//    InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData"));
InferenceEngine::Data("testData")); -// dPtr->dims = {0, 0, 0, 0}; -// InferenceEngine::DataPtr dPtr2(new InferenceEngine::Data("testData2")); -// dPtr2->dims = {0, 0}; -// -// softmaxLayer.insData.push_back(dPtr); -// softmaxLayer.outData.push_back(dPtr2); -// unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&softmaxLayer, nullptr, dynamic_cast(ctx.get()))); -// -// ASSERT_NE(nullptr, dynamic_cast(dl.get())); -//} -// -//TEST_F(MKLDNNLayersTests, canCreateReLULayer) { -// std::vector sd; -// std::vector dd; -// std::vector ds; -// unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds)); -// ASSERT_NE(nullptr, dynamic_cast(ctx.get())); -// -// InferenceEngine::ReLULayer reLULayer({}); -// InferenceEngine::DataPtr dPtr(new InferenceEngine::Data("testData")); -// dPtr->dims = {1, 1, 27, 27}; -// -// reLULayer.insData.push_back(dPtr); -// reLULayer.outData.push_back(dPtr); -// unique_ptr dl ( MKLDNNPlugin::LayerRegistry::CreateLayer(&reLULayer, nullptr, dynamic_cast(ctx.get()))); -// -// ASSERT_NE(nullptr, dynamic_cast(dl.get())); -//} -// -//TEST_F(MKLDNNLayersTests, canNotCreateCNNLayer) { -// std::vector sd; -// std::vector dd; -// std::vector ds; -// unique_ptr ctx ( new MKLDNNPlugin::Context({}, mkldnn::engine(mkldnn::engine::cpu, 0), &sd, &dd, &ds)); -// ASSERT_NE(nullptr, dynamic_cast(ctx.get())); -// -// InferenceEngine::CNNLayer cnnLayer({}); -// EXPECT_THROW(MKLDNNPlugin::LayerRegistry::CreateLayer(&cnnLayer, nullptr, dynamic_cast(ctx.get())) , InferenceEngine::Exception); -//} -// -//TEST_F(MKLDNNLayersTests, canNotCreateLayerWithoutContext) { -// InferenceEngine::ConvolutionLayer convLayer({}); -// EXPECT_THROW(MKLDNNPlugin::LayerRegistry::CreateLayer(&convLayer, nullptr, nullptr), InferenceEngine::Exception); -//} \ No newline at end of file