diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/crop_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/crop_ie.hpp index 945cb5b50ceab3..8a93adce030061 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/crop_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/crop_ie.hpp @@ -27,6 +27,8 @@ class INFERENCE_ENGINE_API_CLASS(CropIE) : public Op { void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, + const HostTensorVector& inputs) const override; std::vector axes, dim, offset; }; diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp index 575d84aab1958e..0e0affcd7dc3ab 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp @@ -45,3 +45,77 @@ void op::CropIE::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), PartialShape(output_shape)); } + +bool op::CropIE::evaluate(const HostTensorVector &outputs, const HostTensorVector &inputs) const { + if (inputs.front()->get_element_type() != outputs.front()->get_element_type()) { + throw ngraph_error("Input and output data types must be the same!"); + } + + auto *dst_ptr = outputs.front()->get_data_ptr(); + + const int ndims = dim.size(); + + const size_t OFFSET_N = (ndims > 0) ? offset.at(0) : 0; + const size_t OFFSET_C = (ndims > 1) ? offset.at(1) : 0; + const size_t OFFSET_D = (ndims > 4) ? offset.at(ndims - 3) : 0; + const size_t OFFSET_H = (ndims > 2) ? offset.at(ndims - 2) : 0; + const size_t OFFSET_W = (ndims > 3) ? offset.at(ndims - 1) : 0; + + auto outputShape = get_output_partial_shape(0).get_shape(); + + const size_t ON = (ndims > 0) ? outputShape.at(0) : 1; + const size_t OC = (ndims > 1) ? 
outputShape.at(1) : 1; + const size_t OD = (ndims > 4) ? outputShape.at(ndims - 3) : 1; + const size_t OH = (ndims > 2) ? outputShape.at(ndims - 2) : 1; + const size_t OW = (ndims > 3) ? outputShape.at(ndims - 1) : 1; + + auto inputShape = get_input_partial_shape(0).get_shape(); + + const size_t IN = (ndims > 0) ? inputShape.at(0) : 1; + const size_t IC = (ndims > 1) ? inputShape.at(1) : 1; + const size_t ID = (ndims > 4) ? inputShape.at(ndims - 3) : 1; + const size_t IH = (ndims > 2) ? inputShape.at(ndims - 2) : 1; + const size_t IW = (ndims > 3) ? inputShape.at(ndims - 1) : 1; + + auto dst_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { + return (n * OC * OD * OH * OW + c * OD * OH * OW + d * OH * OW + h * OW + w); + }; + auto src_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { + return (n * IC * ID * IH * IW + c * ID * IH * IW + d * IH * IW + h * IW + w); + }; + + if (OFFSET_N + ON > IN) { + throw ngraph_error("Wrong offset!"); + } + if (OFFSET_C + OC > IC) { + throw ngraph_error("Wrong offset!"); + } + if (OFFSET_D + OD > ID) { + throw ngraph_error("Wrong offset!"); + } + if (OFFSET_H + OH > IH) { + throw ngraph_error("Wrong offset!"); + } + if (OFFSET_W + OW > IW) { + throw ngraph_error("Wrong offset!"); + } + + size_t dataSize = inputs.front()->get_element_type().size(); + + auto src_ptr = inputs.front()->get_data_ptr(); + for (size_t n = 0; n < ON; ++n) { + for (size_t c = 0; c < OC; ++c) { + for (size_t d = 0; d < OD; ++d) { + for (size_t h = 0; h < OH; ++h) { + for (size_t w = 0; w < OW; ++w) { + memcpy(dst_ptr + dataSize * dst_off(n, c, d, h, w), + src_ptr + dataSize * src_off(n + OFFSET_N, c + OFFSET_C, d + OFFSET_D, h + OFFSET_H, w + OFFSET_W), + dataSize); + } + } + } + } + } + + return true; +} diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp 
index 3b90a458ddb3af..d6dde5692f1516 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp @@ -58,13 +58,12 @@ void MKLDNNCropNode::initSupportedPrimitiveDescriptors() { return; InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision(); - if (precision != InferenceEngine::Precision::FP32) - precision = InferenceEngine::Precision::FP32; auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision); precision = getCnnLayer()->outData[0]->getPrecision(); - if (precision != InferenceEngine::Precision::FP32) - precision = InferenceEngine::Precision::FP32; auto outputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision); + if (inputDataType != outputDataType) { + outputDataType = inputDataType; // Crop doesn't convert precisions, only moves data + } auto& inDims = getParentEdgeAt(0)->getDims(); if (inDims.ndims() != 2 && inDims.ndims() != 4 && inDims.ndims() != 5) { @@ -125,19 +124,19 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { if (!MKLDNNMemory::IsPlainFormat(parentMem.GetFormat())) { m_block_size = parentMem.GetDescriptor().data.layout_desc.blocking.block_dims[1]; } - int m_inner_dim = dims[dims.size() - 1] * m_block_size; + const int m_inner_dim = dims[dims.size() - 1] * m_block_size; const memory &dst_d = getChildEdgeAt(0)->getMemory().GetPrimitive(); - int dst_ndims = dst_d.get_primitive_desc().desc().data.ndims; + const int dst_ndims = dst_d.get_primitive_desc().desc().data.ndims; // TODO: Rewrite it in general case. For every tensor // and rank, without using letter N,C,D,H,W - int OFFSET_N = (dst_ndims > 0) ? offsets[0] : 0; - int OFFSET_C = (dst_ndims > 1) ? offsets[1] : 0; - int OFFSET_D = (dst_ndims > 4) ? offsets[offsets.size() - 3] : 0; - int OFFSET_H = (dst_ndims > 2) ? offsets[offsets.size() - 2] : 0; - int OFFSET_W = (dst_ndims > 3) ? 
offsets[offsets.size() - 1] : 0; + const int OFFSET_N = (dst_ndims > 0) ? offsets[0] : 0; + const int OFFSET_C = (dst_ndims > 1) ? offsets[1] : 0; + const int OFFSET_D = (dst_ndims > 4) ? offsets[offsets.size() - 3] : 0; + const int OFFSET_H = (dst_ndims > 2) ? offsets[offsets.size() - 2] : 0; + const int OFFSET_W = (dst_ndims > 3) ? offsets[offsets.size() - 1] : 0; // TODO: Check applicability of dyn_batch_lim in early steps. // crop of batch dimension doesn't support dyn batch. @@ -155,42 +154,16 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { const int IH = (src_ndims > 2) ? src_dims[src_dims.size() - 2] : 1; const int IW = (src_ndims > 3) ? src_dims[src_dims.size() - 1] : 1; - const auto *src_data = reinterpret_cast(parentMem.GetData()) + - parentMem.GetDescriptor().data.layout_desc.blocking.offset_padding; - float *dst_data = reinterpret_cast(getChildEdgeAt(0)->getMemory().GetData()) + - getChildEdgeAt(0)->getMemory().GetDescriptor().data.layout_desc.blocking.offset_padding; + const uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(parentMem.GetDataType())); + + const auto *src_data = reinterpret_cast(parentMem.GetData()) + + itemSize * parentMem.GetDescriptor().data.layout_desc.blocking.offset_padding; + auto *dst_data = reinterpret_cast(getChildEdgeAt(0)->getMemory().GetData()) + + itemSize * getChildEdgeAt(0)->getMemory().GetDescriptor().data.layout_desc.blocking.offset_padding; -#ifdef _WIN32 - if (OD == 1 && OH == 1 && OW == 1 && ID == 1 && IH == 1 && IW == 1) { - for (int n = 0; n < ON; ++n) { - cpu_memcpy(&dst_data[n*OC], &src_data[(n+OFFSET_N)*IC + OFFSET_C], OC * sizeof(float)); - } - } else { - for (int n = 0; n < ON; ++n) { - for (int c = 0; c < OC; c += m_block_size) { - for (int d = 0; d < OD; ++d) { - for (int h = 0; h < OH; ++h) { - int dst_ind = - n*OC*OD*OH*OW + c*OD*OH*OW + d*OH*OW*m_block_size + - h*OW*m_block_size; - - int src_ind = - (n+OFFSET_N)*IC*ID*IH*IW + - (c+OFFSET_C)*ID*IH*IW + - 
(d+OFFSET_D)*IH*IW*m_block_size + - (h+OFFSET_H)*IW*m_block_size + - OFFSET_W*m_block_size; - - cpu_memcpy(dst_data + dst_ind, src_data + src_ind, m_inner_dim * sizeof(float)); - } - } - } - } - } -#else if (OD == 1 && OH == 1 && OW == 1 && ID == 1 && IH == 1 && IW == 1) { parallel_for(ON, [&](int n) { - cpu_memcpy(&dst_data[n*OC], &src_data[(n+OFFSET_N)*IC + OFFSET_C], OC * sizeof(float)); + cpu_memcpy(dst_data + itemSize * n * OC, src_data + itemSize *((n+OFFSET_N)*IC + OFFSET_C), OC * itemSize); }); } else { parallel_for2d(ON, (OC / m_block_size), [&](int n, int c) { @@ -201,7 +174,7 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { ((d+OFFSET_D)*IH*IW + OFFSET_H*IW + OFFSET_W)*m_block_size; for (int h = 0; h < OH; ++h) { - cpu_memcpy(dst_data + dst_ind, src_data + src_ind, m_inner_dim * sizeof(float)); + cpu_memcpy(dst_data + itemSize * dst_ind, src_data + itemSize * src_ind, m_inner_dim * itemSize); src_ind += IW * m_block_size; dst_ind += OW * m_block_size; @@ -209,7 +182,6 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { } }); } -#endif } bool MKLDNNCropNode::created() const { diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp new file mode 100644 index 00000000000000..414faf6c62a7a5 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp @@ -0,0 +1,196 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "../src/legacy_api/include/legacy/ngraph_ops/crop_ie.hpp" +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + std::vector, //input shape + std::vector, //dims + std::vector // offset + > testCaseParams; + +typedef std::tuple< + testCaseParams, + InferenceEngine::Precision, // Net precision. 
We'll use only the net precision because the primitive is not supposed to convert precisions. + std::string, // Device name + std::map, // Additional network configuration + CPUSpecificParams> CropLayerCPUTestParamSet; + +class CropLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + testCaseParams testCase; + InferenceEngine::Precision netPrc; + std::string targetName; + std::map additionalConfig; + + CPUSpecificParams cpuParams; + std::tie(testCase, netPrc, targetName, additionalConfig, cpuParams) = obj.param; + + std::ostringstream result; + result << "inShape=" << CommonTestUtils::vec2str(std::get<0>(testCase)) << "_"; + result << "dims=" << CommonTestUtils::vec2str(std::get<1>(testCase)) << "_"; + result << "offset=" << CommonTestUtils::vec2str(std::get<2>(testCase)) << "_"; + result << "netPRC=" << netPrc.name() << "_"; + result << "targetDevice=" << targetName; + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } +protected: + void SetUp() override { + testCaseParams testCase; + std::vector inpShape; + std::vector dims; + std::vector offset; + InferenceEngine::Precision netPrecision; + std::map additionalConfig; + CPUSpecificParams cpuParams; + std::tie(testCase, netPrecision, targetDevice, additionalConfig, cpuParams) = this->GetParam(); + std::tie(inpShape, dims, offset) = testCase; + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + inPrc = outPrc = netPrecision; + + configuration.insert(additionalConfig.begin(), additionalConfig.end()); + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inpShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + + std::vector axes; + for (size_t i = 0; i < inpShape.size(); ++i) { + 
axes.push_back(i); + } + auto ss = std::make_shared(paramOuts[0], axes, dims, offset); + + std::string strExpectedPrc; + if (Precision::BF16 == inPrc) { + strExpectedPrc = "BF16"; + } else if (Precision::FP32 == inPrc) { + strExpectedPrc = "FP32"; + } + + selectedType = "unknown_" + strExpectedPrc; + + ss->get_rt_info() = getCPUInfo(); + + ngraph::ResultVector results{std::make_shared(ss)}; + function = std::make_shared(results, params, "Crop"); + } +}; + +TEST_P(CropLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Crop"); +} + +namespace { +// Withing the test scope we don't need any implicit bf16 optimisations, so let's run the network as is. +std::map additional_config = {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}}; + +std::vector netPrc = {Precision::BF16, Precision::FP32}; + +std::vector testCasesPlain2D = {testCaseParams{{32, 32}, {32, 10}, {0, 20}}, + testCaseParams{{32, 20}, {30, 10}, {2, 10}}}; + +const auto CropParamsPlain2D = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain2D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(emptyCPUSpec)); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Plain_2D, CropLayerCPUTest, CropParamsPlain2D, CropLayerCPUTest::getTestCaseName); + +std::vector testCasesPlain4D = {testCaseParams{{1, 5, 32, 32}, {1, 2, 23, 23}, {0, 2, 5, 4}}, + testCaseParams{{1, 5, 32, 32}, {1, 5, 5, 5}, {0, 0, 20, 20}}, + testCaseParams{{1, 5, 32, 32}, {1, 5, 32, 10}, {0, 0, 0, 20}}, + testCaseParams{{1, 5, 32, 20}, {1, 5, 30, 10}, {0, 0, 2, 10}}}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + +const auto CropParamsPlain4D = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain4D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + 
::testing::Values(additional_config), + ::testing::Values(cpuParams_4D.at(1))); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Plain_4D, CropLayerCPUTest, CropParamsPlain4D, CropLayerCPUTest::getTestCaseName); + +std::vector testCasesBlocked4D = {testCaseParams{{1, 16, 32, 32}, {1, 16, 5, 5}, {0, 0, 20, 20}}, + testCaseParams{{1, 32, 32, 32}, {1, 16, 32, 10}, {0, 0, 0, 20}}}; + +const auto CropParamsBlocked4D = ::testing::Combine( + ::testing::ValuesIn(testCasesBlocked4D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(filterCPUSpecificParams(cpuParams_4D).front())); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Blocked_4D, CropLayerCPUTest, CropParamsBlocked4D, CropLayerCPUTest::getTestCaseName); + +std::vector testCasesPlain4DynBatch = {testCaseParams{{10, 5, 32, 32}, {1, 2, 23, 23}, {0, 2, 5, 4}}, + testCaseParams{{10, 5, 32, 32}, {1, 5, 5, 5}, {0, 0, 20, 20}}, + testCaseParams{{10, 5, 32, 32}, {1, 5, 32, 10}, {0, 0, 0, 20}}, + testCaseParams{{10, 5, 32, 20}, {1, 5, 30, 10}, {0, 0, 2, 10}}}; + +std::map additional_config_dyn_batch = {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}, + {PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES}}; + +const auto CropParamsPlain4DynBatch = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain4DynBatch), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config_dyn_batch), + ::testing::Values(cpuParams_4D.at(1))); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Blocked_4DynBatch, CropLayerCPUTest, CropParamsPlain4DynBatch, CropLayerCPUTest::getTestCaseName); + +std::vector testCasesPlain5D = {testCaseParams{{1, 5, 32, 20, 14}, {1, 5, 30, 10, 8}, {0, 0, 2, 10, 6}}, + testCaseParams{{5, 9, 32, 20, 14}, {2, 5, 30, 10, 8}, {3, 4, 2, 10, 6}}}; + +std::vector cpuParams_5D = { + CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({ncdhw}, 
{ncdhw}, {}, {}) +}; + +const auto CropParamsPlain5D = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain5D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(cpuParams_5D.at(1))); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Plain_5D, CropLayerCPUTest, CropParamsPlain5D, CropLayerCPUTest::getTestCaseName); + +std::vector testCasesBlocked5D = {testCaseParams{{1, 32, 32, 20, 14}, {1, 16, 30, 10, 8}, {0, 0, 2, 10, 6}}, + testCaseParams{{5, 32, 32, 20, 14}, {2, 32, 30, 10, 8}, {3, 0, 2, 10, 6}}}; + +const auto CropParamsBlocked5D = ::testing::Combine( + ::testing::ValuesIn(testCasesBlocked5D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(cpuParams_5D.at(0))); + +INSTANTIATE_TEST_CASE_P(CompareWithRefs_Blocked_5D, CropLayerCPUTest, CropParamsBlocked5D, CropLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace CPULayerTestsDefinitions + diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp deleted file mode 100644 index a845eaf77e5c50..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct crop_base_params { - std::vector in_dims; - std::vector out_dims; - std::vector offsets; -}; - -#ifdef IN -#undef IN -#endif - -struct crop_test_params : crop_base_params { - std::string device_name; - - crop_test_params(std::string name, crop_base_params 
params) : - crop_base_params(params), device_name(name) {} -}; - -template -void ref_crop(InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, crop_test_params prm) { - data_t *dst_ptr = dst.data(); - - int ndims = prm.in_dims.size(); - - size_t OFFSET_N = prm.offsets.at(0); - size_t OFFSET_C = prm.offsets.at(1); - size_t OFFSET_D = ndims == 5 ? prm.offsets.at(ndims - 3) : 0; - size_t OFFSET_H = prm.offsets.at(ndims - 2); - size_t OFFSET_W = prm.offsets.at(ndims - 1); - - size_t ON = prm.out_dims[0]; - size_t OC = prm.out_dims[1]; - size_t OD = ndims == 5 ? prm.out_dims[ndims - 3] : 1; - size_t OH = prm.out_dims[ndims - 2]; - size_t OW = prm.out_dims[ndims - 1]; - - size_t IN = prm.in_dims[0]; - size_t IC = prm.in_dims[1]; - size_t ID = ndims == 5 ? prm.in_dims[ndims - 3] : 1; - size_t IH = prm.in_dims[ndims - 2]; - size_t IW = prm.in_dims[ndims - 1]; - - auto dst_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { - return (n * OC * OD * OH * OW + c * OD * OH * OW + d * OH * OW + h * OW + w); - }; - auto src_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { - return (n * IC * ID * IH * IW + c * ID * IH * IW + d * IH * IW + h * IW + w); - }; - - ASSERT_GE(IN - OFFSET_N, ON); - ASSERT_GE(IC - OFFSET_C, OC); - ASSERT_GE(ID - OFFSET_D, OD); - ASSERT_GE(IH - OFFSET_H, OH); - ASSERT_GE(IW - OFFSET_W, OW); - - data_t* src_ptr = src.data(); - for (size_t n = 0; n < ON; ++n) { - for (size_t c = 0; c < OC; ++c) { - for (size_t d = 0; d < OD; ++d) { - for (size_t h = 0; h < OH; ++h) { - for (size_t w = 0; w < OW; ++w) { - dst_ptr[dst_off(n, c, d, h, w)] = src_ptr[src_off(n + OFFSET_N, c + OFFSET_C, d + OFFSET_D, - h + OFFSET_H, w + OFFSET_W)]; - } - } - } - } - } -} - -class smoke_CropOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - - - _ID0_ - _ID1_ - _ID2_ - _ID3_ - _ID4_ - - - - - _OD0_ - _OD1_ - _OD2_ - _OD3_ - _OD4_ - - - -)V0G0N"; - - std::string edges_t = 
R"V0G0N( - -)V0G0N"; - - std::string getModel(crop_test_params p) { - std::string model = layers_t; - - auto dims_size = p.in_dims.size(); - - if (dims_size == 4) { - REMOVE_LINE(model, ""); - REMOVE_LINE(model, "_ID4_"); - REMOVE_LINE(model, "_OD4_"); - } - - REPLACE_WITH_NUM(model, "_ID0_", p.in_dims[0]); - REPLACE_WITH_NUM(model, "_ID1_", p.in_dims[1]); - REPLACE_WITH_NUM(model, "_ID2_", p.in_dims[2]); - REPLACE_WITH_NUM(model, "_ID3_", p.in_dims[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_ID4_", p.in_dims[4]); - - REPLACE_WITH_NUM(model, "_OD0_", p.out_dims[0]); - REPLACE_WITH_NUM(model, "_OD1_", p.out_dims[1]); - REPLACE_WITH_NUM(model, "_OD2_", p.out_dims[2]); - REPLACE_WITH_NUM(model, "_OD3_", p.out_dims[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_OD4_", p.out_dims[4]); - - REPLACE_WITH_NUM(model, "_OF0_", p.offsets[0]); - REPLACE_WITH_NUM(model, "_OF1_", p.offsets[1]); - REPLACE_WITH_NUM(model, "_OF2_", p.offsets[2]); - REPLACE_WITH_NUM(model, "_OF3_", p.offsets[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_OF4_", p.offsets[4]); - - model = IRTemplateGenerator::getIRTemplate("Crop_Only", p.in_dims, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - try { - crop_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in_dims.size()) { - case 4: layout = InferenceEngine::NCHW; break; - case 5: layout = InferenceEngine::NCDHW; break; - } - - InputsDataMap inputs = network.getInputsInfo(); - DataPtr inPtr1 = inputs["in1"]->getInputData(); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(inPtr1->getTensorDesc()); - src->allocate(); - fill_data(src->buffer(), src->size()); - - TBlob* srcPtr = dynamic_cast*>(src.get()); - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out = 
network.getOutputsInfo(); - BlobMap dstBlobs; - std::pair item = *out.begin(); - TBlob::Ptr dst; - dst = make_shared_blob(item.second->getTensorDesc()); - dst->allocate(); - dstBlobs[item.first] = dst; - - TBlob::Ptr dst_ref; - dst_ref = make_shared_blob(item.second->getTensorDesc()); - dst_ref->allocate(); - - ref_crop(*srcPtr, *dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(dstBlobs); - inferRequest.Infer(); - - compare(*dstBlobs.begin()->second, *dst_ref); - - } catch (const details::InferenceEngineException &e) { - FAIL() << e.what(); - } - } -}; - -#define case_1 crop_base_params({{1, 5, 32, 32}, {1, 2, 23, 23}, {0, 2, 5, 4}}) -#define case_2 crop_base_params({{1, 5, 32, 32}, {1, 5, 5, 5}, {0, 0, 20, 20}}) -#define case_3 crop_base_params({{1, 5, 32, 32}, {1, 5, 32, 10}, {0, 0, 0, 20}}) -#define case_4 crop_base_params({{1, 5, 32, 20}, {1, 5, 30, 10}, {0, 0, 2, 10}}) -#define case_5 crop_base_params({{1, 5, 32, 20, 14}, {1, 5, 30, 10, 8}, {0, 0, 2, 10, 6}}) -#define case_6 crop_base_params({{5, 9, 32, 20, 14}, {2, 5, 30, 10, 8}, {3, 4, 2, 10, 6}}) - -TEST_P(smoke_CropOnlyTest, TestsCrop) {} - -std::string getTestCaseName(testing::TestParamInfo obj) { - int ndims = obj.param.in_dims.size(); - - return obj.param.device_name + - "_in" + std::to_string(obj.param.in_dims[0]) + - "_ic" + std::to_string(obj.param.in_dims[1]) + - "_id" + std::to_string(ndims == 5 ? obj.param.in_dims[ndims - 3] : 1) + - "_ih" + std::to_string(obj.param.in_dims[ndims - 2]) + - "_iw" + std::to_string(obj.param.in_dims[ndims - 1]) + - "_on" + std::to_string(obj.param.out_dims[0]) + - "_oc" + std::to_string(obj.param.out_dims[1]) + - "_od" + std::to_string(ndims == 5 ? 
obj.param.out_dims[ndims - 3] : 1) + - "_oh" + std::to_string(obj.param.out_dims[ndims - 2]) + - "_ow" + std::to_string(obj.param.out_dims[ndims - 1]); -} - -crop_test_params crop_only_test_cases[] = { - crop_test_params("CPU", case_1), - crop_test_params("CPU", case_2), - crop_test_params("CPU", case_3), - crop_test_params("CPU", case_4), - crop_test_params("CPU", case_5), - crop_test_params("CPU", case_6), -}; - -INSTANTIATE_TEST_CASE_P( - TestsPooling, smoke_CropOnlyTest, ::testing::ValuesIn(crop_only_test_cases), getTestCaseName);