From 2b87f000edb834fc0e6c014831b1cc3a9ffb7ee2 Mon Sep 17 00:00:00 2001
From: Mikhail Nosov
Date: Sat, 29 Jan 2022 10:52:52 +0300
Subject: [PATCH] ROI tensor support for Template plugin (#9914)

* ROI tensor support for Template plugin + tests for Template and CPU plugins

GPU doesn't support ROI tensors, so tests were not added for GPU

* Added asserts for unsupported mixed axis order (like 0,3,1,2), and
  unsupported types like int4/int2 for ROI tensors
---
 .../src/template_infer_request.cpp           | 55 ++++++++++-
 .../behavior/ov_infer_request/inference.cpp  | 20 ++++
 .../behavior/ov_infer_request/inference.cpp  | 20 ++++
 .../behavior/ov_infer_request/inference.hpp  | 93 +++++++++++++++++++
 .../behavior/ov_infer_request/inference.cpp  | 74 +++++++++++++++
 5 files changed, 258 insertions(+), 4 deletions(-)
 create mode 100644 docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp
 create mode 100644 src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference.cpp
 create mode 100644 src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp
 create mode 100644 src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp

diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp
index c780225608a7b3..d69c9a15fea4bf 100644
--- a/docs/template_plugin/src/template_infer_request.cpp
+++ b/docs/template_plugin/src/template_infer_request.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -277,11 +278,57 @@ void TemplateInferRequest::inferPreprocess() {
         auto index = _executableNetwork->_inputIndex[networkInput.first];
         const auto& parameter = _executableNetwork->_function->get_parameters()[index];
         auto parameterShape = networkInput.second->getTensorDesc().getDims();
+        auto srcShape = networkInput.second->getTensorDesc().getBlockingDesc().getBlockDims();
         const auto& parameterType = parameter->get_element_type();
-        _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
-            parameterType,
-            parameterShape,
-            InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
+        auto mem_blob = InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second);
+        auto isNonRoiDesc = [](const BlockingDesc& desc) {
+            size_t exp_stride = 1;
+            for (size_t i = 0; i < desc.getBlockDims().size(); i++) {
+                size_t rev_idx = desc.getBlockDims().size() - i - 1;
+                OPENVINO_ASSERT(desc.getOrder()[rev_idx] == rev_idx,
+                                "Template plugin: unsupported tensors with mixed axes order: ",
+                                ngraph::vector_to_string(desc.getOrder()));
+                if (desc.getStrides()[rev_idx] != exp_stride || desc.getOffsetPaddingToData()[rev_idx] != 0) {
+                    return false;
+                }
+                exp_stride *= desc.getBlockDims()[rev_idx];
+            }
+            return true;
+        };
+        if (isNonRoiDesc(networkInput.second->getTensorDesc().getBlockingDesc())) {
+            // No ROI extraction is needed
+            _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType,
+                                                                                        parameterShape,
+                                                                                        mem_blob->rmap().as<void*>());
+        } else {
+            OPENVINO_ASSERT(parameterType.bitwidth() % 8 == 0,
+                            "Template plugin: Unsupported ROI tensor with element type having ",
+                            std::to_string(parameterType.bitwidth()),
+                            " bits size");
+            // Perform manual extraction of ROI tensor
+            // Basic implementation doesn't take axis order into account `desc.getBlockingDesc().getOrder()`
+            // Performance of manual extraction is not optimal, but it is ok for template implementation
+            _inputTensors[index] =
+                _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape);
+            auto desc = mem_blob->getTensorDesc();
+            auto* src_data = mem_blob->rmap().as<uint8_t*>();
+            auto dst_tensor = std::dynamic_pointer_cast<ngraph::runtime::HostTensor>(_inputTensors[index]);
+            OPENVINO_ASSERT(dst_tensor, "Template plugin error: Can't cast created tensor to HostTensor");
+            auto* dst_data = dst_tensor->get_data_ptr<uint8_t>();
+            std::vector<size_t> indexes(parameterShape.size());
+            for (size_t dst_idx = 0; dst_idx < ov::shape_size(parameterShape); dst_idx++) {
+                size_t val = dst_idx;
+                size_t src_idx = 0;
+                for (size_t j1 = 0; j1 < indexes.size(); j1++) {
+                    size_t j = indexes.size() - j1 - 1;
+                    indexes[j] = val % parameterShape[j] + desc.getBlockingDesc().getOffsetPaddingToData()[j];
+                    val /= parameterShape[j];
+                    src_idx += indexes[j] * desc.getBlockingDesc().getStrides()[j];
+                }
+                memcpy(dst_data + dst_idx * parameterType.size(),
+                       src_data + src_idx * parameterType.size(),
+                       parameterType.size());
+            }
+        }
     }
     for (auto&& output : _outputs) {
         auto outputBlob = output.second;
diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp
new file mode 100644
index 00000000000000..940922b49d5936
--- /dev/null
+++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp
@@ -0,0 +1,20 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include "behavior/ov_infer_request/inference.hpp"
+
+namespace {
+
+using namespace ov::test::behavior;
+using namespace ov;
+
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestInferenceTests,
+                         ::testing::Combine(
+                                 ::testing::Values(tensor_roi::roi_nchw(), tensor_roi::roi_1d()),
+                                 ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
+                         OVInferRequestInferenceTests::getTestCaseName);
+
+} // namespace
diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference.cpp
new file mode 100644
index 00000000000000..29410617f4241c
--- /dev/null
+++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference.cpp
@@ -0,0 +1,20 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include "behavior/ov_infer_request/inference.hpp"
+
+namespace {
+
+using namespace ov::test::behavior;
+using namespace ov;
+
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestInferenceTests,
+                         ::testing::Combine(
+                                 ::testing::Values(tensor_roi::roi_nchw(), tensor_roi::roi_1d()),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         OVInferRequestInferenceTests::getTestCaseName);
+
+} // namespace
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp
new file mode 100644
index 00000000000000..8dfb6cca28a90b
--- /dev/null
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp
@@ -0,0 +1,93 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include "functional_test_utils/ov_plugin_cache.hpp"
+#include
+
+namespace ov {
+namespace test {
+namespace behavior {
+
+struct OVInferReqInferParam {
+    ov::Shape m_shape;
+    ov::Tensor m_input_tensor;
+    std::vector<float> m_expected;
+    std::string m_test_name;
+};
+
+using OVInferRequestInferenceTestsParams = std::tuple<OVInferReqInferParam, std::string>;
+
+namespace tensor_roi {
+inline OVInferReqInferParam roi_nchw() {
+    OVInferReqInferParam res;
+    res.m_test_name = "roi_nchw";
+    res.m_shape = Shape{1, 2, 3, 3};
+    auto in_tensor = ov::Tensor(element::f32, Shape{1, 2, 5, 5});
+    auto in_data = std::vector<float>{
+            0, 1, 2, 3, 4,
+            5, 6, 7, 8, 9,
+            0, 1, 2, 3, 4,
+            5, 6, 7, 8, 9,
+            9, 8, 7, 6, 5,
+
+            5, 6, 7, 8, 9,
+            9, 8, 7, 6, 5,
+            0, 1, 2, 3, 4,
+            5, 6, 7, 8, 9,
+            0, 1, 2, 3, 4
+    };
+    memcpy(in_tensor.data(), in_data.data(), in_data.size() * sizeof(float));
+    res.m_input_tensor = ov::Tensor(in_tensor, Coordinate{0, 0, 1, 1}, Coordinate{1, 2, 4, 4});
+    // Extracted 3x3 boxes, add 1 to each element
+    res.m_expected = std::vector<float>{
+            7, 8, 9,
+            2, 3, 4,
+            7, 8, 9,
+
+            9, 8, 7,
+            2, 3, 4,
+            7, 8, 9,
+    };
+    return res;
+}
+
+inline OVInferReqInferParam roi_1d() {
+    OVInferReqInferParam res;
+    res.m_test_name = "roi_1d";
+    res.m_shape = Shape{3};
+    auto in_tensor = ov::Tensor(element::f32, Shape{5});
+    auto in_data = std::vector<float>{10, 20, 30, 40, 50};
+    memcpy(in_tensor.data(), in_data.data(), in_data.size() * sizeof(float));
+    res.m_input_tensor = ov::Tensor(in_tensor, Coordinate{1}, Coordinate{4});
+    res.m_expected = std::vector<float>{21, 31, 41};
+    return res;
+}
+
+} // namespace tensor_roi
+
+class OVInferRequestInferenceTests : public testing::WithParamInterface<OVInferRequestInferenceTestsParams>,
+                                     public CommonTestUtils::TestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<OVInferRequestInferenceTestsParams>& device_name);
+
+protected:
+    void SetUp() override;
+
+    void TearDown() override;
+
+    static std::shared_ptr<Model> create_n_inputs(size_t num, element::Type type,
+                                                  const PartialShape& shape);
+
+    std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
+    OVInferReqInferParam m_param;
+    std::string m_device_name;
+};
+
+} // namespace behavior
+} // namespace test
+} // namespace ov
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp
new file mode 100644
index 00000000000000..4996eafad29816
--- /dev/null
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp
@@ -0,0 +1,74 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+#include "openvino/opsets/opset8.hpp"
+#include "functional_test_utils/ov_plugin_cache.hpp"
+#include "behavior/ov_infer_request/inference.hpp"
+
+namespace ov {
+namespace test {
+namespace behavior {
+
+std::string OVInferRequestInferenceTests::getTestCaseName(
+        const testing::TestParamInfo<OVInferRequestInferenceTestsParams>& obj) {
+    return std::get<0>(obj.param).m_test_name + "_targetDevice=" + std::get<1>(obj.param);
+}
+
+void OVInferRequestInferenceTests::SetUp() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    m_param = std::get<0>(GetParam());
+    m_device_name = std::get<1>(GetParam());
+}
+
+void OVInferRequestInferenceTests::TearDown() {
+}
+
+std::shared_ptr<Model> OVInferRequestInferenceTests::create_n_inputs(size_t n,
+                                                                     element::Type type,
+                                                                     const PartialShape& shape) {
+    ResultVector res;
+    ParameterVector params;
+    for (size_t i = 0; i < n; i++) {
+        auto index_str = std::to_string(i);
+        auto data1 = std::make_shared<opset8::Parameter>(type, shape);
+        data1->set_friendly_name("input" + index_str);
+        data1->get_output_tensor(0).set_names({"tensor_input" + index_str});
+        auto constant = opset8::Constant::create(type, {1}, {1});
+        auto op1 = std::make_shared<opset8::Add>(data1, constant);
+        op1->set_friendly_name("Add" + index_str);
+        auto res1 = std::make_shared<opset8::Result>(op1);
+        res1->set_friendly_name("Result" + index_str);
+        res1->get_output_tensor(0).set_names({"tensor_output" + index_str});
+        params.push_back(data1);
+        res.push_back(res1);
+    }
+    return std::make_shared<Model>(res, params);
+}
+
+TEST_P(OVInferRequestInferenceTests, Inference_ROI_Tensor) {
+    auto shape_size = ov::shape_size(m_param.m_shape);
+    auto model = OVInferRequestInferenceTests::create_n_inputs(1, element::f32, m_param.m_shape);
+    auto execNet = ie->compile_model(model, m_device_name);
+    // Create InferRequest
+    ov::InferRequest req;
+    req = execNet.create_infer_request();
+    const std::string tensor_name = "tensor_input0";
+    req.set_tensor(tensor_name, m_param.m_input_tensor);
+    req.infer();
+    auto actual_out_tensor = req.get_tensor("tensor_output0");
+    auto out_ptr = actual_out_tensor.data<float>();
+    for (size_t i = 0; i < shape_size; ++i) {
+        EXPECT_EQ(out_ptr[i], m_param.m_expected[i]) << "Expected="
+                                                     << m_param.m_expected[i]
+                                                     << ", actual="
+                                                     << out_ptr[i]
+                                                     << " for "
+                                                     << i;
+    }
+}
+
+} // namespace behavior
+} // namespace test
+} // namespace ov
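
Usage note: the new behavior is exercised entirely through the public OpenVINO 2.0 ROI tensor API, i.e. an ov::Tensor constructed as a view over another tensor with begin/end Coordinates and passed to the infer request, exactly as OVInferRequestInferenceTests does above. A minimal stand-alone sketch of that flow follows; the model path, the "TEMPLATE" device string and the shapes are hypothetical placeholders, while the ROI constructor and the set_input_tensor/infer calls are the API this patch relies on.

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        // Hypothetical single-input f32 model; path and device name are placeholders.
        auto model = core.read_model("model.xml");
        auto compiled = core.compile_model(model, "TEMPLATE");
        auto request = compiled.create_infer_request();

        // Allocate the full frame once, then build a zero-copy ROI view over it:
        // the inner 3x3 window of each 5x5 plane, mirroring roi_nchw() above.
        ov::Tensor full_frame(ov::element::f32, ov::Shape{1, 2, 5, 5});
        ov::Tensor roi(full_frame, ov::Coordinate{0, 0, 1, 1}, ov::Coordinate{1, 2, 4, 4});

        // The plugin receives the strided ROI view; with this patch the Template
        // plugin wraps dense blobs directly and copies out strided ROI regions.
        request.set_input_tensor(roi);
        request.infer();
        auto output = request.get_output_tensor();
        (void)output;
        return 0;
    }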