ROI tensor support for Template plugin (openvinotoolkit#9914)
* ROI tensor support for Template plugin + tests for Template and CPU plugins

The GPU plugin doesn't support ROI tensors, so tests were not added for GPU.

* Added asserts for unsupported mixed axis orders (like 0,3,1,2) and for unsupported element types (like int4/int2) in ROI tensors
nosovmik authored Jan 29, 2022
1 parent 1176b0f commit 2b87f00
Showing 5 changed files with 258 additions and 4 deletions.
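Before the diff itself, a minimal usage sketch of what this change enables, based on the public ov::Tensor ROI constructor exercised by the new tests. The model path, device name, and tensor name below are placeholders, not taken from the commit:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Placeholder model path; any model with a 1x2x3x3 f32 input would do.
    auto compiled = core.compile_model("model.xml", "TEMPLATE");
    auto request = compiled.create_infer_request();

    // Parent tensor holding the full 1x2x5x5 data.
    ov::Tensor parent(ov::element::f32, ov::Shape{1, 2, 5, 5});
    // ROI view over the parent: a 1x2x3x3 window starting at (0, 0, 1, 1); no data is copied here.
    ov::Tensor roi(parent, ov::Coordinate{0, 0, 1, 1}, ov::Coordinate{1, 2, 4, 4});

    // "tensor_input0" is a placeholder tensor name.
    request.set_tensor("tensor_input0", roi);
    request.infer();
    return 0;
}

The ROI tensor is only a strided view over the parent's memory; with this change the Template plugin copies that strided region into a dense backend tensor during preprocessing, as shown in the first file below.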
55 changes: 51 additions & 4 deletions docs/template_plugin/src/template_infer_request.cpp
@@ -10,6 +10,7 @@
#include <algorithm>
#include <map>
#include <memory>
#include <ngraph/runtime/host_tensor.hpp>
#include <ngraph/runtime/reference/convert.hpp>
#include <string>
#include <utility>
@@ -277,11 +278,57 @@ void TemplateInferRequest::inferPreprocess() {
auto index = _executableNetwork->_inputIndex[networkInput.first];
const auto& parameter = _executableNetwork->_function->get_parameters()[index];
auto parameterShape = networkInput.second->getTensorDesc().getDims();
auto srcShape = networkInput.second->getTensorDesc().getBlockingDesc().getBlockDims();
const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
parameterType,
parameterShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
auto mem_blob = InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second);
auto isNonRoiDesc = [](const BlockingDesc& desc) {
size_t exp_stride = 1;
for (size_t i = 0; i < desc.getBlockDims().size(); i++) {
size_t rev_idx = desc.getBlockDims().size() - i - 1;
OPENVINO_ASSERT(desc.getOrder()[rev_idx] == rev_idx,
"Template plugin: unsupported tensors with mixed axes order: ",
ngraph::vector_to_string(desc.getOrder()));
if (desc.getStrides()[rev_idx] != exp_stride || desc.getOffsetPaddingToData()[rev_idx] != 0) {
return false;
}
exp_stride *= desc.getBlockDims()[rev_idx];
}
return true;
};
if (isNonRoiDesc(networkInput.second->getTensorDesc().getBlockingDesc())) {
// No ROI extraction is needed
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType,
parameterShape,
mem_blob->rmap().as<void*>());
} else {
OPENVINO_ASSERT(parameterType.bitwidth() % 8 == 0,
"Template plugin: Unsupported ROI tensor with element type having ",
std::to_string(parameterType.bitwidth()),
" bits size");
// Perform manual extraction of ROI tensor
// This basic implementation doesn't take the axis order (`desc.getBlockingDesc().getOrder()`) into account
// Manual extraction is not performance-optimal, but that is acceptable for the template implementation
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape);
auto desc = mem_blob->getTensorDesc();
auto* src_data = mem_blob->rmap().as<uint8_t*>();
auto dst_tensor = std::dynamic_pointer_cast<ngraph::runtime::HostTensor>(_inputTensors[index]);
OPENVINO_ASSERT(dst_tensor, "Template plugin error: Can't cast created tensor to HostTensor");
auto* dst_data = dst_tensor->get_data_ptr<uint8_t>();
std::vector<size_t> indexes(parameterShape.size());
for (size_t dst_idx = 0; dst_idx < ov::shape_size(parameterShape); dst_idx++) {
size_t val = dst_idx;
size_t src_idx = 0;
for (size_t j1 = 0; j1 < indexes.size(); j1++) {
size_t j = indexes.size() - j1 - 1;
indexes[j] = val % parameterShape[j] + desc.getBlockingDesc().getOffsetPaddingToData()[j];
val /= parameterShape[j];
src_idx += indexes[j] * desc.getBlockingDesc().getStrides()[j];
}
memcpy(dst_data + dst_idx * parameterType.size(),
src_data + src_idx * parameterType.size(),
parameterType.size());
}
}
}
for (auto&& output : _outputs) {
auto outputBlob = output.second;
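For reference, a standalone sketch of the index arithmetic used in the extraction loop above: each flat destination index is decomposed into per-axis coordinates (innermost axis first), shifted by the ROI's per-axis offsets, and folded into a source offset via the parent strides. This helper is illustrative only and is not part of the commit; strides and offsets are assumed to be expressed in elements rather than bytes.

#include <cstddef>
#include <vector>

// Copy an ROI into a dense destination buffer, mirroring the plugin's loop.
// roi_dims: ROI shape; offsets: per-axis start inside the parent; strides: parent strides (in elements).
void copy_roi(const float* src, float* dst,
              const std::vector<size_t>& roi_dims,
              const std::vector<size_t>& offsets,
              const std::vector<size_t>& strides) {
    size_t total = 1;
    for (size_t d : roi_dims)
        total *= d;
    for (size_t dst_idx = 0; dst_idx < total; ++dst_idx) {
        size_t val = dst_idx;
        size_t src_idx = 0;
        for (size_t j1 = 0; j1 < roi_dims.size(); ++j1) {
            size_t j = roi_dims.size() - j1 - 1;  // walk axes from innermost to outermost
            size_t coord = val % roi_dims[j] + offsets[j];
            val /= roi_dims[j];
            src_idx += coord * strides[j];
        }
        dst[dst_idx] = src[src_idx];
    }
}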
@@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/inference.hpp"

namespace {

using namespace ov::test::behavior;
using namespace ov;

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestInferenceTests,
::testing::Combine(
::testing::Values(tensor_roi::roi_nchw(), tensor_roi::roi_1d()),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
OVInferRequestInferenceTests::getTestCaseName);

} // namespace
@@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/inference.hpp"

namespace {

using namespace ov::test::behavior;
using namespace ov;

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestInferenceTests,
::testing::Combine(
::testing::Values(tensor_roi::roi_nchw(), tensor_roi::roi_1d()),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
OVInferRequestInferenceTests::getTestCaseName);

} // namespace
@@ -0,0 +1,93 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <gtest/gtest.h>
#include <string>
#include "functional_test_utils/ov_plugin_cache.hpp"
#include <base/behavior_test_utils.hpp>

namespace ov {
namespace test {
namespace behavior {

struct OVInferReqInferParam {
ov::Shape m_shape;
ov::Tensor m_input_tensor;
std::vector<float> m_expected;
std::string m_test_name;
};

using OVInferRequestInferenceTestsParams = std::tuple<OVInferReqInferParam, std::string>;

namespace tensor_roi {
inline OVInferReqInferParam roi_nchw() {
OVInferReqInferParam res;
res.m_test_name = "roi_nchw";
res.m_shape = Shape{1, 2, 3, 3};
auto in_tensor = ov::Tensor(element::f32, Shape{1, 2, 5, 5});
auto in_data = std::vector<float>{
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
9, 8, 7, 6, 5,

5, 6, 7, 8, 9,
9, 8, 7, 6, 5,
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
0, 1, 2, 3, 4
};
memcpy(in_tensor.data(), in_data.data(), in_data.size() * sizeof(float));
res.m_input_tensor = ov::Tensor(in_tensor, Coordinate{0, 0, 1, 1}, Coordinate{1, 2, 4, 4});
// Expected output: the extracted 3x3 boxes with 1 added to each element (the test model adds a constant 1)
res.m_expected = std::vector<float>{
7, 8, 9,
2, 3, 4,
7, 8, 9,

9, 8, 7,
2, 3, 4,
7, 8, 9,
};
return res;
}

inline OVInferReqInferParam roi_1d() {
OVInferReqInferParam res;
res.m_test_name = "roi_1d";
res.m_shape = Shape{3};
auto in_tensor = ov::Tensor(element::f32, Shape{5});
auto in_data = std::vector<float>{10, 20, 30, 40, 50};
memcpy(in_tensor.data(), in_data.data(), in_data.size() * sizeof(float));
res.m_input_tensor = ov::Tensor(in_tensor, Coordinate{1}, Coordinate{4});
res.m_expected = std::vector<float>{21, 31, 41};
return res;
}

} // namespace tensor_roi

class OVInferRequestInferenceTests : public testing::WithParamInterface<OVInferRequestInferenceTestsParams>,
public CommonTestUtils::TestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<OVInferRequestInferenceTestsParams>& device_name);

protected:
void SetUp() override;

void TearDown() override;

static std::shared_ptr<Model> create_n_inputs(size_t num, element::Type type,
const PartialShape& shape);

std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
OVInferReqInferParam m_param;
std::string m_device_name;
};

} // namespace behavior
} // namespace test
} // namespace ov
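As a sanity check on the roi_nchw fixture above: Coordinate{0, 0, 1, 1}..{1, 2, 4, 4} selects the central 3x3 window of each 5x5 channel, and the test model adds a constant 1, which is exactly how m_expected is derived. A standalone verification of that arithmetic (not part of the test suite):

#include <cassert>
#include <vector>

int main() {
    // First 5x5 channel of the roi_nchw input data.
    std::vector<float> channel = {
        0, 1, 2, 3, 4,
        5, 6, 7, 8, 9,
        0, 1, 2, 3, 4,
        5, 6, 7, 8, 9,
        9, 8, 7, 6, 5,
    };
    // Expected: the 3x3 window starting at row 1, column 1, plus the model's Add constant (1).
    std::vector<float> expected = {7, 8, 9, 2, 3, 4, 7, 8, 9};
    for (size_t r = 0; r < 3; ++r) {
        for (size_t c = 0; c < 3; ++c) {
            assert(channel[(r + 1) * 5 + (c + 1)] + 1 == expected[r * 3 + c]);
        }
    }
    return 0;
}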
@@ -0,0 +1,74 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include "openvino/opsets/opset8.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "behavior/ov_infer_request/inference.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestInferenceTests::getTestCaseName(
const testing::TestParamInfo<OVInferRequestInferenceTestsParams>& obj) {
return std::get<0>(obj.param).m_test_name + "_targetDevice=" + std::get<1>(obj.param);
}

void OVInferRequestInferenceTests::SetUp() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
m_param = std::get<0>(GetParam());
m_device_name = std::get<1>(GetParam());
}

void OVInferRequestInferenceTests::TearDown() {
}

std::shared_ptr<Model> OVInferRequestInferenceTests::create_n_inputs(size_t n,
element::Type type,
const PartialShape& shape) {
ResultVector res;
ParameterVector params;
for (size_t i = 0; i < n; i++) {
auto index_str = std::to_string(i);
auto data1 = std::make_shared<opset8::Parameter>(type, shape);
data1->set_friendly_name("input" + index_str);
data1->get_output_tensor(0).set_names({"tensor_input" + index_str});
auto constant = opset8::Constant::create(type, {1}, {1});
auto op1 = std::make_shared<opset8::Add>(data1, constant);
op1->set_friendly_name("Add" + index_str);
auto res1 = std::make_shared<opset8::Result>(op1);
res1->set_friendly_name("Result" + index_str);
res1->get_output_tensor(0).set_names({"tensor_output" + index_str});
params.push_back(data1);
res.push_back(res1);
}
return std::make_shared<Model>(res, params);
}

TEST_P(OVInferRequestInferenceTests, Inference_ROI_Tensor) {
auto shape_size = ov::shape_size(m_param.m_shape);
auto model = OVInferRequestInferenceTests::create_n_inputs(1, element::f32, m_param.m_shape);
auto execNet = ie->compile_model(model, m_device_name);
// Create InferRequest
ov::InferRequest req;
req = execNet.create_infer_request();
const std::string tensor_name = "tensor_input0";
req.set_tensor(tensor_name, m_param.m_input_tensor);
req.infer();
auto actual_out_tensor = req.get_tensor("tensor_output0");
auto out_ptr = actual_out_tensor.data<float>();
for (size_t i = 0; i < shape_size; ++i) {
EXPECT_EQ(out_ptr[i], m_param.m_expected[i]) << "Expected="
<< m_param.m_expected[i]
<< ", actual="
<< out_ptr[i]
<< " for "
<< i;
}
}

} // namespace behavior
} // namespace test
} // namespace ov
