Template reference tests #6456

Merged on Jul 12, 2021 · 22 commits (the diff below reflects the first 6 commits)

Commits:
90177f5  Added convert tests (ilyachur, Jun 29, 2021)
f89c127  Moved convert tests to template plugin (ilyachur, Jun 30, 2021)
55ed4da  Fixed build (ilyachur, Jun 30, 2021)
5be48ff  Fixed comments (ilyachur, Jun 30, 2021)
405c169  Fixed templateFuncTests (ilyachur, Jun 30, 2021)
8c4b6c0  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 1, 2021)
c1743c5  Fixed review comments (ilyachur, Jul 1, 2021)
4303df6  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 1, 2021)
1d347c9  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 4, 2021)
64c8e54  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 7, 2021)
b5807ec  Added dynamic registration of template plugin (ilyachur, Jul 7, 2021)
e8cac0b  Fixed functional tests (ilyachur, Jul 7, 2021)
351c558  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 8, 2021)
f1b2983  Fixed behavior tests (ilyachur, Jul 8, 2021)
6c8e416  Added ticket (ilyachur, Jul 8, 2021)
c110843  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 8, 2021)
4aeae27  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 8, 2021)
d711581  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 9, 2021)
77a81c4  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 9, 2021)
050af74  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 11, 2021)
ec0dd8e  Try to avoid fails (ilyachur, Jul 12, 2021)
88204c1  Merge remote-tracking branch 'upstream/master' into template_referenc… (ilyachur, Jul 12, 2021)
13 changes: 11 additions & 2 deletions docs/template_plugin/src/template_plugin.cpp
@@ -66,8 +66,17 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
     // TODO: add post-processing based on outputsInfoMap
     // Example: register CommonOptimizations transformation from transformations library
     passManager.register_pass<ngraph::pass::CommonOptimizations>();
-    // Template plugin handles only FP32 networks
-    passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
+    // GAPI supports only FP32 networks for pre-processing
+    bool needF16toF32 = false;
+    for (const auto& param : function->get_parameters()) {
+        if (param->get_element_type() == ngraph::element::f16 &&
+            inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) {
+            needF16toF32 = true;
+            break;
+        }
+    }
+    if (needF16toF32)
+        passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
     // Example: register plugin specific transformation
     passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
     passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
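The change makes the f16-to-f32 conversion conditional: it runs only when the graph has an f16 Parameter whose user-facing input precision is not FP16, since the GAPI-based pre-processing handles only FP32. Below is a minimal sketch of that predicate as a standalone helper; the function name and the find-based guard against missing InputInfo entries are illustrative additions, not part of the PR:

// Illustrative helper (not in the PR): the same check TransformNetwork performs inline.
// Returns true when some f16 Parameter is fed anything other than explicit FP16 data,
// in which case the plugin converts the whole graph to f32 for pre-processing.
static bool NeedsF16ToF32(const std::shared_ptr<const ngraph::Function>& function,
                          const InferenceEngine::InputsDataMap& inputInfoMap) {
    for (const auto& param : function->get_parameters()) {
        const auto it = inputInfoMap.find(param->get_friendly_name());
        if (it == inputInfoMap.end())
            continue;  // Parameter without user-provided InputInfo: nothing to compare
        if (param->get_element_type() == ngraph::element::f16 &&
            it->second->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16)
            return true;
    }
    return false;
}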
New file, 173 additions: base_reference_test.cpp (file name inferred from the include below)
@@ -0,0 +1,173 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "base_reference_test.hpp"

#include <gtest/gtest.h>

#include "transformations/utils/utils.hpp"

using namespace InferenceEngine;

CommonReferenceTest::CommonReferenceTest() : targetDevice("TEMPLATE") {
    core = PluginCache::get().ie(targetDevice);
}

void CommonReferenceTest::Exec() {
    LoadNetwork();
    FillInputs();
    Infer();
    Validate();
}

void CommonReferenceTest::LoadNetwork() {
    InferenceEngine::CNNNetwork cnnNetwork(function);
    auto inputInfo = cnnNetwork.getInputsInfo();
    auto outputInfo = cnnNetwork.getOutputsInfo();
    for (const auto& param : function->get_parameters()) {
        inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
    }
    for (const auto& result : function->get_results()) {
        outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
            InferenceEngine::details::convertPrecision(result->get_element_type()));
    }
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}

void CommonReferenceTest::FillInputs() {
    const auto& inputInfo = executableNetwork.GetInputsInfo();
    const auto& params = function->get_parameters();
    ASSERT_EQ(params.size(), inputData.size());
    ASSERT_EQ(inputInfo.size(), inputData.size());

    for (size_t i = 0; i < params.size(); i++) {
        const auto& param = params[i];
        const auto infoIt = inputInfo.find(param->get_friendly_name());
        GTEST_ASSERT_NE(infoIt, inputInfo.cend());

        const auto& info = infoIt->second;
        auto blob = make_blob_with_precision(info->getTensorDesc());
        blob->allocate();

        ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());

        MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
        ASSERT_NE(mInputData, nullptr);
        auto minputDataHolder = mInputData->rmap();

        MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
        ASSERT_NE(mBlob, nullptr);
        auto mBlobHolder = mBlob->wmap();

        std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
        inputData[i] = blob;
    }
}

void CommonReferenceTest::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();

    const auto& inputsInfo = executableNetwork.GetInputsInfo();
    const auto& functionParams = function->get_parameters();
    for (size_t i = 0; i < functionParams.size(); ++i) {
        const auto& param = functionParams[i];
        const auto infoIt = inputsInfo.find(param->get_friendly_name());
        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());

        const auto& info = infoIt->second;
        auto blob = inputData[i];

        inferRequest.SetBlob(info->name(), blob);
    }
    inferRequest.Infer();
}

void CommonReferenceTest::Validate() {
    ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
    std::vector<InferenceEngine::Blob::Ptr> outputs;
    for (const auto& result : function->get_results()) {
        auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
        outputs.emplace_back(inferRequest.GetBlob(name));
    }

    ASSERT_EQ(refOutData.size(), outputs.size());
    for (size_t i = 0; i < refOutData.size(); i++) {
        ValidateBlobs(refOutData[i], outputs[i]);
    }
}

void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
    ASSERT_TRUE(refBlob != nullptr);
    ASSERT_TRUE(outBlob != nullptr);
    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());

    auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
    IE_ASSERT(mRef);
    const auto refLockMemory = mRef->rmap();
    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();

    auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
    IE_ASSERT(mOut);
    const auto outLockMemory = mOut->rmap();
    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();

    const auto& precision = refBlob->getTensorDesc().getPrecision();
    switch (precision) {
    case InferenceEngine::Precision::BF16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP32:
        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
                                                                 refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I8:
        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
                                                                   refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I16:
        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I32:
        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I64:
        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::BOOL:
    case InferenceEngine::Precision::U8:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U16:
        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
                                                                       reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U32:
        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
                                                                       reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U64:
        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
                                                                       reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I4:
    case InferenceEngine::Precision::U4:
        // 4-bit elements are packed two per byte, so compare size() / 2 raw bytes
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 2, threshold);
        break;
    case InferenceEngine::Precision::BIN:
        // 1-bit elements are packed eight per byte, so compare size() / 8 raw bytes
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 8, threshold);
        break;
    default:
        FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}
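With the harness in place, a concrete reference test only has to build a function, provide input and reference blobs, and call Exec(). The sketch below is hypothetical (the PR's actual consumers are the convert tests); the fixture name, test name, and the Relu scenario are made up for illustration:

#include <gtest/gtest.h>

#include "base_reference_test.hpp"

// Hypothetical fixture: run Relu on the TEMPLATE plugin and compare the
// outputs against hardcoded reference values.
class ReferenceReluTest : public testing::Test, public CommonReferenceTest {
protected:
    void SetUp() override {
        const ngraph::Shape shape{4};
        const auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, shape);
        const auto relu = std::make_shared<ngraph::op::Relu>(param);
        function = std::make_shared<ngraph::Function>(relu, ngraph::ParameterVector {param});
        inputData = {CreateBlob(ngraph::element::f32, std::vector<float> {-2.f, -1.f, 0.f, 3.f})};
        refOutData = {CreateBlob(ngraph::element::f32, std::vector<float> {0.f, 0.f, 0.f, 3.f})};
    }
};

TEST_F(ReferenceReluTest, CompareWithHardcodedRefs) {
    Exec();
}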
New file, 53 additions: base_reference_test.hpp (the header included by the tests above)
@@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>

class CommonReferenceTest {
public:
    CommonReferenceTest();

    void Exec();

    void LoadNetwork();

    void FillInputs();

    void Infer();

    void Validate();

private:
    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);

protected:
    const std::string targetDevice;
    std::shared_ptr<InferenceEngine::Core> core;
    std::shared_ptr<ngraph::Function> function;

    InferenceEngine::ExecutableNetwork executableNetwork;
    InferenceEngine::InferRequest inferRequest;
    std::vector<InferenceEngine::Blob::Ptr> inputData;
    std::vector<InferenceEngine::Blob::Ptr> refOutData;
    float threshold = 1e-2f;
};

template <class T>
InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
    size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
    auto blob = make_blob_with_precision(
        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
    blob->allocate();
    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    IE_ASSERT(minput);
    auto minputHolder = minput->wmap();

    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));

    return blob;
}
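CreateBlob allocates a blob of the requested element type and copies values into it; the optional size parameter overrides the element count inferred from the vector, which matters for sub-byte precisions where several elements share one stored byte (mirroring the size() / 2 and size() / 8 handling in ValidateBlobs). Two hedged usage sketches, assuming make_blob_with_precision supports the listed precisions:

// Four f32 elements; the element count is inferred from the vector.
auto input = CreateBlob(ngraph::element::f32, std::vector<float> {1.f, 2.f, 3.f, 4.f});

// Eight u4 elements packed into four bytes; the logical element count must be
// passed explicitly because each stored uint8_t holds two 4-bit values.
auto packed = CreateBlob(ngraph::element::u4, std::vector<uint8_t> {0x12, 0x34, 0x56, 0x78}, 8);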
