Template reference tests (#6456)
* Added convert tests

* Moved convert tests to template plugin

* Fixed build

* Fixed comments

* Fixed templateFuncTests

* Fixed review comments

* Added dynamic registration of template plugin

* Fixed functional tests

* Fixed behavior tests

* Added ticket

* Try to avoid failures
ilyachur authored Jul 12, 2021
1 parent 1a5bd8a commit 811f4c5
Showing 16 changed files with 815 additions and 1,742 deletions.
1 change: 0 additions & 1 deletion .ci/azure/linux.yml
@@ -103,7 +103,6 @@ jobs:
         cmakeArgs: >
           -GNinja
           -DVERBOSE_BUILD=ON
-          -DENABLE_TEMPLATE_PLUGIN=ON
           -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
           -DENABLE_PYTHON=ON
           -DPYTHON_EXECUTABLE=/usr/bin/python3.6
2 changes: 0 additions & 2 deletions cmake/features.cmake
@@ -24,8 +24,6 @@ Supported values:\

 ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON)

-ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF)
-
 ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
 In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected IntelSEAPI statistics. \
 Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF
6 changes: 2 additions & 4 deletions docs/template_plugin/src/CMakeLists.txt
@@ -36,10 +36,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE
 set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})

 # ATTENTION: uncomment to register a plugin in the plugins.xml file
-if(ENABLE_TEMPLATE_PLUGIN)
-    ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
-                        POSSIBLE_PLUGINS ${TARGET_NAME})
-endif()
+# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
+#                     POSSIBLE_PLUGINS ${TARGET_NAME})
 # [cmake:plugin]

 # ATTENTION: uncomment to install component
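With static registration in plugins.xml now off by default, tests rely on runtime registration instead (the "Added dynamic registration of template plugin" item in the commit message). A hedged sketch of what that looks like through the public Core API; the plugin library name is platform-dependent and illustrative here:

```cpp
#include <ie_core.hpp>

int main() {
    InferenceEngine::Core core;
    // Register the template plugin shared library under the "TEMPLATE" device
    // name at runtime instead of listing it in plugins.xml.
    core.RegisterPlugin("templatePlugin", "TEMPLATE");
    return 0;
}
```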
13 changes: 11 additions & 2 deletions docs/template_plugin/src/template_plugin.cpp
@@ -66,8 +66,17 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
     // TODO: add post-processing based on outputsInfoMap
     // Example: register CommonOptimizations transformation from transformations library
     passManager.register_pass<ngraph::pass::CommonOptimizations>();
-    // Template plugin handles only FP32 networks
-    passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
+    // GAPI supports only FP32 networks for pre-processing
+    bool needF16toF32 = false;
+    for (const auto& param : function->get_parameters()) {
+        if (param->get_element_type() == ngraph::element::f16 &&
+            inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) {
+            needF16toF32 = true;
+            break;
+        }
+    }
+    if (needF16toF32)
+        passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
     // Example: register plugin specific transformation
     passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
     passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
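The ConvertPrecision registration is now conditional: it fires only when a network parameter is f16 but the caller did not request FP16 input data. A minimal sketch of a caller that would hit that branch (the tiny Relu network is hypothetical, and it assumes the TEMPLATE plugin has been registered, which the CMake change above leaves commented out):

```cpp
#include <ie_core.hpp>
#include <memory>
#include <ngraph/ngraph.hpp>

int main() {
    // Tiny f16 network: Parameter -> Relu.
    auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f16, ngraph::Shape{1, 3});
    auto relu = std::make_shared<ngraph::op::Relu>(param);
    auto function = std::make_shared<ngraph::Function>(relu, ngraph::ParameterVector{param});

    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network(function);
    // The input precision defaults to FP32 (not FP16), so needF16toF32 becomes
    // true and the plugin registers the f16 -> f32 ConvertPrecision pass.
    // Calling setPrecision(InferenceEngine::Precision::FP16) on the input info
    // first would skip the conversion.
    auto executable = core.LoadNetwork(network, "TEMPLATE");
    (void)executable;
    return 0;
}
```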
173 changes: 173 additions & 0 deletions base_reference_test.cpp
@@ -0,0 +1,173 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "base_reference_test.hpp"

#include <gtest/gtest.h>

#include "transformations/utils/utils.hpp"

using namespace InferenceEngine;

CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
    core = PluginCache::get().ie(targetDevice);
}

void CommonReferenceTest::Exec() {
    LoadNetwork();
    FillInputs();
    Infer();
    Validate();
}

void CommonReferenceTest::LoadNetwork() {
    InferenceEngine::CNNNetwork cnnNetwork(function);
    auto inputInfo = cnnNetwork.getInputsInfo();
    auto outputInfo = cnnNetwork.getOutputsInfo();
    for (const auto& param : function->get_parameters()) {
        inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
    }
    for (const auto& result : function->get_results()) {
        outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
            InferenceEngine::details::convertPrecision(result->get_element_type()));
    }
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}
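LoadNetwork pins every input and output to the precision of the corresponding ngraph node, so the Inference Engine applies no implicit conversion and reference data can be compared exactly. A small standalone sketch of the mapping relied on above, assuming only the ie_ngraph_utils.hpp overload the code already uses:

```cpp
#include <cassert>
#include <ie_ngraph_utils.hpp>
#include <ngraph/type/element_type.hpp>

int main() {
    using InferenceEngine::details::convertPrecision;
    // Each ngraph element type maps 1:1 onto the matching IE precision,
    // e.g. f16 -> FP16 and i64 -> I64; setPrecision() above uses this mapping.
    assert(convertPrecision(ngraph::element::f16) == InferenceEngine::Precision::FP16);
    assert(convertPrecision(ngraph::element::i64) == InferenceEngine::Precision::I64);
    return 0;
}
```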

void CommonReferenceTest::FillInputs() {
    const auto& inputInfo = executableNetwork.GetInputsInfo();
    const auto& params = function->get_parameters();
    ASSERT_EQ(params.size(), inputData.size());
    ASSERT_EQ(inputInfo.size(), inputData.size());

    for (size_t i = 0; i < params.size(); i++) {
        const auto& param = params[i];
        const auto infoIt = inputInfo.find(param->get_friendly_name());
        GTEST_ASSERT_NE(infoIt, inputInfo.cend());

        const auto& info = infoIt->second;
        auto blob = make_blob_with_precision(info->getTensorDesc());
        blob->allocate();

        ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());

        MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
        ASSERT_NE(mInputData, nullptr);
        auto minputDataHolder = mInputData->rmap();

        MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
        ASSERT_NE(mBlob, nullptr);
        auto mBlobHolder = mBlob->wmap();

        std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
        inputData[i] = blob;
    }
}

void CommonReferenceTest::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();

    const auto& inputsInfo = executableNetwork.GetInputsInfo();
    const auto& functionParams = function->get_parameters();
    for (size_t i = 0; i < functionParams.size(); ++i) {
        const auto& param = functionParams[i];
        const auto infoIt = inputsInfo.find(param->get_friendly_name());
        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());

        const auto& info = infoIt->second;
        auto blob = inputData[i];

        inferRequest.SetBlob(info->name(), blob);
    }
    inferRequest.Infer();
}

void CommonReferenceTest::Validate() {
    ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
    std::vector<InferenceEngine::Blob::Ptr> outputs;
    for (const auto& result : function->get_results()) {
        auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
        outputs.emplace_back(inferRequest.GetBlob(name));
    }

    ASSERT_EQ(refOutData.size(), outputs.size());
    for (size_t i = 0; i < refOutData.size(); i++) {
        ValidateBlobs(refOutData[i], outputs[i]);
    }
}

void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
    ASSERT_TRUE(refBlob != nullptr);
    ASSERT_TRUE(outBlob != nullptr);
    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());

    auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
    IE_ASSERT(mRef);
    const auto refLockMemory = mRef->rmap();
    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();

    auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
    IE_ASSERT(mOut);
    const auto outLockMemory = mOut->rmap();
    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();

    const auto& precision = refBlob->getTensorDesc().getPrecision();
    switch (precision) {
    case InferenceEngine::Precision::BF16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP32:
        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
                                                                 refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I8:
        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
                                                                   refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I16:
        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I32:
        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I64:
        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::BOOL:
    case InferenceEngine::Precision::U8:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U16:
        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
                                                                       reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U32:
        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
                                                                       reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U64:
        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
                                                                       reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I4:
    case InferenceEngine::Precision::U4:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 2, threshold);
        break;
    case InferenceEngine::Precision::BIN:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 8, threshold);
        break;
    default:
        FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}
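The I4/U4 and BIN branches compare raw packed bytes: blob size() counts elements, while the buffer packs two 4-bit values or eight binary values per byte, hence the divisions above. A quick standalone sanity check of that arithmetic (not from this commit):

```cpp
#include <cassert>
#include <cstddef>

// Element counts -> packed byte counts, mirroring the divisions in ValidateBlobs.
constexpr size_t packedBytesU4(size_t elementCount) { return elementCount / 2; }  // two 4-bit values per byte
constexpr size_t packedBytesBin(size_t elementCount) { return elementCount / 8; } // eight 1-bit values per byte

int main() {
    assert(packedBytesU4(16) == 8);
    assert(packedBytesBin(16) == 2);
    return 0;
}
```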
53 changes: 53 additions & 0 deletions base_reference_test.hpp
@@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>

class CommonReferenceTest {
public:
    CommonReferenceTest();

    void Exec();

    void LoadNetwork();

    void FillInputs();

    void Infer();

    void Validate();

private:
    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);

protected:
    const std::string targetDevice;
    std::shared_ptr<InferenceEngine::Core> core;
    std::shared_ptr<ngraph::Function> function;

    InferenceEngine::ExecutableNetwork executableNetwork;
    InferenceEngine::InferRequest inferRequest;
    std::vector<InferenceEngine::Blob::Ptr> inputData;
    std::vector<InferenceEngine::Blob::Ptr> refOutData;
    float threshold = 1e-2f;
};

template <class T>
InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
    size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
    auto blob = make_blob_with_precision(
        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
    blob->allocate();
    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    IE_ASSERT(minput);
    auto minputHolder = minput->wmap();

    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));

    return blob;
}
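For orientation, a hedged sketch of how a test might build on this fixture (a hypothetical Relu test, not one of the files in this commit): SetUp() fills the protected members, and Exec() then loads, infers, and validates against the hardcoded expectations.

```cpp
#include <gtest/gtest.h>

#include "base_reference_test.hpp"

class ReferenceReluTest : public testing::Test, public CommonReferenceTest {
public:
    void SetUp() override {
        // Single-input Relu network in FP32.
        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{4});
        auto relu = std::make_shared<ngraph::op::Relu>(param);
        function = std::make_shared<ngraph::Function>(relu, ngraph::ParameterVector{param});
        // CreateBlob derives the element count from the vector when size == 0.
        inputData = {CreateBlob(ngraph::element::f32, std::vector<float>{-1.f, 0.f, 2.f, -3.f})};
        refOutData = {CreateBlob(ngraph::element::f32, std::vector<float>{0.f, 0.f, 2.f, 0.f})};
    }
};

TEST_F(ReferenceReluTest, CompareWithHardcodedRefs) {
    Exec();
}
```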
