diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 146775f6189f02..f9fcee6a66eb77 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -103,7 +103,6 @@ jobs: cmakeArgs: > -GNinja -DVERBOSE_BUILD=ON - -DENABLE_TEMPLATE_PLUGIN=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 diff --git a/cmake/features.cmake b/cmake/features.cmake index 6042d9d2238881..1f0c198913cc23 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -24,8 +24,6 @@ Supported values:\ ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON) -ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF) - ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \ In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \ Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt index e91b6f215860a4..a6411523780629 100644 --- a/docs/template_plugin/src/CMakeLists.txt +++ b/docs/template_plugin/src/CMakeLists.txt @@ -36,10 +36,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) # ATTENTION: uncomment to register a plugin in the plugins.xml file -if(ENABLE_TEMPLATE_PLUGIN) - ie_register_plugins(MAIN_TARGET ${TARGET_NAME} - POSSIBLE_PLUGINS ${TARGET_NAME}) -endif() +# ie_register_plugins(MAIN_TARGET ${TARGET_NAME} +# POSSIBLE_PLUGINS ${TARGET_NAME}) # [cmake:plugin] # ATTENTION: uncomment to install component diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp index a0f7a30ee171cf..c92918983cdcfe 100644 --- a/docs/template_plugin/src/template_plugin.cpp +++ b/docs/template_plugin/src/template_plugin.cpp @@ -66,8 +66,17 @@ std::shared_ptr TransformNetwork(const std::shared_ptr(); - // Template plugin handles only FP32 networks - passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32}}); + // GAPI supports only FP32 networks for pre-processing + bool needF16toF32 = false; + for (const auto& param : function->get_parameters()) { + if (param->get_element_type() == ngraph::element::f16 && + inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) { + needF16toF32 = true; + break; + } + } + if (needF16toF32) + passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32}}); // Example: register plugin specific transformation passManager.register_pass(); passManager.register_pass(); diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp new file mode 100644 index 00000000000000..51af4d2ea1a221 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp @@ -0,0 +1,173 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "base_reference_test.hpp" + +#include + +#include "transformations/utils/utils.hpp" + +using namespace InferenceEngine; + +CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") { + core = PluginCache::get().ie(targetDevice); +} + +void CommonReferenceTest::Exec() { + LoadNetwork(); + FillInputs(); + 
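+    // The input blobs prepared above are now submitted to the TEMPLATE plugin and the
+    // resulting outputs are compared element-wise against the hard-coded references.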
Infer(); + Validate(); +} + +void CommonReferenceTest::LoadNetwork() { + InferenceEngine::CNNNetwork cnnNetwork(function); + auto inputInfo = cnnNetwork.getInputsInfo(); + auto outputInfo = cnnNetwork.getOutputsInfo(); + for (const auto& param : function->get_parameters()) { + inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type())); + } + for (const auto& result : function->get_results()) { + outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision( + InferenceEngine::details::convertPrecision(result->get_element_type())); + } + executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice); +} + +void CommonReferenceTest::FillInputs() { + const auto& inputInfo = executableNetwork.GetInputsInfo(); + const auto& params = function->get_parameters(); + ASSERT_EQ(params.size(), inputData.size()); + ASSERT_EQ(inputInfo.size(), inputData.size()); + + for (size_t i = 0; i < params.size(); i++) { + const auto& param = params[i]; + const auto infoIt = inputInfo.find(param->get_friendly_name()); + GTEST_ASSERT_NE(infoIt, inputInfo.cend()); + + const auto& info = infoIt->second; + auto blob = make_blob_with_precision(info->getTensorDesc()); + blob->allocate(); + + ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize()); + + MemoryBlob::Ptr mInputData = as(inputData[i]); + ASSERT_NE(mInputData, nullptr); + auto minputDataHolder = mInputData->rmap(); + + MemoryBlob::Ptr mBlob = as(blob); + ASSERT_NE(mBlob, nullptr); + auto mBlobHolder = mBlob->wmap(); + + std::memcpy(mBlobHolder.as(), minputDataHolder.as(), inputData[i]->byteSize()); + inputData[i] = blob; + } +} + +void CommonReferenceTest::Infer() { + inferRequest = executableNetwork.CreateInferRequest(); + + const auto& inputsInfo = executableNetwork.GetInputsInfo(); + const auto& functionParams = function->get_parameters(); + for (size_t i = 0; i < functionParams.size(); ++i) { + const auto& param = functionParams[i]; + const auto infoIt = inputsInfo.find(param->get_friendly_name()); + GTEST_ASSERT_NE(infoIt, inputsInfo.cend()); + + const auto& info = infoIt->second; + auto blob = inputData[i]; + + inferRequest.SetBlob(info->name(), blob); + } + inferRequest.Infer(); +} + +void CommonReferenceTest::Validate() { + ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size()); + std::vector outputs; + for (const auto& result : function->get_results()) { + auto name = ngraph::op::util::create_ie_output_name(result->input_value(0)); + outputs.emplace_back(inferRequest.GetBlob(name)); + } + + ASSERT_EQ(refOutData.size(), outputs.size()); + for (size_t i = 0; i < refOutData.size(); i++) { + ValidateBlobs(refOutData[i], outputs[i]); + } +} +void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) { + ASSERT_TRUE(refBlob != nullptr); + ASSERT_TRUE(outBlob != nullptr); + ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision()); + ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize()); + + auto mRef = as(refBlob); + IE_ASSERT(mRef); + const auto refLockMemory = mRef->rmap(); + const auto refBuffer = refLockMemory.as(); + + auto mOut = as(outBlob); + IE_ASSERT(mOut); + const auto outLockMemory = mOut->rmap(); + const auto outBuffer = outLockMemory.as(); + + const auto& precision = refBlob->getTensorDesc().getPrecision(); + switch (precision) { + case InferenceEngine::Precision::BF16: + LayerTestsUtils::LayerTestsCommon::Compare( + 
reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), refBlob->size(), threshold); + break; + case InferenceEngine::Precision::FP16: + LayerTestsUtils::LayerTestsCommon::Compare( + reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), refBlob->size(), threshold); + break; + case InferenceEngine::Precision::FP32: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::I8: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::I16: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::I32: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::I64: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::BOOL: + case InferenceEngine::Precision::U8: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size(), threshold); + break; + case InferenceEngine::Precision::U16: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), + reinterpret_cast(outBuffer), refBlob->size(), threshold); + break; + case InferenceEngine::Precision::U32: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), + reinterpret_cast(outBuffer), refBlob->size(), threshold); + break; + case InferenceEngine::Precision::U64: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), + reinterpret_cast(outBuffer), refBlob->size(), threshold); + break; + case InferenceEngine::Precision::I4: + case InferenceEngine::Precision::U4: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size() / 2, threshold); + break; + case InferenceEngine::Precision::BIN: + LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(refBuffer), reinterpret_cast(outBuffer), + refBlob->size() / 8, threshold); + break; + default: + FAIL() << "Comparator for " << precision << " precision isn't supported"; + } +} diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp new file mode 100644 index 00000000000000..6e3fd942a9e722 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +class CommonReferenceTest { +public: + CommonReferenceTest(); + + void Exec(); + + void LoadNetwork(); + + void FillInputs(); + + void Infer(); + + void Validate(); + +private: + void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob); + +protected: + const std::string targetDevice; + std::shared_ptr core; + std::shared_ptr function; + + InferenceEngine::ExecutableNetwork executableNetwork; + InferenceEngine::InferRequest inferRequest; + std::vector inputData; + std::vector refOutData; + float threshold = 1e-2f; +}; + +template +InferenceEngine::Blob::Ptr 
CreateBlob(const ngraph::element::Type& element_type, const std::vector& values, size_t size = 0) { + size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size(); + auto blob = make_blob_with_precision( + InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C)); + blob->allocate(); + InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as(blob); + IE_ASSERT(minput); + auto minputHolder = minput->wmap(); + + std::memcpy(minputHolder.as(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size())); + + return blob; +} + diff --git a/docs/template_plugin/tests/functional/op_reference/convert.cpp b/docs/template_plugin/tests/functional/op_reference/convert.cpp new file mode 100644 index 00000000000000..fb32fda4cbbfd8 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/convert.cpp @@ -0,0 +1,441 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace ngraph; +using namespace InferenceEngine; + +struct ConvertParams { + template + ConvertParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const ngraph::element::Type& oType, const std::vector& iValues, + const std::vector& oValues, size_t iSize = 0, size_t oSize = 0) + : pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues, iSize)), refData(CreateBlob(oType, oValues, oSize)) {} + ngraph::PartialShape pshape; + ngraph::element::Type inType; + ngraph::element::Type outType; + InferenceEngine::Blob::Ptr inputData; + InferenceEngine::Blob::Ptr refData; +}; + +class ReferenceConvertLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.pshape, params.inType, params.outType); + inputData = {params.inputData}; + refOutData = {params.refData}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "shape=" << param.pshape << "_"; + result << "iType=" << param.inType << "_"; + result << "oType=" << param.outType; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type, + const element::Type& expected_output_type) { + const auto in = std::make_shared(input_type, input_shape); + const auto convert = std::make_shared(in, expected_output_type); + return std::make_shared(NodeVector {convert}, ParameterVector {in}); + } +}; + +TEST_P(ReferenceConvertLayerTest, CompareWithHardcodedRefs) { + Exec(); +} + +INSTANTIATE_TEST_SUITE_P( + smoke_Convert_With_Hardcoded_Refs, ReferenceConvertLayerTest, + ::testing::Values( + // destination boolean + ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean, + std::vector {0, 12, 23, 0, std::numeric_limits::lowest(), std::numeric_limits::max()}, + std::vector {0, 1, 1, 0, 0, 1}), + ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean, + std::vector {0, -12, 23, 0, std::numeric_limits::lowest(), std::numeric_limits::max()}, + std::vector {0, 1, 1, 0, 1, 1}), + ConvertParams(ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean, + std::vector {0.f, 1.5745f, 0.12352f, 0.f, 
std::numeric_limits::lowest(), std::numeric_limits::max(), + std::numeric_limits::min(), std::numeric_limits::infinity(), -std::numeric_limits::infinity()}, + std::vector {0, 1, 1, 0, 1, 1, 1, 1, 1}), + + // destination bf16 + ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}), + ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16, + std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}, + std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}), + + // destination f16 + ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}), + ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16, std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}, + std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}), + + // destination f32 + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32, std::vector {0xA0}, + std::vector {1.0f, 0.0f, 1.0f, 0.0f}, 4), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32, std::vector {0xFB, 0x0A}, + std::vector {15.0f, 11.0f, 0.0f, 10.0f}, 4), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32, std::vector {255, 128, 32, 0}, + std::vector {255.0f, 128.0f, 32.0f, 0.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32, std::vector {64000, 32000, 128, 0}, + std::vector {64000.0f, 32000.0f, 128.0f, 0.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32, std::vector {4000000, 2000000, 128, 0}, + std::vector {4000000.0f, 2000000.0f, 128.0f, 0.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32, std::vector {4000000, 2000000, 128, 0}, + std::vector {4000000.0f, 2000000.0f, 128.0f, 0.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32, std::vector {0xFE, 0xF2}, + std::vector {-1.0f, -2.0f, -1.0f, 2.0f}, 4), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32, std::vector {-127, -0, 0, 127}, + std::vector {-127.0f, -0.0f, 0.0f, 127.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32, std::vector {-32000, -0, 0, 32000}, + std::vector {-32000.0f, -0.0f, 0.0f, 32000.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32, std::vector {-64000, -0, 0, 64000}, + std::vector {-64000.0f, -0.0f, 0.0f, 64000.0f}), + ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32, std::vector {-64000, -0, 0, 64000}, + std::vector {-64000.0f, -0.0f, 0.0f, 64000.0f}), + ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}), + ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, 
ngraph::element::f16, ngraph::element::f32, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}), + ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}), + + // destination i4 + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector {0xA0}, std::vector {0x10, 0x10}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector {0x12, 0x03}, std::vector {0x12, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector {1, 2, 0, 3}, std::vector {0x12, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector {0xFE, 0x03}, std::vector {0xFE, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4, std::vector {-1, -2, 0, 3}, + std::vector {0xFE, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4, std::vector {-1, -2, 0, 3}, + std::vector {0xFE, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23}, + 4, 4), + // destination i8 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector {1, 2, 0, 3}, std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, 
ngraph::element::i4, ngraph::element::i8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + // destination i16 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + // destination i32 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector 
{1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i32, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i32, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i32, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i32, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i32, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + // destination i64 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i64, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i64, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i64, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i64, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i64, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i64, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i64, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i64, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i64, std::vector {-1, -2, 0, 3}, + std::vector {-1, -2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, 
ngraph::element::f32, ngraph::element::i64, std::vector {-1, -2, 2, 3}, + std::vector {-1, -2, 2, 3}), + + // destination u1 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u1, std::vector {0xA0}, std::vector {0xA0}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u4, ngraph::element::u1, std::vector {0x10, 0x01, 0x00, 0x00}, + std::vector {0x90}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u8, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u16, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u32, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u64, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::i4, ngraph::element::u1, std::vector {0x10, 0x01, 0x00, 0x00}, + std::vector {0x90}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::i8, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::i16, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::i32, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::i64, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::f16, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::bf16, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + ConvertParams(ngraph::PartialShape {8}, ngraph::element::f32, ngraph::element::u1, std::vector {1, 0, 1, 0, 0, 0, 0, 1}, + std::vector {0xA1}, 8, 8), + + // destination u4 + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::u4, std::vector {0xA0}, std::vector {0x10, 0x10}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u4, std::vector {0x12, 0x03}, std::vector {0x12, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u4, std::vector {1, 2, 0, 3}, std::vector {0x12, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u4, std::vector {1, 2, 0, 3}, + std::vector {0x12, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u4, std::vector {0xFE, 0x03}, std::vector {0xFE, 0x03}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23}, + 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 
4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u4, std::vector {-1, -2, 2, 3}, + std::vector {0xFE, 0x23}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u4, std::vector {-1, -2, 0, 3}, + std::vector {0xFE, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u4, std::vector {-1, -2, 0, 3}, + std::vector {0xFE, 0x03}, 4, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23}, + 4, 4), + + // destination u8 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u8, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u8, std::vector {1, 2, 0, 3}, std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3}, + 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u8, std::vector {1, 2, 2, 3}, std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u8, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u8, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u8, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u8, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u8, std::vector {1, 2, 2, 3}, std::vector {1, 2, 2, 3}), + + // destination u16 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, 
std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + + // destination u32 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + + // destination u64 + ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector {0x81}, + std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, 
ngraph::element::u8, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector {0x21, 0x43}, + std::vector {2, 1, 4, 3}, 4), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64, std::vector {1, 2, 0, 3}, + std::vector {1, 2, 0, 3}), + ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector {1, 2, 2, 3}, + std::vector {1, 2, 2, 3})), + ReferenceConvertLayerTest::getTestCaseName); diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/core_integration.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/core_integration.cpp index 2c067aaf7b68ab..60ffbf048934e6 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/core_integration.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/core_integration.cpp @@ -73,7 +73,7 @@ using IEClassSetConfigTestHETERO = IEClassNetworkTest; TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) { { - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO")); @@ -84,7 +84,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) { } { - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO)}}, "HETERO")); @@ -95,7 +95,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) { } { - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS))); @@ -118,7 +118,7 @@ INSTANTIATE_TEST_SUITE_P( using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest; TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) { - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; std::string deviceName = CommonTestUtils::DEVICE_TEMPLATE; @@ -209,4 +209,4 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)); #endif // ENABLE_MKL_DNN -} // namespace \ No newline at end of file +} // namespace diff --git a/inference-engine/src/plugin_api/ie_ngraph_utils.hpp b/inference-engine/src/plugin_api/ie_ngraph_utils.hpp index 40904bb07215ca..48a9a026daceab 100644 --- 
a/inference-engine/src/plugin_api/ie_ngraph_utils.hpp +++ b/inference-engine/src/plugin_api/ie_ngraph_utils.hpp @@ -134,6 +134,8 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) { return Precision(Precision::BIN); case ::ngraph::element::Type_t::boolean: return Precision(Precision::BOOL); + case ::ngraph::element::Type_t::dynamic: + return Precision(Precision::UNSPECIFIED); default: IE_THROW() << "Incorrect precision " << precision.get_type_name() << "!"; return{}; } diff --git a/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp b/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp index 1a5729d8ecdff5..8abda399c9c049 100644 --- a/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp +++ b/inference-engine/src/transformations/include/ngraph_ops/framework_node.hpp @@ -55,7 +55,7 @@ class TRANSFORMATIONS_API FrameworkNode : public Op { public: NGRAPH_RTTI_DECLARATION; - explicit FrameworkNode(const OutputVector& inputs); + explicit FrameworkNode(const OutputVector& inputs, size_t output_size = 1); void validate_and_infer_types() override; diff --git a/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp b/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp index b25143c20f5aef..94d0008c11064e 100644 --- a/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp +++ b/inference-engine/src/transformations/src/ngraph_ops/framework_node.cpp @@ -10,8 +10,9 @@ using namespace ngraph; NGRAPH_RTTI_DEFINITION(op::FrameworkNode, "FrameworkNode", 0); -op::FrameworkNode::FrameworkNode(const OutputVector& inputs) +op::FrameworkNode::FrameworkNode(const OutputVector& inputs, size_t output_size) : Op(inputs) { + set_output_size(output_size); constructor_validate_and_infer_types(); } diff --git a/inference-engine/src/transformations/src/transformations/serialize.cpp b/inference-engine/src/transformations/src/transformations/serialize.cpp index 1fd41881125b41..93f9c24e4b81bb 100644 --- a/inference-engine/src/transformations/src/transformations/serialize.cpp +++ b/inference-engine/src/transformations/src/transformations/serialize.cpp @@ -495,6 +495,7 @@ std::string get_opset_name( std::string get_precision_name(const ngraph::element::Type & elem_type) { switch (elem_type) { case ::ngraph::element::Type_t::undefined: + case ::ngraph::element::Type_t::dynamic: return "UNSPECIFIED"; case ::ngraph::element::Type_t::f16: return "FP16"; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert.cpp index 70ed6c6512fc41..e7ac96b6540990 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert.cpp @@ -14,13 +14,23 @@ namespace { const std::vector> inShape = {{1, 2, 3, 4}}; const std::vector precisions = { - Precision::U8, + // Ticket: 59594 + // Precision::I4, Precision::I8, - Precision::U16, Precision::I16, Precision::I32, - Precision::U64, Precision::I64, + // Ticket: 59594 + // Precision::BIN, + // Precision::BOOL, + // Precision::U4, + Precision::U8, + Precision::U16, + // Ticket: 59594 + // Precision::U32, + Precision::U64, + Precision::BF16, + Precision::FP16, Precision::FP32 }; @@ -34,4 +44,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertLayerTest, ConvertLayerTest, 
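 // Note: the precision list above now also exercises BF16 and FP16 on CPU, while
 // I4, U4, U32, BIN and BOOL remain commented out and are tracked under ticket 59594.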
::testing::Values(CommonTestUtils::DEVICE_CPU)), ConvertLayerTest::getTestCaseName); -} // namespace \ No newline at end of file +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp index adcd0e525b43bf..1064edaa570865 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp @@ -62,6 +62,14 @@ namespace BehaviorTestsDefinitions { } +inline Core createCoreWithTemplate() { + Core ie; + std::string pluginName = "templatePlugin"; + pluginName += IE_BUILD_POSTFIX; + ie.RegisterPlugin(pluginName, "TEMPLATE"); + return ie; +} + class IEClassBasicTestP : public ::testing::Test, public WithParamInterface > { protected: std::string deviceName; @@ -211,20 +219,20 @@ TEST(IEClassBasicTest, smoke_createDefault) { TEST_P(IEClassBasicTestP, registerExistingPluginThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.RegisterPlugin(pluginName, deviceName), Exception); } TEST_P(IEClassBasicTestP, registerNewPluginNoThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.RegisterPlugin(pluginName, "NEW_DEVICE_NAME")); ASSERT_NO_THROW(ie.GetMetric("NEW_DEVICE_NAME", METRIC_KEY(SUPPORTED_CONFIG_KEYS))); } TEST(IEClassBasicTest, smoke_registerExistingPluginFileThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.RegisterPlugins("nonExistPlugins.xml"), Exception); } @@ -277,7 +285,7 @@ TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { GTEST_COUT << "Test " << testIndex << std::endl; - Core ie; + Core ie = createCoreWithTemplate(); GTEST_COUT << "Core created " << testIndex << std::endl; ASSERT_NO_THROW(ie.RegisterPlugins(::FileUtils::wStringtoMBCSstringChar(pluginsXmlW))); CommonTestUtils::removeFile(pluginsXmlW); @@ -310,19 +318,19 @@ TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { TEST_P(IEClassBasicTestP, getVersionsByExactDeviceNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.GetVersions(deviceName + ".0")); } TEST_P(IEClassBasicTestP, getVersionsByDeviceClassNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.GetVersions(deviceName)); } TEST_P(IEClassBasicTestP, getVersionsNonEmpty) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_EQ(2, ie.GetVersions(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName).size()); } @@ -332,7 +340,7 @@ TEST_P(IEClassBasicTestP, getVersionsNonEmpty) { TEST_P(IEClassBasicTestP, unregisterExistingPluginNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); // device instance is not created yet ASSERT_THROW(ie.UnregisterPlugin(deviceName), Exception); @@ -344,7 +352,7 @@ TEST_P(IEClassBasicTestP, unregisterExistingPluginNoThrow) { TEST_P(IEClassBasicTestP, accessToUnregisteredPluginThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.UnregisterPlugin(deviceName), Exception); ASSERT_NO_THROW(ie.GetVersions(deviceName)); ASSERT_NO_THROW(ie.UnregisterPlugin(deviceName)); @@ -355,7 +363,7 @@ TEST_P(IEClassBasicTestP, 
accessToUnregisteredPluginThrows) { TEST(IEClassBasicTest, smoke_unregisterNonExistingPluginThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.UnregisterPlugin("unkown_device"), Exception); } @@ -365,45 +373,45 @@ TEST(IEClassBasicTest, smoke_unregisterNonExistingPluginThrows) { TEST_P(IEClassBasicTestP, SetConfigAllThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.SetConfig({{"unsupported_key", "4"}})); ASSERT_ANY_THROW(ie.GetVersions(deviceName)); } TEST_P(IEClassBasicTestP, SetConfigForUnRegisteredDeviceThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.SetConfig({{"unsupported_key", "4"}}, "unregistered_device"), Exception); } TEST_P(IEClassBasicTestP, SetConfigNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}}, deviceName)); } TEST_P(IEClassBasicTestP, SetConfigAllNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}})); ASSERT_NO_THROW(ie.GetVersions(deviceName)); } TEST(IEClassBasicTest, smoke_SetConfigHeteroThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}}, CommonTestUtils::DEVICE_HETERO)); } TEST_P(IEClassBasicTestP, SetConfigHeteroTargetFallbackThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.SetConfig({{"TARGET_FALLBACK", deviceName}}, CommonTestUtils::DEVICE_HETERO)); } TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); bool value = false; ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), YES}}, CommonTestUtils::DEVICE_HETERO)); @@ -421,7 +429,7 @@ TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) { TEST_P(IEClassBasicTestP, ImportNetworkThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); if (deviceName == CommonTestUtils::DEVICE_CPU || deviceName == CommonTestUtils::DEVICE_GPU) { @@ -440,20 +448,20 @@ TEST_P(IEClassBasicTestP, ImportNetworkThrows) { TEST(IEClassBasicTest, smoke_ImportNetworkHeteroThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.ImportNetwork("model", CommonTestUtils::DEVICE_HETERO), NetworkNotRead); } TEST(IEClassBasicTest, smoke_ImportNetworkMultiThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - InferenceEngine::Core ie; + InferenceEngine::Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.ImportNetwork("model", CommonTestUtils::DEVICE_MULTI), NetworkNotRead); } TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); RemoteContext::Ptr context = nullptr; std::istringstream stream("None"); ASSERT_THROW(ie.ImportNetwork(stream, context, {}), Exception); @@ -465,19 +473,19 @@ TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) { TEST_P(IEClassNetworkTestP, LoadNetworkActualNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, deviceName)); } TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core 
ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); } TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); } @@ -487,7 +495,7 @@ TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { TEST_P(IEClassImportExportTestP, smoke_ImportNetworkThrowsIfNoDeviceName) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); std::stringstream strm; ExecutableNetwork executableNetwork; ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName)); @@ -500,7 +508,7 @@ TEST_P(IEClassImportExportTestP, smoke_ImportNetworkThrowsIfNoDeviceName) { TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowWithDeviceName) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); std::stringstream strm; ExecutableNetwork executableNetwork; ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName)); @@ -511,7 +519,7 @@ TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowWithDeviceName) { TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ExecutableNetwork executableNetwork; std::string fileName{"ExportedNetwork"}; { @@ -534,13 +542,13 @@ TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThro TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); } TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); try { ie.QueryNetwork(actualNetwork, deviceName); @@ -552,7 +560,7 @@ TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) { TEST_P(IEClassNetworkTestP, QueryNetworkWithKSO) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); try { auto rres = ie.QueryNetwork(ksoNetwork, deviceName); @@ -571,7 +579,7 @@ TEST_P(IEClassNetworkTestP, QueryNetworkWithKSO) { TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); try { std::shared_ptr func; @@ -623,7 +631,7 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) { TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); try { auto rres = ie.QueryNetwork(ksoNetwork, deviceName); @@ -647,7 +655,7 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) { TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); QueryNetworkResult res; ASSERT_NO_THROW(res = ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); ASSERT_LT(0, res.supportedLayersMap.size()); @@ -655,13 +663,13 @@ TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) { TEST_P(IEClassNetworkTestP, QueryNetworkMultiThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); 
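    // createCoreWithTemplate(), added in this patch, registers the templatePlugin
    // library under the "TEMPLATE" device name, because the template plugin is no
    // longer registered in plugins.xml at build time.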
ASSERT_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_MULTI), Exception); } TEST(IEClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; std::string deviceName = CommonTestUtils::DEVICE_HETERO; @@ -678,7 +686,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) { TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; std::string deviceName = CommonTestUtils::DEVICE_HETERO; @@ -695,7 +703,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) { TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); // TODO: check std::string targetDevice = CommonTestUtils::DEVICE_HETERO + std::string(":") + CommonTestUtils::DEVICE_CPU; ASSERT_THROW(ie.GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)), Exception); @@ -703,7 +711,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) { TEST_P(IEClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS))); @@ -719,7 +727,7 @@ TEST_P(IEClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); @@ -735,7 +743,7 @@ TEST_P(IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES))); @@ -751,7 +759,7 @@ TEST_P(IEClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(FULL_DEVICE_NAME))); @@ -763,7 +771,7 @@ TEST_P(IEClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES))); @@ -779,7 +787,7 @@ TEST_P(IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow) TEST_P(IEClassGetMetricTest_DEVICE_GOPS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_GOPS))); @@ -795,7 +803,7 @@ TEST_P(IEClassGetMetricTest_DEVICE_GOPS, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_TYPE))); @@ -808,7 +816,7 @@ 
TEST_P(IEClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS))); @@ -821,7 +829,7 @@ TEST_P(IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintN TEST_P(IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS))); @@ -834,7 +842,7 @@ TEST_P(IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoTh TEST_P(IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS))); @@ -857,7 +865,7 @@ TEST_P(IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoT TEST_P(IEClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_STREAMS))); @@ -877,7 +885,7 @@ TEST_P(IEClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) { TEST_P(IEClassGetMetricTest_ThrowUnsupported, GetMetricThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_THROW(p = ie.GetMetric(deviceName, "unsupported_metric"), Exception); @@ -885,7 +893,7 @@ TEST_P(IEClassGetMetricTest_ThrowUnsupported, GetMetricThrow) { TEST_P(IEClassGetConfigTest, GetConfigNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); @@ -900,7 +908,7 @@ TEST_P(IEClassGetConfigTest, GetConfigNoThrow) { TEST_P(IEClassGetConfigTest, GetConfigHeteroNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); @@ -913,7 +921,7 @@ TEST_P(IEClassGetConfigTest, GetConfigHeteroNoThrow) { TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_THROW(p = ie.GetConfig(CommonTestUtils::DEVICE_HETERO, "unsupported_config"), Exception); @@ -921,7 +929,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) { TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_THROW(p = ie.GetConfig(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName, HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)), @@ -930,7 +938,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) { TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_THROW(p = ie.GetConfig(deviceName, "unsupported_config"), Exception); @@ -938,7 +946,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, 
GetConfigThrow) { TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); std::vector devices; ASSERT_NO_THROW(devices = ie.GetAvailableDevices()); @@ -962,7 +970,7 @@ TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) { // TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -981,7 +989,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoT TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1000,7 +1008,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow TEST_P(IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1015,7 +1023,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) { TEST_P(IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1030,7 +1038,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, G TEST_P(IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1040,7 +1048,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow) TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1057,7 +1065,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoThrow) { TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1067,7 +1075,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigThrows) { TEST_P(IEClassExecutableNetworkSetConfigTest, SetConfigThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1077,7 +1085,7 @@ TEST_P(IEClassExecutableNetworkSetConfigTest, SetConfigThrows) { TEST_P(IEClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1090,7 +1098,7 @@ TEST_P(IEClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) { TEST_P(IEClassExecutableNetworkUnsupportedConfigTest, UnsupportedConfigThrows) { 
SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName); @@ -1099,7 +1107,7 @@ TEST_P(IEClassExecutableNetworkUnsupportedConfigTest, UnsupportedConfigThrows) { TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); @@ -1122,7 +1130,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) { TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter pHetero, pDevice; ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName); @@ -1156,7 +1164,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter pHetero, pDevice; ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName); @@ -1196,7 +1204,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; ExecutableNetwork exeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName); @@ -1209,7 +1217,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThro TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); Parameter p; setHeteroNetworkAffinity(deviceName); @@ -1237,7 +1245,7 @@ bool supportsDeviceID(Core &ie, const std::string &deviceName) { TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); @@ -1252,7 +1260,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { try { @@ -1268,7 +1276,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) { TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".110"), Exception); @@ -1279,7 +1287,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) { TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".l0"), Exception); @@ -1290,7 +1298,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) { TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { 
SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, @@ -1305,7 +1313,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { // TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); @@ -1320,7 +1328,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) { TEST_P(IEClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); @@ -1334,7 +1342,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) { TEST_P(IEClassLoadNetworkTest, LoadNetworkWithBigDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".10"), Exception); @@ -1345,7 +1353,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithBigDeviceIDThrows) { TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".l0"), Exception); @@ -1356,7 +1364,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) { TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.LoadNetwork(actualNetwork, "HETERO", @@ -1368,7 +1376,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) { TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, @@ -1385,7 +1393,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) { TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); @@ -1406,7 +1414,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; @@ -1431,7 +1439,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; @@ -1466,7 +1474,7 @@ TEST_P(IEClassLoadNetworkTest, 
QueryNetworkHETEROWithMULTINoThrow_V10) { TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; @@ -1500,7 +1508,7 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; + Core ie = createCoreWithTemplate(); { auto versions = ie.GetVersions(std::string(CommonTestUtils::DEVICE_MULTI) + ":" + deviceName + "," + CommonTestUtils::DEVICE_CPU); ASSERT_EQ(3, versions.size()); @@ -1510,7 +1518,7 @@ TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins config.insert({"CPU_THREADS_NUM", "3"}); } ASSERT_NO_THROW({ - Core ie; + Core ie = createCoreWithTemplate(); std::string name = actualNetwork.getInputsInfo().begin()->first; actualNetwork.getInputsInfo().at(name)->setPrecision(Precision::U8); auto executableNetwork = ie.LoadNetwork(actualNetwork, deviceName, config); diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/cpp_holders.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/cpp_holders.cpp index f253ee322bf990..49d955c25f6d1b 100644 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/cpp_holders.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/cpp_holders.cpp @@ -8,6 +8,13 @@ #include "behavior/cpp_holders.hpp" namespace BehaviorTestsDefinitions { + inline InferenceEngine::Core createCoreWithTemplate() { + InferenceEngine::Core ie; + std::string pluginName = "templatePlugin"; + pluginName += IE_BUILD_POSTFIX; + ie.RegisterPlugin(pluginName, "TEMPLATE"); + return ie; + } std::string HoldersTest::getTestCaseName(testing::TestParamInfo obj) { std::string targetDevice; std::vector order; @@ -47,7 +54,7 @@ namespace BehaviorTestsDefinitions { void release_order_test(std::vector order, const std::string &deviceName, std::shared_ptr function) { InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::Core core; + InferenceEngine::Core core = createCoreWithTemplate(); auto exe_net = core.LoadNetwork(cnnNet, deviceName); auto request = exe_net.CreateInferRequest(); std::vector states = {}; @@ -60,7 +67,7 @@ namespace BehaviorTestsDefinitions { auto release = [&](int i) { switch (i) { case 0: - core = InferenceEngine::Core{}; + core = createCoreWithTemplate(); break; case 1: exe_net = {}; @@ -84,7 +91,7 @@ namespace BehaviorTestsDefinitions { std::vector order, const std::string &deviceName, std::shared_ptr function) { InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::Core core; + InferenceEngine::Core core = createCoreWithTemplate(); std::stringstream stream; { auto exe_net = core.LoadNetwork(cnnNet, deviceName); @@ -142,7 +149,7 @@ namespace BehaviorTestsDefinitions { TEST_P(HoldersTestOnImportedNetwork, CreateRequestWithCoreRemoved) { InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::Core core; + InferenceEngine::Core core = createCoreWithTemplate(); std::stringstream stream; { auto exe_net = core.LoadNetwork(cnnNet, targetDevice); diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 9d13251574331f..72172e6992482a 100644 --- 
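The test changes above all swap a locally constructed Core for the createCoreWithTemplate() helper defined just above in cpp_holders.cpp, which registers the template plugin under the "TEMPLATE" device name before the test body runs. Below is a minimal, self-contained sketch of that pattern; the IE_BUILD_POSTFIX fallback and the main() driver are additions made only so the snippet builds outside the test framework.

#include <inference_engine.hpp>

#include <iostream>
#include <string>

// IE_BUILD_POSTFIX normally comes from the Inference Engine build system
// (for example "d" in debug builds); the fallback is only for a standalone build.
#ifndef IE_BUILD_POSTFIX
#define IE_BUILD_POSTFIX ""
#endif

// Create a Core with the template plugin registered as the "TEMPLATE" device,
// mirroring the helper used by the tests in this patch.
static InferenceEngine::Core createCoreWithTemplate() {
    InferenceEngine::Core ie;
    std::string pluginName = "templatePlugin";
    pluginName += IE_BUILD_POSTFIX;
    ie.RegisterPlugin(pluginName, "TEMPLATE");
    return ie;
}

int main() {
    InferenceEngine::Core ie = createCoreWithTemplate();
    // After registration the TEMPLATE device can be used like any other one,
    // e.g. ie.LoadNetwork(network, "TEMPLATE").
    std::cout << "TEMPLATE plugin registered" << std::endl;
    return 0;
}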
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -101,7 +101,7 @@ class LayerTestsCommon : public CommonTestUtils::TestsCommon { double diff = static_cast(absoluteDifference) / max; if (max == 0 || (diff > static_cast(threshold)) || std::isnan(static_cast(res)) || std::isnan(static_cast(ref))) { - IE_THROW() << "Relative comparison of values expected: " << ref << " and actual: " << res + IE_THROW() << "Relative comparison of values expected: " << std::to_string(ref) << " and actual: " << std::to_string(res) << " at index " << i << " with threshold " << threshold << " failed"; } diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/src/plugin_cache.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/src/plugin_cache.cpp index 95b8e29eff417d..95c7e18a18115c 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/src/plugin_cache.cpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/src/plugin_cache.cpp @@ -49,6 +49,13 @@ std::shared_ptr PluginCache::ie(const std::string &device } assert(0 != ie_core.use_count()); + // register template plugin if it is needed + try { + std::string pluginName = "templatePlugin"; + pluginName += IE_BUILD_POSTFIX; + ie_core->RegisterPlugin(pluginName, "TEMPLATE"); + } catch (...) {} + if (!deviceToCheck.empty()) { std::vector metrics = ie_core->GetMetric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS)); @@ -61,11 +68,13 @@ std::shared_ptr PluginCache::ie(const std::string &device std::exit(EXIT_FAILURE); } +#ifndef NDEBUG std::cout << "Available devices for " << deviceToCheck << ":" << std::endl; for (const auto &device : availableDevices) { std::cout << " " << device << std::endl; } +#endif } } return ie_core; diff --git a/ngraph/frontend/onnx_import/CMakeLists.txt b/ngraph/frontend/onnx_import/CMakeLists.txt index 0ddb78ad071510..bb6a4e7ff99580 100644 --- a/ngraph/frontend/onnx_import/CMakeLists.txt +++ b/ngraph/frontend/onnx_import/CMakeLists.txt @@ -45,7 +45,7 @@ if(COMMAND ie_faster_build) ) endif() -target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder +target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder inference_engine_transformations PUBLIC ngraph) target_include_directories(onnx_importer PUBLIC $ diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/node.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/node.hpp index c2a3e6b820cdbc..cb5d11fde31e5c 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/node.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/node.hpp @@ -75,9 +75,8 @@ namespace ngraph bool has_attribute(const std::string& name) const; - Subgraph get_subgraph_from_attribute( - const std::string& name, - const std::map& carried_dependencies_map) const; + bool has_subgraph() const; + std::shared_ptr get_subgraph() const; template T get_attribute_value(const std::string& name, T default_value) const; diff --git a/ngraph/frontend/onnx_import/include/onnx_import/onnx_framework_node.hpp b/ngraph/frontend/onnx_import/include/onnx_import/onnx_framework_node.hpp new file mode 100644 index 00000000000000..bfa902a5ac449c --- /dev/null +++ b/ngraph/frontend/onnx_import/include/onnx_import/onnx_framework_node.hpp @@ -0,0 +1,100 @@ +//***************************************************************************** +// Copyright 2017-2021 
Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include +#include +#include +#include + +namespace ONNX_NAMESPACE +{ + // forward declaration + class ModelProto; +} // namespace ONNX_NAMESPACE + +namespace ngraph +{ + namespace onnx_import + { + class Model; + } + + namespace frontend + { + class ONNXFrameworkNode : public op::FrameworkNode + { + public: + NGRAPH_RTTI_DECLARATION; + + ONNXFrameworkNode(const onnx_import::Node& node) + : FrameworkNode(node.get_ng_inputs(), node.get_outputs_size()) + , m_node(node) + { + } + + ONNXFrameworkNode(const onnx_import::Node& node, const OutputVector& inputs) + : FrameworkNode(inputs, node.get_outputs_size()) + , m_node(node) + { + } + + const onnx_import::Node& get_onnx_node() const { return m_node; } + + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& inputs) const override; + + virtual bool visit_attributes(AttributeVisitor& visitor) override + { + // TODO: implement reading as well, now it work for serialization only + std::string domain = m_node.domain(); + std::string op_type = m_node.op_type(); + visitor.on_attribute("ONNX_META_domain", domain); + visitor.on_attribute("ONNX_META_type", op_type); + return true; + } + + private: + onnx_import::Node m_node; + }; + + class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode + { + public: + NGRAPH_RTTI_DECLARATION; + + ONNXSubgraphFrameworkNode(const onnx_import::Node& node, const OutputVector& inputs) + : ONNXFrameworkNode(node, inputs) + { + } + + void infer_inputs_from_parent() + { + get_onnx_node().get_subgraph()->infer_inputs_from_parent(); + } + + std::shared_ptr get_subgraph_body() const + { + auto subgraph = get_onnx_node().get_subgraph(); + return std::make_shared(subgraph->get_ng_outputs(), + subgraph->get_ng_parameters(), + subgraph->get_name()); + } + }; + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/attribute.cpp b/ngraph/frontend/onnx_import/src/core/attribute.cpp index 8eaa8c93517d8e..1fd61931de9629 100644 --- a/ngraph/frontend/onnx_import/src/core/attribute.cpp +++ b/ngraph/frontend/onnx_import/src/core/attribute.cpp @@ -11,9 +11,7 @@ namespace ngraph { namespace onnx_import { - Subgraph Attribute::get_subgraph( - const Graph& parent_graph, - const std::map& carried_dependencies_map) const + Subgraph Attribute::get_subgraph(const Graph& parent_graph) const { if (m_attribute_proto->type() != ONNX_NAMESPACE::AttributeProto_AttributeType_GRAPH) { @@ -25,33 +23,6 @@ namespace ngraph const auto& graph = m_attribute_proto->g(); model_proto->mutable_graph()->CopyFrom(graph); - const std::size_t subgraph_inputs_count = - static_cast(model_proto->mutable_graph()->mutable_input()->size()); - // Use the `carried_dependencies_map` to infer the types for the subgraph inputs - for (const auto& carried_dependency : carried_dependencies_map) - { - if (carried_dependency.first >= 
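The new onnx_framework_node.hpp above introduces placeholder nodes that keep the original ONNX node (and, for ONNXSubgraphFrameworkNode, its decoded body) so a model can be decoded first and converted to real nGraph operations later. The sketch below shows the general placeholder-node pattern with the public nGraph API: a custom op whose outputs stay dynamic until a later pass replaces it. MyPlaceholderNode is a hypothetical stand-in that derives from op::Op rather than op::FrameworkNode, so it only approximates the class added in this patch.

#include <ngraph/ngraph.hpp>

#include <iostream>
#include <memory>

// A placeholder operation: it knows how many outputs the original node had,
// but leaves their types and shapes dynamic until conversion happens.
class MyPlaceholderNode : public ngraph::op::Op {
public:
    NGRAPH_RTTI_DECLARATION;

    MyPlaceholderNode() = default;
    MyPlaceholderNode(const ngraph::OutputVector& inputs, size_t output_size)
        : Op(inputs), m_output_size(output_size) {
        constructor_validate_and_infer_types();
    }

    void validate_and_infer_types() override {
        set_output_size(m_output_size);
        for (size_t i = 0; i < m_output_size; ++i)
            set_output_type(i, ngraph::element::dynamic, ngraph::PartialShape::dynamic());
    }

    std::shared_ptr<ngraph::Node>
        clone_with_new_inputs(const ngraph::OutputVector& inputs) const override {
        return std::make_shared<MyPlaceholderNode>(inputs, m_output_size);
    }

private:
    size_t m_output_size = 1;
};

NGRAPH_RTTI_DEFINITION(MyPlaceholderNode, "MyPlaceholderNode", 1);

int main() {
    auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
    auto placeholder = std::make_shared<MyPlaceholderNode>(ngraph::OutputVector{input}, 2);
    // Both outputs stay fully dynamic until a conversion pass replaces the node.
    std::cout << placeholder->get_output_partial_shape(0) << std::endl;
    return 0;
}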
subgraph_inputs_count) - { - NGRAPH_WARN << "Input with index: '" << carried_dependency.first - << "' was not found in the subgraph"; - } - else - { - const auto& parent_in = - parent_graph.get_ng_node_from_cache(carried_dependency.second); - const auto& carried_type = parent_in.get_element_type(); - auto subgraph_in = - model_proto->mutable_graph()->mutable_input(carried_dependency.first); - auto subgraph_in_tensor_type = - subgraph_in->mutable_type()->mutable_tensor_type(); - if (!subgraph_in_tensor_type->has_elem_type()) - { - subgraph_in_tensor_type->set_elem_type( - onnx_common::ng_to_onnx_data_type(carried_type)); - } - } - } - // set opset version and domain from the parent graph model_proto->mutable_opset_import()->CopyFrom(parent_graph.get_opset_imports()); auto model = common::make_unique(std::move(model_proto)); diff --git a/ngraph/frontend/onnx_import/src/core/attribute.hpp b/ngraph/frontend/onnx_import/src/core/attribute.hpp index bc192e7b392fcb..963dab22cb53de 100644 --- a/ngraph/frontend/onnx_import/src/core/attribute.hpp +++ b/ngraph/frontend/onnx_import/src/core/attribute.hpp @@ -316,9 +316,7 @@ namespace ngraph float get_float() const { return m_attribute_proto->f(); } int64_t get_integer() const { return m_attribute_proto->i(); } const std::string& get_string() const { return m_attribute_proto->s(); } - Subgraph get_subgraph( - const Graph& parent_graph, - const std::map& carried_dependencies_map) const; + Subgraph get_subgraph(const Graph& parent_graph) const; std::vector get_tensor_array() const { diff --git a/ngraph/frontend/onnx_import/src/core/graph.cpp b/ngraph/frontend/onnx_import/src/core/graph.cpp index c8f56327d6bb22..569d3849774859 100644 --- a/ngraph/frontend/onnx_import/src/core/graph.cpp +++ b/ngraph/frontend/onnx_import/src/core/graph.cpp @@ -14,6 +14,7 @@ #include "ngraph/node.hpp" #include "ngraph/provenance.hpp" #include "onnx_import/core/node.hpp" +#include "onnx_import/onnx_framework_node.hpp" #include "utils/common.hpp" #include "utils/provenance_tag.hpp" @@ -55,25 +56,6 @@ namespace ngraph Graph::Graph(std::unique_ptr&& model) : Graph(std::move(model), common::make_unique()) { - // Remove dangling Parameters - for (auto param_it = m_parameters.begin(); param_it != m_parameters.end();) - { - if ((*param_it)->get_output_target_inputs(0).size() == 0) - { - const auto& name = (*param_it)->get_friendly_name(); - auto out_it = std::find_if( - m_outputs.begin(), m_outputs.end(), [&name](const ValueInfo& info) { - return info.get_name() == name; - }); - if (out_it == m_outputs.end()) - { - m_cache->remove_node(name); - param_it = m_parameters.erase(param_it); - continue; - } - } - param_it++; - } } Graph::Graph(std::unique_ptr&& model, std::unique_ptr&& cache) @@ -174,14 +156,82 @@ namespace ngraph NGRAPH_CHECK(unknown_operators.empty(), "nGraph does not support the following ONNX operations: ", detail::to_string(unknown_operators)); + } + void Graph::convert_to_ngraph_nodes() + { // Process ONNX graph nodes, convert to nGraph nodes for (const auto& node_proto : m_model->get_graph().node()) { m_nodes.emplace_back(node_proto, *this); const Node& node{m_nodes.back()}; - + if (node.has_subgraph()) + { + auto subgraph = node.get_subgraph(); + auto body_func = subgraph->convert(); + } OutputVector ng_nodes{node.get_ng_nodes()}; + set_friendly_names(node, ng_nodes); + for (std::size_t i{0}; i < node.get_outputs_size(); ++i) + { + m_cache->emplace_node(node.output(i), std::move(ng_nodes.at(i))); + } + } + } + + void Graph::remove_dangling_parameters() + { + for 
(auto param_it = m_parameters.begin(); param_it != m_parameters.end();) + { + if ((*param_it)->get_output_target_inputs(0).size() == 0) + { + const auto& name = (*param_it)->get_friendly_name(); + auto out_it = std::find_if( + m_outputs.begin(), m_outputs.end(), [&name](const ValueInfo& info) { + return info.get_name() == name; + }); + if (out_it == m_outputs.end()) + { + m_cache->remove_node(name); + param_it = m_parameters.erase(param_it); + continue; + } + } + param_it++; + } + } + + std::shared_ptr Graph::convert() + { + convert_to_ngraph_nodes(); + remove_dangling_parameters(); + return create_function(); + } + + void Graph::decode_to_framework_nodes() + { + // Process ONNX graph nodes, convert to nGraph nodes + for (const auto& node_proto : m_model->get_graph().node()) + { + m_nodes.emplace_back(node_proto, *this); + const Node& node{m_nodes.back()}; + std::shared_ptr framework_node; + if (node.has_subgraph()) + { + auto subgraph = node.get_subgraph(); + auto body_func = subgraph->decode(); + auto inputs = node.get_ng_inputs(); + for (const auto& input : subgraph->get_inputs_from_parent()) + inputs.push_back(input); + framework_node = + std::make_shared(node, inputs); + } + else + { + framework_node = std::make_shared(node); + } + OutputVector ng_nodes{framework_node->outputs()}; + set_friendly_names(node, ng_nodes); // Iterate over the number of outputs for given node in graph. // Some of them may be optional and trimmed. See: // https://github.com/onnx/onnx/blob/master/docs/IR.md#optional-inputs-and-outputs @@ -192,12 +242,24 @@ namespace ngraph } } - const GraphCache& Graph::get_graph_cache() const { return *m_cache.get(); } - bool Graph::is_node_in_cache(const std::string& name) const + std::shared_ptr Graph::create_function() + { + auto function = std::make_shared(get_ng_outputs(), m_parameters, get_name()); + for (std::size_t i{0}; i < function->get_output_size(); ++i) + { + function->get_output_op(i)->set_friendly_name(m_outputs.at(i).get_name()); + } + return function; + } + + std::shared_ptr Graph::decode() { - return m_cache->contains(name); + decode_to_framework_nodes(); + return create_function(); } + const GraphCache& Graph::get_graph_cache() const { return *m_cache.get(); } + Output Graph::get_ng_node_from_cache(const std::string& name) const { return m_cache->get_node(name); @@ -247,6 +309,12 @@ namespace ngraph set_friendly_names(onnx_node, ng_node_vector); add_provenance_tags(onnx_node, ng_node_vector); + for (std::size_t i{0}; i < onnx_node.get_outputs_size(); ++i) + { + auto ng_node = ng_node_vector.at(i); + m_cache->emplace_node(onnx_node.output(i), std::move(ng_node)); + } + return ng_node_vector; } @@ -323,9 +391,21 @@ namespace ngraph } Subgraph::Subgraph(std::unique_ptr&& model, const Graph& parent_graph) - : Graph( - std::move(model), - std::unique_ptr(new SubgraphCache(parent_graph.get_graph_cache()))) + : Graph(std::move(model), common::make_unique()) + , m_parent_graph_cache(&parent_graph.get_graph_cache()) + { + } + + Output Subgraph::get_ng_node_from_cache(const std::string& name) const + { + if (m_cache->contains(name)) + { + return m_cache->get_node(name); + } + return m_parent_graph_cache->get_node(name); + } + + void Subgraph::find_inputs_from_parent() { // find all nodes on edge parent graph-subgraph // (it means input of node from parent graph, output from subgraph) @@ -334,16 +414,16 @@ namespace ngraph int input_index = 0; for (const auto& in_name : node_proto.input()) { - if (m_cache->node_scope(in_name) == NodeScope::ParentGraph) + if 
(m_parent_graph_cache->contains(in_name)) { - const auto& from_parent_node = m_cache->get_node(in_name); + const auto& from_parent_node = m_parent_graph_cache->get_node(in_name); // constants are skipped if (!ngraph::is_type( from_parent_node.get_node_shared_ptr())) { for (const auto& out_name : node_proto.output()) { - if (m_cache->node_scope(out_name) == NodeScope::SubGraph) + if (m_cache->contains(out_name)) { auto out_node_to_replace_input = m_cache->get_node(out_name); auto new_param = std::make_shared( @@ -353,8 +433,10 @@ namespace ngraph out_node_to_replace_input.get_node() ->input(input_index) .replace_source_output(new_param); + m_parameter_to_parent_node_map.insert({new_param, in_name}); + m_cache->emplace_node(in_name, new_param); m_parameters.push_back(new_param); - m_outputs_from_parent.push_back(from_parent_node); + m_inputs_from_parent.push_back(in_name); } } } @@ -364,11 +446,39 @@ namespace ngraph } } - const std::vector> Subgraph::get_outputs_from_parent() const + std::shared_ptr Subgraph::convert() { - return m_outputs_from_parent; + convert_to_ngraph_nodes(); + find_inputs_from_parent(); + return create_function(); } + void Subgraph::decode_to_framework_nodes() + { + Graph::decode_to_framework_nodes(); + find_inputs_from_parent(); + } + + const std::vector> Subgraph::get_inputs_from_parent() const + { + OutputVector result; + for (const auto& name : m_inputs_from_parent) + { + result.push_back(m_parent_graph_cache->get_node(name)); + } + return result; + } + + void Subgraph::infer_inputs_from_parent() + { + for (auto& it : m_parameter_to_parent_node_map) + { + const auto& node = m_parent_graph_cache->get_node(it.second); + auto& parameter = it.first; + parameter->set_element_type(node.get_element_type()); + parameter->set_partial_shape(node.get_partial_shape()); + } + } } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/graph.hpp b/ngraph/frontend/onnx_import/src/core/graph.hpp index 6cbd880410984c..33c2be5d4d20e8 100644 --- a/ngraph/frontend/onnx_import/src/core/graph.hpp +++ b/ngraph/frontend/onnx_import/src/core/graph.hpp @@ -31,13 +31,14 @@ namespace ngraph Graph& operator=(const Graph&) = delete; Graph& operator=(Graph&&) = default; + virtual std::shared_ptr convert(); + std::shared_ptr decode(); const std::vector& get_nodes() const { return m_nodes; } const std::vector& get_inputs() const { return m_inputs; } const std::vector& get_outputs() const { return m_outputs; } OutputVector get_ng_outputs() const; const ParameterVector& get_ng_parameters() const { return m_parameters; } - bool is_node_in_cache(const std::string& name) const; - Output get_ng_node_from_cache(const std::string& name) const; + virtual Output get_ng_node_from_cache(const std::string& name) const; const std::string& get_name() const { return m_model->get_graph().name(); } OutputVector make_ng_nodes(const Node& onnx_node) const; const GraphCache& get_graph_cache() const; @@ -60,6 +61,11 @@ namespace ngraph const OutputVector& ng_node_vector) const; protected: + virtual void decode_to_framework_nodes(); + void convert_to_ngraph_nodes(); + void remove_dangling_parameters(); + std::shared_ptr create_function(); + ParameterVector m_parameters; std::unique_ptr m_model; std::unique_ptr m_cache; @@ -82,9 +88,11 @@ namespace ngraph /// \param[in] parent_graph The reference to the parent graph. Subgraph(std::unique_ptr&& model, const Graph& parent_graph); - /// \brief Return outputs which are on the edge the subgraph and the parent graph. 
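Subgraph::infer_inputs_from_parent() above copies the element type and partial shape of the matching parent-graph value onto each body Parameter, which is what allows subgraph inputs to start out fully dynamic (see also the Loop changes later in this patch). The following is a small sketch of that propagation using the public nGraph API; the variable names are illustrative only.

#include <ngraph/ngraph.hpp>

#include <iostream>
#include <memory>

using namespace ngraph;

int main() {
    // A subgraph input whose type and shape are unknown at decode time.
    auto body_param = std::make_shared<op::Parameter>(element::dynamic, PartialShape::dynamic());

    // The corresponding value produced in the parent graph.
    auto parent_param = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 224, 224});
    Output<Node> parent_value = parent_param->output(0);

    // The same propagation as in infer_inputs_from_parent(): copy the element
    // type and partial shape from the parent value onto the body Parameter.
    body_param->set_element_type(parent_value.get_element_type());
    body_param->set_partial_shape(parent_value.get_partial_shape());
    body_param->validate_and_infer_types();

    std::cout << body_param->get_element_type() << " "
              << body_param->get_partial_shape() << std::endl;  // f32 {1,3,224,224}
    return 0;
}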
+ /// \brief Return nodes which are on the edge the subgraph and the parent graph. /// \return Vector of edge nodes from parent scope. - const std::vector> get_outputs_from_parent() const; + const std::vector> get_inputs_from_parent() const; + + std::shared_ptr convert() override; Subgraph() = delete; @@ -94,8 +102,17 @@ namespace ngraph Subgraph& operator=(const Subgraph&) = delete; Subgraph& operator=(Subgraph&&) = default; + Output get_ng_node_from_cache(const std::string& name) const override; + void infer_inputs_from_parent(); + private: - std::vector> m_outputs_from_parent; + void decode_to_framework_nodes() override; + void find_inputs_from_parent(); + + const GraphCache* m_parent_graph_cache; + std::vector m_inputs_from_parent; + std::unordered_map, std::string> + m_parameter_to_parent_node_map; }; inline std::ostream& operator<<(std::ostream& outs, const Graph& graph) diff --git a/ngraph/frontend/onnx_import/src/core/graph_cache.cpp b/ngraph/frontend/onnx_import/src/core/graph_cache.cpp index 9a0e0b59bbc42e..69593c062a3e69 100644 --- a/ngraph/frontend/onnx_import/src/core/graph_cache.cpp +++ b/ngraph/frontend/onnx_import/src/core/graph_cache.cpp @@ -39,55 +39,5 @@ namespace ngraph { return (m_graph_cache_map.count(name) > 0); } - - NodeScope GraphCache::node_scope(const std::string& name) const - { - return contains(name) ? NodeScope::ParentGraph : NodeScope::Lack; - } - - SubgraphCache::SubgraphCache(const GraphCache& parent_graph_cache) - : m_parent_graph_cache{&parent_graph_cache} - { - if (m_parent_graph_cache == nullptr) - { - throw ngraph_error("Parent graph cache is not initialized"); - } - } - - Output SubgraphCache::get_node(const std::string& name) const - { - // present in subgraph scope - if (GraphCache::contains(name)) - { - return GraphCache::get_node(name); - } - else // present in parent graph scope - { - return m_parent_graph_cache->get_node(name); - } - } - - bool SubgraphCache::contains(const std::string& name) const - { - // the node is in subgraph or in parent graph scope - return GraphCache::contains(name) || m_parent_graph_cache->contains(name); - } - - NodeScope SubgraphCache::node_scope(const std::string& name) const - { - if (GraphCache::contains(name)) - { - return NodeScope::SubGraph; - } - else if (m_parent_graph_cache->contains(name)) - { - return NodeScope::ParentGraph; - } - else - { - return NodeScope::Lack; - } - } - } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/graph_cache.hpp b/ngraph/frontend/onnx_import/src/core/graph_cache.hpp index a59af9b4a9f146..556811a91df326 100644 --- a/ngraph/frontend/onnx_import/src/core/graph_cache.hpp +++ b/ngraph/frontend/onnx_import/src/core/graph_cache.hpp @@ -14,17 +14,6 @@ namespace ngraph { namespace onnx_import { - /// \brief Enum which determines scope (visibility) of nodes in GraphCache. - enum class NodeScope - { - // in parent graph scope - ParentGraph = 1, - // in subgraph scope - SubGraph, - // not available at all - Lack - }; - /// \brief GraphCache stores and provides access to ONNX graph initializers. class GraphCache { @@ -58,58 +47,10 @@ namespace ngraph /// \return true if the node named `name` exist in the cache, false otherwise. virtual bool contains(const std::string& name) const; - /// \brief Return NodeScope enum which determines scope of the node. - /// \note If the method is called on GraphCache the ParentGraph enum - /// value is retunred always. - /// - /// \param[in] name The name of the node. 
- /// - /// \return SubGraph if node belongs to SubgraphCache, ParentGraph if - /// is avalible in parent_graph_cache, otherwise Lack - virtual NodeScope node_scope(const std::string& name) const; - virtual ~GraphCache() = default; private: std::map> m_graph_cache_map; }; - - class SubgraphCache : public GraphCache - { - public: - /// \brief Constructs a SubgraphCache class object. - /// - /// \param[in] parent_graph_cache The reference to the parent graph. - SubgraphCache(const GraphCache& parent_graph_cache); - - /// \brief Get the node from the cache (subgraph or parent graph) - /// - /// \note If the node is not found the ngraph_error exception is thrown. - /// - /// \param[in] name The name of the node. - /// - /// \return The node named `name` from subgraph (as present) or from parent graph. - Output get_node(const std::string& name) const override; - - /// \brief Return true if the node named `name` exist in the cache. - /// - /// \param[in] name The name of the node. - /// - /// \return true if the node named `name` exist in the cache - /// (subgraph or parent graph), false otherwise. - bool contains(const std::string& name) const override; - - /// \brief Return NodeScope enum which determines scope of the node. - /// - /// \param[in] name The name of the node. - /// - /// \return SubGraph if the node belongs to SubgraphCache, ParentGraph if - /// is avalible in parent_graph_cache, otherwise Lack - NodeScope node_scope(const std::string& name) const override; - - private: - const GraphCache* m_parent_graph_cache; - }; - } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/model.cpp b/ngraph/frontend/onnx_import/src/core/model.cpp index 452aea7b4775e4..2ddd3edac02e7a 100644 --- a/ngraph/frontend/onnx_import/src/core/model.cpp +++ b/ngraph/frontend/onnx_import/src/core/model.cpp @@ -6,6 +6,7 @@ #include "core/model.hpp" #include "ngraph/log.hpp" +#include "onnx_import/onnx_framework_node.hpp" #include "ops_bridge.hpp" namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/node.cpp b/ngraph/frontend/onnx_import/src/core/node.cpp index 1361e802bbff24..b6f2797263b384 100644 --- a/ngraph/frontend/onnx_import/src/core/node.cpp +++ b/ngraph/frontend/onnx_import/src/core/node.cpp @@ -26,6 +26,29 @@ namespace ngraph , m_graph{&graph} , m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())} , m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())} + { + const auto it = + std::find_if(std::begin(m_attributes), + std::end(m_attributes), + [&](const Attribute& attribute) { return attribute.is_graph(); }); + m_has_subgraph = it != std::end(m_attributes); + if (m_has_subgraph) + { + m_subgraph = std::make_shared(it->get_subgraph(*m_graph)); + } + } + + Impl(const ONNX_NAMESPACE::NodeProto& node_proto, + const Graph& graph, + std::shared_ptr subgraph) + : m_node_proto{&node_proto} + , m_name{node_proto.has_name() ? 
node_proto.name() : ""} + , m_domain{get_node_domain(node_proto)} + , m_graph{&graph} + , m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())} + , m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())} + , m_has_subgraph(subgraph != nullptr) + , m_subgraph(subgraph) { } @@ -44,9 +67,8 @@ namespace ngraph bool has_attribute(const std::string& name) const; - Subgraph get_subgraph_from_attribute( - const std::string& name, - const std::map& carried_dependencies_map) const; + bool has_subgraph() const; + std::shared_ptr get_subgraph() const; template T get_attribute_value(const std::string& name, T default_value) const; @@ -58,6 +80,8 @@ namespace ngraph const Graph& graph() const; private: + Subgraph get_subgraph_from_attribute(const std::string& name) const; + const ONNX_NAMESPACE::NodeProto* m_node_proto; std::string m_name; std::string m_domain; @@ -65,6 +89,9 @@ namespace ngraph std::vector m_attributes; std::vector> m_output_names; mutable std::string m_description; + + bool m_has_subgraph; + std::shared_ptr m_subgraph; }; const ONNX_NAMESPACE::NodeProto& Node::Impl::node_proto() const { return *m_node_proto; } @@ -94,9 +121,7 @@ namespace ngraph return it != std::end(m_attributes); } - Subgraph Node::Impl::get_subgraph_from_attribute( - const std::string& name, - const std::map& carried_dependencies_map) const + Subgraph Node::Impl::get_subgraph_from_attribute(const std::string& name) const { auto it = std::find_if( std::begin(m_attributes), std::end(m_attributes), [&](const Attribute& attribute) { @@ -106,9 +131,13 @@ namespace ngraph { throw error::node::UnknownAttribute{this->name(), name}; } - return it->get_subgraph(graph(), carried_dependencies_map); + return it->get_subgraph(*m_graph); } + bool Node::Impl::has_subgraph() const { return m_has_subgraph; } + + std::shared_ptr Node::Impl::get_subgraph() const { return m_subgraph; } + template T Node::Impl::get_attribute_value(const std::string& name, T default_value) const { @@ -140,8 +169,7 @@ namespace ngraph template <> Subgraph Node::Impl::get_attribute_value(const std::string& name) const { - const std::map empty_map; - return get_subgraph_from_attribute(name, empty_map); + return get_subgraph_from_attribute(name); } OutputVector Node::Impl::get_ng_nodes(const Node& node) const @@ -196,7 +224,9 @@ namespace ngraph } Node::Node(const Node& other) - : m_pimpl{new Impl{other.m_pimpl->node_proto(), other.m_pimpl->graph()}, + : m_pimpl{new Impl{other.m_pimpl->node_proto(), + other.m_pimpl->graph(), + other.get_subgraph()}, [](Impl* impl) { delete impl; }} { } @@ -219,12 +249,9 @@ namespace ngraph return m_pimpl->has_attribute(name); } - Subgraph Node::get_subgraph_from_attribute( - const std::string& name, - const std::map& carried_dependencies_map) const - { - return m_pimpl->get_subgraph_from_attribute(name, carried_dependencies_map); - } + bool Node::has_subgraph() const { return m_pimpl->has_subgraph(); } + + std::shared_ptr Node::get_subgraph() const { return m_pimpl->get_subgraph(); } std::vector Node::get_attribute_names() const { @@ -462,7 +489,6 @@ namespace ngraph { return m_pimpl->template get_attribute_value>(name); } - } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/core/null_node.hpp b/ngraph/frontend/onnx_import/src/core/null_node.hpp index c02a06ecfd2706..dd75770488c435 100644 --- a/ngraph/frontend/onnx_import/src/core/null_node.hpp +++ b/ngraph/frontend/onnx_import/src/core/null_node.hpp @@ -36,7 +36,10 @@ namespace 
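In the node.cpp changes above, Node::Impl now scans its attributes once in the constructor, caches the subgraph if a graph attribute is present, and exposes it through the cheap has_subgraph()/get_subgraph() accessors. The snippet below illustrates that construct-time detection pattern with simplified stand-in types (Attribute, Subgraph and NodeImpl here are illustrative, not the importer's classes).

#include <algorithm>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Simplified stand-ins for the ONNX attribute and subgraph types.
struct Attribute {
    std::string name;
    bool is_graph() const { return name == "body"; }
};
struct Subgraph {
    std::string source_attribute;
};

// Scan the attributes once at construction, cache the subgraph if present,
// and keep the accessors trivial afterwards.
class NodeImpl {
public:
    explicit NodeImpl(std::vector<Attribute> attributes)
        : m_attributes(std::move(attributes)) {
        const auto it = std::find_if(m_attributes.begin(), m_attributes.end(),
                                     [](const Attribute& a) { return a.is_graph(); });
        if (it != m_attributes.end())
            m_subgraph = std::make_shared<Subgraph>(Subgraph{it->name});
    }

    bool has_subgraph() const { return m_subgraph != nullptr; }
    std::shared_ptr<Subgraph> get_subgraph() const { return m_subgraph; }

private:
    std::vector<Attribute> m_attributes;
    std::shared_ptr<Subgraph> m_subgraph;
};

int main() {
    NodeImpl loop_like({{"body"}, {"trip_count"}});
    NodeImpl add_like({{"alpha"}});
    std::cout << std::boolalpha << loop_like.has_subgraph() << " "
              << add_like.has_subgraph() << std::endl;  // true false
    return 0;
}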
ngraph public: static constexpr NodeTypeInfo type_info{"NullNode", 0}; const NodeTypeInfo& get_type_info() const override { return type_info; } - NullNode() = default; + NullNode() + : Node(1) + { + } virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; diff --git a/ngraph/frontend/onnx_import/src/core/value_info.hpp b/ngraph/frontend/onnx_import/src/core/value_info.hpp index 67f2c5f7e2b779..76b3357c6ab3bb 100644 --- a/ngraph/frontend/onnx_import/src/core/value_info.hpp +++ b/ngraph/frontend/onnx_import/src/core/value_info.hpp @@ -19,20 +19,6 @@ namespace ngraph { namespace onnx_import { - namespace error - { - namespace value_info - { - struct unspecified_element_type : ngraph_error - { - unspecified_element_type() - : ngraph_error{"value info has no element type specified"} - { - } - }; - } // namespace value_info - } // namespace error - class ValueInfo { public: @@ -65,12 +51,12 @@ namespace ngraph const PartialShape& get_shape() const { return m_partial_shape; } const element::Type& get_element_type() const { - if (!m_value_info_proto->type().tensor_type().has_elem_type()) + if (m_value_info_proto->type().tensor_type().has_elem_type()) { - throw error::value_info::unspecified_element_type{}; + return common::get_ngraph_element_type( + m_value_info_proto->type().tensor_type().elem_type()); } - return common::get_ngraph_element_type( - m_value_info_proto->type().tensor_type().elem_type()); + return ngraph::element::dynamic; } std::shared_ptr diff --git a/ngraph/frontend/onnx_import/src/onnx_framework_node.cpp b/ngraph/frontend/onnx_import/src/onnx_framework_node.cpp new file mode 100644 index 00000000000000..bf52a1a2c0b8a0 --- /dev/null +++ b/ngraph/frontend/onnx_import/src/onnx_framework_node.cpp @@ -0,0 +1,34 @@ +//***************************************************************************** +// Copyright 2017-2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include + +namespace ngraph +{ + namespace frontend + { + NGRAPH_RTTI_DEFINITION(ONNXFrameworkNode, "ONNXFrameworkNode", 1); + + std::shared_ptr + ONNXFrameworkNode::clone_with_new_inputs(const OutputVector& inputs) const + { + return std::make_shared(m_node, inputs); + } + + NGRAPH_RTTI_DEFINITION(ONNXSubgraphFrameworkNode, "ONNXSubgraphFrameworkNode", 1); + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx_import/src/op/loop.cpp index 23ded7464e2d3b..dbe4f68d8c983a 100644 --- a/ngraph/frontend/onnx_import/src/op/loop.cpp +++ b/ngraph/frontend/onnx_import/src/op/loop.cpp @@ -77,10 +77,18 @@ namespace ngraph loop_carried_dependencies[i].get_node()->get_friendly_name(); } - const Subgraph& body_graph{ - node.get_subgraph_from_attribute("body", loop_carried_dependencies_map)}; - auto body_outputs = body_graph.get_ng_outputs(); - const auto& body_inputs = body_graph.get_ng_parameters(); + auto body_graph = node.get_subgraph(); + auto body_outputs = body_graph->get_ng_outputs(); + const auto& body_inputs = body_graph->get_ng_parameters(); + + // Infer loop body inputs' element type based on carried dependencies + for (size_t i = 0; i < loop_carried_dependencies.size(); i++) + { + body_inputs[i + 2]->set_element_type( + loop_carried_dependencies[i].get_element_type()); + body_inputs[i + 2]->set_partial_shape( + loop_carried_dependencies[i].get_partial_shape()); + } // optional inputs Output trip_count; @@ -190,22 +198,22 @@ namespace ngraph final_values.push_back(loop->get_iter_value(*body_outputs_it++, -1)); } - const auto& outputs_from_parent = body_graph.get_outputs_from_parent(); + const auto& inputs_from_parent = body_graph->get_inputs_from_parent(); CHECK_VALID_NODE( node, static_cast(std::distance(body_inputs_it, body_inputs.end())) == - outputs_from_parent.size(), + inputs_from_parent.size(), "Expected number of invariant parameters is" - " not equal number of provided outputs from parent scope"); + " not equal number of provided inputs from parent scope"); // Set-up parameters from parent graph which are not changed during Loop's // iterations - for (auto out_from_parent_it = outputs_from_parent.begin(); + for (auto in_from_parent_it = inputs_from_parent.begin(); body_inputs_it != body_inputs.end() && - out_from_parent_it != outputs_from_parent.end(); - ++body_inputs_it, ++out_from_parent_it) + in_from_parent_it != inputs_from_parent.end(); + ++body_inputs_it, ++in_from_parent_it) { - loop->set_invariant_input(*body_inputs_it, *out_from_parent_it); + loop->set_invariant_input(*body_inputs_it, *in_from_parent_it); } // Set-up scan outputs diff --git a/ngraph/frontend/onnx_import/src/utils/onnx_internal.cpp b/ngraph/frontend/onnx_import/src/utils/onnx_internal.cpp index 74bb4a72d5c19c..8e60171a198c91 100644 --- a/ngraph/frontend/onnx_import/src/utils/onnx_internal.cpp +++ b/ngraph/frontend/onnx_import/src/utils/onnx_internal.cpp @@ -6,7 +6,9 @@ #include "core/graph.hpp" #include "core/model.hpp" +#include "core/null_node.hpp" #include "core/transform.hpp" +#include "onnx_import/onnx_framework_node.hpp" #include "onnx_import/utils/onnx_internal.hpp" namespace ngraph @@ -15,21 +17,81 @@ namespace ngraph { namespace detail { - std::shared_ptr - convert_to_ng_function(const ONNX_NAMESPACE::ModelProto& model_proto) + void remove_dangling_parameters(std::shared_ptr& function) { - auto p_model_proto = 
common::make_unique(model_proto); - auto model = common::make_unique(std::move(p_model_proto)); + const auto parameters = function->get_parameters(); + for (auto parameter : parameters) + { + const auto parameter_users = parameter->get_users(); + // if a Parameter is connected to a ONNXFrameworkNode that was not converted + // during convert_function it means, this Parameter is dangling and we can + // remove it from function + const bool is_dangling_parameter = std::all_of( + parameter_users.begin(), + parameter_users.end(), + [](const std::shared_ptr& node) -> bool { + return std::dynamic_pointer_cast(node) != + nullptr; + }); + if (is_dangling_parameter) + { + function->remove_parameter(parameter); + } + } + } - Graph graph{std::move(model)}; - auto function = std::make_shared( - graph.get_ng_outputs(), graph.get_ng_parameters(), graph.get_name()); - for (std::size_t i{0}; i < function->get_output_size(); ++i) + void remove_dangling_results(std::shared_ptr& function) + { + const auto results = function->get_results(); + for (auto result : results) { - function->get_output_op(i)->set_friendly_name( - graph.get_outputs().at(i).get_name()); + // we can remove Result from function if after function conversion, + // Result is connected to NullNode only + const auto result_inputs = result->input_values(); + const bool is_dangling_result = + std::all_of(result_inputs.begin(), + result_inputs.end(), + [](const Output& node) -> bool { + return ngraph::op::is_null(node); + }); + if (is_dangling_result) + { + function->remove_result(result); + } } - return function; + } + + void convert_decoded_function(std::shared_ptr function) + { + for (const auto& node : function->get_ordered_ops()) + { + if (auto raw_node = + std::dynamic_pointer_cast(node)) + { + if (auto subgraph_node = + std::dynamic_pointer_cast( + node)) + { + subgraph_node->infer_inputs_from_parent(); + convert_decoded_function(subgraph_node->get_subgraph_body()); + } + const auto& onnx_node = raw_node->get_onnx_node(); + OutputVector ng_nodes{onnx_node.get_ng_nodes()}; + if (ng_nodes.size() > raw_node->get_output_size()) + { + ng_nodes.resize(raw_node->get_output_size()); + } + replace_node(raw_node, ng_nodes); + } + else + { + // Have to revalidate node because new intpus can affect shape/type + // propagation for already translated nodes + node->revalidate_and_infer_types(); + } + } + remove_dangling_parameters(function); + remove_dangling_results(function); } std::shared_ptr import_onnx_model(ONNX_NAMESPACE::ModelProto& model_proto, @@ -39,7 +101,10 @@ namespace ngraph transform::fixup_legacy_operators(model_proto); transform::update_external_data_paths(model_proto, model_path); - return detail::convert_to_ng_function(model_proto); + auto p_model_proto = common::make_unique(model_proto); + auto model = common::make_unique(std::move(p_model_proto)); + Graph graph{std::move(model)}; + return graph.convert(); } } // namespace detail } // namespace onnx_import diff --git a/ngraph/python/tests/test_onnx/test_ops_unary.py b/ngraph/python/tests/test_onnx/test_ops_unary.py index 01c9eeb9f55888..22d6b54f539c29 100644 --- a/ngraph/python/tests/test_onnx/test_ops_unary.py +++ b/ngraph/python/tests/test_onnx/test_ops_unary.py @@ -390,8 +390,7 @@ def test_cast_errors(): for name, value in zip(node.input, [input_data]) ] output_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT16, value.shape) - for name, value in zip(node.output, ()) + make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16, input_data.shape) ] # type: 
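convert_decoded_function() above finishes the delayed conversion and then prunes Parameters whose only users are still-unconverted framework nodes, as well as Results fed only by NullNode. The sketch below shows the parameter-pruning idea on a deliberately simple function built with the public nGraph API; here a parameter with no users at all stands in for a dangling one, and the always-false predicate is a placeholder for the ONNXFrameworkNode check used in the patch.

#include <ngraph/ngraph.hpp>

#include <algorithm>
#include <iostream>
#include <memory>

using namespace ngraph;

int main() {
    // Build a function where parameter `b` is never consumed by any operation.
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto relu = std::make_shared<op::Relu>(a);
    auto function = std::make_shared<Function>(NodeVector{relu}, ParameterVector{a, b}, "pruning_demo");

    // Copy the parameter list before mutating the function, then drop every
    // parameter whose users all satisfy the "still a placeholder" predicate;
    // with no users at all, std::all_of is trivially true.
    const ParameterVector parameters = function->get_parameters();
    for (const auto& parameter : parameters) {
        const auto users = parameter->get_users();
        const bool dangling = std::all_of(users.begin(), users.end(),
                                          [](const std::shared_ptr<Node>&) { return false; });
        if (dangling)
            function->remove_parameter(parameter);
    }

    std::cout << "parameters left: " << function->get_parameters().size() << std::endl;  // 1
    return 0;
}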
ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) @@ -406,8 +405,7 @@ def test_cast_errors(): for name, value in zip(node.input, [input_data]) ] output_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape) - for name, value in zip(node.output, ()) + make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) ] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) @@ -422,8 +420,7 @@ def test_cast_errors(): for name, value in zip(node.input, [input_data]) ] output_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape) - for name, value in zip(node.output, ()) + make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) ] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) @@ -438,8 +435,7 @@ def test_cast_errors(): for name, value in zip(node.input, [input_data]) ] output_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.COMPLEX128, value.shape) - for name, value in zip(node.output, ()) + make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128, input_data.shape) ] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index fb6e91406d9922..c47d5b5d333fca 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -388,7 +388,6 @@ set(MULTI_TEST_SRC backend/comparison.in.cpp backend/concat.in.cpp backend/constant.in.cpp - backend/convert.in.cpp backend/convert_like.in.cpp backend/convolution_backprop.in.cpp backend/convolution.in.cpp diff --git a/ngraph/test/backend/convert.in.cpp b/ngraph/test/backend/convert.in.cpp deleted file mode 100644 index 20cb1d16953d20..00000000000000 --- a/ngraph/test/backend/convert.in.cpp +++ /dev/null @@ -1,1547 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/reference/convert.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/engine/test_engines.hpp" -#include "util/ndarray.hpp" -#include "util/test_case.hpp" -#include "util/test_control.hpp" -#include "util/test_tools.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); -namespace -{ - std::shared_ptr CreateFunction(const Shape& input_shape, - const element::Type& input_type, - const element::Type& expected_output_type) - { - const auto in = make_shared(input_type, input_shape); - const auto convert = make_shared(in, expected_output_type); - return make_shared(NodeVector{convert}, ParameterVector{in}); - } - - template - void ConvertTest(const std::vector& input, - const Shape& input_shape, - const ngraph::element::Type& input_type, - const std::vector& expected_output, - const ngraph::element::Type& expected_output_type) - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto test_case = test::TestCase(f); - test_case.add_input(input); - test_case.add_expected_output(expected_output); - - test_case.run(); - } - - // TestCase doesn't support LP types - template - void LPConvertTest(const std::vector& input, - const Shape& input_shape, - const ngraph::element::Type& input_type, - const std::vector& 
expected_output, - const ngraph::element::Type& expected_output_type) - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} // namespace - -// destination: boolean -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_boolean) -{ - const uint8_t lowest = std::numeric_limits::lowest(); - const uint8_t max = std::numeric_limits::max(); - - const std::vector input{0, 12, 23, 0, lowest, max}; - const Shape input_shape{2, 3}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 1, 1, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::boolean; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_boolean) -{ - const int32_t lowest = std::numeric_limits::lowest(); - const int32_t max = std::numeric_limits::max(); - - const std::vector input{0, -12, 23, 0, lowest, max}; - const Shape input_shape{2, 3}; - const element::Type input_type = ngraph::element::i32; - - const std::vector expected_output{0, 1, 1, 0, 1, 1}; - const element::Type expected_output_type = ngraph::element::boolean; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_boolean) -{ - const float lowest = std::numeric_limits::lowest(); - const float max = std::numeric_limits::max(); - const float min = std::numeric_limits::min(); - const float pos_inf = std::numeric_limits::infinity(); - const float neg_inf = -std::numeric_limits::infinity(); - - const std::vector input{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf}; - const Shape input_shape{3, 3}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output{0, 1, 1, 0, 1, 1, 1, 1, 1}; - const element::Type expected_output_type = ngraph::element::boolean; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: bf16 -NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_bf16) -{ - const std::vector input{ - 0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}; - const Shape input_shape{1, 1, 3, 5}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output(std::begin(input), std::end(input)); - const element::Type expected_output_type = ngraph::element::bf16; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: f16 -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f16) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const element::Type expected_output_type = ngraph::element::f16; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: f32 
-NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_f32) -{ - const std::vector input{0xFE, 0xF2}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{-1.0f, -2.0f, -1.0f, 2.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(float)); - EXPECT_TRUE(test::all_close_f(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_f32) -{ - const std::vector input{-127, -0, 0, 127}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::i8; - - const std::vector expected_output{-127.0f, -0.0f, 0.0f, 127.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_f32) -{ - const std::vector input{-32000, -0, 0, 32000}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::i16; - - const std::vector expected_output{-32000.0f, -0.0f, 0.0f, 32000.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_f32) -{ - const std::vector input{-64000, -0, 0, 64000}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::i32; - - const std::vector expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_f32) -{ - const std::vector input{-64000, -0, 0, 64000}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::i64; - - const std::vector expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_f32) -{ - const std::vector input{0xA0}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1.0f, 0.0f, 1.0f, 0.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(float)); - EXPECT_TRUE(test::all_close_f(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_f32) 
-{ - const std::vector input{0xFB, 0x0A}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{15.0f, 11.0f, 0.0f, 10.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(float)); - EXPECT_TRUE(test::all_close_f(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f32) -{ - const std::vector input{255, 128, 32, 0}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{255.0f, 128.0f, 32.0f, 0.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_f32) -{ - const std::vector input{64000, 32000, 128, 0}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u16; - - const std::vector expected_output{64000.0f, 32000.0f, 128.0f, 0.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_f32) -{ - const std::vector input{4000000, 2000000, 128, 0}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u32; - - const std::vector expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_f32) -{ - const std::vector input{4000000, 2000000, 128, 0}; - const Shape input_shape{2, 2}; - const element::Type input_type = ngraph::element::u64; - - const std::vector expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_f32) -{ - const std::vector input{ - 0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5}; - const Shape input_shape{1, 1, 3, 5}; - const element::Type input_type = ngraph::element::bf16; - - const std::vector expected_output(std::begin(input), std::end(input)); - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_f32) -{ - const std::vector input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; - const Shape input_shape{3, 3}; - const element::Type input_type = ngraph::element::f16; - - const std::vector expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, 
convert_f32_to_f32) -{ - const std::vector input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; - const Shape input_shape{3, 3}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; - const element::Type expected_output_type = ngraph::element::f32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: i4 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_i4) -{ - const std::vector input{0xA0}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{0x10, 0x10}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_i4) -{ - const std::vector input{0x12, 0x03}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{0x12, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i4) -{ - const std::vector input{1, 2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0x12, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_i4) -{ - const std::vector input{1, 2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u16; - - const std::vector expected_output{0x12, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_i4) -{ - const std::vector input{1, 2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u32; - - const std::vector expected_output{0x12, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_i4) -{ - const std::vector input{1, 2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u64; - - const std::vector expected_output{0x12, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_i4) -{ - const std::vector input{-1, -2, 2, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i8; - - const std::vector expected_output{0xFE, 0x23}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_i4) -{ - const std::vector input{-1, -2, 2, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i16; - - const std::vector expected_output{0xFE, 0x23}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, 
expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_i4) -{ - const std::vector input{-1, -2, 2, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i32; - - const std::vector expected_output{0xFE, 0x23}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_i4) -{ - const std::vector input{-1, -2, 2, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i64; - - const std::vector expected_output{0xFE, 0x23}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_i4) -{ - const std::vector input{-1, -2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::f16; - - const std::vector expected_output{0xFE, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_i4) -{ - const std::vector input{-1, -2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::bf16; - - const std::vector expected_output{0xFE, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_i4) -{ - const std::vector input{-1, -2, 0, 3}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output{0xFE, 0x03}; - const element::Type expected_output_type = ngraph::element::i4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: i8 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_i8) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::i8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_i8) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::i8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = 
backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_i8) -{ - const std::vector input{0xFE, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{-1, -2, 4, 3}; - const element::Type expected_output_type = ngraph::element::i8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i8) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 128}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const element::Type expected_output_type = ngraph::element::i8; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: i16 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_i16) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::i16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_i16) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::i16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_i16) -{ - const std::vector input{0xFE, 0x43}; - const Shape input_shape{4}; - const 
element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{-1, -2, 4, 3}; - const element::Type expected_output_type = ngraph::element::i16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i16) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const element::Type expected_output_type = ngraph::element::i16; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: i32 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_i32) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::i32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_i32) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::i32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_i32) -{ - const std::vector input{0xFE, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{-1, -2, 4, 3}; - const element::Type expected_output_type = ngraph::element::i32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); 
- copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i32) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const element::Type expected_output_type = ngraph::element::i32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: i64 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_i64) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::i64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_i64) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::i64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_i64) -{ - const std::vector input{0xFE, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{-1, -2, 4, 3}; - const element::Type expected_output_type = ngraph::element::i64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(int64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - 
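The byte literals in the low-precision cases above follow from how these tests lay out u1, u4, and i4 data: 4-bit elements are stored two per byte with the first element in the high nibble (i4 nibbles are sign-extended), and 1-bit elements are stored eight per byte starting from the most significant bit. A minimal standalone sketch of that decoding, with illustrative helper names that are not part of this patch:

#include <cassert>
#include <cstdint>
#include <vector>

// Two elements per byte, first element in the high nibble.
std::vector<uint8_t> unpack_u4(const std::vector<uint8_t>& packed, size_t count)
{
    std::vector<uint8_t> out;
    for (size_t i = 0; i < count; ++i)
    {
        const uint8_t byte = packed[i / 2];
        out.push_back(i % 2 == 0 ? (byte >> 4) : (byte & 0x0F));
    }
    return out;
}

// Same layout as u4, but every nibble is sign-extended.
std::vector<int8_t> unpack_i4(const std::vector<uint8_t>& packed, size_t count)
{
    std::vector<int8_t> out;
    for (const uint8_t nibble : unpack_u4(packed, count))
        out.push_back(nibble >= 8 ? static_cast<int8_t>(nibble - 16) : static_cast<int8_t>(nibble));
    return out;
}

// Eight elements per byte, most significant bit first.
std::vector<uint8_t> unpack_u1(const std::vector<uint8_t>& packed, size_t count)
{
    std::vector<uint8_t> out;
    for (size_t i = 0; i < count; ++i)
        out.push_back((packed[i / 8] >> (7 - i % 8)) & 1);
    return out;
}

int main()
{
    // convert_i4_to_i64: 0xFE, 0x43 decode to -1, -2, 4, 3
    assert((unpack_i4({0xFE, 0x43}, 4) == std::vector<int8_t>{-1, -2, 4, 3}));
    // convert_u4_to_i64: 0x21, 0x43 decode to 2, 1, 4, 3
    assert((unpack_u4({0x21, 0x43}, 4) == std::vector<uint8_t>{2, 1, 4, 3}));
    // convert_u1_to_i64: 0x81 decodes to 1, 0, 0, 0, 0, 0, 0, 1
    assert((unpack_u1({0x81}, 8) == std::vector<uint8_t>{1, 0, 0, 0, 0, 0, 0, 1}));
}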
-NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i64) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - const element::Type expected_output_type = ngraph::element::i64; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u1 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u1) -{ - const std::vector input{0xF0}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{0xF0}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u1) -{ - const std::vector input{0x10, 0x01, 0x00, 0x00}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{0x90}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u16; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u32; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u64; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u1) -{ - const std::vector input{0x10, 0x01, 0x00, 0x00}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{0x90}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::i8; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, 
input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::i16; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::i32; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::i64; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::f16; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::bf16; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_u1) -{ - const std::vector input{1, 0, 1, 0, 0, 0, 0, 1}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output{0xA1}; - const element::Type expected_output_type = ngraph::element::u1; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u4 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u4) -{ - const std::vector input{0xF0}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{0x11, 0x11}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u4) -{ - const std::vector input{0x22, 0x33}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{0x22, 0x33}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - 
LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u16; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u32; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u64; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u4) -{ - const std::vector input{0x22, 0x33}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{0x22, 0x33}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i8; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i16; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i32; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i64; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::f16; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, 
input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::bf16; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_u4) -{ - const std::vector input{7, 0, 1, 15}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::f32; - - const std::vector expected_output{0x70, 0x1F}; - const element::Type expected_output_type = ngraph::element::u4; - - LPConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u8 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u8) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::u8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u8) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::u8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u8) -{ - const std::vector input{0x12, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{1, 2, 4, 3}; - const element::Type expected_output_type = ngraph::element::u8; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint8_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - 
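In the packing direction, the u1, u4, and i4 destination cases above appear to use the same layout: each output element keeps only its low 4 bits (or lowest bit) and is written high-nibble first (or most-significant-bit first), which is how expected bytes such as 0xA1, {0x70, 0x1F}, and {0xFE, 0x23} arise. A small sketch under that assumption, again with hypothetical helper names:

#include <cassert>
#include <cstdint>
#include <vector>

// Keep the low 4 bits of each element; the first element goes into the high nibble.
std::vector<uint8_t> pack_4bit(const std::vector<int64_t>& values)
{
    std::vector<uint8_t> out((values.size() + 1) / 2, 0);
    for (size_t i = 0; i < values.size(); ++i)
    {
        const uint8_t nibble = static_cast<uint8_t>(values[i]) & 0x0F;
        out[i / 2] |= (i % 2 == 0) ? (nibble << 4) : nibble;
    }
    return out;
}

// Keep the lowest bit of each element; the first element goes into the most significant bit.
std::vector<uint8_t> pack_1bit(const std::vector<int64_t>& values)
{
    std::vector<uint8_t> out((values.size() + 7) / 8, 0);
    for (size_t i = 0; i < values.size(); ++i)
        out[i / 8] |= (static_cast<uint8_t>(values[i]) & 1) << (7 - i % 8);
    return out;
}

int main()
{
    // convert_*_to_u4: 7, 0, 1, 15 pack into 0x70, 0x1F
    assert((pack_4bit({7, 0, 1, 15}) == std::vector<uint8_t>{0x70, 0x1F}));
    // convert_*_to_i4: -1, -2, 2, 3 pack into 0xFE, 0x23
    assert((pack_4bit({-1, -2, 2, 3}) == std::vector<uint8_t>{0xFE, 0x23}));
    // convert_*_to_u1: 1, 0, 1, 0, 0, 0, 0, 1 pack into 0xA1
    assert((pack_1bit({1, 0, 1, 0, 0, 0, 0, 1}) == std::vector<uint8_t>{0xA1}));
}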
-NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u8) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const element::Type expected_output_type = ngraph::element::u8; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u16 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u16) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::u16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u16) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::u16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u16) -{ - const std::vector input{0x12, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{1, 2, 4, 3}; - const element::Type expected_output_type = ngraph::element::u16; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint16_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u16) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const element::Type expected_output_type = ngraph::element::u16; - - ConvertTest(input, 
input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u32 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u32) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = ngraph::element::u32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u32) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::u32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u32) -{ - const std::vector input{0x12, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{1, 2, 4, 3}; - const element::Type expected_output_type = ngraph::element::u32; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint32_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u32) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const element::Type expected_output_type = ngraph::element::u32; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// destination: u64 -NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_u64) -{ - const std::vector input{0x81}; - const Shape input_shape{8}; - const element::Type input_type = ngraph::element::u1; - - const std::vector expected_output{1, 0, 0, 0, 0, 0, 0, 1}; - const element::Type expected_output_type = 
ngraph::element::u64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_u64) -{ - const std::vector input{0x21, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::u4; - - const std::vector expected_output{2, 1, 4, 3}; - const element::Type expected_output_type = ngraph::element::u64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_u64) -{ - const std::vector input{0x12, 0x43}; - const Shape input_shape{4}; - const element::Type input_type = ngraph::element::i4; - - const std::vector expected_output{1, 2, 4, 3}; - const element::Type expected_output_type = ngraph::element::u64; - - { - const auto f = CreateFunction(input_shape, input_type, expected_output_type); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto input_tesnor = backend->create_tensor(input_type, input_shape); - copy_data(input_tesnor, input); - auto output = backend->create_tensor(expected_output_type, input_shape); - auto handle = backend->compile(f); - handle->call_with_validate({output}, {input_tesnor}); - - std::vector result(expected_output.size()); - output->read(result.data(), result.size() * sizeof(uint64_t)); - EXPECT_TRUE(test::all_close(expected_output, result)); - } -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u64) -{ - const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const Shape input_shape{11}; - const element::Type input_type = ngraph::element::u8; - - const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; - const element::Type expected_output_type = ngraph::element::u64; - - ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); -} - -// jit tests -NGRAPH_TEST(${BACKEND_NAME}, convert_float32_int8) -{ - std::vector f32vec = {-100.5, -20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5, 100.5}; - std::vector result(f32vec.size()); - std::vector i8vec(std::begin(f32vec), std::end(f32vec)); - runtime::reference::convert(f32vec.data(), result.data(), f32vec.size()); - EXPECT_EQ(result, i8vec); -} - -NGRAPH_TEST(${BACKEND_NAME}, convert_fp16_int8) -{ - std::vector f32vec = {-100.5, -20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5, 100.5}; - std::vector f16vec(std::begin(f32vec), std::end(f32vec)); - std::vector i8vec(std::begin(f16vec), std::end(f16vec)); - std::vector result(i8vec.size()); - 
runtime::reference::convert(f16vec.data(), result.data(), f16vec.size()); - EXPECT_EQ(result, i8vec); -} diff --git a/ngraph/test/models/onnx/constant_fill_shape_attribute.prototxt b/ngraph/test/models/onnx/constant_fill_shape_attribute.prototxt index 806f01ffd89ddf..cdbbf99419a241 100644 --- a/ngraph/test/models/onnx/constant_fill_shape_attribute.prototxt +++ b/ngraph/test/models/onnx/constant_fill_shape_attribute.prototxt @@ -2,7 +2,6 @@ ir_version: 7 producer_name: "backend-test" graph { node { - input: "target_shape" output: "output" op_type: "ConstantFill" attribute { diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index aa399c525660c9..7faa42ddee0940 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -929,90 +929,6 @@ roll_3d_input roll_3d_input_negative_shift roll_negative_axes -# convert operation -IE_CPU.convert_u8_to_boolean -IE_CPU.convert_i32_to_boolean -IE_CPU.convert_f32_to_boolean -IE_CPU.convert_u8_to_f16 -IE_CPU.convert_u8_to_i16 -IE_CPU.convert_u8_to_i64 -IE_CPU.convert_u8_to_u16 -IE_CPU.convert_u8_to_u32 -IE_CPU.convert_u8_to_u64 -IE_CPU.convert_f16_to_f32 -IE_CPU.convert_u32_to_f32 -IE_CPU.convert_i4_to_f32 -IE_CPU.convert_u1_to_f32 -IE_CPU.convert_u4_to_f32 - -IE_CPU.convert_u1_to_u1 -IE_CPU.convert_u4_to_u1 -IE_CPU.convert_u8_to_u1 -IE_CPU.convert_u16_to_u1 -IE_CPU.convert_u32_to_u1 -IE_CPU.convert_u64_to_u1 -IE_CPU.convert_i4_to_u1 -IE_CPU.convert_i8_to_u1 -IE_CPU.convert_i16_to_u1 -IE_CPU.convert_i32_to_u1 -IE_CPU.convert_i64_to_u1 -IE_CPU.convert_f16_to_u1 -IE_CPU.convert_bf16_to_u1 -IE_CPU.convert_f32_to_u1 - -IE_CPU.convert_u1_to_i4 -IE_CPU.convert_u4_to_i4 -IE_CPU.convert_u8_to_i4 -IE_CPU.convert_u16_to_i4 -IE_CPU.convert_u32_to_i4 -IE_CPU.convert_u64_to_i4 -IE_CPU.convert_i8_to_i4 -IE_CPU.convert_i16_to_i4 -IE_CPU.convert_i32_to_i4 -IE_CPU.convert_i64_to_i4 -IE_CPU.convert_f16_to_i4 -IE_CPU.convert_bf16_to_i4 -IE_CPU.convert_f32_to_i4 - -IE_CPU.convert_u1_to_u4 -IE_CPU.convert_u4_to_u4 -IE_CPU.convert_u8_to_u4 -IE_CPU.convert_u16_to_u4 -IE_CPU.convert_u32_to_u4 -IE_CPU.convert_u64_to_u4 -IE_CPU.convert_i4_to_u4 -IE_CPU.convert_i8_to_u4 -IE_CPU.convert_i16_to_u4 -IE_CPU.convert_i32_to_u4 -IE_CPU.convert_i64_to_u4 -IE_CPU.convert_f16_to_u4 -IE_CPU.convert_bf16_to_u4 -IE_CPU.convert_f32_to_u4 - -IE_CPU.convert_u1_to_i8 -IE_CPU.convert_u4_to_i8 -IE_CPU.convert_i4_to_i8 -IE_CPU.convert_u1_to_i16 -IE_CPU.convert_u4_to_i16 -IE_CPU.convert_i4_to_i16 -IE_CPU.convert_u1_to_i32 -IE_CPU.convert_u4_to_i32 -IE_CPU.convert_i4_to_i32 -IE_CPU.convert_u1_to_i64 -IE_CPU.convert_u4_to_i64 -IE_CPU.convert_i4_to_i64 -IE_CPU.convert_u1_to_u8 -IE_CPU.convert_u4_to_u8 -IE_CPU.convert_i4_to_u8 -IE_CPU.convert_u1_to_u16 -IE_CPU.convert_u4_to_u16 -IE_CPU.convert_i4_to_u16 -IE_CPU.convert_u1_to_u32 -IE_CPU.convert_u4_to_u32 -IE_CPU.convert_i4_to_u32 -IE_CPU.convert_u1_to_u64 -IE_CPU.convert_u4_to_u64 -IE_CPU.convert_i4_to_u64 #------------------------------------------------------------------------------- # # Inference Engine CPU plugin excludes