From 4e043faba77a8251aa0d6c911963191329e47d11 Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Mon, 15 Jan 2024 16:27:39 +0400 Subject: [PATCH 001/122] Additional API1.0 removal from CPU tests (#22155) * Remove API1.0 usage from CPU subgraph tests * Drop `ParameterResultSubgraphTestLegacyApi` test class --- .../subgraph_tests/parameter_result.cpp | 3 +- .../src/concat_multiple_query_sdp.cpp | 18 ++-- .../src/concat_transpose_sdp_transpose.cpp | 48 +++++----- .../src/param_result_custom_blob.cpp | 87 ------------------- .../subgraph_tests/parameter_result.cpp | 28 ------ .../subgraph_tests/parameter_result.hpp | 10 +-- .../subgraph/parameter_result.hpp | 20 +---- .../src/subgraph/parameter_result.cpp | 16 +--- 8 files changed, 36 insertions(+), 194 deletions(-) delete mode 100644 src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp index a70b3c7bbc3659..357f27eb04a4e3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; using namespace ov::test; namespace { @@ -22,6 +21,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTest, ::testing::Combine(::testing::ValuesIn(inputShapes), ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); + ParameterResultSubgraphTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp index 1c8ad07f8fd549..696cf36225e759 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include "openvino/opsets/opset13.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using InputShapeAndTransposeOrder = std::pair, std::vector>; using ConcatMultiQuerySDPParams = std::tuple(results, sinks, inputParams, "ConcatTranposeSDP"); + function = std::make_shared(results, sinks, inputParams, "ConcatTranposeSDP"); targetDevice = ov::test::utils::DEVICE_CPU; functionRefs = function->clone(); @@ -330,4 +325,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConcatMultiQuerySDPTest, ::testing::Values(true, false)), ConcatMultiQuerySDPTest::getTestCaseName); } // namespace -} // namespace 
SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp index 2eddaa63050855..61ddf873aec6f1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include "openvino/opsets/opset13.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using InputShapeAndTransposeOrder = std::pair, std::vector>; using ConcatSDPTransposeTestParams = std::tuple(inputParams[3], var_v); pastv->set_friendly_name("pastv_r"); - std::shared_ptr pastk_shapeof, pastv_shapeof; + std::shared_ptr pastk_shapeof, pastv_shapeof; if (hasShapeOf) { pastk_shapeof = std::make_shared(pastk); pastv_shapeof = std::make_shared(pastv); @@ -132,10 +127,10 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(ElementType::i32, ov::PartialShape{-1}); beam_idx->set_friendly_name("beam_idx"); inputParams.push_back(beam_idx); - auto gatherK = std::make_shared(pastk, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {0})); - auto gatherV = std::make_shared(pastv, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {0})); - auto concatK = std::make_shared(OutputVector{gatherK, inputParams[1]}, concat_axis); - auto concatV = std::make_shared(OutputVector{gatherV, inputParams[2]}, concat_axis); + auto gatherK = std::make_shared(pastk, beam_idx, ov::op::v0::Constant::create(ElementType::i32, {1}, {0})); + auto gatherV = std::make_shared(pastv, beam_idx, ov::op::v0::Constant::create(ElementType::i32, {1}, {0})); + auto concatK = std::make_shared(ov::OutputVector{gatherK, inputParams[1]}, concat_axis); + auto concatV = std::make_shared(ov::OutputVector{gatherV, inputParams[2]}, concat_axis); auto transposeK = std::make_shared(concatK, preOrder); auto transposeV = std::make_shared(concatV, preOrder); @@ -159,7 +154,7 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(transposeSDP, constReshape, true); // BLHS -> B,L,HxS - auto add = std::make_shared(reshapeSDP, op::v0::Constant::create(inType, {1}, {1.0f})); + auto add = std::make_shared(reshapeSDP, ov::op::v0::Constant::create(inType, {1}, {1.0f})); auto pastk_assign = std::make_shared(concatK, var_k); auto pastv_assign = std::make_shared(concatV, var_v); pastk_assign->set_friendly_name("pastk_w"); @@ -170,12 +165,12 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(results, sinks, inputParams, "ConcatTranposeSDP"); + ov::SinkVector sinks{pastk_assign, pastv_assign}; + function = std::make_shared(results, sinks, inputParams, "ConcatTranposeSDP"); targetDevice = ov::test::utils::DEVICE_CPU; functionRefs = function->clone(); - pass::Manager manager; + 
ov::pass::Manager manager; // decompose ScaledDotProductAttention manager.register_pass(); manager.run_passes(functionRefs); @@ -197,8 +192,8 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface& targetInputStaticShapes) { inputs.clear(); - auto create_input = [this] (std::shared_ptr param, ov::Shape shape, float val) { - if (param->get_element_type() == element::i32) { + auto create_input = [this] (std::shared_ptr param, ov::Shape shape, float val) { + if (param->get_element_type() == ov::element::i32) { ov::Tensor t{ov::element::i32, shape}; auto size = shape[0]; auto* p = static_cast(t.data()); @@ -207,12 +202,12 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterfaceget_element_type() == element::f32) { + } else if (param->get_element_type() == ov::element::f32) { ov::Tensor t{ov::element::f32, shape}; strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); inputs.insert({param, t}); } else { - ASSERT_TRUE(param->get_element_type() == element::bf16); + ASSERT_TRUE(param->get_element_type() == ov::element::bf16); ov::Tensor t{ov::element::bf16, shape}; strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); inputs.insert({param, t}); @@ -336,12 +331,12 @@ class ConcatSDPTransposeTestSetState : public ConcatSDPTransposeTestBase { void new_state(ov::element::Type& type, const ov::Shape& pastKVInitShape) { auto fill = [] (ov::Tensor& t, float val) { auto shape = t.get_shape(); - if (t.get_element_type() == element::f32) { + if (t.get_element_type() == ov::element::f32) { strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); - } else if (t.get_element_type() == element::f16) { + } else if (t.get_element_type() == ov::element::f16) { strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); } else { - ASSERT_TRUE(t.get_element_type() == element::bf16); + ASSERT_TRUE(t.get_element_type() == ov::element::bf16); strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); } }; @@ -437,4 +432,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConcatSDPTransposeTestSetState, ConcatSDPTransposeTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp deleted file mode 100644 index 8b086d0833ecf5..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "common_test_utils/test_constants.hpp" -#include "shared_test_classes/subgraph/parameter_result.hpp" - -using namespace SubgraphTestsDefinitions; - -namespace ov { -namespace test { - -class ParameterResultCustomBlobTest : public ParameterResultSubgraphTestLegacyApi { -protected: - void Infer() override { - constexpr size_t inferIterations = 10lu; - - inferRequest = executableNetwork.CreateInferRequest(); - - auto inputBlob = inputs.front(); - const size_t elementsCount = inputBlob->size(); - for (size_t i = 0; i < inferIterations; ++i) { - ov::test::utils::fill_data_random(inputBlob, 10, 0, 1, i); - auto inputsInfo = cnnNetwork.getInputsInfo().begin()->second; - std::string inputName = cnnNetwork.getInputsInfo().begin()->first; - - std::vector customInpData(elementsCount); - auto inpBlobData = inputBlob->buffer().as(); - std::copy(inpBlobData, inpBlobData + elementsCount, 
customInpData.begin()); - - auto& tensorDesc = inputsInfo->getTensorDesc(); - auto customBlob = InferenceEngine::make_shared_blob(tensorDesc, customInpData.data(), elementsCount); - inferRequest.SetBlob(inputName, customBlob); - - inferRequest.Infer(); - - ParameterResultSubgraphTestLegacyApi::Validate(); - } - } - void Validate() override { - // Do nothing. We call Validate() in the Infer() method - } -}; - -TEST_P(ParameterResultCustomBlobTest, CompareWithRefs) { - // Just to show that it is not possible to set different precisions for inputs and outputs with the same name. - // If it was possible, the input would have I8 precision and couldn't store data from the custom blob. - inPrc = InferenceEngine::Precision::I8; - outPrc = InferenceEngine::Precision::FP32; - - Run(); -} -namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check_Custom_Blob, - ParameterResultCustomBlobTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace - -class ParameterResultSameBlobTest : public ParameterResultSubgraphTestLegacyApi { -protected: - void Infer() override { - constexpr size_t inferIterations = 10lu; - - for (size_t i = 0; i < inferIterations; ++i) { - ParameterResultSubgraphTestLegacyApi::Infer(); - ParameterResultSubgraphTestLegacyApi::Validate(); - } - } - void Validate() override { - // Do nothing. We call Validate() in the Infer() method - } -}; - -TEST_P(ParameterResultSameBlobTest, CompareWithRefs) { - Run(); -} -namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check_Same_Blob, - ParameterResultSameBlobTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace -} // namespace test -} // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp deleted file mode 100644 index c417dc6ce04a2c..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "subgraph_tests/parameter_result.hpp" - -#include - -#include "common_test_utils/test_constants.hpp" - -using namespace SubgraphTestsDefinitions; -using namespace ov::test; - -namespace { - -INSTANTIATE_TEST_SUITE_P(smoke_Check, - ParameterResultSubgraphTestLegacyApi, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Check, - ParameterResultSubgraphTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - -} // namespace diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp index 40123974846ea3..7600b2eac579d7 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp +++ 
b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp @@ -6,18 +6,10 @@ #include "shared_test_classes/subgraph/parameter_result.hpp" -namespace SubgraphTestsDefinitions { - -TEST_P(ParameterResultSubgraphTestLegacyApi, CompareWithRefs) { - Run(); -} - -} // namespace SubgraphTestsDefinitions - namespace ov { namespace test { -TEST_P(ParameterResultSubgraphTest, CompareWithRefs) { +TEST_P(ParameterResultSubgraphTest, Inference) { run(); } diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp index 5384d369b7b725..8bb69e5db0d3f8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp @@ -9,7 +9,6 @@ #include #include -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { @@ -18,28 +17,15 @@ namespace test { using parameterResultParams = std::tuple; // Device name -class ParameterResultSubgraphTestBase : public testing::WithParamInterface { +class ParameterResultSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); -protected: - std::shared_ptr createModel(const ov::PartialShape& shape); -}; - -class ParameterResultSubgraphTest : public ParameterResultSubgraphTestBase, virtual public ov::test::SubgraphBaseTest { protected: void SetUp() override; + std::shared_ptr createModel(const ov::PartialShape& shape); }; } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { - -class ParameterResultSubgraphTestLegacyApi : public ov::test::ParameterResultSubgraphTestBase, - virtual public LayerTestsUtils::LayerTestsCommon { -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp index 1bf29f54c76b1a..d93b83d75388ab 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp @@ -7,7 +7,7 @@ namespace ov { namespace test { -std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::TestParamInfo& obj) { +std::string ParameterResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { ov::test::InputShape inShape; std::string targetDevice; std::tie(inShape, targetDevice) = obj.param; @@ -22,7 +22,7 @@ std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::Test return result.str(); } -std::shared_ptr ParameterResultSubgraphTestBase::createModel(const ov::PartialShape& shape) { +std::shared_ptr ParameterResultSubgraphTest::createModel(const ov::PartialShape& shape) { auto parameter = std::make_shared(ov::element::f32, shape); const ngraph::ResultVector results{std::make_shared(parameter)}; ngraph::ParameterVector params = {parameter}; @@ -41,15 +41,3 @@ void ParameterResultSubgraphTest::SetUp() { } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { -void ParameterResultSubgraphTestLegacyApi::SetUp() { - ov::test::InputShape inShape; - std::tie(inShape, targetDevice) = this->GetParam(); - 
- OPENVINO_ASSERT(inShape.first.is_static()); - - function = createModel(inShape.first); -} - -} // namespace SubgraphTestsDefinitions From 97439818a1064b61638cf12ba8ead86592277e1a Mon Sep 17 00:00:00 2001 From: River Li Date: Mon, 15 Jan 2024 22:07:36 +0800 Subject: [PATCH 002/122] [Core] remove ie_system_conf.hpp (#22160) * remove ie_system_conf.hpp * Fix clang format issue --- .../cross_compiled_disp_gen.cmake | 2 +- .../cross_compile/cross_compiled_func.cmake | 2 +- src/inference/dev_api/ie_system_conf.h | 278 ------------------ src/inference/src/blob_transform.cpp | 10 +- .../tests/functional/task_executor_tests.cpp | 4 +- .../cpu_map_parser/cache_parser_linux.cpp | 5 +- .../unit/cpu_map_parser/freq_parser_linux.cpp | 5 +- .../unit/cpu_map_parser/parser_macos.cpp | 5 +- .../unit/cpu_map_parser/parser_windows.cpp | 5 +- .../unit/cpu_map_parser/valid_proc_check.cpp | 5 +- src/inference/tests/unit/cpu_reserve_test.cpp | 5 +- .../tests/unit/cpu_stream_info_test.cpp | 6 +- .../unit/update_executor_config_test.cpp | 4 +- .../ov_executable_network/properties.cpp | 8 +- .../behavior/plugin/configuration_tests.cpp | 8 +- .../skip_tests_config.cpp | 5 +- .../ov_executable_network/properties.cpp | 9 +- .../behavior/plugin/configuration_tests.cpp | 11 +- .../snippets/matmul.cpp | 3 +- .../snippets/transpose_matmul.cpp | 3 +- .../unit/streams_info/enable_ht_test.cpp | 5 +- .../scheduling_core_type_test.cpp | 5 +- .../unit/streams_info/streams_e2e_test.cpp | 5 +- .../streams_info/streams_info_table_test.cpp | 5 +- 24 files changed, 58 insertions(+), 345 deletions(-) delete mode 100644 src/inference/dev_api/ie_system_conf.h diff --git a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake index 6a92e0a69f420e..da543c910736dd 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake @@ -37,7 +37,7 @@ function(_generate_dispatcher) // !! do not modify it !!! // #include \"${XARCH_API_HEADER}\" -#include \"ie_system_conf.h\" +#include \"openvino/runtime/system_conf.hpp\" ") diff --git a/cmake/developer_package/cross_compile/cross_compiled_func.cmake b/cmake/developer_package/cross_compile/cross_compiled_func.cmake index d82d6a73098b6c..c36cbe6762d9a0 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_func.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_func.cmake @@ -42,7 +42,7 @@ set(DISPATCHER_GEN_OPTIONS_HOLDER ${CMAKE_CURRENT_LIST_DIR}/cross_compiled_disp_ # # Allow to enable multiple cross compilation of source file inside one module # with keeping requirements on minimal instruction set. The CPU check performed -# in runtime via common utils declared in "ie_system_conf.h". +# in runtime via common utils declared in "system_conf.h". 
# # Usage example: # cross_compiled_file( diff --git a/src/inference/dev_api/ie_system_conf.h b/src/inference/dev_api/ie_system_conf.h deleted file mode 100644 index c0d2d81704f432..00000000000000 --- a/src/inference/dev_api/ie_system_conf.h +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Abstraction over platform specific implementations - * @file ie_system_conf.h - */ - -#pragma once - -#include -#include - -#include "openvino/runtime/system_conf.hpp" -#include "openvino/runtime/threading/cpu_streams_info.hpp" - -namespace InferenceEngine { - -/** - * @brief Checks whether OpenMP environment variables are defined - * @ingroup ie_dev_api_system_conf - * - * @param[in] includeOMPNumThreads Indicates if the omp number threads is included - * @return `True` if any OpenMP environment variable is defined, `false` otherwise - */ -inline bool checkOpenMpEnvVars(bool includeOMPNumThreads = true) { - return ov::check_open_mp_env_vars(includeOMPNumThreads); -} - -/** - * @brief Returns available CPU NUMA nodes (on Linux, and Windows [only with TBB], single node is assumed on all - * other OSes) - * @ingroup ie_dev_api_system_conf - * @return NUMA nodes - */ -inline std::vector getAvailableNUMANodes() { - return ov::get_available_numa_nodes(); -} - -/** - * @brief Returns available CPU cores types (on Linux, and Windows) and ONLY with TBB, single core type is assumed - * otherwise - * @ingroup ie_dev_api_system_conf - * @return Vector of core types - */ -inline std::vector getAvailableCoresTypes() { - return ov::get_available_cores_types(); -} - -/** - * @brief Returns number of CPU physical cores on Linux/Windows (which is considered to be more performance - * friendly for servers) (on other OSes it simply relies on the original parallel API of choice, which usually uses the - * logical cores). call function with 'false' to get #phys cores of all types call function with 'true' to get #phys - * 'Big' cores number of 'Little' = 'all' - 'Big' - * @ingroup ie_dev_api_system_conf - * @param[in] bigCoresOnly Additionally limits the number of reported cores to the 'Big' cores only. - * @return Number of physical CPU cores. - */ -inline int getNumberOfCPUCores(bool bigCoresOnly = false) { - return ov::get_number_of_cpu_cores(bigCoresOnly); -} - -/** - * @brief Returns number of CPU logical cores on Linux/Windows (on other OSes it simply relies on the original - * parallel API of choice, which uses the 'all' logical cores). call function with 'false' to get #logical cores of - * all types call function with 'true' to get #logical 'Big' cores number of 'Little' = 'all' - 'Big' - * @ingroup ie_dev_api_system_conf - * @param[in] bigCoresOnly Additionally limits the number of reported cores to the 'Big' cores only. - * @return Number of logical CPU cores. - */ -inline int getNumberOfLogicalCPUCores(bool bigCoresOnly = false) { - return ov::get_number_of_logical_cpu_cores(bigCoresOnly); -} - -/** - * @brief Returns number of blocked CPU cores. Please note that this is a temporary interface for performance - * optimization on a specific platform. May be removed in future release. - * @ingroup ov_dev_api_system_conf - * @return Number of blocked CPU cores. 
- */ -using ov::get_number_of_blocked_cores; - -/** - * @brief Checks whether CPU supports SSE 4.2 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is SSE 4.2 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_sse42; - -/** - * @brief Checks whether CPU supports AVX capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx; - -/** - * @brief Checks whether CPU supports AVX2 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX2 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx2; - -/** - * @brief Checks whether CPU supports AVX2_VNNI capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX2_VNNI instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx2_vnni; - -/** - * @brief Checks whether CPU supports AVX 512 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F (foundation) instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512f; - -/** - * @brief Checks whether CPU supports AVX 512 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F, AVX512BW, AVX512DQ instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core; - -/** - * @brief Checks whether CPU supports AVX 512 VNNI capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F, AVX512BW, AVX512DQ, AVX512_VNNI instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_vnni; - -/** - * @brief Checks whether CPU supports BFloat16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAVX512_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_bfloat16; - -/** - * @brief Checks whether CPU supports fp16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAVX512_FP16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_fp16; - -/** - * @brief Checks whether CPU supports AMX int8 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAMX_INT8 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx_int8; - -/** - * @brief Checks whether CPU supports AMX bf16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAMX_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx_bf16; - -/** - * @brief Checks whether CPU supports AMX capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAMX_INT8 or tAMX_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx; - -/** - * @brief Checks whether CPU mapping Available - * @ingroup ie_dev_api_system_conf - * @return `True` is CPU mapping is available, `false` otherwise - */ -using ov::is_cpu_map_available; - -/** - * @brief Get number of numa nodes - * @ingroup ie_dev_api_system_conf - * @return Number of numa nodes - */ -using ov::get_num_numa_nodes; - -/** - * @brief Get number of sockets - * @ingroup ie_dev_api_system_conf - * @return Number of sockets - */ -using ov::get_num_sockets; - -/** - * @brief Returns number of CPU cores on Linux/Windows - * @ingroup ie_dev_api_system_conf - * @param[in] plugin_task plugin task. - * @return Number of CPU cores with core_type. 
- */ -using ov::get_proc_type_table; - -/** - * @brief Returns original number of CPU cores on Linux/Windows - * @ingroup ie_dev_api_system_conf - * @param[in] plugin_task plugin task. - * @return Number of original CPU cores with core_type. - */ -using ov::get_org_proc_type_table; - -/** - * @brief Get and reserve available cpu ids - * @ingroup ie_dev_api_system_conf - * @param[in] streams_info_table streams information table. - * @param[in] stream_processors processors grouped in stream - * @param[in] cpu_status set cpu status - */ -using ov::reserve_available_cpus; - -/** - * @brief Set flag bit 'Used' of CPU - * @ingroup ie_dev_api_system_conf - * @param[in] cpu_ids cpus in cup_mapping. - * @param[in] used flag bit - */ -using ov::set_cpu_used; - -/** - * @brief Get socket id by current numa node id - * @ingroup ie_dev_api_system_conf - * @param[in] numa_node_id numa node id - * @return socket id - */ -using ov::get_socket_by_numa_node; - -/** - * @brief This enum contains definition of each columns in processor type table which bases on cpu core types. Will - * extend to support other CPU core type like ARM. - * - * The following are two example of processor type table. - * 1. Processor table of two socket CPUs XEON server - * - * ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC - * 96 48 0 48 // Total number of two sockets - * 48 24 0 24 // Number of socket one - * 48 24 0 24 // Number of socket two - * - * 2. Processor table of one socket CPU desktop - * - * ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC - * 32 8 16 8 // Total number of one socket - */ -using ov::ColumnOfProcessorTypeTable; - -/** - * @brief This enum contains definition of each columns in CPU mapping table which use processor id as index. - * - * GROUP_ID is generated according to the following rules. - * 1. If one MAIN_CORE_PROC and one HYPER_THREADING_PROC are based on same Performance-cores, they are in one group. - * 2. If some EFFICIENT_CORE_PROC share one L2 cachle, they are in one group. - * 3. There are no duplicate group IDs in the system - * - * The following is the example of CPU mapping table. - * 1. Four processors of two Pcore - * 2. Four processors of four Ecores shared L2 cache - * - * PROCESSOR_ID | SOCKET_ID | CORE_ID | CORE_TYPE | GROUP_ID | Used - * 0 0 0 3 0 0 - * 1 0 0 1 0 0 - * 2 0 1 3 1 0 - * 3 0 1 1 1 0 - * 4 0 2 2 2 0 - * 5 0 3 2 2 0 - * 6 0 4 2 2 0 - * 7 0 5 2 2 0 - */ -using ov::ColumnOfCPUMappingTable; - -/** - * @brief definition of CPU_MAP_USED_FLAG column in CPU mapping table. 
- */ -using ov::ProcessorUseStatus; - -} // namespace InferenceEngine diff --git a/src/inference/src/blob_transform.cpp b/src/inference/src/blob_transform.cpp index 0ccbe03a388a14..321bef03dc997b 100644 --- a/src/inference/src/blob_transform.cpp +++ b/src/inference/src/blob_transform.cpp @@ -4,7 +4,7 @@ #include "blob_transform.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" #ifdef HAVE_SSE # include "cpu_x86_sse42/blob_transform_sse42.hpp" #endif @@ -53,7 +53,7 @@ static void blob_copy_4d_t(Blob::Ptr src, Blob::Ptr dst) { #ifdef HAVE_SSE if (src->getTensorDesc().getLayout() == NHWC && dst->getTensorDesc().getLayout() == NCHW && C == 3 && - C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && with_cpu_x86_sse42()) { + C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_4d_split_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -84,7 +84,7 @@ static void blob_copy_4d_t(Blob::Ptr src, Blob::Ptr dst) { } if (src->getTensorDesc().getLayout() == NCHW && dst->getTensorDesc().getLayout() == NHWC && C == 3 && - C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && with_cpu_x86_sse42()) { + C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_4d_merge_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -214,7 +214,7 @@ static void blob_copy_5d_t(Blob::Ptr src, Blob::Ptr dst) { #ifdef HAVE_SSE if (src->getTensorDesc().getLayout() == NDHWC && dst->getTensorDesc().getLayout() == NCDHW && C == 3 && - C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && with_cpu_x86_sse42()) { + C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_5d_split_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -251,7 +251,7 @@ static void blob_copy_5d_t(Blob::Ptr src, Blob::Ptr dst) { } if (src->getTensorDesc().getLayout() == NCDHW && dst->getTensorDesc().getLayout() == NDHWC && C == 3 && - C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && with_cpu_x86_sse42()) { + C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_5d_merge_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), diff --git a/src/inference/tests/functional/task_executor_tests.cpp b/src/inference/tests/functional/task_executor_tests.cpp index 0d2396855497dc..a5beb2d027dd96 100644 --- a/src/inference/tests/functional/task_executor_tests.cpp +++ b/src/inference/tests/functional/task_executor_tests.cpp @@ -3,12 +3,12 @@ // #include -#include #include -#include #include +#include "openvino/core/parallel.hpp" +#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index cacd96813a824d..1ac5278fc4f4e9 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git 
a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp index 0609798e9669a4..36c30d67108eb8 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp index 9b550ee9a04f97..47e91b93c26f12 100644 --- a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp +++ b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp b/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp index 5dac715f1550be..1d99069338f761 100644 --- a/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp +++ b/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp b/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp index 11a0481e7c1218..fbd498129db801 100644 --- a/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp +++ b/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_reserve_test.cpp b/src/inference/tests/unit/cpu_reserve_test.cpp index e5fe6b40abdf7b..68b686dd1d0eab 100644 --- a/src/inference/tests/unit/cpu_reserve_test.cpp +++ b/src/inference/tests/unit/cpu_reserve_test.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor_internal.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_stream_info_test.cpp b/src/inference/tests/unit/cpu_stream_info_test.cpp index a11d0544d0b221..4df93f8e1e6bb7 100644 --- a/src/inference/tests/unit/cpu_stream_info_test.cpp +++ b/src/inference/tests/unit/cpu_stream_info_test.cpp @@ -4,10 +4,10 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor_internal.hpp" +#include "openvino/runtime/threading/cpu_streams_info.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/update_executor_config_test.cpp b/src/inference/tests/unit/update_executor_config_test.cpp index a660dfff0597ae..abb3612eb8750d 100644 --- a/src/inference/tests/unit/update_executor_config_test.cpp +++ b/src/inference/tests/unit/update_executor_config_test.cpp @@ -4,9 +4,7 @@ #include -#include - -// 
#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" #include "openvino/runtime/threading/istreams_executor.hpp" #include "os/cpu_map_info.hpp" diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 981e8d66aa48b3..e5f8fa28768bf2 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -4,8 +4,8 @@ #include "behavior/compiled_model/properties.hpp" -#include "ie_system_conf.h" #include "openvino/runtime/properties.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace ov::test::behavior; @@ -23,8 +23,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, #if (defined(__APPLE__) || defined(_WIN32)) auto default_affinity = [] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else if (numaNodes.size() > 1) { @@ -35,7 +35,7 @@ auto default_affinity = [] { }(); #else auto default_affinity = [] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index bad8c61b42cf27..75ff757589ae08 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -5,15 +5,15 @@ #include "behavior/plugin/configuration_tests.hpp" #include "ie_plugin_config.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" using namespace BehaviorTestsDefinitions; namespace { #if (defined(__APPLE__) || defined(_WIN32)) auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return std::string{CONFIG_VALUE(HYBRID_AWARE)}; } else if (numaNodes.size() > 1) { @@ -24,7 +24,7 @@ auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { }()}; #else auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return std::string{CONFIG_VALUE(HYBRID_AWARE)}; } else { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp index bf32bfb031b4b2..f0854294dd5260 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -4,12 +4,11 @@ #include 
"functional_test_utils/skip_tests_config.hpp" -#include - #include #include -#include "ie_parallel.hpp" +#include "openvino/core/parallel.hpp" +#include "openvino/runtime/system_conf.hpp" std::vector disabledTestPatterns() { std::vector retVector{ diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 913315542db85b..c2a2028ff02b92 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -3,8 +3,9 @@ // #include "behavior/compiled_model/properties.hpp" -#include "ie_system_conf.h" + #include "openvino/runtime/properties.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace ov::test::behavior; @@ -33,8 +34,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, #if (defined(__APPLE__) || defined(_WIN32)) auto default_affinity = [] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else if (numaNodes.size() > 1) { @@ -45,7 +46,7 @@ auto default_affinity = [] { }(); #else auto default_affinity = [] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index 5cdbb8fbd7285b..8f3974817871f7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -2,17 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ie_plugin_config.hpp" -#include "ie_system_conf.h" #include "behavior/plugin/configuration_tests.hpp" +#include "ie_plugin_config.hpp" +#include "openvino/runtime/system_conf.hpp" + using namespace BehaviorTestsDefinitions; namespace { #if (defined(__APPLE__) || defined(_WIN32)) auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return std::string{CONFIG_VALUE(HYBRID_AWARE)}; } else if (numaNodes.size() > 1) { @@ -23,7 +24,7 @@ namespace { }()}; #else auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return std::string{CONFIG_VALUE(HYBRID_AWARE)}; } else { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp index 77c78e31ca6b00..dc25378528199c 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp @@ -3,8 +3,9 @@ // #include "snippets/matmul.hpp" + #include "common_test_utils/test_constants.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" namespace ov { namespace test { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp index 437c8c5b97ec0a..ec97a61647b5b1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp @@ -3,8 +3,9 @@ // #include "snippets/transpose_matmul.hpp" + #include "common_test_utils/test_constants.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" namespace ov { namespace test { diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp index f9326d15432ada..a4cb47458116f7 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp index a13271c249fbc7..fcddb92bb91cb2 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp index 246f2f54387c06..8a773d5712bd29 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp index 3b68ef44d693fc..486a284370daf4 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; From ae54734d2811b32a277fe8337842756f07a995b2 Mon Sep 17 00:00:00 
2001 From: Sebastian Golebiewski Date: Mon, 15 Jan 2024 15:32:36 +0100 Subject: [PATCH 003/122] [DOCS] Removal of docs for MxNet Caffe Kaldi (#22137) * Remove mxnet caffe kaldi and speech sample * resolving conflict * model formats notice * Update docs/articles_en/documentation/openvino_legacy_features.rst * Update docs/articles_en/documentation/openvino_legacy_features.rst * Update docs/articles_en/documentation/openvino_legacy_features.rst * Update docs/articles_en/documentation/openvino_legacy_features.rst --------- Co-authored-by: Karol Blaszczak --- .../openvino_legacy_features.rst | 8 +- .../--installing-model-dev-tools.rst | 33 +- .../model_optimizer_faq.rst | 18 +- .../mxnet_caffe_kaldi.rst | 32 - .../mxnet_caffe_kaldi/aspire_tdnn_model.rst | 155 ----- .../convert_gluoncv_models.rst | 52 -- .../convert_model_from_caffe.rst | 120 ---- .../convert_model_from_kaldi.rst | 96 --- .../convert_model_from_mxnet.rst | 84 --- .../convert_style_transfer_from_mxnet.rst | 181 ------ .../installing-openvino-apt.rst | 107 ++-- ...installing-openvino-from-archive-linux.rst | 37 +- .../installing-openvino-yum.rst | 69 +- ...installing-openvino-from-archive-macos.rst | 67 +- .../installing-openvino-conda.rst | 39 +- ...stalling-openvino-from-archive-windows.rst | 69 +- .../learn_openvino/openvino_samples.rst | 5 - .../automatic_speech_recognition.rst | 596 ------------------ .../Device_Plugins/GNA.rst | 12 +- 19 files changed, 215 insertions(+), 1565 deletions(-) delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst index dfedd4ef01dd7b..3ee12f934522b3 100644 --- a/docs/articles_en/documentation/openvino_legacy_features.rst +++ b/docs/articles_en/documentation/openvino_legacy_features.rst @@ -13,7 +13,6 @@ Legacy Features and Components Deploy Application with Deployment Manager OpenVINO API 2.0 transition Open Model ZOO - Apache MXNet, Caffe, and Kaldi Since OpenVINO has grown very rapidly in recent years, some of its features @@ -74,12 +73,9 @@ offering. | **Apache MXNet, Caffe, and Kaldi model formats** | *New solution:* conversion to ONNX via external tools -| *Old solution:* model support will be discontinued with OpenVINO 2024.0 +| *Old solution:* model support discontinued with OpenVINO 2024.0 | -| Since these three model formats proved to be far less popular among OpenVINO users - than the remaining ones, their support has been discontinued. Converting them to the - ONNX format is a possible way of retaining them in the OpenVINO-based pipeline. 
-| :doc:`See the previous conversion instructions ` +| `See the previous conversion instructions `__ | :doc:`See the currently supported frameworks ` diff --git a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst index 55d3494ae789be..a0028a2d7e51de 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst @@ -5,7 +5,7 @@ Install OpenVINO™ Development Tools .. meta:: - :description: Learn how to install OpenVINO™ Development Tools on Windows, + :description: Learn how to install OpenVINO™ Development Tools on Windows, Linux, and macOS operating systems, using a PyPi package. OpenVINO Development Tools is a set of utilities that make it easy to develop and optimize models and applications for OpenVINO. It provides the following tools: @@ -20,9 +20,9 @@ The instructions on this page show how to install OpenVINO Development Tools. If In both cases, Python 3.8 - 3.11 needs to be installed on your machine before starting. -.. note:: +.. note:: - From the 2022.1 release, the OpenVINO™ Development Tools can only be installed via PyPI. + From the 2022.1 release, the OpenVINO™ Development Tools can only be installed via PyPI. .. _python_developers: @@ -30,7 +30,7 @@ For Python Developers ##################### If you are a Python developer, follow the steps in the :ref:`Installing OpenVINO Development Tools ` section on this page to install it. Installing OpenVINO Development Tools will also install OpenVINO Runtime as a dependency, so you don’t need to install OpenVINO Runtime separately. This option is recommended for new users. - + .. _cpp_developers: For C/C++ Developers @@ -64,7 +64,7 @@ Installation in a New Environment +++++++++++++++++++++++++++++++++ If you do not have an environment with a deep learning framework for the input model or you encounter any compatibility issues between OpenVINO -and your version of deep learning framework, you may install OpenVINO Development Tools with validated versions of frameworks into a new environment. +and your version of deep learning framework, you may install OpenVINO Development Tools with validated versions of frameworks into a new environment. Step 1. Set Up Python Virtual Environment ----------------------------------------- @@ -75,19 +75,19 @@ Create a virtual Python environment to avoid dependency conflicts. To create a v .. tab-item:: Windows :sync: windows - + .. code-block:: sh - + python -m venv openvino_env .. tab-item:: Linux and macOS :sync: linux-and-macos - + .. code-block:: sh - + python3 -m venv openvino_env - - + + Step 2. Activate Virtual Environment ------------------------------------ @@ -98,16 +98,16 @@ Activate the newly created Python virtual environment by issuing this command: .. tab-item:: Windows :sync: windows - + .. code-block:: sh - + openvino_env\Scripts\activate .. tab-item:: Linux and macOS :sync: linux-and-macos .. code-block:: sh - + source openvino_env/bin/activate .. important:: @@ -138,7 +138,7 @@ To install and configure the components of the development package together with where the ``extras`` parameter specifies the source deep learning framework for the input model -and is one or more of the following values separated with "," : ``caffe``, ``kaldi``, ``mxnet``, ``onnx``, ``pytorch``, ``tensorflow``, ``tensorflow2``. 
+and is one or more of the following values separated with "," : ``caffe``, ``kaldi``, ``mxnet``, ``onnx``, ``pytorch``, ``tensorflow``, ``tensorflow2``. For example, to install and configure dependencies required for working with TensorFlow 2.x and ONNX models, use the following command: @@ -147,7 +147,7 @@ For example, to install and configure dependencies required for working with Ten pip install openvino-dev[tensorflow2,onnx] -.. note:: +.. note:: Model conversion API support for TensorFlow 1.x environment has been deprecated. Use the ``tensorflow2`` parameter to install a TensorFlow 2.x environment that can convert both TensorFlow 1.x and 2.x models. If your model isn't compatible with the TensorFlow 2.x environment, use the `tensorflow` parameter to install the TensorFlow 1.x environment. The TF 1.x environment is provided only for legacy compatibility reasons. @@ -197,7 +197,6 @@ Try the :doc:`C++ Quick Start Example ` page for other C++ example applications to get you started with OpenVINO, such as: * :doc:`Basic object detection with the Hello Reshape SSD C++ sample ` -* :doc:`Automatic speech recognition C++ sample ` Learn OpenVINO Development Tools ++++++++++++++++++++++++++++++++ diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst index 0138307a04aec6..030cd6c7dbbdf2 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst @@ -8,8 +8,8 @@ All of the issues below refer to :doc:`legacy functionalities `. -If your question is not covered by the topics below, use the -`OpenVINO Support page `__, +If your question is not covered by the topics below, use the +`OpenVINO Support page `__, where you can participate in a free forum discussion. .. warning:: @@ -82,7 +82,7 @@ Q3. What does the message "[ ERROR ]: Unable to create ports for node with id" m **A:** Most likely, Model Optimizer does not know how to infer output shapes of some layers in the given topology. To lessen the scope, compile the list of layers that are custom for Model Optimizer: present in the topology, -absent in the :doc:`list of supported operations ` for the target framework. +absent in the :doc:`list of supported operations ` for the target framework. Then, refer to available options in the corresponding section in the :doc:`[Legacy] Custom Layers in Model Optimizer ` page. .. _question-7: @@ -255,7 +255,7 @@ Q16. What does the message "Input shape is required to convert MXNet model. Plea Q19. What does the message "Both --scale and --scale_values are defined. Specify either scale factor or scale values per input channels" mean? ##################################################################################################################################################### -**A:** The ``--scale`` option sets a scaling factor for all channels, while ``--scale_values`` sets a scaling factor per each channel. Using both of them simultaneously produces ambiguity, so you must use only one of them. For more information, refer to the **Using Framework-Agnostic Conversion Parameters** section: for :doc:`Converting a Caffe Model `, :doc:`Converting a TensorFlow Model `, :doc:`Converting an MXNet Model `. 

.. _question-20:

@@ -547,7 +547,7 @@ Keep in mind that there is no space between and inside the brackets for input sh
Q58. What does the message "Please provide input layer names for input layer shapes" mean?
#####################################################################################################################################################

-**A:** When specifying input shapes for several layers, you must provide names for inputs, whose shapes will be overwritten. For usage examples, see the :doc:`Converting a Caffe Model `. Additional information for ``--input_shape`` is in FAQ :ref:`#56 `.
+**A:** When specifying input shapes for several layers, you must provide names for the inputs whose shapes will be overwritten. Additional information for ``--input_shape`` is in FAQ :ref:`#56 `.

.. _question-59:

@@ -582,14 +582,14 @@ Q62. What does the message "You should specify input for each scale value" mean?
Q63. What does the message "Number of inputs and mean values does not match" mean?
#####################################################################################################################################################

-**A:** The number of specified mean values and the number of inputs must be equal. For a usage example, refer to the :doc:`Converting a Caffe Model ` guide.
+**A:** The number of specified mean values and the number of inputs must be equal.

.. _question-64:

Q64. What does the message "Number of inputs and scale values does not match" mean?
#####################################################################################################################################################

-**A:** The number of specified scale values and the number of inputs must be equal. For a usage example, refer to the :doc:`Converting a Caffe Model ` guide.
+**A:** The number of specified scale values and the number of inputs must be equal.

.. _question-65:

@@ -715,7 +715,6 @@ Topology description (``.json`` file) should be prepared (merged) in advance and
If you add additional layers and weights that are in ``.nd`` files to your model, Model Optimizer can build a model from one ``.params`` file and two additional ``.nd`` files (``*_args.nd``, ``*_auxs.nd``).
To do that, provide both CLI options or do not pass them if you want to convert an MXNet model without additional weights.
-For more information, refer to the :doc:`Converting an MXNet Model ` guide.

.. _question-82:

@@ -739,7 +738,6 @@ Q84. What does the message "Specified input json ... does not exist" mean?
#####################################################################################################################################################

**A:** Most likely, ``.json`` file does not exist or has a name that does not match the notation of Apache MXNet. Make sure the file exists and has a correct name.
-For more information, refer to the :doc:`Converting an MXNet Model ` guide.

.. _question-85:

@@ -747,8 +745,6 @@ Q85. What does the message "Unsupported Input model file type ... 
Model Optimize ##################################################################################################################################################### **A:** Model Optimizer for Apache MXNet supports only ``.params`` and ``.nd`` files formats. Most likely, you specified an unsupported file format in ``--input_model``. -For more information, refer to :doc:`Converting an MXNet Model `. - .. _question-86: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst deleted file mode 100644 index c7e0a215e11160..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. {#mxnet_caffe_kaldi} - -MX Net, Caffe, and Kaldi model formats -====================================== - - - -.. toctree:: - :maxdepth: 1 - :hidden: - - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Caffe - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi - openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_GluonCV_Models - openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet - openvino_docs_MO_DG_prepare_model_convert_model_kaldi_specific_Aspire_Tdnn_Model - - -The following articles present the deprecated conversion method for MX Net, Caffe, -and Kaldi model formats. - -:doc:`Apache MX Net conversion ` -:doc:`Caffe conversion ` -:doc:`Kaldi conversion ` - -Here are three examples of conversion for particular models. -:doc:`MXNet GluonCV conversion ` -:doc:`MXNet Style Transfer Model conversion ` -:doc:`Kaldi ASpIRE Chain TDNN Model conversion ` - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst deleted file mode 100644 index 0b55973e6a2b57..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst +++ /dev/null @@ -1,155 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_kaldi_specific_Aspire_Tdnn_Model} - -Converting a Kaldi ASpIRE Chain Time Delay Neural Network (TDNN) Model -====================================================================== - - -.. meta:: - :description: Learn how to convert an ASpIRE Chain TDNN - model from Kaldi to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Kaldi is currently being deprecated and will be removed entirely in the future. - -At the beginning, you should `download a pre-trained model `__ -for the ASpIRE Chain Time Delay Neural Network (TDNN) from the Kaldi project official website. - -Converting an ASpIRE Chain TDNN Model to IR -########################################### - -Generate the Intermediate Representation of the model by running model conversion with the following parameters: - -.. code-block:: sh - - mo --input_model exp/chain/tdnn_7b/final.mdl --output output - - -The IR will have two inputs: ``input`` for data, and ``ivector`` for ivectors. - -Example: Running ASpIRE Chain TDNN Model with the Speech Recognition Sample -########################################################################### - -.. note:: - - Before you continue with this part of the article, get familiar with the - :doc:`Speech Recognition sample `. 
-
-In this example, the input data contains one utterance from one speaker.
-
-To run the ASpIRE Chain TDNN Model with the Speech Recognition sample, you need to prepare the environment by following the steps below:
-
-1. Download a `Kaldi repository `__.
-2. Build it by following instructions in ``README.md`` from the repository.
-3. Download the `model archive `__ from the Kaldi website.
-4. Extract the downloaded model archive to the ``egs/aspire/s5`` folder of the Kaldi repository.
-
-Once everything has been prepared, you can proceed with the actual run:
-
-1. Prepare the model for decoding. Refer to the ``README.txt`` file from the downloaded model archive for instructions.
-2. Convert data and ivectors to ``.ark`` format. Refer to the corresponding sections below for instructions.
-
-Preparing Data
-++++++++++++++++++++
-
-If you have a ``.wav`` data file, convert it to the ``.ark`` format using the following command:
-
-.. code-block:: sh
-
-   /src/featbin/compute-mfcc-feats --config=/egs/aspire/s5/conf/mfcc_hires.conf scp:./wav.scp ark,scp:feats.ark,feats.scp
-
-
-Add the ``feats.ark`` absolute path to ``feats.scp`` to avoid errors in later commands.
-
-Preparing Ivectors
-++++++++++++++++++++
-
-Prepare ivectors for the Speech Recognition sample:
-
-1. Copy the ``feats.scp`` file to the ``egs/aspire/s5/`` directory of the built Kaldi repository and navigate there:
-
-   .. code-block:: sh
-
-      cp feats.scp /egs/aspire/s5/
-      cd /egs/aspire/s5/
-
-
-2. Extract ivectors from the data:
-
-   .. code-block:: sh
-
-      ./steps/online/nnet2/extract_ivectors_online.sh --nj 1 --ivector_period  exp/tdnn_7b_chain_online/ivector_extractor  
-
-
-   You can simplify the preparation of ivectors for the Speech Recognition sample. To do it, specify the maximum number of frames in utterances as a parameter for ``--ivector_period`` to get only one ivector per utterance.
-
-   To get the maximum number of frames in utterances, use the following command line:
-
-   .. code-block:: sh
-
-      ../../../src/featbin/feat-to-len scp:feats.scp ark,t: | cut -d' ' -f 2 - | sort -rn | head -1
-
-
-   As a result, you will find the ``ivector_online.1.ark`` file in ````.
-
-3. Go to the ````:
-
-   .. code-block:: sh
-
-      cd 
-
-
-4. Convert the ``ivector_online.1.ark`` file to text format, using the ``copy-feats`` tool. Run the following command:
-
-   .. code-block:: sh
-
-      /src/featbin/copy-feats --binary=False ark:ivector_online.1.ark ark,t:ivector_online.1.ark.txt
-
-
-5. For the Speech Recognition sample, the ``.ark`` file must contain an ivector for each frame. Copy the ivector ``frame_count`` times by running the script below in the Python command prompt:
-
-   .. code-block:: py
-      :force:
-
-      import subprocess
-
-      # Get the number of frames for the utterance; it is written to feats_length.txt.
-      subprocess.run(["/src/featbin/feat-to-len", "scp:/egs/aspire/s5/feats.scp", "ark,t:feats_length.txt"])
-
-      f = open("ivector_online.1.ark.txt", "r")
-      g = open("ivector_online_ie.ark.txt", "w")
-      length_file = open("feats_length.txt", "r")
-      for line in f:
-          if "[" not in line:
-              # Data line: strip the closing bracket and repeat the ivector once per frame.
-              for i in range(frame_count):
-                  line = line.replace("]", " ")
-                  g.write(line)
-          else:
-              # Header line (contains "["): copy it and read the frame count; the header
-              # always precedes the data lines, so frame_count is set before it is used.
-              g.write(line)
-              frame_count = int(length_file.read().split(" ")[1])
-      g.write("]")
-      f.close()
-      g.close()
-      length_file.close()
-
-
-6. Create an ``.ark`` file from ``.txt``:
-
-   .. code-block:: sh
-
-      /src/featbin/copy-feats --binary=True ark,t:ivector_online_ie.ark.txt ark:ivector_online_ie.ark
-
-
-Running the Speech Recognition Sample
-+++++++++++++++++++++++++++++++++++++
-
-Run the Speech Recognition sample with the created ivector ``.ark`` file:
-
-.. 
code-block:: sh
-
-   speech_sample -i feats.ark,ivector_online_ie.ark -m final.xml -d CPU -o prediction.ark -cw_l 17 -cw_r 12
-
-
-Results can be decoded as described in "Use of Sample in Kaldi Speech Recognition Pipeline"
-in the :doc:`Speech Recognition Sample description ` article.
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst
deleted file mode 100644
index 2a00cc9016b525..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-.. {#openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_GluonCV_Models}
-
-Converting MXNet GluonCV Models
-===============================
-
-
-.. meta::
-   :description: Learn how to convert GluonCV models
-                 from MXNet to the OpenVINO Intermediate Representation.
-
-
-.. warning::
-
-   Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future.
-
-This article provides the instructions and examples on how to convert `GluonCV SSD and YOLO-v3 models `__ to IR.
-
-1. Choose the topology available from the `GluonCV Model Zoo `__ and export to the MXNet format using the GluonCV API. For example, for the ``ssd_512_mobilenet1.0`` topology:
-
-   .. code-block:: py
-      :force:
-
-      from gluoncv import model_zoo, data, utils
-      from gluoncv.utils import export_block
-      net = model_zoo.get_model('ssd_512_mobilenet1.0_voc', pretrained=True)
-      export_block('ssd_512_mobilenet1.0_voc', net, preprocess=True, layout='HWC')
-
-   As a result, you will get an MXNet model representation in ``ssd_512_mobilenet1.0.params`` and ``ssd_512_mobilenet1.0.json`` files generated in the current directory.
-
-2. Run model conversion API, specifying the ``enable_ssd_gluoncv`` option. Make sure the ``input_shape`` parameter is set to the input shape layout of your model (NHWC or NCHW). The examples below illustrate running model conversion for the SSD and YOLO-v3 models trained with the NHWC layout and located in the ````:
-
-   * **For GluonCV SSD topologies:**
-
-     .. code-block:: sh
-
-        mo --input_model /ssd_512_mobilenet1.0.params --enable_ssd_gluoncv --input_shape [1,512,512,3] --input data --output_dir 
-
-   * **For YOLO-v3 topology:**
-
-     * To convert the model:
-
-       .. code-block:: sh
-
-          mo --input_model /yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --output_dir 
-
-     * To convert the model with replacing the subgraph with RegionYolo layers:
-
-       .. code-block:: sh
-
-          mo --input_model /models/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --transformations_config "front/mxnet/yolo_v3_mobilenet1_voc.json" --output_dir 
-
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst
deleted file mode 100644
index 138fe6de0a81f9..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst
+++ /dev/null
@@ -1,120 +0,0 @@
-.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Caffe}
-
-Converting a Caffe Model
-========================
-
-
-.. meta::
-   :description: Learn how to convert a model from the
-                 Caffe format to the OpenVINO Intermediate Representation.
-
-
-.. 
warning:: - - Note that OpenVINO support for Caffe is currently being deprecated and will be removed entirely in the future. - -To convert a Caffe model, run ``mo`` with the path to the input model ``.caffemodel`` file: - -.. code-block:: cpp - - mo --input_model .caffemodel - - -The following list provides the Caffe-specific parameters. - -.. code-block:: cpp - - Caffe-specific parameters: - --input_proto INPUT_PROTO, -d INPUT_PROTO - Deploy-ready prototxt file that contains a topology - structure and layer attributes - --caffe_parser_path CAFFE_PARSER_PATH - Path to python Caffe parser generated from caffe.proto - -k K Path to CustomLayersMapping.xml to register custom - layers - --disable_omitting_optional - Disable omitting optional attributes to be used for - custom layers. Use this option if you want to transfer - all attributes of a custom layer to IR. Default - behavior is to transfer the attributes with default - values and the attributes defined by the user to IR. - --enable_flattening_nested_params - Enable flattening optional params to be used for - custom layers. Use this option if you want to transfer - attributes of a custom layer to IR with flattened - nested parameters. Default behavior is to transfer the - attributes without flattening nested parameters. - - -CLI Examples Using Caffe-Specific Parameters -++++++++++++++++++++++++++++++++++++++++++++ - -* Launching model conversion for `bvlc_alexnet.caffemodel `__ with a specified `prototxt` file. This is needed when the name of the Caffe model and the `.prototxt` file are different or are placed in different directories. Otherwise, it is enough to provide only the path to the input `model.caffemodel` file. - - .. code-block:: cpp - - mo --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt - -* Launching model conversion for `bvlc_alexnet.caffemodel `__ with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe system on the computer. Example of ``CustomLayersMapping.xml`` can be found in ``/mo/front/caffe/CustomLayersMapping.xml.example``. The optional parameters without default values and not specified by the user in the ``.prototxt`` file are removed from the Intermediate Representation, and nested parameters are flattened: - - .. code-block:: cpp - - mo --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params - - This example shows a multi-input model with input layers: ``data``, ``rois`` - - .. code-block:: cpp - - layer { - name: "data" - type: "Input" - top: "data" - input_param { - shape { dim: 1 dim: 3 dim: 224 dim: 224 } - } - } - layer { - name: "rois" - type: "Input" - top: "rois" - input_param { - shape { dim: 1 dim: 5 dim: 1 dim: 1 } - } - } - -* Launching model conversion for a multi-input model with two inputs and providing a new shape for each input in the order they are passed to the model conversion API. In particular, for data, set the shape to ``1,3,227,227``. For rois, set the shape to ``1,6,1,1``: - - .. code-block:: cpp - - mo --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),[1,6,1,1] - -Custom Layer Definition -######################## - -For the definition of custom layers, refer to the :doc:`Cutting Off Parts of a Model ` page. 
-
-Supported Caffe Layers
-#######################
-
-For the list of supported standard layers, refer to the :doc:`Supported Operations ` page.
-
-Frequently Asked Questions (FAQ)
-################################
-
-Model conversion API provides explanatory messages when it is unable to complete conversions due to typographical errors, incorrectly used options, or other issues. A message describes the potential cause of the problem and gives a link to :doc:`Model Optimizer FAQ ` which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in :doc:`Convert a Model ` to help you understand what went wrong.
-
-Summary
-#######
-
-In this document, you learned:
-
-* Basic information about how model conversion works with Caffe models.
-* Which Caffe models are supported.
-* How to convert a trained Caffe model by using model conversion API with both framework-agnostic and Caffe-specific command-line parameters.
-
-Additional Resources
-####################
-
-See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific Caffe models.
-
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst
deleted file mode 100644
index 7f8fc3856d960a..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi}
-
-Converting a Kaldi Model
-========================
-
-
-.. meta::
-   :description: Learn how to convert a model from the
-                 Kaldi format to the OpenVINO Intermediate Representation.
-
-
-.. warning::
-
-   Note that OpenVINO support for Kaldi is currently being deprecated and will be removed entirely in the future.
-
-.. note::
-
-   Model conversion API supports the `nnet1 `__ and `nnet2 `__ formats of Kaldi models. The support of the `nnet3 `__ format is limited.
-
-To convert a Kaldi model, run model conversion with the path to the input model ``.nnet`` or ``.mdl`` file:
-
-.. code-block:: cpp
-
-   mo --input_model .nnet
-
-Using Kaldi-Specific Conversion Parameters
-##########################################
-
-The following list provides the Kaldi-specific parameters.
-
-.. code-block:: cpp
-
-   Kaldi-specific parameters:
-     --counts COUNTS       A file name with full path to the counts file or empty string to utilize count values from the model file
-     --remove_output_softmax
-                           Removes the Softmax that is the output layer
-     --remove_memory       Remove the Memory layer and add new inputs and outputs instead
-
-Examples of CLI Commands
-########################
-
-* To launch model conversion for the ``wsj_dnn5b_smbr`` model with the specified ``.nnet`` file:
-
-  .. code-block:: cpp
-
-     mo --input_model wsj_dnn5b_smbr.nnet
-
-* To launch model conversion for the ``wsj_dnn5b_smbr`` model with the existing file that contains counts for the last layer with biases:
-
-  .. code-block:: cpp
-
-     mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts
-
-
-  * The model conversion normalizes counts in the following way:
-
-    .. math::
-
-       S = \frac{1}{\sum_{j = 0}^{|C|}C_{j}}
-
-    .. 
math:: - - C_{i}=log(S*C_{i}) - - where :math:`C` - the counts array, :math:`C_{i} - i^{th}` element of the counts array, :math:`|C|` - number of elements in the counts array; - - * The normalized counts are subtracted from biases of the last or next to last layer (if last layer is SoftMax). - - .. note:: Model conversion API will show a warning if a model contains values of counts and the ``counts`` option is not used. - -* If you want to remove the last SoftMax layer in the topology, launch the model conversion with the ``remove_output_softmax`` flag: - - .. code-block:: cpp - - mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --remove_output_softmax - - Model conversion API finds the last layer of the topology and removes this layer only if it is a SoftMax layer. - - .. note:: Model conversion can remove SoftMax layer only if the topology has one output. - -* You can use the *OpenVINO Speech Recognition* sample application for the sample inference of Kaldi models. This sample supports models with only one output. If your model has several outputs, specify the desired one with the ``output`` option. - -Supported Kaldi Layers -###################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations ` page. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific Kaldi models. Here are some examples: - -* :doc:`Convert Kaldi ASpIRE Chain Time Delay Neural Network (TDNN) Model ` - - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst deleted file mode 100644 index c7b0a02d4c0458..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet} - -Converting an MXNet Model -========================= - - -.. meta:: - :description: Learn how to convert a model from the - MXNet format to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future. - -To convert an MXNet model, run Model Optimizer with the path to the ``.params`` file of the input model: - -.. code-block:: sh - - mo --input_model model-file-0000.params - - -Using MXNet-Specific Conversion Parameters -########################################## - -The following list provides the MXNet-specific parameters. - -.. code-block:: sh - - MXNet-specific parameters: - --input_symbol - Symbol file (for example, "model-symbol.json") that contains a topology structure and layer attributes - --nd_prefix_name - Prefix name for args.nd and argx.nd files - --pretrained_model_name - Name of a pre-trained MXNet model without extension and epoch - number. This model will be merged with args.nd and argx.nd - files - --save_params_from_nd - Enable saving built parameters file from .nd files - --legacy_mxnet_model - Enable Apache MXNet loader to make a model compatible with the latest Apache MXNet version. - Use only if your model was trained with Apache MXNet version lower than 1.0.0 - --enable_ssd_gluoncv - Enable transformation for converting the gluoncv ssd topologies. 
- Use only if your topology is one of ssd gluoncv topologies - - -.. note:: - - By default, model conversion API does not use the Apache MXNet loader. It transforms the topology to another format which is compatible with the latest version of Apache MXNet. However, the Apache MXNet loader is required for models trained with lower version of Apache MXNet. If your model was trained with an Apache MXNet version lower than 1.0.0, specify the ``--legacy_mxnet_model`` key to enable the Apache MXNet loader. Note that the loader does not support models with custom layers. In this case, you must manually recompile Apache MXNet with custom layers and install it in your environment. - -Custom Layer Definition -####################### - -For the definition of custom layers, refer to the :doc:`Cutting Off Parts of a Model ` page. - -Supported MXNet Layers -####################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations ` page. - -Frequently Asked Questions (FAQ) -################################ - -Model conversion API provides explanatory messages when it is unable to complete conversions due to typographical errors, incorrectly used options, or other issues. A message describes the potential cause of the problem and gives a link to :doc:`Model Optimizer FAQ ` which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in :doc:`Convert a Model ` to help you understand what went wrong. - -Summary -######## - -In this document, you learned: - -* Basic information about how model conversion API works with MXNet models. -* Which MXNet models are supported. -* How to convert a trained MXNet model by using model conversion API with both framework-agnostic and MXNet-specific command-line parameters. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific MXNet models. Here are some examples: - -* :doc:`Convert MXNet GluonCV Model ` -* :doc:`Convert MXNet Style Transfer Model ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst deleted file mode 100644 index 4afe62791563ea..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst +++ /dev/null @@ -1,181 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet} - -Converting an MXNet Style Transfer Model -======================================== - - -.. meta:: - :description: Learn how to convert a Style Transfer - model from MXNet to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future. - -This article provides instructions on how to generate a model for style transfer, using the public MXNet neural style transfer sample. - -**Step 1**: Download or clone the repository `Zhaw's Neural Style Transfer repository `__ with an MXNet neural style transfer sample. - -**Step 2**: Prepare the environment required to work with the cloned repository: - -.. note:: - - Python-tk installation is needed only for Linux. Python for Windows includes it by default. - - -1. Install packages dependency. - - .. 
code-block:: sh - - sudo apt-get install python-tk - - -2. Install Python requirements: - - .. code-block:: sh - - pip3 install --user mxnet - pip3 install --user matplotlib - pip3 install --user scikit-image - - -**Step 3**: Download the pre-trained `VGG19 model `__ and save it to the root directory of the cloned repository. The sample expects the model ``vgg19.params`` file to be in that directory. - -**Step 4**: Modify source code files of style transfer sample from the cloned repository: - -1. Go to the ``fast_mrf_cnn`` subdirectory. - - .. code-block:: sh - - cd ./fast_mrf_cnn - - -2. Open the ``symbol.py`` file and modify the ``decoder_symbol()`` function. You should see the following code there: - - .. code-block:: py - - def decoder_symbol(): - data = mx.sym.Variable('data') - data = mx.sym.Convolution(data=data, num_filter=256, kernel=(3,3), pad=(1,1), stride=(1, 1), name='deco_conv1') - - - Replace the code above with the following: - - .. code-block:: py - - def decoder_symbol_with_vgg(vgg_symbol): - data = mx.sym.Convolution(data=vgg_symbol, num_filter=256, kernel=(3,3), pad=(1,1), stride=(1, 1), name='deco_conv1') - - -3. Save and close the ``symbol.py`` file. - -4. Open and edit the ``make_image.py`` file. Go to the ``__init__()`` function in the ``Maker`` class: - - .. code-block:: py - - decoder = symbol.decoder_symbol() - - - Modify it with the following code: - - .. code-block:: py - - decoder = symbol.decoder_symbol_with_vgg(vgg_symbol) - - -5. To join the pre-trained weights with the decoder weights, make the following changes: - After the code lines for loading the decoder weights: - - .. code-block:: py - - args = mx.nd.load('%s_decoder_args.nd'%model_prefix) - auxs = mx.nd.load('%s_decoder_auxs.nd'%model_prefix) - - - Add the following line: - - .. code-block:: py - - arg_dict.update(args) - - -6. Use ``arg_dict`` instead of ``args`` as a parameter of the ``decoder.bind()`` function. Find the line below: - - .. code-block:: py - - self.deco_executor = decoder.bind(ctx=mx.gpu(), args=args, aux_states=auxs) - - - Replace it with the following: - - .. code-block:: py - - self.deco_executor = decoder.bind(ctx=mx.cpu(), args=arg_dict, aux_states=auxs) - - -7. Add the following code to the end of the ``generate()`` function in the ``Maker`` class to save the result model as a ``.json`` file: - - .. code-block:: py - - self.vgg_executor._symbol.save('{}-symbol.json'.format('vgg19')) - self.deco_executor._symbol.save('{}-symbol.json'.format('nst_vgg19')) - - -8. Save and close the ``make_image.py`` file. - -**Step 5**: Follow the instructions from the ``README.md`` file in the ``fast_mrf_cnn`` directory of the cloned repository and run the sample with a decoder model. -For example, use the following code to run the sample with the pre-trained decoder weights from the ``models`` folder and output shape: - -.. code-block:: py - - import make_image - maker = make_image.Maker('models/13', (1024, 768)) - maker.generate('output.jpg', '../images/tubingen.jpg') - - -The ``models/13`` string in the code above is composed of the following substrings: - -* ``models/`` -- path to the folder that contains ``.nd`` files with pre-trained styles weights. -* ``13`` -- prefix pointing to the default decoder for the repository, ``13_decoder``. - -.. note:: - - If an error prompts with "No module named ``cPickle``", try running the script from Step 5 in Python 2. After that return to Python 3 for the remaining steps. - -Any style can be selected from `collection of pre-trained weights `__. 
On the Chinese-language page, click the down arrow next to a size in megabytes. Then wait for an overlay box to appear, and click the blue button in it to download. The ``generate()`` function generates ``nst_vgg19-symbol.json`` and ``vgg19-symbol.json`` files for the specified shape. In the code, it is ``[1024 x 768]`` for a 4:3 ratio. You can specify another, for example, ``[224,224]`` for a square ratio. - -**Step 6**: Run model conversion to generate an Intermediate Representation (IR): - -1. Create a new directory. For example: - - .. code-block:: sh - - mkdir nst_model - - -2. Copy the initial and generated model files to the created directory. For example, to copy the pre-trained decoder weights from the ``models`` folder to the ``nst_model`` directory, run the following commands: - - .. code-block:: sh - - cp nst_vgg19-symbol.json nst_model - cp vgg19-symbol.json nst_model - cp ../vgg19.params nst_model/vgg19-0000.params - cp models/13_decoder_args.nd nst_model - cp models/13_decoder_auxs.nd nst_model - - - .. note:: - - Make sure that all the ``.params`` and ``.json`` files are in the same directory as the ``.nd`` files. Otherwise, the conversion process fails. - - -3. Run model conversion for Apache MXNet. Use the ``--nd_prefix_name`` option to specify the decoder prefix and ``input_shape`` to specify input shapes in ``[N,C,W,H]`` order. For example: - - .. code-block:: sh - - mo --input_symbol /nst_vgg19-symbol.json --framework mxnet --output_dir --input_shape [1,3,224,224] --nd_prefix_name 13_decoder --pretrained_model /vgg19-0000.params - - -4. The IR is generated (``.bin``, ``.xml`` and ``.mapping`` files) in the specified output directory, and ready to be consumed by the OpenVINO Runtime. - diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst index d5360a92bb7813..b1b4c7fe8600e8 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst @@ -5,40 +5,40 @@ Install Intel® Distribution of OpenVINO™ Toolkit for Linux Using APT Reposito .. meta:: - :description: Learn how to install OpenVINO™ Runtime on the Linux operating + :description: Learn how to install OpenVINO™ Runtime on the Linux operating system, using the APT repository. .. note:: - + Note that the APT distribution: * offers both C/C++ and Python APIs * does not offer support for GNA and NPU inference * is dedicated to Linux users only - * additionally includes code samples + * additionally includes code samples .. tab-set:: .. tab-item:: System Requirements :sync: system-requirements - + | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ - + .. 
tab-item:: Software Requirements :sync: software-requirements - + * `CMake 3.13 or higher, 64-bit `__ * GCC 7.5.0 (for Ubuntu 18.04), GCC 9.3.0 (for Ubuntu 20.04) or GCC 11.3.0 (for Ubuntu 22.04) * `Python 3.8 - 3.11, 64-bit `__ - + Installing OpenVINO Runtime ####################################### @@ -71,30 +71,30 @@ Step 1: Set Up the OpenVINO Toolkit APT Repository sudo apt-get install gnupg 2. Add the repository via the following command: - + .. tab-set:: .. tab-item:: Ubuntu 22 :sync: ubuntu-22 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu22 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + .. tab-item:: Ubuntu 20 :sync: ubuntu-20 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + .. tab-item:: Ubuntu 18 :sync: ubuntu-18 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu18 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + 3. Update the list of packages via the update command: @@ -121,38 +121,38 @@ Step 2: Install OpenVINO Runtime Using the APT Package Manager .. tab-item:: The Latest Version :sync: latest-version - + Run the following command: - + .. code-block:: sh - + sudo apt install openvino - - + + .. tab-item:: A Specific Version :sync: specific-version - + #. Get a list of OpenVINO packages available for installation: - + .. code-block:: sh - + sudo apt-cache search openvino - + #. Install a specific version of an OpenVINO package: - + .. code-block:: sh - + sudo apt install openvino-.. - + For example: - + .. code-block:: sh - + sudo apt install openvino-2023.2.0 - + .. note:: - You can use ``--no-install-recommends`` option to install only required packages. + You can use ``--no-install-recommends`` option to install only required packages. Keep in mind that the build tools must be installed **separately** if you want to compile the samples. @@ -165,12 +165,12 @@ Run the following command: apt list --installed | grep openvino -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. -With the APT distribution, you can build OpenVINO sample files, as explained in the +With the APT distribution, you can build OpenVINO sample files, as explained in the :doc:`guide for OpenVINO sample applications `. For C++ and C, just run the ``build_samples.sh`` script: @@ -178,16 +178,16 @@ For C++ and C, just run the ``build_samples.sh`` script: .. tab-item:: C++ :sync: cpp - + .. code-block:: sh - + /usr/share/openvino/samples/cpp/build_samples.sh - + .. tab-item:: C :sync: c - + .. code-block:: sh - + /usr/share/openvino/samples/c/build_samples.sh Python samples can run as following: @@ -205,32 +205,32 @@ To uninstall OpenVINO Runtime via APT, run the following command based on your n .. tab-item:: The Latest Version :sync: latest-version - + .. code-block:: sh - + sudo apt autoremove openvino - + .. tab-item:: A Specific Version :sync: specific-version - + .. code-block:: sh - + sudo apt autoremove openvino-.. - + For example: - + .. code-block:: sh - + sudo apt autoremove openvino-2023.2.0 What's Next? 
####################################### -Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! Learn more about how to integrate a model in OpenVINO applications by trying out the following tutorials: -* Try the `C++ Quick Start Example `_ for step-by-step +* Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg @@ -239,7 +239,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ You can also try the following: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst index 6d304da1350a26..a4311c04e280a1 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst @@ -14,7 +14,7 @@ Install OpenVINO™ Runtime on Linux from an Archive File Note that the Archive distribution: * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to Linux users (archives for other systems are also available) * may offer different hardware support under different operating systems (see the drop-down below for more details). @@ -134,66 +134,66 @@ Step 1: Download and Install the OpenVINO Core Components :sync: ubuntu-22 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: Ubuntu 20.04 :sync: ubuntu-20 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu20_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu20_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: Ubuntu 18.04 :sync: ubuntu-18 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu18_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu18_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: RHEL 8 :sync: rhel-8 .. 
code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_rhel8_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_rhel8_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: CentOS 7 :sync: centos-7 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_centos7_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_centos7_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM 64-bit :sync: arm-64 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.0.0-13770-9b52171d290/l_openvino_toolkit_ubuntu18_2024.0.0.dev20231221_arm64.tgz -O openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu18_2024.0.0.dev20231221_arm64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM 32-bit :sync: arm-32 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_debian9_2023.2.0.13089.cfd42bd2cb0_armhf.tgz -O openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_debian9_2023.2.0.13089.cfd42bd2cb0_armhf /opt/intel/openvino_2023.2.0 - - + + 5. Install required system dependencies on Linux. To do this, OpenVINO provides a script in the extracted installation directory. Run the following command: .. code-block:: sh @@ -220,7 +220,7 @@ Step 1: Download and Install the OpenVINO Core Components cd /opt/intel sudo ln -s openvino_2023.2.0 openvino_2023 - + .. note:: If you have already installed a previous release of OpenVINO 2023, a symbolic link to the ``openvino_2023`` folder may already exist. Unlink the previous link with ``sudo unlink openvino_2023``, and then re-run the command above. @@ -300,9 +300,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `__ - * `Automatic speech recognition C++ sample `__ - - Uninstalling the Intel® Distribution of OpenVINO™ Toolkit ########################################################### diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst index 0fb0ebf3551b50..89cce25a142d4f 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst @@ -5,17 +5,17 @@ Install OpenVINO™ Runtime on Linux From YUM Repository .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Linux operating + :description: Learn how to install OpenVINO™ Runtime on Linux operating system, using the YUM repository. .. note:: - + Note that the YUM distribution: - + * offers both C/C++ and Python APIs * does not offer support for GNA and NPU inference * is dedicated to Linux users only - * additionally includes code samples + * additionally includes code samples .. 
tab-set:: @@ -75,7 +75,7 @@ Step 1: Set Up the Repository EOF 2. Move the new ``openvino-2023.repo`` file to the YUM configuration directory, i.e. ``/etc/yum.repos.d``: - + .. code-block:: sh sudo mv /tmp/openvino-2023.repo /etc/yum.repos.d @@ -107,26 +107,26 @@ Install OpenVINO Runtime .. tab-item:: The Latest Version :sync: latest-version - + Run the following command: - + .. code-block:: sh - + sudo yum install openvino - + .. tab-item:: A Specific Version :sync: specific-version - + Run the following command: - + .. code-block:: sh - + sudo yum install openvino-.. - + For example: - + .. code-block:: sh - + sudo yum install openvino-2023.2.0 @@ -145,12 +145,12 @@ Run the following command: You can additionally install Python API using one of the alternative methods (:doc:`conda ` or :doc:`pip `). -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. -With the YUM distribution, you can build OpenVINO sample files, as explained in the +With the YUM distribution, you can build OpenVINO sample files, as explained in the :doc:`guide for OpenVINO sample applications `. For C++ and C, just run the ``build_samples.sh`` script: @@ -158,16 +158,16 @@ For C++ and C, just run the ``build_samples.sh`` script: .. tab-item:: C++ :sync: cpp - + .. code-block:: sh - + /usr/share/openvino/samples/cpp/build_samples.sh - + .. tab-item:: C :sync: c - + .. code-block:: sh - + /usr/share/openvino/samples/c/build_samples.sh @@ -181,23 +181,23 @@ To uninstall OpenVINO Runtime via YUM, run the following command based on your n .. tab-item:: The Latest Version :sync: latest-version - + .. code-block:: sh - + sudo yum autoremove openvino - - + + .. tab-item:: A Specific Version :sync: specific-version - + .. code-block:: sh - + sudo yum autoremove openvino-.. - + For example: - + .. code-block:: sh - + sudo yum autoremove openvino-2023.2.0 @@ -205,10 +205,10 @@ To uninstall OpenVINO Runtime via YUM, run the following command based on your n What's Next? ############# -Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! Learn more about how to integrate a model in OpenVINO applications by trying out the following tutorials: -* Try the `C++ Quick Start Example `_ +* Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. .. 
image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg @@ -217,7 +217,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ You can also try the following things: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst index 4f198c3c30f3d4..fbd107f2202911 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime on macOS from an Archive File .. meta:: - :description: Learn how to install OpenVINO™ Runtime on macOS operating + :description: Learn how to install OpenVINO™ Runtime on macOS operating system, using an archive file. .. note:: - + Note that the Archive distribution: - + * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to macOS users (archives for other systems are also available) * is only supported for CPU Plugin @@ -23,13 +23,13 @@ Install OpenVINO™ Runtime on macOS from an Archive File .. tab-item:: System Requirements :sync: system-requirements - + | Full requirement listing is available in: | :doc:`System Requirements Page ` .. tab-item:: Software Requirements :sync: software-requirements - + * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default install). * `Python 3.8 - 3.11 `__ (choose 3.8 - 3.11). Install and add to path. * Apple Xcode Command Line Tools. In the terminal, run ``xcode-select --install`` from any directory @@ -69,18 +69,18 @@ Step 1: Install OpenVINO Core Components .. tab-item:: x86, 64-bit :sync: x86-64 - + .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/macos/m_openvino_toolkit_macos_10_15_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv m_openvino_toolkit_macos_10_15_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM, 64-bit :sync: arm-64 - + .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/macos/m_openvino_toolkit_macos_11_0_2023.2.0.13089.cfd42bd2cb0_arm64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv m_openvino_toolkit_macos_11_0_2023.2.0.13089.cfd42bd2cb0_arm64 /opt/intel/openvino_2023.2.0 @@ -102,7 +102,7 @@ Step 1: Install OpenVINO Core Components .. code-block:: sh - sudo ln -s /opt/intel/openvino_2023.2.0 /opt/intel/openvino_2023 + sudo ln -s /opt/intel/openvino_2023.2.0 /opt/intel/openvino_2023 .. note:: @@ -110,27 +110,27 @@ Step 1: Install OpenVINO Core Components If you have already installed a previous release of OpenVINO 2023, a symbolic link to the ``openvino_2023`` folder may already exist. 
Unlink the previous link with ``sudo unlink openvino_2023``, and then re-run the command above. -Congratulations, you have finished the installation! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations, you have finished the installation! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. -The ``/opt/intel/openvino_2023`` folder now contains the core components for OpenVINO. -If you used a different path in Step 2, for example, ``/home//intel/``, -OpenVINO is now in ``/home//intel/openvino_2023``. The path to the ``openvino_2023`` +The ``/opt/intel/openvino_2023`` folder now contains the core components for OpenVINO. +If you used a different path in Step 2, for example, ``/home//intel/``, +OpenVINO is now in ``/home//intel/openvino_2023``. The path to the ``openvino_2023`` directory is also referred as ```` throughout the OpenVINO documentation. Step 2: Configure the Environment +++++++++++++++++++++++++++++++++ -You must update several environment variables before you can compile and run OpenVINO applications. Open a terminal window and run the ``setupvars.sh`` -script as shown below to temporarily set your environment variables. If your ```` (the folder you used to install OpenVINO) is not +You must update several environment variables before you can compile and run OpenVINO applications. Open a terminal window and run the ``setupvars.sh`` +script as shown below to temporarily set your environment variables. If your ```` (the folder you used to install OpenVINO) is not the default ``/opt/intel/openvino_2023``, use the correct one instead. .. code-block:: sh - cd /opt/intel/openvino_2023 + cd /opt/intel/openvino_2023 source /opt/intel/openvino_2023/setupvars.sh @@ -151,45 +151,44 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. tab-item:: Get started with Python :sync: get-started-py - + Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. - + .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 - + Visit the :ref:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: - + * `OpenVINO Python API Tutorial `__ * `Basic image classification program with Hello Image Classification `__ * `Convert a PyTorch model and use it for image background removal `__ - + .. tab-item:: Get started with C++ :sync: get-started-cpp - + Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. - + .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - + * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ Uninstalling Intel® Distribution of OpenVINO™ Toolkit ##################################################### If you have installed OpenVINO Runtime from archive files, you can uninstall it by deleting the archive files and the extracted folders. 
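+
+For example, with the default paths used throughout this page, a complete cleanup could look like the following sketch (substitute the link, folder, and archive names you actually used, as described below):
+
+.. code-block:: sh
+
+   sudo rm /opt/intel/openvino_2023          # the symbolic link, if you created one
+   sudo rm -r /opt/intel/openvino_2023.2.0   # the extracted folder
+   rm openvino_2023.2.0.tgz                  # the downloaded archive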
-Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. +Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. If you have created the symbolic link, remove the link first: - + .. code-block:: sh sudo rm /opt/intel/openvino_2023 - + To delete the files: - + .. code-block:: sh rm -r && rm diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst index cb3045da5aaa45..a65e6669018668 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst @@ -5,17 +5,17 @@ Install OpenVINO™ Runtime from Conda Forge .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using Conda Forge. .. note:: - + Note that the Conda Forge distribution: * offers both C/C++ and Python APIs * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) .. tab-set:: @@ -25,11 +25,11 @@ Install OpenVINO™ Runtime from Conda Forge | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -45,7 +45,7 @@ Installing OpenVINO Runtime with Anaconda Package Manager ############################################################ 1. Set up the Anaconda environment (Python 3.10 used as an example): - + .. code-block:: sh conda create --name py310 python=3.10 @@ -55,7 +55,7 @@ Installing OpenVINO Runtime with Anaconda Package Manager conda activate py310 2. Update it to the latest version: - + .. code-block:: sh conda update --all @@ -66,22 +66,22 @@ Installing OpenVINO Runtime with Anaconda Package Manager conda install -c conda-forge openvino=2023.2.0 -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. Compiling with OpenVINO Runtime from Conda-Forge on Linux +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -When linking OpenVINO libraries from Conda on Linux, ensure that you have the necessary Conda compilers installed. +When linking OpenVINO libraries from Conda on Linux, ensure that you have the necessary Conda compilers installed. To do so, run the following command in your Conda environment: .. code-block:: sh conda install cmake c-compiler cxx-compiler make -It is crucial to reactivate your Conda environment after installing the compilers. 
+It is crucial to reactivate your Conda environment after installing the compilers. This step ensures that all the environment variables are set correctly for successful linkage. To reactivate your Conda environment, execute the following command: @@ -90,38 +90,33 @@ To reactivate your Conda environment, execute the following command: conda activate py310 -Once you have reactivated your Conda environment, make sure that all the necessary environment +Once you have reactivated your Conda environment, make sure that all the necessary environment variables are properly set and proceed with linking the OpenVINO libraries. Uninstalling OpenVINO™ Runtime ########################################################### -Once OpenVINO Runtime is installed via Conda, you can remove it using the following command, +Once OpenVINO Runtime is installed via Conda, you can remove it using the following command, with the proper OpenVINO version number: .. code-block:: sh - + conda remove openvino=2023.2.0 What's Next? ############################################################ -Now that you've installed OpenVINO Runtime, you are ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you are ready to run your own machine learning applications! To learn more about how to integrate a model in OpenVINO applications, try out some tutorials and sample applications. -Try the :doc:`C++ Quick Start Example ` for step-by-step instructions +Try the :doc:`C++ Quick Start Example ` for step-by-step instructions on building and running a basic image classification C++ application. .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `__ -* `Automatic speech recognition C++ sample `__ - - - - diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst index 2d164bb09cb56b..42d10c8a94e479 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime on Windows from an Archive File .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows operating + :description: Learn how to install OpenVINO™ Runtime on Windows operating system, using an archive file. .. note:: - + Note that the Archive distribution: - + * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to Windows users (archives for other systems are also available) @@ -28,32 +28,32 @@ System Requirements | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ - + .. 
tab-item:: Software :sync: software - + * `Microsoft Visual Studio 2019 with MSBuild `__ or `Microsoft Visual Studio 2022 `__ * `CMake 3.14 or higher, 64-bit `__ (optional, only required for building sample applications) * `Python 3.8 - 3.11, 64-bit `__ - + .. note:: - + To install Microsoft Visual Studio 2019, follow the `Microsoft Visual Studio installation guide `__. You can choose to download the Community version. During installation in the **Workloads** tab, choose **Desktop development with C++**. - + .. note:: - + You can either use `cmake.msi` which is the installation wizard or `cmake.zip` where you have to go into the `bin` folder and then manually add the path to environmental variables. - + .. important:: - + When installing Python, make sure you click the option **Add Python 3.x to PATH** to `add Python `__ to your `PATH` environment variable. - + Installing OpenVINO Runtime @@ -131,14 +131,14 @@ Step 1: Download and Install OpenVINO Core Components If you have already installed a previous release of OpenVINO 2022, a symbolic link to the ``openvino_2023`` folder may already exist. If you want to override it, navigate to the ``C:\Program Files (x86)\Intel`` folder and delete the existing linked folder before running the ``mklink`` command. -Congratulations, you have finished the installation! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations, you have finished the installation! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. -The ``C:\Program Files (x86)\Intel\openvino_2023`` folder now contains the core components for OpenVINO. -If you used a different path in Step 1, you will find the ``openvino_2023`` folder there. -The path to the ``openvino_2023`` directory is also referred as ```` +The ``C:\Program Files (x86)\Intel\openvino_2023`` folder now contains the core components for OpenVINO. +If you used a different path in Step 1, you will find the ``openvino_2023`` folder there. +The path to the ``openvino_2023`` directory is also referred as ```` throughout the OpenVINO documentation. @@ -162,8 +162,8 @@ You must update several environment variables before you can compile and run Ope .. note:: - If you see an error indicating Python is not installed, Python may not be added to the PATH environment variable - (as described `here `__). + If you see an error indicating Python is not installed, Python may not be added to the PATH environment variable + (as described `here `__). Check your system environment variables, and add Python if necessary. @@ -177,30 +177,29 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. tab-item:: Get started with Python :sync: get-started-py - + Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. - + .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 - + Visit the :ref:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: - - * `OpenVINO Python API Tutorial `__ + + * `OpenVINO Python API Tutorial `__ * `Basic image classification program with Hello Image Classification `__ * `Convert a PyTorch model and use it for image background removal `__ - + .. 
tab-item:: Get started with C++ :sync: get-started-cpp - + Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. - + .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - + * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ .. _uninstall-from-windows: @@ -209,7 +208,7 @@ Uninstalling OpenVINO Runtime ############################# If you have installed OpenVINO Runtime from archive files, you can uninstall it by deleting the archive files and the extracted folders. -Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. +Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. If you have created the symbolic link, remove the link first. @@ -239,7 +238,7 @@ Additional Resources * IoT libraries and code samples in the GitHUB repository: `Intel® IoT Developer Kit `__ Inner Loop - const auto current_expr_loops = expr->get_loop_ids(); + const auto& current_expr_loops = expr->get_loop_ids(); const auto current_loop_depth = current_expr_loops.size(); for (size_t i = 0; i < current_loop_depth; ++i) { const auto current_loop_id = current_expr_loops[i]; @@ -235,7 +235,7 @@ bool FuseLoops::run(LinearIR& linear_ir) { continue; } - const auto upper_loop_ids = parent_expr->get_loop_ids(); + const auto& upper_loop_ids = parent_expr->get_loop_ids(); if (upper_loop_ids.empty()) continue; @@ -279,7 +279,7 @@ bool FuseLoops::run(LinearIR& linear_ir) { continue; } - const auto lower_loop_ids = consumer_expr->get_loop_ids(); + const auto& lower_loop_ids = consumer_expr->get_loop_ids(); if (lower_loop_ids.empty()) continue; diff --git a/src/common/snippets/src/lowered/pass/identify_buffers.cpp b/src/common/snippets/src/lowered/pass/identify_buffers.cpp index 5b0dcee5221b6c..6b04701ff155d5 100644 --- a/src/common/snippets/src/lowered/pass/identify_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/identify_buffers.cpp @@ -43,8 +43,8 @@ bool IdentifyBuffers::can_reuse_id(const ShiftPtrParams& lhs, const ShiftPtrPara bool IdentifyBuffers::are_adjacent(const std::pair& lhs, const std::pair& rhs) { - const auto lhs_ids = lhs.first->get_loop_ids(); - const auto rhs_ids = rhs.first->get_loop_ids(); + const auto& lhs_ids = lhs.first->get_loop_ids(); + const auto& rhs_ids = rhs.first->get_loop_ids(); const auto equal_loop_ids = lhs_ids == rhs_ids; if (equal_loop_ids) { // Buffers are connected to the same Loop and have the same outer Loops return !can_reuse_id(lhs.second, rhs.second); diff --git a/src/common/snippets/src/lowered/pass/init_loops.cpp b/src/common/snippets/src/lowered/pass/init_loops.cpp index 68e8cc7757e13f..0d6757eed88e4c 100644 --- a/src/common/snippets/src/lowered/pass/init_loops.cpp +++ b/src/common/snippets/src/lowered/pass/init_loops.cpp @@ -47,7 +47,6 @@ void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& lo if (loop_entry.is_incremented) { const auto& port = loop_entry.expr_port; const auto source = *port->get_connected_ports().begin(); - const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const 
auto& shape = port->get_descriptor_ptr()->get_shape(); const auto& dim = *(layout.rbegin() + loop_entry.dim_idx); @@ -63,7 +62,6 @@ void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& lo loop_exit.ptr_increment = 0; if (loop_exit.is_incremented) { const auto& port = loop_exit.expr_port; - const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const auto& shape = port->get_descriptor_ptr()->get_shape(); const auto original_dim = layout.size() - 1 - loop_exit.dim_idx; diff --git a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp index 723b97b5a25788..d76a2b1af35147 100644 --- a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp +++ b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp @@ -17,7 +17,6 @@ namespace pass { bool InsertBroadcastMove::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InsertBroadcastMove") bool modified = false; - const auto& loop_manager = linear_ir.get_loop_manager(); auto supports_broadcasting = [](const std::shared_ptr& n) { return ov::op::util::supports_auto_broadcast(n) || @@ -39,6 +38,7 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir) { const auto& descriptors = expr->get_input_port_descriptors(); if (!supports_broadcasting(node) || descriptors.size() < 2) continue; + const auto& loop_ids = expr->get_loop_ids(); const auto& connectors = expr->get_input_port_connectors(); OPENVINO_ASSERT(connectors.size() == descriptors.size(), "Invalid expression configuration: connectors and descriptors size mismatch"); @@ -51,26 +51,18 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir) { const auto broadcasted_dim = *std::max_element(last_dims.begin(), last_dims.end()); for (size_t i = 0; i < last_dims.size(); i++) { const auto& parent_port = connectors[i]->get_source(); - if (last_dims[i] != broadcasted_dim && - !dont_need_broadcasting(parent_port.get_expr()->get_node())) { + const auto& parent_node = parent_port.get_expr()->get_node(); + if (last_dims[i] != broadcasted_dim && !dont_need_broadcasting(parent_node)) { OPENVINO_ASSERT(last_dims[i] == 1, "Attempt to broadcast non-1 dimension. 
Target dim: ", broadcasted_dim, " This dim: ", last_dims[i]); - const auto broadcast = std::make_shared(node->get_input_source_output(i), broadcasted_dim); - - PortDescriptorUtils::set_port_descriptor_ptr(broadcast->output(0), connectors[i]->get_source().get_descriptor_ptr()->clone()); - const auto broadcast_expr = linear_ir.create_expression(broadcast, {connectors[i]}); - linear_ir.insert(expr_it, broadcast_expr); - linear_ir.replace_input(expr->get_input_port(i), broadcast_expr->get_output_port_connector(0)); + const auto broadcast = std::make_shared(parent_node, broadcasted_dim); + const auto broadcast_expr = *linear_ir.insert_node(broadcast, std::vector{ connectors[i] }, + loop_ids, true, expr_it, { expr->get_input_port(i) }); // Note that BroadcastMove modified the next expr input shape, so we need to set update // expr's input port descriptor to reflect the changes expr->get_input_port_descriptor(i)->set_shape(broadcast_expr->get_output_port_descriptor(0)->get_shape()); - // Copy Loop identifies - const auto& loop_ids = expr->get_loop_ids(); - broadcast_expr->set_loop_ids(loop_ids); - loop_manager->update_loops_port(loop_ids, expr->get_input_port(0), {broadcast_expr->get_input_port(0)}, true); - modified = true; } } diff --git a/src/common/snippets/src/lowered/pass/insert_buffers.cpp b/src/common/snippets/src/lowered/pass/insert_buffers.cpp index 81835a4ca390ae..0653d0589f3075 100644 --- a/src/common/snippets/src/lowered/pass/insert_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/insert_buffers.cpp @@ -105,8 +105,8 @@ InsertBuffers::InsertBuffers(int32_t buffer_allocation_rank) LinearIR::constExprIt InsertBuffers::insertion_position(const LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, const ExpressionPtr& up_expr, const ExpressionPtr& down_expr) { - const auto up_loops = up_expr->get_loop_ids(); - const auto down_loops = down_expr->get_loop_ids(); + const auto& up_loops = up_expr->get_loop_ids(); + const auto& down_loops = down_expr->get_loop_ids(); // If upper expression is out of Loop, we can insert Buffer implicitly after him if (up_loops.empty()) { return std::next(linear_ir.find(up_expr)); @@ -150,8 +150,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto& expr = entry_port->get_expr(); const auto port_idx = entry_port->get_index(); const auto node = expr->get_node(); - const auto& input_connector = expr->get_input_port_connector(port_idx); - const auto& parent_expr_output = input_connector->get_source(); + const auto& parent_expr_output = expr->get_input_port_connector(port_idx)->get_source(); const auto& parent_expr = parent_expr_output.get_expr(); const auto parent_port = parent_expr_output.get_index(); const auto parent = parent_expr->get_node(); @@ -166,8 +165,8 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto node_ma = ov::as_type_ptr(node); bool is_buffer_needed = (parent_ma && parent_ma->is_memory_access_output_port(parent_port)) || (node_ma && node_ma->is_memory_access_input_port(port_idx)); - const auto current_loops = expr->get_loop_ids(); - const auto parent_loops = parent_expr->get_loop_ids(); + const auto& current_loops = expr->get_loop_ids(); + const auto& parent_loops = parent_expr->get_loop_ids(); const auto buffer_loop_ids = get_buffer_loop_ids(current_loops, parent_loops, is_buffer_needed); if (is_buffer_needed) { @@ -182,12 +181,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& parent_expr_output, 
m_buffer_allocation_rank); const auto buffer = std::make_shared(parent->output(parent_port), allocation_shape); - PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), parent_expr_output.get_descriptor_ptr()->clone()); - // Output connector is automatically filled from PortDescriptor - const auto buffer_expr = linear_ir.create_expression(buffer, {input_connector}); - linear_ir.insert(pos, buffer_expr); - linear_ir.replace_input(*entry_port.get(), buffer_expr->get_output_port_connector(0)); - buffer_expr->set_loop_ids(buffer_loop_ids); + linear_ir.insert_node(buffer, std::vector{ parent_expr_output }, buffer_loop_ids, false, pos, { *entry_port }); } } @@ -198,8 +192,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto node = expr->get_node(); const auto output_connector = exit_port->get_port_connector_ptr(); const auto child_exprs_inputs = output_connector->get_consumers(); - const auto current_loops = expr->get_loop_ids(); - const std::vector node_outs = {output_connector}; + const auto& current_loops = expr->get_loop_ids(); std::set potential_consumers; std::set buffers; @@ -241,7 +234,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& for (const auto& buffer : buffers) { const auto& buffer_out = buffer->get_output_port_connector(0); const auto buffer_consumers_inputs = buffer_out->get_consumers(); - linear_ir.replace_input(buffer_consumers_inputs, output_connector); + replace_input_port_connectors(buffer_consumers_inputs, output_connector); potential_consumers.insert(buffer_consumers_inputs.begin(), buffer_consumers_inputs.end()); linear_ir.erase(linear_ir.find_after(expr_it, buffer)); } @@ -275,7 +268,6 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& *exit_port, m_buffer_allocation_rank); auto buffer = std::make_shared(node->output(port_idx), allocation_shape); - PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), exit_port->get_descriptor_ptr()->clone()); // We cannot insert Node output connector on Buffer output because not all consumers of Node needs Buffer // Example: // Add @@ -284,10 +276,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& // | <- It should be new PortConnector // Relu // Output port connector is automatically filled from PortDescriptor - const auto buffer_expr = linear_ir.create_expression(buffer, node_outs); - linear_ir.insert(pos, buffer_expr); - linear_ir.replace_input(potential_consumers, buffer_expr->get_output_port_connector(0)); - buffer_expr->set_loop_ids(buffer_loop_ids); + linear_ir.insert_node(buffer, std::vector{ *exit_port }, buffer_loop_ids, false, pos, { potential_consumers }); } } } diff --git a/src/common/snippets/src/lowered/pass/insert_load_store.cpp b/src/common/snippets/src/lowered/pass/insert_load_store.cpp index 75e70c9c553c88..eb70e3d26042b8 100644 --- a/src/common/snippets/src/lowered/pass/insert_load_store.cpp +++ b/src/common/snippets/src/lowered/pass/insert_load_store.cpp @@ -37,31 +37,18 @@ bool InsertLoadStore::insert_load(LinearIR& linear_ir, const LinearIR::constExpr OPENVINO_ASSERT(consumer_inputs.size() == 1, "RankNormalization is supposed to be the only consumer"); data_expr = first_consumer; } - const auto& loop_manager = linear_ir.get_loop_manager(); const auto& data_ngraph_output = data_expr->get_node()->output(0); - const auto& output_connector = data_expr->get_output_port_connector(0); bool was_inserted = false; - for (const auto& consumer_input : 
output_connector->get_consumers()) { + const auto& data_out = data_expr->get_output_port_connector(0); + for (const auto& consumer_input : data_out->get_consumers()) { const auto& consumer_expr = consumer_input.get_expr(); - const auto port = consumer_input.get_index(); - const auto& consumer = consumer_expr->get_node(); - const auto ma = ov::as_type_ptr(consumer); - if (ma && ma->is_memory_access_input_port(port)) + const auto ma = ov::as_type_ptr(consumer_expr->get_node()); + if (ma && ma->is_memory_access_input_port(consumer_input.get_index())) return false; - const auto loop_ids = consumer_expr->get_loop_ids(); const auto load = std::make_shared(data_ngraph_output, get_count(data_expr->get_output_port_descriptor(0))); - PortDescriptorUtils::set_port_descriptor_ptr(load->output(0), consumer_input.get_descriptor_ptr()->clone()); - const auto load_expr = linear_ir.create_expression(load, {output_connector}); - linear_ir.insert(linear_ir.find_after(data_expr_it, consumer_expr), load_expr); - linear_ir.replace_input(consumer_input, load_expr->get_output_port_connector(0)); - // Copy Loop identifies - load_expr->set_loop_ids(loop_ids); - - // Need to update all the corresponding Loops with the same Entry Point - const auto& prev_entry_point = consumer_input; - const auto new_entry_point = load_expr->get_input_port(0); - loop_manager->update_loops_port(loop_ids, prev_entry_point, {new_entry_point}, true); + linear_ir.insert_node(load, std::vector{ data_out }, consumer_expr->get_loop_ids(), + true, linear_ir.find_after(data_expr_it, consumer_expr), { consumer_input }); was_inserted = true; } @@ -69,10 +56,8 @@ bool InsertLoadStore::insert_load(LinearIR& linear_ir, const LinearIR::constExpr } bool InsertLoadStore::insert_store(LinearIR& linear_ir, const LinearIR::constExprIt& data_expr_it) { - const auto& loop_manager = linear_ir.get_loop_manager(); const auto& data_expr = *data_expr_it; - const auto& input_connector = data_expr->get_input_port_connector(0); - const auto& parent_output = input_connector->get_source(); + const auto& parent_output = data_expr->get_input_port_connector(0)->get_source(); const auto& parent_expr = parent_output.get_expr(); const auto port = parent_output.get_index(); const auto& parent = parent_expr->get_node(); @@ -80,34 +65,10 @@ bool InsertLoadStore::insert_store(LinearIR& linear_ir, const LinearIR::constExp if (ma && ma->is_memory_access_output_port(port)) return false; - const auto loop_ids = parent_expr->get_loop_ids(); + const auto& loop_ids = parent_expr->get_loop_ids(); const auto store = std::make_shared(parent->output(port), get_count(data_expr->get_input_port_descriptor(0))); - PortDescriptorUtils::set_port_descriptor_ptr(store->output(0), parent_output.get_descriptor_ptr()->clone()); - const auto store_expr = linear_ir.create_expression(store, {input_connector}); const auto& insertion_pos = linear_ir.find_after(std::reverse_iterator(data_expr_it), parent_expr).base(); - linear_ir.insert(insertion_pos, store_expr); - linear_ir.replace_input(data_expr->get_input_port(0), store_expr->get_output_port_connector(0)); - // Copy Loop identifies - store_expr->set_loop_ids(loop_ids); - - // Need to update all the corresponding Loops with the same Exit Point - const auto prev_exit_point = parent_output; - // The previous exit point but one output port can have several consumers that can be potential exit points - // So we should verify on the possible future exit points - const auto consumer_inputs = input_connector->get_consumers(); - const auto should_be_saved = 
std::any_of(consumer_inputs.begin(), consumer_inputs.end(), - [&data_expr](const ExpressionPort& input_port) { - const auto expr = input_port.get_expr(); - // Skip the current data expr since the input of the expr is changed to Store expr - if (expr == data_expr) - return false; - const auto& node = expr->get_node(); - return ov::is_type(node) || ov::is_type(node); - }); - const auto new_exit_point = store_expr->get_output_port(0); - const auto new_exit_points = should_be_saved ? std::vector{prev_exit_point, new_exit_point} - : std::vector{new_exit_point}; - loop_manager->update_loops_port(loop_ids, prev_exit_point, new_exit_points, false); + linear_ir.insert_node(store, std::vector{ parent_output }, loop_ids, true, insertion_pos, { data_expr->get_input_port(0) }); return true; } diff --git a/src/common/snippets/src/lowered/pass/insert_loops.cpp b/src/common/snippets/src/lowered/pass/insert_loops.cpp index 3eab6e97df33fb..9e52387333a02d 100644 --- a/src/common/snippets/src/lowered/pass/insert_loops.cpp +++ b/src/common/snippets/src/lowered/pass/insert_loops.cpp @@ -88,24 +88,18 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& init_params(loop_entries); init_params(loop_exits); + const auto outer_loop_ids = get_outer_loop_ids(*loop_begin_pos, loop_id); + const auto& loop_begin = std::make_shared(); - const auto& loop_begin_expr = linear_ir.create_expression(loop_begin, std::vector{}); - linear_ir.insert(loop_begin_pos, loop_begin_expr); + const auto loop_begin_expr = *linear_ir.insert_node(loop_begin, std::vector{}, outer_loop_ids, false, loop_begin_pos); const auto& loop_end = std::make_shared( loop_begin->output(0), work_amount, work_amount_increment, is_incremented, ptr_increments, finalization_offsets, io_data_sizes, loop_entries.size(), loop_exits.size(), loop_id); loop_end->has_outer_loop = has_outer_loop; - // Add LoopBegin port connector loop_end_inputs.push_back(loop_begin_expr->get_output_port_connector(0)); - - const auto& loop_end_expr = linear_ir.create_expression(loop_end, loop_end_inputs); - const auto& it = linear_ir.insert(loop_end_pos, loop_end_expr); - - const auto outer_loop_ids = get_outer_loop_ids(*std::prev(it), loop_id); - loop_begin_expr->set_loop_ids(outer_loop_ids); - loop_end_expr->set_loop_ids(outer_loop_ids); + linear_ir.insert_node(loop_end, loop_end_inputs, outer_loop_ids, false, loop_end_pos); } bool InsertLoops::run(LinearIR& linear_ir) { @@ -125,7 +119,7 @@ bool InsertLoops::run(LinearIR& linear_ir) { continue; // Outer Loop ----> Inner Loop - const auto expr_loops = expr->get_loop_ids(); + const auto& expr_loops = expr->get_loop_ids(); const auto loop_depth = expr_loops.size(); for (size_t i = 0; i < loop_depth; ++i) { const auto loop_id = expr_loops[i]; diff --git a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp index d49ee0b1794204..3214962ef333b5 100644 --- a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp +++ b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp @@ -43,16 +43,17 @@ bool InsertPerfCount::run(LinearIR& linear_ir) { // insert perf_count_begin after last parameter // linear_ir.insert has insert before behavior, need move to next. 
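The insert-before convention the comment above refers to is easy to sanity-check outside the codebase, since Python's ``list.insert`` follows the same rule; a minimal sketch (the element names are made up for illustration):

.. code-block:: python

    # list.insert places the new element *before* the given position, so
    # "after the last parameter" means index + 1 -- the same off-by-one the
    # C++ code handles by advancing the iterator with std::next().
    ops = ["parameter", "result"]
    ops.insert(ops.index("parameter") + 1, "perf_count_begin")
    assert ops == ["parameter", "perf_count_begin", "result"]
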
+ const auto empty_inputs = std::vector{}; + const auto last_param_it = perf_count_begin_pos; perf_count_begin_pos = std::next(perf_count_begin_pos); const auto& perf_count_begin = std::make_shared(); - const auto& perf_count_begin_expr = linear_ir.create_expression(perf_count_begin, std::vector{}); - linear_ir.insert(perf_count_begin_pos, perf_count_begin_expr); + linear_ir.insert_node(perf_count_begin, empty_inputs, last_param_it->get()->get_loop_ids(), false, perf_count_begin_pos); // insert perf_count_end before first result const auto& perf_count_end = std::make_shared(perf_count_begin->output(0)); perf_count_end->set_friendly_name("last_parameter_to_first_result"); - const auto& perf_count_end_expr = linear_ir.create_expression(perf_count_end, std::vector{}); - linear_ir.insert(perf_count_end_pos, perf_count_end_expr); + // PerfCountEnd doesn't need PortConnector to PerfCountBegin + linear_ir.insert_node(perf_count_end, empty_inputs, perf_count_end_pos->get()->get_loop_ids(), false, perf_count_end_pos); return true; } diff --git a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp index cc685c1851157a..281c3eb281481b 100644 --- a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp +++ b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp @@ -251,14 +251,13 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, LinearIR::constExprIt tail_end, const size_t tail_size) { const auto& config = linear_ir.get_config(); - auto insertFill = [tail_size](const ov::Input& input) -> std::shared_ptr { + auto insertFill = [tail_size](const ov::Input& input, const ExpressionPort& source) -> std::shared_ptr { std::shared_ptr fill = nullptr; auto& rt = input.get_rt_info(); auto fill_rt = rt.find("set_fill"); if (fill_rt != rt.end()) { const auto fill_value = fill_rt->second.as(); - fill = std::make_shared(input.get_source_output(), tail_size, fill_value); - input.get_node()->set_argument(input.get_index(), fill); + fill = std::make_shared(source.get_expr()->get_node()->output(source.get_index()), tail_size, fill_value); } return fill; }; @@ -279,9 +278,9 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, if (config.m_need_fill_tail_register && (ov::is_type(op) || ov::is_type(op))) { - for (size_t i = 0; i < op->inputs().size(); ++i) { - if (auto fill = insertFill(op->input(i))) { - const auto& input = expr->get_input_port_connector(i); + for (size_t i = 0; i < expr->get_input_count(); ++i) { + const auto& input = expr->get_input_port_connector(i); + if (auto fill = insertFill(op->input(i), input->get_source())) { const auto consumers = input->get_consumers(); // If there are several consumers, fill expression must be inserted before first of them auto fst_consumer = std::min_element(consumers.cbegin(), consumers.cend(), [&](ExpressionPort lhs, ExpressionPort rhs) { @@ -289,15 +288,13 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, auto rhs_it = linear_ir.find(rhs.get_expr()); return std::distance(linear_ir.cbegin(), lhs_it) < std::distance(linear_ir.cbegin(), rhs_it); }); - const auto insert_pos = linear_ir.find(fst_consumer->get_expr()); - auto fill_expr = linear_ir.create_expression(fill, {input}); - linear_ir.insert(insert_pos, fill_expr); - linear_ir.replace_input(consumers, fill_expr->get_output_port_connector(0)); + const auto fill_expr = *linear_ir.insert_node(fill, std::vector{ input->get_source() }, expr->get_loop_ids(), true, + linear_ir.find(fst_consumer->get_expr()), 
consumers); + // in_reg == out_reg since we want to modify vector reg inplace const auto reg = expr->get_input_port_descriptor(0)->get_reg(); fill_expr->get_input_port_descriptor(0)->set_reg(reg); fill_expr->get_output_port_descriptor(0)->set_reg(reg); - fill_expr->set_loop_ids(expr->get_loop_ids()); } } } else if (const auto memory_access = std::dynamic_pointer_cast(op)) { diff --git a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp index 48f86cb2092972..3f9de12a5a0523 100644 --- a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp +++ b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp @@ -16,7 +16,6 @@ namespace pass { bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::LoadMoveBroadcastToBroadcastLoad") - const auto& loop_manager = linear_ir.get_loop_manager(); bool modified = false; for (auto expr_it = linear_ir.cbegin(); expr_it != linear_ir.cend(); expr_it++) { @@ -30,6 +29,9 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { if (!load) continue; + OPENVINO_ASSERT(expr->get_loop_ids() == parent_expr->get_loop_ids(), + "The pair of Load and MoveBroadcast expressions must be in the same loops!"); + // Cannot rewrite Broadcast + Load if load has more than 1 user // or more than one input, or if Broadcast has several inputs const auto load_consumers_inputs = interm_connector->get_consumers(); @@ -45,20 +47,7 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { const auto& outshape = move_broadcast->get_output_partial_shape(0); const auto broadcastload = std::make_shared(load->input_value(0), *outshape.rbegin(), load->get_offset()); - const auto move_consumers = expr->get_output_port_connector(0)->get_consumers(); - PortDescriptorUtils::set_port_descriptor_ptr(broadcastload->output(0), expr->get_output_port(0).get_descriptor_ptr()->clone()); - const auto broadcastload_expr = linear_ir.create_expression(broadcastload, { parent_expr->get_input_port_connector(0) }); - // Copy Loop identifies - broadcastload_expr->set_loop_ids(parent_expr->get_loop_ids()); - // Update the corresponding Loops with - loop_manager->update_loops_port(parent_expr->get_loop_ids(), parent_expr->get_input_port(0), {broadcastload_expr->get_input_port(0)}, true); - - const auto mv_expr_it = expr_it; - const auto insertion_pos = std::next(expr_it); - expr_it = linear_ir.insert(insertion_pos, broadcastload_expr); - linear_ir.erase(linear_ir.find_before(mv_expr_it, parent_expr)); - linear_ir.erase(mv_expr_it); - linear_ir.replace_input(move_consumers, broadcastload_expr->get_output_port_connector(0)); + expr_it = linear_ir.replace_with_node({ parent_expr, expr }, broadcastload); modified |= true; } } diff --git a/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp b/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp index b423eeda46a5cb..e336b62c00238f 100644 --- a/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp +++ b/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp @@ -33,7 +33,7 @@ bool MoveResultOutOfLoop::run(LinearIR& linear_ir) { const auto& input_connector = expr->get_input_port_connector(0); const auto& parent_expr = input_connector->get_source().get_expr(); - const auto parent_loop_ids = parent_expr->get_loop_ids(); + const auto& parent_loop_ids = parent_expr->get_loop_ids(); 
// Parent is out of Loop: just verify that Result is after Parent if (parent_loop_ids.empty()) { diff --git a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp index 4174f928352289..2ec613495e9a13 100644 --- a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp +++ b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp @@ -35,7 +35,7 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { const auto& pm = matcher->get_pattern_map(); const auto softmax = pm.at(match_softmax); const auto softmax_expr = *expr_it; - const auto softmax_loop_ids = softmax_expr->get_loop_ids(); + const auto& softmax_loop_ids = softmax_expr->get_loop_ids(); const auto& input_connector = softmax_expr->get_input_port_connector(0); const auto& output_connector = softmax_expr->get_output_port_connector(0); const auto tensor_out = softmax_expr->get_output_port_descriptor(0)->get_shape(); @@ -95,9 +95,8 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { const auto mul = push_node(std::make_shared(exp.second, broadcast_pow.second)); // Transfer original ExpressionPorts - linear_ir.replace_input((*max.first)->get_input_port(0), input_connector); - linear_ir.replace_input((*sub.first)->get_input_port(0), input_connector); - linear_ir.replace_input(output_connector->get_consumers(), (*mul.first)->get_output_port_connector(0)); + replace_input_port_connectors({ max.first->get()->get_input_port(0), sub.first->get()->get_input_port(0) }, input_connector); + replace_input_port_connectors(output_connector->get_consumers(), (*mul.first)->get_output_port_connector(0)); // Markup of Mul Loop loop_manager->mark_loop(mul.first, expr_it, inner_work_amount, m_vector_size, 0, diff --git a/src/common/snippets/src/lowered/pass/split_loops.cpp b/src/common/snippets/src/lowered/pass/split_loops.cpp index ba036eca8011f9..3a1c113152c545 100644 --- a/src/common/snippets/src/lowered/pass/split_loops.cpp +++ b/src/common/snippets/src/lowered/pass/split_loops.cpp @@ -48,7 +48,7 @@ bool SplitLoops::run(LinearIR& linear_ir) { for (const auto& entry_point : loop->get_entry_points()) { const auto& parent_port = entry_point.expr_port->get_port_connector_ptr()->get_source(); const auto& parent_expr = parent_port.get_expr(); - const auto parent_loop_ids = parent_expr->get_loop_ids(); + const auto& parent_loop_ids = parent_expr->get_loop_ids(); if (parent_loop_ids.empty()) continue; diff --git a/src/common/snippets/src/lowered/pass/validate_loops.cpp b/src/common/snippets/src/lowered/pass/validate_loops.cpp index 2377feec95c477..99698a6b4329bd 100644 --- a/src/common/snippets/src/lowered/pass/validate_loops.cpp +++ b/src/common/snippets/src/lowered/pass/validate_loops.cpp @@ -44,7 +44,7 @@ bool ValidateLoops::run(LinearIR& linear_ir) { auto validate_loop_ports = [&loop_manager, &dim_indexes, &validated_nested_loops, &is_already_verified](const std::vector& loop_ports) { for (const auto& loop_port : loop_ports) { const auto expr = loop_port.expr_port->get_expr(); - const auto loop_ids = expr->get_loop_ids(); + const auto& loop_ids = expr->get_loop_ids(); // If loop_ids of the current port is subsequence of already validated IDs, skip if (is_already_verified(loop_ids)) continue; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp index 319b17d3e6cb07..165f9626014290 100644 --- 
a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp @@ -32,36 +32,24 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert(snippets::lowe if (consumers.size() != 1) return false; + OPENVINO_ASSERT(convert_expr->get_loop_ids() == load_expr->get_loop_ids(), + "The pair of Load and Convert expressions must be in the same loops!"); + + const auto& parent_source = load_expr->get_input_port_connector(0)->get_source(); + const auto parent_output = parent_source.get_expr()->get_node()->output(parent_source.get_index()); std::shared_ptr load_convert = nullptr; if (ov::is_type(convert)) { - load_convert = std::make_shared(load->input_value(0), - convert->get_destination_type(), + load_convert = std::make_shared(parent_output, convert->get_destination_type(), load->get_count(), load->get_offset()); } else if (ov::is_type(convert)) { - load_convert = std::make_shared(load->input_value(0), - convert->get_destination_type(), + load_convert = std::make_shared(parent_output, convert->get_destination_type(), load->get_count(), load->get_offset()); } else { OPENVINO_THROW("Type of Convert op is undefined. Supports only fusing Load and ConvertTruncation or ConvertSaturation ops"); } - const auto out_port = convert_expr->get_output_port(0); - const auto convert_consumers = out_port.get_connected_ports(); - snippets::lowered::PortDescriptorUtils::set_port_descriptor_ptr(load_convert->output(0), out_port.get_descriptor_ptr()->clone()); - const auto load_convert_expr = linear_ir.create_expression(load_convert, { load_expr->get_input_port_connector(0) }); - const auto convert_expr_it = convert_it; - const auto insertion_pos = std::next(convert_it); - convert_it = linear_ir.insert(insertion_pos, load_convert_expr); - - const auto& load_loop_ids = load_expr->get_loop_ids(); - load_convert_expr->set_loop_ids(load_loop_ids); - const auto& loop_manager = linear_ir.get_loop_manager(); - loop_manager->update_loops_port(load_loop_ids, load_expr->get_input_port(0), {load_convert_expr->get_input_port(0)}, true); - loop_manager->update_loops_port(load_loop_ids, convert_expr->get_output_port(0), {load_convert_expr->get_output_port(0)}, false); - - linear_ir.erase(std::find(linear_ir.cbegin(), convert_expr_it, load_expr)); - linear_ir.erase(convert_expr_it); - linear_ir.replace_input(convert_consumers, load_convert_expr->get_output_port_connector(0)); + convert_it = linear_ir.replace_with_node({load_expr, convert_expr}, load_convert); + return true; } @@ -69,7 +57,6 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low snippets::lowered::LinearIR::constExprIt& convert_it) { const auto& convert_expr = *convert_it; const auto& convert = ov::as_type_ptr(convert_expr->get_node()); - const auto& input_connector = convert_expr->get_input_port_connector(0); const auto& output_connector = convert_expr->get_output_port_connector(0); if (convert->get_input_element_type(0) != ov::element::f32 && convert->get_input_element_type(0) != ov::element::i32) return false; @@ -84,36 +71,24 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low if (!store) return false; + OPENVINO_ASSERT(convert_expr->get_loop_ids() == store_expr->get_loop_ids(), + "The pair of Convert and Store expressions must be in the same loops!"); + + const auto& parent_source = convert_expr->get_input_port_connector(0)->get_source(); + const auto 
parent_output = parent_source.get_expr()->get_node()->output(parent_source.get_index()); std::shared_ptr store_convert = nullptr; if (ov::is_type(convert)) { - store_convert = std::make_shared(convert->input_value(0), - convert->get_destination_type(), + store_convert = std::make_shared(parent_output, convert->get_destination_type(), store->get_count(), store->get_offset()); } else if (ov::is_type(convert)) { - store_convert = std::make_shared(convert->input_value(0), - convert->get_destination_type(), + store_convert = std::make_shared(parent_output, convert->get_destination_type(), store->get_count(), store->get_offset()); } else { OPENVINO_THROW("Type of Convert op is undefined. Supports only fusing Store and ConvertTruncation or ConvertSaturation ops"); } - const auto out_port = store_expr->get_output_port(0); - const auto store_consumers = out_port.get_connected_ports(); - snippets::lowered::PortDescriptorUtils::set_port_descriptor_ptr(store_convert->output(0), out_port.get_descriptor_ptr()->clone()); - const auto store_convert_expr = linear_ir.create_expression(store_convert, { input_connector }); - const auto convert_expr_it = convert_it; - const auto insertion_pos = std::next(convert_it); - convert_it = linear_ir.insert(insertion_pos, store_convert_expr); - - const auto& convert_loop_ids = convert_expr->get_loop_ids(); - store_convert_expr->set_loop_ids(convert_loop_ids); - const auto& loop_manager = linear_ir.get_loop_manager(); - loop_manager->update_loops_port(convert_loop_ids, convert_expr->get_input_port(0), {store_convert_expr->get_input_port(0)}, true); - loop_manager->update_loops_port(convert_loop_ids, store_expr->get_output_port(0), {store_convert_expr->get_output_port(0)}, false); - - linear_ir.erase(std::find(convert_expr_it, linear_ir.cend(), store_expr)); - linear_ir.erase(convert_expr_it); - linear_ir.replace_input(store_consumers, store_convert_expr->get_output_port_connector(0)); + convert_it = linear_ir.replace_with_node({convert_expr, store_expr}, store_convert); + return true; } From 585a7dcf17c67422493c80d75c2ec83caa76f5ba Mon Sep 17 00:00:00 2001 From: Maksim Kutakov Date: Tue, 16 Jan 2024 12:47:55 +0100 Subject: [PATCH 021/122] [CPU] Fill ellipsis mask to match the input rank (#22109) --- src/plugins/intel_cpu/src/nodes/strided_slice.cpp | 11 +---------- .../single_layer_tests/strided_slice.cpp | 2 ++ .../functional/single_layer_tests/strided_slice.cpp | 3 ++- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 100be163699022..d5d2fc2e3620c3 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -93,16 +93,7 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte attrs.endMask = createMask(ss->get_end_mask(), 1, true); attrs.newAxisMask = createMask(ss->get_new_axis_mask()); attrs.shrinkAxisMask = createMask(ss->get_shrink_axis_mask()); - - auto origEllipsisMask = ss->get_ellipsis_mask(); - bool isEllipsis = false; - for (const auto &o : origEllipsisMask) { - isEllipsis = isEllipsis || o != 0; - attrs.ellipsisMask.push_back(o); - } - if (attrs.ellipsisMask.size() == 0 || !isEllipsis) { - for (size_t i = attrs.ellipsisMask.size(); i < nDims; ++i) attrs.ellipsisMask.push_back(0); - } + attrs.ellipsisMask = createMask(ss->get_ellipsis_mask()); } else { const size_t length = outputShapes[0].getRank(); if (inputShapes.size() > AXES_ID) { diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp index 91b899090b6792..7a118d125d7a52 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp @@ -79,6 +79,8 @@ std::vector raw_test_cases = { { 1, 0 }, { 1, 0 }, { }, { }, { 1, 0 } }, RawParams{ {{ 20, 10, 5 }}, { 0, 0 }, { 0, -1 }, { 1, 1 }, { 1, 0 }, { 1, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + RawParams{ {{ 1, 8400, 6 }}, { 0, 2 }, { 0, 4 }, { 1, 1 }, + { 0 }, { 0 }, { 0 }, { 0 }, { 1 } }, RawParams{ {{ 1, 12, 100, 1, 1 }}, { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 }, { 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {} }, RawParams{ {{ 2, 2, 2, 2 }}, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp index f76100141cb7e4..6b04f843ca7860 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp @@ -224,7 +224,8 @@ const std::vector testCasesCommon4D = { StridedSliceParams{{0, 1, 0, 10}, {1, 5, 32, 30}, {1, 1, 1, 1}, {0, 1, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, StridedSliceParams{{0, 0, 2, 10}, {1, 8, 32, 18}, {1, 2, 1, 2}, {0, 0, 1, 0}, {0, 0, 0, 1}, {}, {}, {}}, StridedSliceParams{{0, 0, 10}, {0, 32, 18}, {1, 1, 1}, {1, 1, 0}, {1, 1, 0}, {}, {}, {1, 0, 0}}, - StridedSliceParams{{0, 4, 10}, {1, 8, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}}}; + StridedSliceParams{{0, 4, 10}, {1, 8, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}}, + StridedSliceParams{{0, 4}, {0, 5}, {1, 1}, {0}, {0}, {0}, {0}, {1}}}; const std::vector inputShapesStatic4D = {{1, 5, 32, 32}, {2, 5, 32, 48}}; From f4e1ef5a30b4344981c02e8e3e88b6ae84d72c31 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 16 Jan 2024 13:23:25 +0100 Subject: [PATCH 022/122] [DOCS] Updating options for torch.compile (#22006) * Options for torch.compile * Update docs/articles_en/openvino_workflow/torch_compile.rst Co-authored-by: Sebastian Golebiewski * Update docs/articles_en/openvino_workflow/torch_compile.rst Co-authored-by: Sebastian Golebiewski * Update docs/articles_en/openvino_workflow/torch_compile.rst * Update docs/articles_en/openvino_workflow/torch_compile.rst * Apply suggestions from code review * update options --------- Co-authored-by: Karol Blaszczak --- .../openvino_workflow/torch_compile.rst | 98 +++++++++++++------ 1 file changed, 69 insertions(+), 29 deletions(-) diff --git a/docs/articles_en/openvino_workflow/torch_compile.rst b/docs/articles_en/openvino_workflow/torch_compile.rst index 44cf74bd7ea9fd..ca52ef34d33f09 100644 --- a/docs/articles_en/openvino_workflow/torch_compile.rst +++ b/docs/articles_en/openvino_workflow/torch_compile.rst @@ -5,7 +5,7 @@ PyTorch Deployment via "torch.compile" -The ``torch.compile`` feature enables you to use OpenVINO for PyTorch-native applications. +The ``torch.compile`` feature enables you to use OpenVINO for PyTorch-native applications. It speeds up PyTorch code by JIT-compiling it into optimized kernels. 
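For orientation before the step-by-step details below, the whole workflow fits in a few lines; a minimal sketch assuming the ``torch`` and ``openvino`` packages are installed (the toy model is illustrative only):

.. code-block:: python

    import torch
    import openvino.torch  # registers the "openvino" backend for torch.compile

    # A throwaway model, just to exercise the compile path.
    model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
    compiled = torch.compile(model, backend="openvino")
    out = compiled(torch.randn(1, 8))  # first call triggers capture and compilation
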
By default, Torch code runs in eager-mode, but with the use of ``torch.compile`` it goes through the following steps: @@ -20,7 +20,7 @@ By default, Torch code runs in eager-mode, but with the use of ``torch.compile`` How to Use -################# +#################### To use ``torch.compile``, you need to add an import statement and define one of the two available backends: @@ -38,7 +38,7 @@ To use ``torch.compile``, you need to add an import statement and define one of .. code-block:: console - import openvino.torch + import openvino.torch ... model = torch.compile(model, backend='openvino') @@ -68,20 +68,60 @@ To use ``torch.compile``, you need to add an import statement and define one of :align: center -Environment Variables -+++++++++++++++++++++++++++ -* **OPENVINO_TORCH_BACKEND_DEVICE**: enables selecting a specific hardware device to run the application. - By default, the OpenVINO backend for ``torch.compile`` runs PyTorch applications using the CPU. Setting - this variable to GPU.0, for example, will make the application use the integrated graphics processor instead. -* **OPENVINO_TORCH_MODEL_CACHING**: enables saving the optimized model files to a hard drive, after the first application run. - This makes them available for the following application executions, reducing the first-inference latency. - By default, this variable is set to ``False``. Setting it to ``True`` enables caching. -* **OPENVINO_TORCH_CACHE_DIR**: enables defining a custom directory for the model files (if model caching set to ``True``). - By default, the OpenVINO IR is saved in the ``cache`` sub-directory, created in the application's root directory. +Options +++++++++++++++++++++ + +It is possible to use additional arguments for ``torch.compile`` to set the backend device, +enable model caching, set the cache directory etc. You can use a dictionary of the available options: + +* ``device`` - enables selecting a specific hardware device to run the application. + By default, the OpenVINO backend for ``torch.compile`` runs PyTorch applications + on CPU. If you set this variable to ``GPU.0``, for example, the application will + use the integrated graphics processor instead. +* ``model_caching`` - enables saving the optimized model files to a hard drive, + after the first application run. This makes them available for the following + application executions, reducing the first-inference latency. By default, this + variable is set to ``False``. Set it to ``True`` to enable caching. +* ``cache_dir`` - enables defining a custom directory for the model files (if + ``model_caching`` is set to ``True``). By default, the OpenVINO IR is saved + in the cache sub-directory, created in the application's root directory. +* ``config`` - enables passing any OpenVINO configuration option as a dictionary + to this variable. For details on the various options, refer to the + :ref:`OpenVINO Advanced Features `. + +See the example below for details: + +.. code-block:: python + + model = torch.compile(model, backend="openvino", options = {"device" : "CPU", "model_caching" : True, "cache_dir": "./model_cache"}) + +You can also set OpenVINO specific configuration options by adding them as a dictionary under ``config`` key in ``options``: + +.. code-block:: python + + opts = {"device" : "CPU", "config" : {"PERFORMANCE_HINT" : "LATENCY"}} + model = torch.compile(model, backend="openvino", options=opts) + + +.. important:: + + The environment variables used in the previous release are still available but are not + recommended. 
They will be removed fully in future releases. + + .. dropdown:: Click to view the deprecated options. + + * ``OPENVINO_TORCH_BACKEND_DEVICE`` - enables selecting a specific hardware device to run the application. + By default, the OpenVINO backend for ``torch.compile`` runs PyTorch applications using the CPU. Setting + this variable to ``GPU.0``, for example, will make the application use the integrated graphics processor instead. + * ``OPENVINO_TORCH_MODEL_CACHING``- enables saving the optimized model files to a hard drive, after the first application run. + This makes them available for the following application executions, reducing the first-inference latency. + By default, this variable is set to ``False``. Setting it to ``True`` enables caching. + * ``OPENVINO_TORCH_CACHE_DIR``- enables defining a custom directory for the model files (if ``model_caching`` is set to ``True``). + By default, the OpenVINO IR is saved in the ``cache`` sub-directory, created in the application's root directory. Windows support -++++++++++++++++++++++++++ ++++++++++++++++++++++ Currently, PyTorch does not support ``torch.compile`` feature on Windows officially. However, it can be accessed by running the below instructions: @@ -112,10 +152,10 @@ the below instructions: Support for Automatic1111 Stable Diffusion WebUI +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -Automatic1111 Stable Diffusion WebUI is an open-source repository that hosts a browser-based interface for the Stable Diffusion -based image generation. It allows users to create realistic and creative images from text prompts. -Stable Diffusion WebUI is supported on Intel CPUs, Intel integrated GPUs, and Intel discrete GPUs by leveraging OpenVINO -``torch.compile`` capability. Detailed instructions are available in +Automatic1111 Stable Diffusion WebUI is an open-source repository that hosts a browser-based interface for the Stable Diffusion +based image generation. It allows users to create realistic and creative images from text prompts. +Stable Diffusion WebUI is supported on Intel CPUs, Intel integrated GPUs, and Intel discrete GPUs by leveraging OpenVINO +``torch.compile`` capability. Detailed instructions are available in `Stable Diffusion WebUI repository. `__ @@ -125,10 +165,10 @@ Architecture The ``torch.compile`` feature is part of PyTorch 2.0, and is based on: * **TorchDynamo** - a Python-level JIT that hooks into the frame evaluation API in CPython, - (PEP 523) to dynamically modify Python bytecode right before it is executed (PyTorch operators - that cannot be extracted to FX graph are executed in the native Python environment). - It maintains the eager-mode capabilities using - `Guards `__ to ensure the + (PEP 523) to dynamically modify Python bytecode right before it is executed (PyTorch operators + that cannot be extracted to FX graph are executed in the native Python environment). + It maintains the eager-mode capabilities using + `Guards `__ to ensure the generated graphs are valid. * **AOTAutograd** - generates the backward graph corresponding to the forward graph captured by TorchDynamo. @@ -138,15 +178,15 @@ The ``torch.compile`` feature is part of PyTorch 2.0, and is based on: -When the PyTorch module is wrapped with ``torch.compile``, TorchDynamo traces the module and +When the PyTorch module is wrapped with ``torch.compile``, TorchDynamo traces the module and rewrites Python bytecode to extract sequences of PyTorch operations into an FX Graph, -which can be optimized by the OpenVINO backend. 
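The FX graph mentioned here is an ordinary, inspectable object; a small sketch of what a backend receives, using ``torch.fx.symbolic_trace`` as a stand-in for the capture Dynamo performs (the traced function is illustrative only):

.. code-block:: python

    import torch

    def fn(x):
        # A trivial function, just to produce a graph worth printing.
        return torch.relu(x) + 1.0

    gm = torch.fx.symbolic_trace(fn)  # a GraphModule, akin to what Dynamo hands to backends
    print(gm.graph)                   # the op sequence a backend such as OpenVINO optimizes
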
The Torch FX graphs are first converted to -inlined FX graphs and the graph partitioning module traverses inlined FX graph to identify -operators supported by OpenVINO. +which can be optimized by the OpenVINO backend. The Torch FX graphs are first converted to +inlined FX graphs and the graph partitioning module traverses inlined FX graph to identify +operators supported by OpenVINO. -All the supported operators are clustered into OpenVINO submodules, converted to the OpenVINO -graph using OpenVINO's PyTorch decoder, and executed in an optimized manner using OpenVINO runtime. -All unsupported operators fall back to the native PyTorch runtime on CPU. If the subgraph +All the supported operators are clustered into OpenVINO submodules, converted to the OpenVINO +graph using OpenVINO's PyTorch decoder, and executed in an optimized manner using OpenVINO runtime. +All unsupported operators fall back to the native PyTorch runtime on CPU. If the subgraph fails during OpenVINO conversion, the subgraph falls back to PyTorch's default inductor backend. From 2126eeac1ff52f595f8267312b3294dae477fa05 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Tue, 16 Jan 2024 14:47:34 +0100 Subject: [PATCH 023/122] [CPU] Reimplement TopologicalSort (#21911) to get rid of extra Node class member variables --- src/plugins/intel_cpu/src/graph.cpp | 66 ++++++++++++----------------- src/plugins/intel_cpu/src/graph.h | 2 - src/plugins/intel_cpu/src/node.cpp | 4 -- src/plugins/intel_cpu/src/node.h | 2 - 4 files changed, 27 insertions(+), 47 deletions(-) diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index d254f36a3efbac..1520512aa2a870 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -1299,54 +1299,42 @@ void Graph::Infer(SyncInferRequest* request) { if (infer_count != -1) infer_count++; } -void Graph::VisitNode(NodePtr node, std::vector& sortedNodes) { - if (node->temporary) { - return; - } - - if (node->permanent) { - return; - } - - node->temporary = true; - - for (size_t i = 0; i < node->getChildEdges().size(); i++) { - VisitNode(node->getChildEdgeAt(i)->getChild(), sortedNodes); - } - - node->permanent = true; - node->temporary = false; - - sortedNodes.insert(sortedNodes.begin(), node); -} - void Graph::SortTopologically() { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::intel_cpu_LT, "Graph::SortTopologically"); - std::vector unsorted; - std::vector sorted; + auto sort = [](const std::vector& nodes) { + std::unordered_set visited; + visited.reserve(nodes.size()); + std::vector sorted; + sorted.reserve(nodes.size()); - for (size_t i = 0; i < graphNodes.size(); i++) { - NodePtr node = graphNodes[i]; + std::function visit; + visit = [&visited, &sorted, &visit](const NodePtr node) { + const bool inserted = visited.insert(node).second; + if (!inserted) + return; // already visited - node->permanent = false; - node->temporary = false; - - unsorted.push_back(node); - } + for (size_t i = 0; i < node->getChildEdges().size(); i++) { + visit(node->getChildEdgeAt(i)->getChild()); + } - while (!unsorted.empty()) { - NodePtr node = unsorted.at(0); - unsorted.erase(unsorted.begin()); + sorted.push_back(node); + }; - VisitNode(node, sorted); - } + for (const auto& node : nodes) { + visit(node); + } - for (size_t i = 0; i < sorted.size(); i++) - sorted[i]->execIndex = static_cast(i); + return sorted; + }; - graphNodes.erase(graphNodes.begin(), graphNodes.end()); - graphNodes.assign(sorted.begin(), sorted.end()); + // as a first step sort in 
reverse topological order to avoid insertions at the front of the vector
+    graphNodes = sort(graphNodes);
+    // reverse to the actual topological order
+    std::reverse(graphNodes.begin(), graphNodes.end());
+    // number the nodes based on topological order
+    for (size_t i = 0; i < graphNodes.size(); i++)
+        graphNodes[i]->execIndex = static_cast(i);
 
     // TODO: Sort in/out edges by port index because of backward compatibility
     // A lot of plugin logic is built on top of the assumption that the index in
diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h
index 029c33ca12dfde..035c1b817e9129 100644
--- a/src/plugins/intel_cpu/src/graph.h
+++ b/src/plugins/intel_cpu/src/graph.h
@@ -191,8 +191,6 @@ class Graph {
     }
 
 protected:
-    void VisitNode(NodePtr node, std::vector& sortedNodes);
-
     void ForgetGraphData() {
         status = Status::NotReady;
diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp
index 660d2b8d6bd7b2..85e6817c5a112b 100644
--- a/src/plugins/intel_cpu/src/node.cpp
+++ b/src/plugins/intel_cpu/src/node.cpp
@@ -76,8 +76,6 @@ Node::Node(const std::shared_ptr& op,
            const GraphContext::CPtr ctx,
            const ShapeInferFactory& shapeInferFactory)
     : selectedPrimitiveDescriptorIndex(-1),
-      permanent(false),
-      temporary(false),
       constant(ConstantType::NoConst),
       context(ctx),
       algorithm(Algorithm::Default),
@@ -182,8 +180,6 @@ Node::Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx)
     : selectedPrimitiveDescriptorIndex(-1),
-      permanent(false),
-      temporary(false),
       constant(ConstantType::NoConst),
       context(ctx),
       fusingPort(-1),
diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h
index da529fbefacde7..1a8cecf9b112f1 100644
--- a/src/plugins/intel_cpu/src/node.h
+++ b/src/plugins/intel_cpu/src/node.h
@@ -609,8 +609,6 @@ class Node {
     Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx);
 
     int selectedPrimitiveDescriptorIndex = -1;
-    bool permanent = false;
-    bool temporary = false;
 
     enum class InPlaceType {
         Unknown,

From 0ad801a184c0de19c40a98d016107ec7b12edc25 Mon Sep 17 00:00:00 2001
From: Jan Iwaszkiewicz 
Date: Tue, 16 Jan 2024 15:08:05 +0100
Subject: [PATCH 024/122] [PyOV][SAMPLES] Fix bugbear issue B038 (#22183)

---
 samples/python/benchmark/bert_benchmark/bert_benchmark.py        | 2 +-
 .../benchmark/throughput_benchmark/throughput_benchmark.py       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/samples/python/benchmark/bert_benchmark/bert_benchmark.py b/samples/python/benchmark/bert_benchmark/bert_benchmark.py
index b15bbd48b34c84..f81eac29387182 100755
--- a/samples/python/benchmark/bert_benchmark/bert_benchmark.py
+++ b/samples/python/benchmark/bert_benchmark/bert_benchmark.py
@@ -57,7 +57,7 @@ def main():
     sst2_sentences = sst2['validation']['sentence']
     # Warm up
     encoded_warm_up = dict(tokenizer('Warm up sentence is here.', return_tensors='np'))
-    for _ in ireqs:
+    for _ in range(len(ireqs)):
         ireqs.start_async(encoded_warm_up)
     ireqs.wait_all()
     # Benchmark
diff --git a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
index 0573642fb2fdaa..ce9431e3e5121d 100755
--- a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
+++ b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
@@ -52,7 +52,7 @@ def main():
         for model_input in compiled_model.inputs:
             fill_tensor_random(ireq.get_tensor(model_input))
     # Warm up
-    for _ in ireqs:
+    for _ in range(len(ireqs)):
         ireqs.start_async()
     ireqs.wait_all()
     # Benchmark for seconds_to_run seconds and at least niter iterations
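A quick aside on the TopologicalSort rewrite in patch 023: the new lambda replaces the per-node ``permanent``/``temporary`` flags with an external ``visited`` set, collects nodes in DFS post-order over child edges, and reverses the result. A minimal Python sketch of the same algorithm, with a hypothetical ``node.children`` attribute standing in for ``getChildEdgeAt(i)->getChild()``:

.. code-block:: python

    def sort_topologically(nodes):
        """DFS post-order over child edges, then one reverse."""
        visited = set()
        ordered = []

        def visit(node):
            if node in visited:
                return  # already visited, mirrors visited.insert(node).second
            visited.add(node)
            for child in node.children:  # stand-in for getChildEdgeAt(i)->getChild()
                visit(child)
            ordered.append(node)  # post-order: a node lands after all its children

        for node in nodes:
            visit(node)
        ordered.reverse()  # reverse post-order is a topological order
        return ordered

Appending and then reversing once costs O(1) per node, whereas the removed ``sortedNodes.insert(sortedNodes.begin(), node)`` paid O(n) per insertion, which is what the comment about avoiding insertions at the front of the vector refers to.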
From b3c2c386dc527f5b877680fbe1215b6a3e05968f Mon Sep 17 00:00:00 2001
From: Katarzyna Mitrus 
Date: Tue, 16 Jan 2024 15:26:43 +0100
Subject: [PATCH 025/122] [FP8] Implementation of FP8 element types
 (ov::element::f8e4m3 and ov::element::f8e5m2) (#21608)

* FP8 element types init
* Remove redundant union
* Replace using ov
* Update fundamental types
* Update class name check
* Update tests
* Remove redundant sign
* Expose f8 types in Python
* Add to py to dtype map
* Style alignment
* Align python style
* Update test values
* Remove constexpr from_bits to fix warning
* Add trivially constructible asserts and common constexpr
* Align python tests opset
* Update f8e4m3 <-> float conversion
* f8e5m2 class update
* Add f8e5m2 unit test
* Add to string conversion tests
* Rename class f8e4m3 -> float8_e4m3
* Rename f8e5m2 -> float8_e5m2
* Remove size() and to_string from float8
- size() can be replaced by compile-time sizeof
- to_string can be replaced by std::to_string()
* float8 E5M2 remove unused constexpr value
* Fix union initialization and ncc style rules
* Fix test issues
* Use NaN from std::numeric_limits instead of macro
- minor refactor of float8_e4m3
* Update nf4 usage in element_type.cpp
* Sync openvino.style with master
* Update f8e5m2 test

---------

Co-authored-by: Raasz, Pawel 
---
 .../ncc_naming_style/openvino.style           |   4 +-
 src/bindings/c/include/openvino/c/ov_common.h |   4 +-
 src/bindings/c/src/ov_tensor.cpp              |   4 +-
 .../python/src/pyopenvino/core/common.cpp     |  28 +--
 .../pyopenvino/graph/types/element_type.cpp   |   2 +
 .../python/tests/test_graph/test_constant.py  | 122 ++++++++++++
 src/core/include/ngraph/type/element_type.hpp |   2 +
 .../openvino/core/type/element_type.hpp       |  14 ++
 .../core/type/element_type_traits.hpp         |  10 +
 .../openvino/core/type/float8_e4m3.hpp        | 157 ++++++++++++++++
 .../openvino/core/type/float8_e5m2.hpp        | 157 ++++++++++++++++
 src/core/include/openvino/op/constant.hpp     |  12 ++
 .../openvino/reference/utils/type_util.hpp    |   3 +-
 src/core/src/op/constant.cpp                  |  22 ++-
 src/core/src/op/convert.cpp                   |   5 +-
 src/core/src/pass/visualize_tree.cpp          |   2 +
 src/core/src/type/element_type.cpp            |  79 ++++----
 src/core/src/type/float8_e4m3.cpp             | 135 ++++++++++++++
 src/core/src/type/float8_e5m2.cpp             |  47 +++++
 src/core/tests/element_type.cpp               |   4 +
 src/core/tests/eval.cpp                       | 112 +++++++++--
 src/core/tests/float8_e4m3.cpp                | 175 +++++++++++++++++
 src/core/tests/float8_e5m2.cpp                | 176 ++++++++++++++++++
 .../op_reference/base_reference_test.cpp      |  16 ++
 .../functional/op_reference/constant.cpp      |  44 +++++
 .../tests/functional/op_reference/convert.cpp | 157 ++++++++++++++++
 .../include/common_test_utils/data_utils.hpp  |   8 +
 27 files changed, 1426 insertions(+), 75 deletions(-)
 create mode 100644 src/core/include/openvino/core/type/float8_e4m3.hpp
 create mode 100644 src/core/include/openvino/core/type/float8_e5m2.hpp
 create mode 100644 src/core/src/type/float8_e4m3.cpp
 create mode 100644 src/core/src/type/float8_e5m2.cpp

diff --git a/cmake/developer_package/ncc_naming_style/openvino.style b/cmake/developer_package/ncc_naming_style/openvino.style
index ebf9ef078d4ba7..6608795381e4a1 100644
--- a/cmake/developer_package/ncc_naming_style/openvino.style
+++ b/cmake/developer_package/ncc_naming_style/openvino.style
@@ -1,6 +1,6 @@
 # custom OpenVINO values
 CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN|OPENVINO_OP)$'
-ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
+ClassName: '^([A-Z][\w]+|b?float16|float8_e4m3|float8_e5m2|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
 StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair|stat)$'
 FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$'
 Namespace: '^([a-z\d_]*|InferenceEngine)$'
@@ -18,7 +18,7 @@ VariableReference: '^\w+$'
 
 EnumName: '^[A-Z][\w]+$'
 # excepts element_type
-EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$'
+EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|f8e4m3|f8e5m2|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$'
 # TODO: align
 UsingDeclaration: '^.*$'
 TypedefName: '^.*$'
diff --git a/src/bindings/c/include/openvino/c/ov_common.h b/src/bindings/c/include/openvino/c/ov_common.h
index bbbf3dd35c2db1..f1be5750c3bfec 100644
--- a/src/bindings/c/include/openvino/c/ov_common.h
+++ b/src/bindings/c/include/openvino/c/ov_common.h
@@ -187,6 +187,8 @@ typedef enum {
     U32,     //!< u32 element type
     U64,     //!< u64 element type
     NF4,     //!< nf4 element type
+    F8E4M3,  //!< f8e4m3 element type
+    F8E5M2,  //!< f8e5m2 element type
 } ov_element_type_e;
 
 /**
@@ -210,4 +212,4 @@ ov_free(const char* content);
  * @ingroup ov_base_c_api
  */
 OPENVINO_C_API(const char*)
-ov_get_last_err_msg();
\ No newline at end of file
+ov_get_last_err_msg();
diff --git a/src/bindings/c/src/ov_tensor.cpp b/src/bindings/c/src/ov_tensor.cpp
index a3372583637ff5..d81ab949d6bffb 100644
--- a/src/bindings/c/src/ov_tensor.cpp
+++ b/src/bindings/c/src/ov_tensor.cpp
@@ -24,7 +24,9 @@ const std::map element_type_map = {
     {ov_element_type_e::U16, ov::element::u16},
     {ov_element_type_e::U32, ov::element::u32},
     {ov_element_type_e::U64, ov::element::u64},
-    {ov_element_type_e::NF4, ov::element::nf4}};
+    {ov_element_type_e::NF4, ov::element::nf4},
+    {ov_element_type_e::F8E4M3, ov::element::f8e4m3},
+    {ov_element_type_e::F8E5M2, ov::element::f8e5m2}};
 
 inline ov_element_type_e find_ov_element_type_e(ov::element::Type type) {
     for (auto iter = element_type_map.begin(); iter != element_type_map.end(); iter++) {
diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp
index a8603423ecdf0c..945a0c5f777e89 100644
--- a/src/bindings/python/src/pyopenvino/core/common.cpp
+++ b/src/bindings/python/src/pyopenvino/core/common.cpp
@@ -19,24 +19,16 @@ namespace type_helpers {
 
 const std::map& ov_type_to_dtype() {
     static const std::map ov_type_to_dtype_mapping = {
-        {ov::element::f16, py::dtype("float16")},
-        {ov::element::bf16, py::dtype("float16")},
-        {ov::element::f32, py::dtype("float32")},
-        {ov::element::f64, py::dtype("float64")},
-        {ov::element::i8, py::dtype("int8")},
-        {ov::element::i16, py::dtype("int16")},
-        {ov::element::i32, py::dtype("int32")},
-        {ov::element::i64, py::dtype("int64")},
-        {ov::element::u8, py::dtype("uint8")},
-        {ov::element::u16, py::dtype("uint16")},
-        {ov::element::u32, py::dtype("uint32")},
-        {ov::element::u64,
py::dtype("uint64")}, - {ov::element::boolean, py::dtype("bool")}, - {ov::element::u1, py::dtype("uint8")}, - {ov::element::u4, py::dtype("uint8")}, - {ov::element::nf4, py::dtype("uint8")}, - {ov::element::i4, py::dtype("int8")}, - {ov::element::string, py::dtype("bytes_")}, + {ov::element::f16, py::dtype("float16")}, {ov::element::bf16, py::dtype("float16")}, + {ov::element::f32, py::dtype("float32")}, {ov::element::f64, py::dtype("float64")}, + {ov::element::i8, py::dtype("int8")}, {ov::element::i16, py::dtype("int16")}, + {ov::element::i32, py::dtype("int32")}, {ov::element::i64, py::dtype("int64")}, + {ov::element::u8, py::dtype("uint8")}, {ov::element::u16, py::dtype("uint16")}, + {ov::element::u32, py::dtype("uint32")}, {ov::element::u64, py::dtype("uint64")}, + {ov::element::boolean, py::dtype("bool")}, {ov::element::u1, py::dtype("uint8")}, + {ov::element::u4, py::dtype("uint8")}, {ov::element::nf4, py::dtype("uint8")}, + {ov::element::i4, py::dtype("int8")}, {ov::element::f8e4m3, py::dtype("uint8")}, + {ov::element::f8e5m2, py::dtype("uint8")}, {ov::element::string, py::dtype("bytes_")}, }; return ov_type_to_dtype_mapping; } diff --git a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp index 595d06fb073145..dc7c484012c53c 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp +++ b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp @@ -50,6 +50,8 @@ void regclass_graph_Type(py::module m) { type.attr("u64") = ov::element::u64; type.attr("bf16") = ov::element::bf16; type.attr("nf4") = ov::element::nf4; + type.attr("f8e4m3") = ov::element::f8e4m3; + type.attr("f8e5m2") = ov::element::f8e5m2; type.attr("string") = ov::element::string; type.def("__hash__", &ov::element::Type::hash); diff --git a/src/bindings/python/tests/test_graph/test_constant.py b/src/bindings/python/tests/test_graph/test_constant.py index 73a288e943b801..6452084e377f09 100644 --- a/src/bindings/python/tests/test_graph/test_constant.py +++ b/src/bindings/python/tests/test_graph/test_constant.py @@ -411,3 +411,125 @@ def test_memory_sharing(shared_flag): else: assert not np.array_equal(ov_const.data, arr) assert not np.shares_memory(arr, ov_const.data) + + +@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [ + (Type.f32, np.float32), + (Type.f16, np.float16), +]) +def test_float_to_f8e5m2_constant(ov_type, numpy_dtype): + from openvino.runtime import opset12 as opset + import openvino as ov + data = np.array([4.75, 4.5, -5.25, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, + 0.6, 0.7, 0.8, 0.9, 1, -0.0, -0.1, -0.2, -0.3, + -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0, 0.0000152587890625, 448, 500, 512, 57344], dtype=numpy_dtype) + + compressed_const = opset.constant(data, dtype=ov.Type.f8e5m2, name="f8e5m2_constant") + convert = opset.convert(compressed_const, data.dtype) + parameter = opset.parameter(ov.PartialShape([-1]), ov_type) + add_op = opset.add(parameter, convert) + model = ov.Model([add_op], [parameter]) + + compiled = ov.compile_model(model) + tensor = np.zeros(data.shape, dtype=numpy_dtype) + result = compiled(tensor)[0] + + target = [5.0, 4.0, -5.0, 0.0, 0.09375, 0.1875, 0.3125, 0.375, 0.5, 0.625, 0.75, + 0.75, 0.875, 1.0, -0.0, -0.09375, -0.1875, -0.3125, -0.375, + -0.5, -0.625, -0.75, -0.75, -0.875, -1.0, 0.0000152587890625, + 448, 512, 512, 57344] + target = np.array(target, dtype=numpy_dtype) + + assert np.allclose(result, target) + + +@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [ + (Type.f32, 
np.float32), + (Type.f16, np.float16), +]) +def test_float_to_f8e4m3_constant(ov_type, numpy_dtype): + from openvino.runtime import opset12 as opset + import openvino as ov + data = np.array([4.75, 4.5, -5.25, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, + 0.6, 0.7, 0.8, 0.9, 1, -0.0, -0.1, -0.2, -0.3, + -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1, 448, 512], dtype=numpy_dtype) + + compressed_const = opset.constant(data, dtype=ov.Type.f8e4m3, name="f8e4m3_constant") + convert = opset.convert(compressed_const, data.dtype) + parameter = opset.parameter(ov.PartialShape([-1]), ov_type) + add_op = opset.add(parameter, convert) + model = ov.Model([add_op], [parameter]) + + compiled = ov.compile_model(model) + tensor = np.zeros(data.shape, dtype=numpy_dtype) + result = compiled(tensor)[0] + + target = [5.0, 4.5, -5.0, 0.0, 0.1015625, 0.203125, 0.3125, + 0.40625, 0.5, 0.625, 0.6875, 0.8125, 0.875, 1, + -0, -0.1015625, -0.203125, -0.3125, -0.40625, -0.5, -0.625, + -0.6875, -0.8125, -0.875, -1, 448, np.nan] + target = np.array(target, dtype=numpy_dtype) + + assert np.allclose(result, target, equal_nan=True) + + +@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [ + (Type.f32, np.float32), + (Type.f16, np.float16), +]) +def test_float_to_f8e5m2_convert(ov_type, numpy_dtype): + from openvino.runtime import opset12 as opset + import openvino as ov + data = np.array([4.75, 4.5, -5.25, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, + 0.6, 0.7, 0.8, 0.9, 1, -0.0, -0.1, -0.2, -0.3, + -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0, 0.0000152587890625, 448, 500, 512, 57344], dtype=numpy_dtype) + + compressed_const = opset.constant(data, dtype=ov_type, name="fx_constant") + convert_to_fp8 = opset.convert(compressed_const, Type.f8e5m2) + convert_back = opset.convert(convert_to_fp8, ov_type) + parameter = opset.parameter(ov.PartialShape([-1]), ov_type) + add_op = opset.add(parameter, convert_back) + model = ov.Model([add_op], [parameter]) + + compiled = ov.compile_model(model) + tensor = np.zeros(data.shape, dtype=numpy_dtype) + result = compiled(tensor)[0] + + target = [5.0, 4.0, -5.0, 0.0, 0.09375, 0.1875, 0.3125, 0.375, 0.5, 0.625, 0.75, + 0.75, 0.875, 1.0, -0.0, -0.09375, -0.1875, -0.3125, -0.375, + -0.5, -0.625, -0.75, -0.75, -0.875, -1.0, 0.0000152587890625, + 448, 512, 512, 57344] + target = np.array(target, dtype=numpy_dtype) + + assert np.allclose(result, target) + + +@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [ + (Type.f32, np.float32), + (Type.f16, np.float16), +]) +def test_float_to_f8e4m3_convert(ov_type, numpy_dtype): + from openvino.runtime import opset12 as opset + import openvino as ov + data = np.array([4.75, 4.5, -5.25, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, + 0.6, 0.7, 0.8, 0.9, 1, -0.0, -0.1, -0.2, -0.3, + -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1, 448, 512], dtype=numpy_dtype) + + compressed_const = opset.constant(data, dtype=ov_type, name="fx_constant") + convert_to_fp8 = opset.convert(compressed_const, Type.f8e4m3) + convert_back = opset.convert(convert_to_fp8, ov_type) + parameter = opset.parameter(ov.PartialShape([-1]), ov_type) + add_op = opset.add(parameter, convert_back) + model = ov.Model([add_op], [parameter]) + + compiled = ov.compile_model(model) + tensor = np.zeros(data.shape, dtype=numpy_dtype) + result = compiled(tensor)[0] + + target = [5.0, 4.5, -5.0, 0.0, 0.1015625, 0.203125, 0.3125, + 0.40625, 0.5, 0.625, 0.6875, 0.8125, 0.875, 1, + -0, -0.1015625, -0.203125, -0.3125, -0.40625, -0.5, -0.625, + -0.6875, -0.8125, -0.875, -1, 448, np.nan] + target = np.array(target, dtype=numpy_dtype) + + assert np.allclose(result, 
target, equal_nan=True)
diff --git a/src/core/include/ngraph/type/element_type.hpp b/src/core/include/ngraph/type/element_type.hpp
index cd125409db5bc6..3ff94063d82d65 100644
--- a/src/core/include/ngraph/type/element_type.hpp
+++ b/src/core/include/ngraph/type/element_type.hpp
@@ -35,6 +35,8 @@ using ov::element::dynamic;
 using ov::element::f16;
 using ov::element::f32;
 using ov::element::f64;
+using ov::element::f8e4m3;
+using ov::element::f8e5m2;
 using ov::element::i16;
 using ov::element::i32;
 using ov::element::i4;
diff --git a/src/core/include/openvino/core/type/element_type.hpp b/src/core/include/openvino/core/type/element_type.hpp
index 88e79a75d25174..bea57a6ce98479 100644
--- a/src/core/include/openvino/core/type/element_type.hpp
+++ b/src/core/include/openvino/core/type/element_type.hpp
@@ -20,6 +20,8 @@
 #include "openvino/core/rtti.hpp"
 #include "openvino/core/type/bfloat16.hpp"
 #include "openvino/core/type/float16.hpp"
+#include "openvino/core/type/float8_e4m3.hpp"
+#include "openvino/core/type/float8_e5m2.hpp"
 
 /**
  * @defgroup ov_element_cpp_api Element types
@@ -52,6 +54,8 @@ enum class Type_t {
     u32,       //!< u32 element type
     u64,       //!< u64 element type
     nf4,       //!< nf4 element type
+    f8e4m3,    //!< f8e4m3 element type
+    f8e5m2,    //!< f8e5m2 element type
     string     //!< string element type
 };
 
@@ -182,6 +186,12 @@ constexpr Type u64(Type_t::u64);
 /// \brief nf4 element type
 /// \ingroup ov_element_cpp_api
 constexpr Type nf4(Type_t::nf4);
+/// \brief f8e4m3 element type
+/// \ingroup ov_element_cpp_api
+constexpr Type f8e4m3(Type_t::f8e4m3);
+/// \brief f8e5m2 element type
+/// \ingroup ov_element_cpp_api
+constexpr Type f8e5m2(Type_t::f8e5m2);
 /// \brief string element type
 /// \ingroup ov_element_cpp_api
 constexpr Type string(Type_t::string);
@@ -219,6 +229,10 @@ OPENVINO_API Type from();
 template <>
 OPENVINO_API Type from();
 template <>
+OPENVINO_API Type from();
+template <>
+OPENVINO_API Type from();
+template <>
 OPENVINO_API Type from();
 
 OPENVINO_API Type fundamental_type_for(const Type& type);
diff --git a/src/core/include/openvino/core/type/element_type_traits.hpp b/src/core/include/openvino/core/type/element_type_traits.hpp
index 33f0bbd059a99d..fefbac51866417 100644
--- a/src/core/include/openvino/core/type/element_type_traits.hpp
+++ b/src/core/include/openvino/core/type/element_type_traits.hpp
@@ -98,6 +98,16 @@ struct element_type_traits {
     using value_type = int8_t;
 };
 
+template <>
+struct element_type_traits {
+    using value_type = ov::float8_e4m3;
+};
+
+template <>
+struct element_type_traits {
+    using value_type = ov::float8_e5m2;
+};
+
 template <>
 struct element_type_traits {
     using value_type = std::string;
diff --git a/src/core/include/openvino/core/type/float8_e4m3.hpp b/src/core/include/openvino/core/type/float8_e4m3.hpp
new file mode 100644
index 00000000000000..af95d183d69129
--- /dev/null
+++ b/src/core/include/openvino/core/type/float8_e4m3.hpp
@@ -0,0 +1,157 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "openvino/core/core_visibility.hpp"
+
+namespace ov {
+
+/**
+ * @brief Class to represent the f8e4m3 type.
+ */ +class OPENVINO_API float8_e4m3 { +public: + float8_e4m3() = default; + float8_e4m3(uint32_t sign, uint32_t biased_exponent, uint32_t fraction); + float8_e4m3(float value); + + template + explicit float8_e4m3(I value) : m_value{float8_e4m3{static_cast(value)}.m_value} {} + + template + bool operator==(const T& other) const; + template + bool operator!=(const T& other) const { + return !(*this == other); + } + + template + bool operator<(const T& other) const; + template + bool operator<=(const T& other) const; + template + bool operator>(const T& other) const; + template + bool operator>=(const T& other) const; + template + float8_e4m3 operator+(const T& other) const; + template + float8_e4m3 operator+=(const T& other); + template + float8_e4m3 operator-(const T& other) const; + template + float8_e4m3 operator-=(const T& other); + template + float8_e4m3 operator*(const T& other) const; + template + float8_e4m3 operator*=(const T& other); + template + float8_e4m3 operator/(const T& other) const; + template + float8_e4m3 operator/=(const T& other); + + operator float() const; + + static constexpr float8_e4m3 from_bits(uint8_t bits) { + return float8_e4m3(bits, true); + } + uint8_t to_bits() const; + friend std::ostream& operator<<(std::ostream& out, const float8_e4m3& obj) { + out << static_cast(obj); + return out; + } + +private: + constexpr float8_e4m3(uint8_t x, bool) : m_value{x} {} + + uint8_t m_value; +}; + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4756) +#endif +template +bool float8_e4m3::operator==(const T& other) const { +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + return (static_cast(*this) == static_cast(other)); +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif +} + +template +bool float8_e4m3::operator<(const T& other) const { + return (static_cast(*this) < static_cast(other)); +} + +template +bool float8_e4m3::operator<=(const T& other) const { + return (static_cast(*this) <= static_cast(other)); +} + +template +bool float8_e4m3::operator>(const T& other) const { + return (static_cast(*this) > static_cast(other)); +} + +template +bool float8_e4m3::operator>=(const T& other) const { + return (static_cast(*this) >= static_cast(other)); +} + +template +float8_e4m3 float8_e4m3::operator+(const T& other) const { + return {static_cast(*this) + static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator+=(const T& other) { + return *this = *this + other; +} + +template +float8_e4m3 float8_e4m3::operator-(const T& other) const { + return {static_cast(*this) - static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator-=(const T& other) { + return *this = *this - other; +} + +template +float8_e4m3 float8_e4m3::operator*(const T& other) const { + return {static_cast(*this) * static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator*=(const T& other) { + return *this = *this * other; +} + +template +float8_e4m3 float8_e4m3::operator/(const T& other) const { + return {static_cast(*this) / static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator/=(const T& other) { + return *this = *this / other; +} +#if defined(_MSC_VER) +# pragma warning(pop) +#endif +} // namespace ov diff --git a/src/core/include/openvino/core/type/float8_e5m2.hpp b/src/core/include/openvino/core/type/float8_e5m2.hpp new file mode 100644 index 00000000000000..e3990de0c56169 --- /dev/null +++ b/src/core/include/openvino/core/type/float8_e5m2.hpp @@ -0,0 
+1,157 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "openvino/core/core_visibility.hpp" + +namespace ov { + +/** + * @brief Class to represent the f8e5m2 type. + */ +class OPENVINO_API float8_e5m2 { +public: + float8_e5m2() = default; + float8_e5m2(uint32_t sign, uint32_t biased_exponent, uint32_t fraction); + float8_e5m2(float value); + + template + explicit float8_e5m2(I value) : m_value{float8_e5m2{static_cast(value)}.m_value} {} + + template + bool operator==(const T& other) const; + template + bool operator!=(const T& other) const { + return !(*this == other); + } + + template + bool operator<(const T& other) const; + template + bool operator<=(const T& other) const; + template + bool operator>(const T& other) const; + template + bool operator>=(const T& other) const; + template + float8_e5m2 operator+(const T& other) const; + template + float8_e5m2 operator+=(const T& other); + template + float8_e5m2 operator-(const T& other) const; + template + float8_e5m2 operator-=(const T& other); + template + float8_e5m2 operator*(const T& other) const; + template + float8_e5m2 operator*=(const T& other); + template + float8_e5m2 operator/(const T& other) const; + template + float8_e5m2 operator/=(const T& other); + + operator float() const; + + static constexpr float8_e5m2 from_bits(uint8_t bits) { + return float8_e5m2(bits, true); + } + uint8_t to_bits() const; + friend std::ostream& operator<<(std::ostream& out, const float8_e5m2& obj) { + out << static_cast(obj); + return out; + } + +private: + constexpr float8_e5m2(uint8_t x, bool) : m_value{x} {} + + uint8_t m_value; +}; + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4756) +#endif +template +bool float8_e5m2::operator==(const T& other) const { +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + return (static_cast(*this) == static_cast(other)); +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif +} + +template +bool float8_e5m2::operator<(const T& other) const { + return (static_cast(*this) < static_cast(other)); +} + +template +bool float8_e5m2::operator<=(const T& other) const { + return (static_cast(*this) <= static_cast(other)); +} + +template +bool float8_e5m2::operator>(const T& other) const { + return (static_cast(*this) > static_cast(other)); +} + +template +bool float8_e5m2::operator>=(const T& other) const { + return (static_cast(*this) >= static_cast(other)); +} + +template +float8_e5m2 float8_e5m2::operator+(const T& other) const { + return {static_cast(*this) + static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator+=(const T& other) { + return *this = *this + other; +} + +template +float8_e5m2 float8_e5m2::operator-(const T& other) const { + return {static_cast(*this) - static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator-=(const T& other) { + return *this = *this - other; +} + +template +float8_e5m2 float8_e5m2::operator*(const T& other) const { + return {static_cast(*this) * static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator*=(const T& other) { + return *this = *this * other; +} + +template +float8_e5m2 float8_e5m2::operator/(const T& other) const { + return {static_cast(*this) / static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator/=(const T& other) { + return *this = *this / other; +} +#if defined(_MSC_VER) +# pragma 
warning(pop) +#endif +} // namespace ov diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index fe91da44baf6f4..ce089b2e6f7819 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -164,6 +164,12 @@ class OPENVINO_API Constant : public Op { case Type_t::nf4: fill_data(value); break; + case Type_t::f8e4m3: + fill_data(value); + break; + case Type_t::f8e5m2: + fill_data(value); + break; case Type_t::string: fill_data(value); break; @@ -882,6 +888,12 @@ class OPENVINO_API Constant : public Op { case Type_t::nf4: write_buffer(source); break; + case Type_t::f8e4m3: + write_buffer(source); + break; + case Type_t::f8e5m2: + write_buffer(source); + break; case Type_t::string: write_buffer(source); break; diff --git a/src/core/reference/include/openvino/reference/utils/type_util.hpp b/src/core/reference/include/openvino/reference/utils/type_util.hpp index 12291761612340..10d513d2fb8b3a 100644 --- a/src/core/reference/include/openvino/reference/utils/type_util.hpp +++ b/src/core/reference/include/openvino/reference/utils/type_util.hpp @@ -19,6 +19,7 @@ namespace ov { template constexpr bool is_floating_point() { using U = typename std::decay::type; - return std::is_floating_point::value || std::is_same::value || std::is_same::value; + return std::is_floating_point::value || std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value; } } // namespace ov diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp index 914324a5dc97c6..557c71d014f74d 100644 --- a/src/core/src/op/constant.cpp +++ b/src/core/src/op/constant.cpp @@ -221,8 +221,26 @@ struct ValueToString : ov::element::NotSupported { std::string Constant::convert_value_to_string(size_t index) const { using namespace ov::element; - return IfTypeOf::apply< - ValueToString>(get_element_type(), this, index); + return IfTypeOf::apply(get_element_type(), this, index); } size_t Constant::get_byte_size() const { diff --git a/src/core/src/op/convert.cpp b/src/core/src/op/convert.cpp index b48f2d5e433ba7..a79922dcacd42f 100644 --- a/src/core/src/op/convert.cpp +++ b/src/core/src/op/convert.cpp @@ -19,7 +19,8 @@ constexpr bool is_lp_type(const element::Type_t et) { return (et == element::i4) || (et == element::u1) || (et == element::u4) || (et == element::nf4); } -#define CONVERT_ET_LIST boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4 +#define CONVERT_ET_LIST \ + boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4, f8e4m3, f8e5m2 struct Evaluate : public element::NoAction { using element::NoAction::visit; @@ -173,6 +174,8 @@ bool Convert::has_evaluate() const { case element::u32: case element::u64: case element::nf4: + case element::f8e4m3: + case element::f8e5m2: return true; default: return false; diff --git a/src/core/src/pass/visualize_tree.cpp b/src/core/src/pass/visualize_tree.cpp index e981f7c4c95911..bcd3bcd1713390 100644 --- a/src/core/src/pass/visualize_tree.cpp +++ b/src/core/src/pass/visualize_tree.cpp @@ -376,6 +376,8 @@ static std::string get_value(const std::shared_ptr& consta case ov::element::Type_t::u4: case ov::element::Type_t::nf4: case ov::element::Type_t::i4: + case ov::element::Type_t::f8e4m3: + case ov::element::Type_t::f8e5m2: ss << constant->get_output_element_type(0).get_type_name() << " value"; break; case ov::element::Type_t::bf16: diff --git a/src/core/src/type/element_type.cpp 
b/src/core/src/type/element_type.cpp index e5607e6a60a4de..a49312b6530378 100644 --- a/src/core/src/type/element_type.cpp +++ b/src/core/src/type/element_type.cpp @@ -71,6 +71,10 @@ inline TypeInfo get_type_info(ov::element::Type_t type) { return {64, false, false, false, "uint64_t", "u64"}; case ov::element::Type_t::nf4: return {4, false, false, true, "nfloat4", "nf4"}; + case ov::element::Type_t::f8e4m3: + return {8, true, true, true, "f8e4m3", "f8e4m3"}; + case ov::element::Type_t::f8e5m2: + return {8, true, true, true, "f8e5m2", "f8e5m2"}; case ov::element::Type_t::string: return {8 * sizeof(std::string), false, false, false, "string", "string"}; default: @@ -119,6 +123,10 @@ ov::element::Type type_from_string(const std::string& type) { return ::ov::element::Type(::ov::element::Type_t::dynamic); } else if (type == "nf4" || type == "NF4") { return ::ov::element::Type(::ov::element::Type_t::nf4); + } else if (type == "f8e4m3" || type == "F8E4M3") { + return ::ov::element::Type(::ov::element::Type_t::f8e4m3); + } else if (type == "f8e5m2" || type == "F8E5M2") { + return ::ov::element::Type(::ov::element::Type_t::f8e5m2); } else { OPENVINO_THROW("Incorrect type: ", type); } @@ -126,24 +134,12 @@ ov::element::Type type_from_string(const std::string& type) { } // namespace std::vector ov::element::Type::get_known_types() { - std::vector rc = {&ov::element::dynamic, - &ov::element::boolean, - &ov::element::bf16, - &ov::element::f16, - &ov::element::f32, - &ov::element::f64, - &ov::element::i4, - &ov::element::i8, - &ov::element::i16, - &ov::element::i32, - &ov::element::i64, - &ov::element::u1, - &ov::element::u4, - &ov::element::u8, - &ov::element::u16, - &ov::element::u32, - &ov::element::u64, - &ov::element::string}; + std::vector rc = { + &ov::element::dynamic, &ov::element::boolean, &ov::element::bf16, &ov::element::f16, &ov::element::f32, + &ov::element::f64, &ov::element::i4, &ov::element::i8, &ov::element::i16, &ov::element::i32, + &ov::element::i64, &ov::element::u1, &ov::element::u4, &ov::element::u8, &ov::element::u16, + &ov::element::u32, &ov::element::u64, &ov::element::nf4, &ov::element::f8e4m3, &ov::element::f8e5m2, + &ov::element::string}; return rc; } @@ -172,7 +168,9 @@ ov::element::Type::Type(size_t bitwidth, {ov::element::Type_t::u16, {16, false, false, false, "uint16_t", "u16"}}, {ov::element::Type_t::u32, {32, false, false, false, "uint32_t", "u32"}}, {ov::element::Type_t::u64, {64, false, false, false, "uint64_t", "u64"}}, - {ov::element::Type_t::u4, {4, false, false, false, "uint4_t", "nf4"}}, + {ov::element::Type_t::nf4, {4, false, false, true, "nfloat4", "nf4"}}, + {ov::element::Type_t::f8e4m3, {8, true, true, true, "f8e4m3", "f8e4m3"}}, + {ov::element::Type_t::f8e5m2, {8, true, true, true, "f8e5m2", "f8e5m2"}}, {ov::element::Type_t::string, {8 * sizeof(std::string), false, false, false, "string", "string"}}, }; for (const auto& t : elements_map) { @@ -266,6 +264,14 @@ Type from() { return Type_t::bf16; } template <> +Type from() { + return Type_t::f8e4m3; +} +template <> +Type from() { + return Type_t::f8e5m2; +} +template <> Type from() { return Type_t::string; } @@ -282,6 +288,10 @@ Type fundamental_type_for(const Type& type) { return from::value_type>(); case Type_t::f64: return from::value_type>(); + case Type_t::f8e4m3: + return from::value_type>(); + case Type_t::f8e5m2: + return from::value_type>(); case Type_t::i4: return from::value_type>(); case Type_t::i8: @@ -304,6 +314,8 @@ Type fundamental_type_for(const Type& type) { return from::value_type>(); case 
Type_t::u64: return from::value_type>(); + case Type_t::nf4: + return from::value_type>(); case Type_t::string: return from::value_type>(); default: @@ -320,24 +332,13 @@ std::ostream& ov::element::operator<<(std::ostream& out, const ov::element::Type std::istream& ov::element::operator>>(std::istream& in, ov::element::Type& obj) { const std::unordered_map legacy = { - {"BOOL", ov::element::boolean}, - {"BF16", ov::element::bf16}, - {"I4", ov::element::i4}, - {"I8", ov::element::i8}, - {"I16", ov::element::i16}, - {"I32", ov::element::i32}, - {"I64", ov::element::i64}, - {"U4", ov::element::u4}, - {"U8", ov::element::u8}, - {"U16", ov::element::u16}, - {"U32", ov::element::u32}, - {"U64", ov::element::u64}, - {"FP32", ov::element::f32}, - {"FP64", ov::element::f64}, - {"FP16", ov::element::f16}, - {"BIN", ov::element::u1}, - {"NF4", ov::element::nf4}, - {"STRING", ov::element::string}, + {"BOOL", ov::element::boolean}, {"BF16", ov::element::bf16}, {"I4", ov::element::i4}, + {"I8", ov::element::i8}, {"I16", ov::element::i16}, {"I32", ov::element::i32}, + {"I64", ov::element::i64}, {"U4", ov::element::u4}, {"U8", ov::element::u8}, + {"U16", ov::element::u16}, {"U32", ov::element::u32}, {"U64", ov::element::u64}, + {"FP32", ov::element::f32}, {"FP64", ov::element::f64}, {"FP16", ov::element::f16}, + {"BIN", ov::element::u1}, {"NF4", ov::element::nf4}, {"F8E4M3", ov::element::f8e4m3}, + {"F8E5M2", ov::element::f8e5m2}, {"STRING", ov::element::string}, }; std::string str; in >> str; @@ -420,6 +421,8 @@ inline size_t compiler_byte_size(ov::element::Type_t et) { ET_CASE(u32); ET_CASE(u64); ET_CASE(nf4); + ET_CASE(f8e4m3); + ET_CASE(f8e5m2); ET_CASE(string); #undef ET_CASE case ov::element::Type_t::undefined: @@ -454,6 +457,8 @@ OPENVINO_API EnumNames& EnumNames::get() { {"u32", element::Type_t::u32}, {"u64", element::Type_t::u64}, {"nf4", element::Type_t::nf4}, + {"f8e4m3", element::Type_t::f8e4m3}, + {"f8e5m2", element::Type_t::f8e5m2}, {"string", element::Type_t::string}}); return enum_names; } diff --git a/src/core/src/type/float8_e4m3.cpp b/src/core/src/type/float8_e4m3.cpp new file mode 100644 index 00000000000000..9041b8a0070497 --- /dev/null +++ b/src/core/src/type/float8_e4m3.cpp @@ -0,0 +1,135 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/type/float8_e4m3.hpp" + +#include +#include +#include + +namespace ov { + +static_assert(sizeof(float8_e4m3) == 1, "class f8e4m3 must be exactly 1 byte"); +static_assert(std::is_trivially_constructible::value, "should be trivially constructible"); +static_assert(std::is_trivially_copyable::value, "must be trivially copyable"); +static_assert(std::is_trivially_destructible::value, "must be trivially destructible"); + +namespace { +constexpr auto float_nan = std::numeric_limits::quiet_NaN(); +// Lookup table for conversion f8 -> float. The f8 bit value without sign bit (masked 0x7f) is LUT offset. 
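Before the table itself, it may help to see where its entries come from: f8e4m3 has one sign bit, a 4-bit exponent with bias 7, a 3-bit mantissa, no infinities, and reserves 0x7f/0xff for NaN (matching the constants defined further down). A small Python sketch, with a hypothetical helper name, that reproduces the 128 values of the table below:

.. code-block:: python

    import math

    def f8e4m3_bits_to_float(bits: int) -> float:
        # Decode the low 7 bits of one f8e4m3 byte (sign is handled by the caller).
        e = (bits >> 3) & 0x0F  # 4-bit exponent field
        m = bits & 0x07         # 3-bit mantissa field
        if e == 0x0F and m == 0x07:
            return math.nan     # e4m3 has no infinities; 0x7f encodes NaN
        if e == 0:
            return (m / 8.0) * 2.0 ** -6          # subnormal: no hidden one
        return (1.0 + m / 8.0) * 2.0 ** (e - 7)   # normal: hidden one, bias 7

    lut = [f8e4m3_bits_to_float(b) for b in range(128)]
    assert lut[0x01] == 0.001953125
    assert lut[0x20] == 0.125
    assert lut[0x7E] == 448.0

The ``operator float()`` defined later handles the sign separately, OR-ing the f8 sign bit back into the converted f32 pattern, which is why 128 non-negative entries suffice.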
+static constexpr std::array f8_to_float_lut{
+    0.0f,      0.001953125f, 0.00390625f, 0.005859375f, 0.0078125f, 0.009765625f, 0.01171875f, 0.013671875f,
+    0.015625f, 0.017578125f, 0.01953125f, 0.021484375f, 0.0234375f, 0.025390625f, 0.02734375f, 0.029296875f,
+    0.03125f,  0.03515625f,  0.0390625f,  0.04296875f,  0.046875f,  0.05078125f,  0.0546875f,  0.05859375f,
+    0.0625f,   0.0703125f,   0.078125f,   0.0859375f,   0.09375f,   0.1015625f,   0.109375f,   0.1171875f,
+    0.125f,    0.140625f,    0.15625f,    0.171875f,    0.1875f,    0.203125f,    0.21875f,    0.234375f,
+    0.25f,     0.28125f,     0.3125f,     0.34375f,     0.375f,     0.40625f,     0.4375f,     0.46875f,
+    0.5f,      0.5625f,      0.625f,      0.6875f,      0.75f,      0.8125f,      0.875f,      0.9375f,
+    1.0f,      1.125f,       1.25f,       1.375f,       1.5f,       1.625f,       1.75f,       1.875f,
+    2.0f,      2.25f,        2.5f,        2.75f,        3.0f,       3.25f,        3.5f,        3.75f,
+    4.0f,      4.5f,         5.0f,        5.5f,         6.0f,       6.5f,         7.0f,        7.5f,
+    8.0f,      9.0f,         10.0f,       11.0f,        12.0f,      13.0f,        14.0f,       15.0f,
+    16.0f,     18.0f,        20.0f,       22.0f,        24.0f,      26.0f,        28.0f,       30.0f,
+    32.0f,     36.0f,        40.0f,       44.0f,        48.0f,      52.0f,        56.0f,       60.0f,
+    64.0f,     72.0f,        80.0f,       88.0f,        96.0f,      104.0f,       112.0f,      120.0f,
+    128.0f,    144.0f,       160.0f,      176.0f,       192.0f,     208.0f,       224.0f,      240.0f,
+    256.0f,    288.0f,       320.0f,      352.0f,       384.0f,     416.0f,       448.0f,      float_nan};
+
+constexpr uint32_t three_bytes_shift = 24;
+
+constexpr uint8_t f8e4m3_s_mask = 0x80;  // f8e4m3 sign bit mask
+constexpr uint8_t f8e4m3_e_size = 4;     // f8e4m3 exponent bit size
+constexpr uint8_t f8e4m3_e_mask = 0x78;  // f8e4m3 exponent bit mask
+constexpr uint8_t f8e4m3_e_bias = 7;     // f8e4m3 exponent bias
+constexpr uint8_t f8e4m3_e_max = 0x0f;   // f8e4m3 exponent max value
+constexpr uint8_t f8e4m3_m_size = 3;     // f8e4m3 mantissa bits size
+constexpr uint8_t f8e4m3_m_mask = 0x07;  // f8e4m3 mantissa bit mask
+
+union f32_t {
+    float value;
+    uint32_t bits;
+};
+
+uint8_t f32_to_f8e4m3_bits(const float value) {
+    constexpr uint32_t f32_s_mask = 0x80000000;  // f32 sign bit mask
+    constexpr uint32_t f32_e_mask = 0x7F800000;  // f32 exponent bits mask
+    constexpr uint32_t f32_e_bias = 127;         // f32 exponent bias
+    constexpr uint32_t f32_e_size = 8;           // f32 exponent bits size
+    constexpr uint32_t f32_m_mask = 0x007fffff;  // f32 mantissa bits mask
+    constexpr uint32_t f32_m_size = 23;          // f32 mantissa bits size
+
+    constexpr uint32_t f8_e_mask = f8e4m3_e_mask << three_bytes_shift;  // f8 exponent bits mask (on u32)
+    constexpr uint32_t f8_m_mask = f8e4m3_m_mask << three_bytes_shift;  // f8 mantissa bits mask (on u32)
+    constexpr uint32_t f8_m_hidden_one_mask = 0x08000000;               // f8 mantissa hidden one bits mask (on u32)
+
+    constexpr uint32_t round_half = 0x01ffffff;  // value for half to even round for f8
+    constexpr uint32_t round_norm = 0x007fffff;  // value for normal round for f8
+    constexpr uint32_t round_even = 0x00800000;  // value for half to even round for f8
+    constexpr uint32_t round_odd = 0x01800000;   // value for a non-half-to-even round for f8
+
+    const auto input = f32_t{value};
+    auto f8_bits = static_cast((input.bits & f32_s_mask) >> three_bytes_shift);
+
+    uint32_t f32_e_field = input.bits & f32_e_mask;
+
+    if (f32_e_field == f32_e_mask) {
+        f8_bits |= (f8e4m3_e_mask | f8e4m3_m_mask);
+    } else if (f32_e_field != 0) {
+        int32_t f8_biased_exp = (f32_e_field >> f32_m_size) - (f32_e_bias - f8e4m3_e_bias);
+        uint32_t fractional = (input.bits & f32_m_mask) << (f32_e_size - f8e4m3_e_size);
+
+        // for normalized values, apply rounding; this may change the f8 fractional part and the biased exponent
+        if ((fractional & round_half) == round_odd || (fractional & round_norm) != 0) {
+            fractional += round_even;
+            if (0 != (fractional &
f8_e_mask)) { + fractional &= f8_e_mask; + ++f8_biased_exp; + } + } + fractional &= f8_m_mask; + + // set exponent and mantissa on f8 bits + if (f8_biased_exp > f8e4m3_e_max) { + // Use NAN as this type has no infinity + f8_bits |= (f8e4m3_e_mask | f8e4m3_m_mask); + } else if (f8_biased_exp > 0) { + f8_bits |= (f8_biased_exp << f8e4m3_m_size) | (fractional >> three_bytes_shift); + } else { + // Restore the hidden 1 in f8 mantissa for subnormal calculation + fractional = f8_m_hidden_one_mask | (input.bits & f32_m_mask) << (f32_e_size - f8e4m3_e_size); + // Will any bits be shifted off? + int32_t shift = f8_biased_exp < -(f8e4m3_e_max) ? 0 : (1U << (1 - f8_biased_exp)); + uint32_t sticky = (fractional & (shift - 1)) ? 1 : 0; + + fractional = ((1 + f8_biased_exp) > f8e4m3_e_max) ? 0 : fractional >> (1 - f8_biased_exp); + fractional |= sticky; + // apply rounding + if (((fractional & round_half) == round_odd) || ((fractional & round_norm) != 0)) { + fractional += round_even; + } + + f8_bits |= fractional >> three_bytes_shift; + } + } + + return f8_bits; +} +} // namespace + +float8_e4m3::float8_e4m3(const uint32_t sign, const uint32_t biased_exponent, const uint32_t fraction) + : m_value(((sign & 0x01U) << (f8e4m3_e_size + f8e4m3_m_size)) | + (biased_exponent & (f8e4m3_e_mask >> f8e4m3_m_size)) << f8e4m3_m_size | (fraction & f8e4m3_m_mask)) {} + +float8_e4m3::float8_e4m3(const float value) : m_value{f32_to_f8e4m3_bits(value)} {} + +float8_e4m3::operator float() const { + auto converted = f32_t{f8_to_float_lut[m_value & (f8e4m3_e_mask | f8e4m3_m_mask)]}; + converted.bits |= (m_value & f8e4m3_s_mask) << three_bytes_shift; + return converted.value; +} + +uint8_t float8_e4m3::to_bits() const { + return m_value; +} +} // namespace ov diff --git a/src/core/src/type/float8_e5m2.cpp b/src/core/src/type/float8_e5m2.cpp new file mode 100644 index 00000000000000..b44a0f75b21948 --- /dev/null +++ b/src/core/src/type/float8_e5m2.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/type/float8_e5m2.hpp" + +#include +#include + +#include "openvino/reference/fake_convert.hpp" + +namespace ov { +static_assert(sizeof(float8_e5m2) == 1, "class f8e5m2 must be exactly 1 byte"); +static_assert(std::is_trivially_constructible::value, "should be trivially constructible"); +static_assert(std::is_trivially_copyable::value, "must be trivially copyable"); +static_assert(std::is_trivially_destructible::value, "must be trivially destructible"); + +namespace { + +constexpr uint8_t byte_shift = 8; + +constexpr uint8_t f8e5m2_e_size = 5; // f8e5m2 exponent bit size +constexpr uint8_t f8e5m2_e_mask = 0x7c; // f8e5m2 exponent bit mask +constexpr uint8_t f8e5m2_m_size = 2; // f8e5m2 mantissa bits size +constexpr uint8_t f8e5m2_m_mask = 0x03; // f8e5m2 mantissa bit mask + +uint8_t f32_to_f8e5m2_bits(const float value) { + auto f16 = static_cast(value); + reference::func::emulate_f8e5m2_on_fp16(&f16, &f16, 1); + return static_cast((f16.to_bits() >> byte_shift)); +} +} // namespace + +float8_e5m2::float8_e5m2(uint32_t sign, uint32_t biased_exponent, uint32_t fraction) + : m_value((sign & 0x01) << (f8e5m2_e_size + f8e5m2_m_size) | + (biased_exponent & (f8e5m2_e_mask >> f8e5m2_m_size)) << f8e5m2_m_size | (fraction & f8e5m2_m_mask)) {} + +float8_e5m2::float8_e5m2(const float value) : m_value(f32_to_f8e5m2_bits(value)){}; + +float8_e5m2::operator float() const { + return static_cast(float16::from_bits((static_cast(m_value) << byte_shift))); +} + 
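For contrast with the e4m3 routine above, the e5m2 encoder delegates to ``emulate_f8e5m2_on_fp16``: f8e5m2 shares the 5-bit exponent layout of IEEE half precision, so an f8e5m2 value is exactly the high byte of an fp16 bit pattern. A rough Python sketch of that idea, assuming numpy is available; the real emulation also covers NaN payloads and corner cases not shown here:

.. code-block:: python

    import numpy as np

    def f32_to_f8e5m2_bits(x: float) -> int:
        # Round to fp16 first (numpy rounds to nearest-even), then keep the
        # high byte, rounding away the low byte with ties-to-even.
        h = int(np.array([x], dtype=np.float16).view(np.uint16)[0])
        low, hi = h & 0xFF, h >> 8
        if low > 0x80 or (low == 0x80 and (hi & 1)):
            hi += 1  # the carry can ripple into the exponent, yielding infinity
        return hi & 0xFF

    def f8e5m2_bits_to_f32(b: int) -> float:
        # Widening is exact: put the byte back as the fp16 high byte.
        return float(np.array([b << 8], dtype=np.uint16).view(np.float16)[0])

    assert f32_to_f8e5m2_bits(0.7) == 0x3A  # 0.7 rounds to 0.75
    assert f8e5m2_bits_to_f32(0x3A) == 0.75

This matches the test expectations later in the patch, where 0.7f round-trips through f8e5m2 to 0.75f.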
+uint8_t float8_e5m2::to_bits() const { + return m_value; +} +} // namespace ov diff --git a/src/core/tests/element_type.cpp b/src/core/tests/element_type.cpp index abf8b3f8aa7603..d7bd49c2e2e252 100644 --- a/src/core/tests/element_type.cpp +++ b/src/core/tests/element_type.cpp @@ -67,6 +67,10 @@ TEST(element_type, from_string) { EXPECT_EQ(element::Type("U64"), element::u64); EXPECT_EQ(element::Type("nf4"), element::nf4); EXPECT_EQ(element::Type("NF4"), element::nf4); + EXPECT_EQ(element::Type("f8e4m3"), element::f8e4m3); + EXPECT_EQ(element::Type("F8E4M3"), element::f8e4m3); + EXPECT_EQ(element::Type("f8e5m2"), element::f8e5m2); + EXPECT_EQ(element::Type("F8E5M2"), element::f8e5m2); EXPECT_EQ(element::Type("string"), element::string); EXPECT_EQ(element::Type("STRING"), element::string); diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp index 224272715b4da1..bc77f0c2d653fa 100644 --- a/src/core/tests/eval.cpp +++ b/src/core/tests/eval.cpp @@ -2963,7 +2963,8 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e4m3_no_scale_no_shift) { using namespace testing; constexpr auto et = element::f32; - std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f}; + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; const auto data_shape = Shape{input_data.size()}; auto data = make_shared(et, data_shape); @@ -2983,10 +2984,10 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e4m3_no_scale_no_shift) { EXPECT_EQ(result.get_shape(), data_shape); EXPECT_THAT( read_vector(result), - Pointwise( - FloatEq(), - std::vector< - float>{0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f})); + Pointwise(FloatEq(), std::vector{0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, + 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f, -0.f, + -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f, -0.625f, + -0.6875f, -0.8125f, -0.875f, -1.f})); } TEST(eval, evaluate_fake_convert_f32_seq_to_f8e4m3_scale_1) { @@ -3223,7 +3224,8 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e5m2_scale_1) { using namespace testing; constexpr auto et = element::f32; - std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f}; + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; const auto data_shape = Shape{input_data.size()}; @@ -3244,11 +3246,11 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e5m2_scale_1) { EXPECT_EQ(result.get_element_type(), et); EXPECT_EQ(result.get_shape(), data_shape); - EXPECT_THAT( - read_vector(result), - Pointwise( - FloatEq(), - std::vector{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, 0.75f, 0.875f, 1.f})); + EXPECT_THAT(read_vector(result), + Pointwise(FloatEq(), + std::vector{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, + 0.75f, 0.875f, 1.f, -0.f, -0.09375f, -0.1875f, -0.3125f, -0.375f, + -0.5f, -0.625f, -0.75f, -0.75f, -0.875f, -1.f})); } TEST(eval, evaluate_fake_convert_f16_to_f8e5m2_scale_1) { @@ -3707,7 +3709,7 @@ TEST(eval, evaluate_fake_convert_bf16_matching_f8_to_f8e5m2_scale_1) { 4096.f, 5120.f, 6144.f, 7168.f, 8192.f, 10240.f, 12288.f, 14336.f, 16384.f, 20480.f, 24576.f, 28672.f, - 32768.f, 40960.f, 49152.f, 57344.0 + 32768.f, 40960.f, 49152.f, 57344.f }; // clang-format on @@ -3766,6 +3768,92 @@ TEST(eval, 
evaluate_fake_convert_f32_matching_f8e4m3_to_f8e5m2_scale_1) { EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); } +TEST(eval, evaluate_f8e5m2_const_from_f32) { + using namespace testing; + constexpr auto et = element::f8e5m2; + + std::vector input_data{ + 0.017578125f, 0.021484375f, 0.025390625f, 0.029296875f, 0.03515625f, 0.0703125f, 0.140625f, + 0.28125f, 0.5625f, 1.125f, 1.625f, 1.875f, 2.25f, 3.75f, + 4.5f, 9.f, 18.f, 36.f, 72.f, 144.f, 288.f, + }; + /* Rounded to f8e5m2 vals */ + std::vector output_data{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, 0.125f, + 0.25f, 0.5f, 1.f, 1.5, 2.f, 2.f, 4.f, + 4.f, 8.f, 16.f, 32.f, 64.f, 128.f, 256.f}; + + const auto data_shape = Shape{input_data.size()}; + + auto op = make_shared(et, data_shape, input_data); + auto model = make_shared(OutputVector{op}, ParameterVector{}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), et); + EXPECT_EQ(result.get_shape(), data_shape); + EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); +} + +TEST(eval, evaluate_f8e5m2_const_seq_from_f32) { + using namespace testing; + constexpr auto et = element::f8e5m2; + + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; + + /* Rounded to f8e5m2 vals */ + std::vector output_data{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, + 0.75f, 0.875f, 1.f, -0.f, -0.09375f, -0.1875f, -0.3125f, -0.375f, + -0.5f, -0.625f, -0.75f, -0.75f, -0.875f, -1.f}; + + const auto data_shape = Shape{input_data.size()}; + + auto op = make_shared(et, data_shape, input_data); + auto model = make_shared(OutputVector{op}, ParameterVector{}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), et); + EXPECT_EQ(result.get_shape(), data_shape); + EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); +} + +TEST(eval, evaluate_f8e4m3_const_seq_from_f32) { + using namespace testing; + constexpr auto et = element::f8e4m3; + + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; + + /* Rounded to f8e4m3 vals */ + std::vector output_data{ + 0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f, + -0.f, -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f, -0.625f, -0.6875f, -0.8125f, -0.875f, -1.f}; + + const auto data_shape = Shape{input_data.size()}; + + auto op = make_shared(et, data_shape, input_data); + auto model = make_shared(OutputVector{op}, ParameterVector{}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), et); + EXPECT_EQ(result.get_shape(), data_shape); + EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); +} + TEST(eval, evaluate_fake_convert_f32_seq_to_f8e5m2_scale_shift) { using namespace testing; constexpr auto et = element::f32; diff --git 
a/src/core/tests/float8_e4m3.cpp b/src/core/tests/float8_e4m3.cpp
new file mode 100644
index 00000000000000..6265b530e10105
--- /dev/null
+++ b/src/core/tests/float8_e4m3.cpp
@@ -0,0 +1,175 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/core/type/float8_e4m3.hpp"
+
+#include 
+
+#include 
+
+#include "common_test_utils/float_util.hpp"
+namespace ov {
+namespace test {
+
+template 
+std::vector> enumerate(const TContainer& values) {
+    std::vector> enum_values;
+    int i = 0;
+    for (const auto& v : values) {
+        enum_values.emplace_back(i, v);
+        ++i;
+    }
+    return enum_values;
+}
+
+TEST(F8E4M3Test, f32_inf) {
+    const auto f8 = ov::float8_e4m3(std::numeric_limits::infinity());
+
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
+
+TEST(F8E4M3Test, f32_minus_inf) {
+    const auto f8 = ov::float8_e4m3(-std::numeric_limits::infinity());
+    // f8 is NaN as there is no infinity
+    EXPECT_EQ(f8.to_bits(), 0xff);
+}
+
+TEST(F8E4M3Test, f32_nan) {
+    const auto f8 = ov::float8_e4m3(std::numeric_limits::quiet_NaN());
+
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
+
+TEST(F8E4M3Test, f32_gt_zero_le_f8_half_lowest_subnormal) {
+    const auto f8 = ov::float8_e4m3(0.0009765625f);
+
+    EXPECT_EQ(f8.to_bits(), 0x00);
+}
+
+TEST(F8E4M3Test, f32_gt_zero_gt_f8_half_lowest_subnormal) {
+    const auto f8 = ov::float8_e4m3(0.00097656273283064365387f);
+
+    EXPECT_EQ(f8.to_bits(), 0x01);
+}
+
+TEST(F8E4M3Test, f32_normal_fractional_rounding) {
+    const auto f8 = ov::float8_e4m3(0.129f);
+
+    // Rounded to 0.125f -> 0x20
+    EXPECT_EQ(f8.to_bits(), 0x20);
+}
+
+TEST(F8E4M3Test, f32_normal_negative_fractional_rounding) {
+    const auto f8 = ov::float8_e4m3(-0.281f);
+
+    // Rounded to -0.28125f -> 0xa9
+    EXPECT_EQ(f8.to_bits(), 0xa9);
+}
+
+TEST(F8E4M3Test, f32_ge_f8_max_within_round_to_max) {
+    const auto f8 = ov::float8_e4m3(460.0f);
+
+    // Rounded to 448.0f -> 0x7e
+    EXPECT_EQ(f8.to_bits(), 0x7e);
+}
+
+TEST(F8E4M3Test, f32_ge_f8_max_not_within_round_to_max) {
+    const auto f8 = ov::float8_e4m3(560.0f);
+
+    // f8 has no such value (NaN)
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
+
+TEST(F8E4M3Test, f32_le_f8_lowest_within_round_to_lowest) {
+    const auto f8 = ov::float8_e4m3(-460.0f);
+
+    // Rounded to -448.0f -> 0xfe
+    EXPECT_EQ(f8.to_bits(), 0xfe);
+}
+
+TEST(F8E4M3Test, f32_le_f8_lowest_not_within_round_to_lowest) {
+    const auto f8 = ov::float8_e4m3(-760.0f);
+
+    // f8 has no such value (NaN)
+    EXPECT_EQ(f8.to_bits(), 0xff);
+}
+
+TEST(F8E4M3Test, stream_operator) {
+    std::stringstream s;
+    s << ov::float8_e4m3(2.5f);
+
+    EXPECT_EQ(s.str(), "2.5");
+}
+
+TEST(F8E4M3Test, to_string) {
+    const auto f8 = ov::float8_e4m3::from_bits(0b00111010);
+
+    EXPECT_EQ(std::to_string(f8), "1.250000");
+}
+constexpr auto f32_qnan = std::numeric_limits::quiet_NaN();
+
+const auto exp_floats = std::vector{
+    0.0f,      0.001953125f, 0.00390625f, 0.005859375f, 0.0078125f, 0.009765625f, 0.01171875f, 0.013671875f,
+    0.015625f, 0.017578125f, 0.01953125f, 0.021484375f, 0.0234375f, 0.025390625f, 0.02734375f, 0.029296875f,
+    0.03125f,  0.03515625f,  0.0390625f,  0.04296875f,  0.046875f,  0.05078125f,  0.0546875f,  0.05859375f,
+    0.0625f,   0.0703125f,   0.078125f,   0.0859375f,   0.09375f,   0.1015625f,   0.109375f,   0.1171875f,
+    0.125f,    0.140625f,    0.15625f,    0.171875f,    0.1875f,    0.203125f,    0.21875f,    0.234375f,
+    0.25f,     0.28125f,     0.3125f,     0.34375f,     0.375f,     0.40625f,     0.4375f,     0.46875f,
+    0.5f,      0.5625f,      0.625f,      0.6875f,      0.75f,      0.8125f,      0.875f,      0.9375f,
+    1.0f,      1.125f,       1.25f,       1.375f,       1.5f,       1.625f,       1.75f,       1.875f,
+    2.0f,      2.25f,        2.5f,        2.75f,        3.0f,       3.25f,        3.5f,        3.75f,
+    4.0f,      4.5f,         5.0f,        5.5f,         6.0f,       6.5f,         7.0f,        7.5f,
+    8.0f,      9.0f,         10.0f,       11.0f,        12.0f,      13.0f,        14.0f,       15.0f,
+    16.0f,     18.0f,        20.0f,       22.0f,        24.0f,      26.0f,        28.0f,       30.0f,
+    32.0f,     36.0f,        40.0f,       44.0f,        48.0f,      52.0f,        56.0f,       60.0f,
+    64.0f,     72.0f,        80.0f,       88.0f,        96.0f,      104.0f,       112.0f,      120.0f,
+    128.0f,    144.0f,       160.0f,      176.0f,       192.0f,     208.0f,       224.0f,      240.0f,
+    256.0f,    288.0f,       320.0f,      352.0f,       384.0f,     416.0f,       448.0f,      f32_qnan,
+    -0.0f,      -0.001953125f, -0.00390625f, -0.005859375f, -0.0078125f, -0.009765625f, -0.01171875f, -0.013671875f,
+    -0.015625f, -0.017578125f, -0.01953125f, -0.021484375f, -0.0234375f, -0.025390625f, -0.02734375f, -0.029296875f,
+    -0.03125f,  -0.03515625f,  -0.0390625f,  -0.04296875f,  -0.046875f,  -0.05078125f,  -0.0546875f,  -0.05859375f,
+    -0.0625f,   -0.0703125f,   -0.078125f,   -0.0859375f,   -0.09375f,   -0.1015625f,   -0.109375f,   -0.1171875f,
+    -0.125f,    -0.140625f,    -0.15625f,    -0.171875f,    -0.1875f,    -0.203125f,    -0.21875f,    -0.234375f,
+    -0.25f,     -0.28125f,     -0.3125f,     -0.34375f,     -0.375f,     -0.40625f,     -0.4375f,     -0.46875f,
+    -0.5f,      -0.5625f,      -0.625f,      -0.6875f,      -0.75f,      -0.8125f,      -0.875f,      -0.9375f,
+    -1.0f,      -1.125f,       -1.25f,       -1.375f,       -1.5f,       -1.625f,       -1.75f,       -1.875f,
+    -2.0f,      -2.25f,        -2.5f,        -2.75f,        -3.0f,       -3.25f,        -3.5f,        -3.75f,
+    -4.0f,      -4.5f,         -5.0f,        -5.5f,         -6.0f,       -6.5f,         -7.0f,        -7.5f,
+    -8.0f,      -9.0f,         -10.0f,       -11.0f,        -12.0f,      -13.0f,        -14.0f,       -15.0f,
+    -16.0f,     -18.0f,        -20.0f,       -22.0f,        -24.0f,      -26.0f,        -28.0f,       -30.0f,
+    -32.0f,     -36.0f,        -40.0f,       -44.0f,        -48.0f,      -52.0f,        -56.0f,       -60.0f,
+    -64.0f,     -72.0f,        -80.0f,       -88.0f,        -96.0f,      -104.0f,       -112.0f,      -120.0f,
+    -128.0f,    -144.0f,       -160.0f,      -176.0f,       -192.0f,     -208.0f,       -224.0f,      -240.0f,
+    -256.0f,    -288.0f,       -320.0f,      -352.0f,       -384.0f,     -416.0f,       -448.0f,      -f32_qnan};
+
+using f8m4e3_params = std::tuple;
+class F8E4M3PTest : public testing::TestWithParam {};
+
+INSTANTIATE_TEST_SUITE_P(convert,
+                         F8E4M3PTest,
+                         testing::ValuesIn(enumerate(exp_floats)),
+                         testing::PrintToStringParamName());
+
+TEST_P(F8E4M3PTest, f8_bits_to_f32) {
+    const auto& params = GetParam();
+    const auto& exp_value = std::get<1>(params);
+    const auto f8 = ov::float8_e4m3::from_bits(std::get<0>(params));
+
+    if (std::isnan(exp_value)) {
+        EXPECT_TRUE(std::isnan(static_cast(f8)));
+    } else {
+        EXPECT_EQ(static_cast(f8), exp_value);
+    }
+}
+
+TEST_P(F8E4M3PTest, f32_to_f8_bits) {
+    const auto& params = GetParam();
+    const auto& exp_value = std::get<0>(params);
+    const auto& value = std::get<1>(params);
+    const auto f8 = ov::float8_e4m3(value);
+
+    EXPECT_EQ(f8.to_bits(), exp_value);
+}
+}  // namespace test
+}  // namespace ov
diff --git a/src/core/tests/float8_e5m2.cpp b/src/core/tests/float8_e5m2.cpp
new file mode 100644
index 00000000000000..38028497341c17
--- /dev/null
+++ b/src/core/tests/float8_e5m2.cpp
@@ -0,0 +1,176 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/core/type/float8_e5m2.hpp"
+
+#include 
+
+#include 
+
+#include "common_test_utils/float_util.hpp"
+
+namespace ov {
+namespace test {
+
+TEST(F8E5M2Test, stream_operator) {
+    std::stringstream s;
+    s << ov::float8_e5m2(2.5f);
+
+    EXPECT_EQ(s.str(), "2.5");
+}
+
+TEST(F8E5M2Test, to_string) {
+    const auto f8 = ov::float8_e5m2::from_bits(0b00111010);
+
+    EXPECT_EQ(std::to_string(f8), "0.750000");
+}
+
+TEST(F8E5M2Test, f32_inf) {
+    const auto f8 = ov::float8_e5m2(std::numeric_limits::infinity());
+
+    EXPECT_EQ(f8.to_bits(), 0b01111100);
+}
+
+TEST(F8E5M2Test, f32_minus_inf) {
+    const auto f8 = ov::float8_e5m2(-std::numeric_limits::infinity());
+
diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
index d5f5a3a4f19b96..ed8621d0351a3e 100644
--- a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
+++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
@@ -105,6 +105,22 @@ void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob,
                                         threshold,
                                         abs_threshold);
         break;
+    case ov::element::f8e4m3:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::float8_e4m3, ov::float8_e4m3>(
+            refBlob.data<const ov::float8_e4m3>(),
+            outBlob.data<const ov::float8_e4m3>(),
+            actual_comparision_size,
+            threshold,
+            abs_threshold);
+        break;
+    case ov::element::f8e5m2:
+        LayerTestsUtils::LayerTestsCommon::Compare<ov::float8_e5m2, ov::float8_e5m2>(
+            refBlob.data<const ov::float8_e5m2>(),
+            outBlob.data<const ov::float8_e5m2>(),
+            actual_comparision_size,
+            threshold,
+            abs_threshold);
+        break;
     case ov::element::f32:
         LayerTestsUtils::LayerTestsCommon::Compare<float, float>(refBlob.data<const float>(),
                                                                  outBlob.data<const float>(),
diff --git a/src/plugins/template/tests/functional/op_reference/constant.cpp b/src/plugins/template/tests/functional/op_reference/constant.cpp
index ababb046e9c2b6..8178544a7d940a 100644
--- a/src/plugins/template/tests/functional/op_reference/constant.cpp
+++ b/src/plugins/template/tests/functional/op_reference/constant.cpp
@@ -201,6 +201,50 @@ std::vector<ConstantParams> generateConstantDefinedTypeParams() {
                        std::vector<int64_t>{0x4000000000000001, 0x4000000000000002},
                        std::vector<int64_t>{0x4000000000000001, 0x4000000000000002},
                        "tensor_constant_int64"),
+        ConstantParams(
+            {3, 9},
+            element::Type_t::f8e4m3,
+            element::Type_t::f8e4m3,
+            std::vector<ov::float8_e4m3>{4.75f, 4.5f,  -5.25f, 0.0f,  0.1f,  0.2f,  0.3f,  0.4f,  0.5f,
+                                         0.6f,  0.7f,  0.8f,   0.9f,  1.f,   -0.0f, -0.1f, -0.2f, -0.3f,
+                                         -0.4f, -0.5f, -0.6f,  -0.7f, -0.8f, -0.9f, -1.f,  0.001953125f, 448.f},
+            std::vector<ov::float8_e4m3>{5.0f,  4.5f,       -5.0f,     0.0f,     0.1015625f, 0.203125f, 0.3125f,
+                                         0.40625f, 0.5f,    0.625f,    0.6875f,  0.8125f,    0.875f,    1.f,
+                                         -0.f,  -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f,    -0.625f,
+                                         -0.6875f, -0.8125f, -0.875f,  -1.f,     0.001953125f, 448.f},
+            "tensor_constant_f8e4m3"),
+        ConstantParams({3, 9},
+                       element::Type_t::f8e5m2,
+                       element::Type_t::f8e5m2,
+                       std::vector<ov::float8_e5m2>{4.75f, 4.5f,
+                                                    -5.25f, 0.0f,
+                                                    0.1f, 0.2f,
+                                                    0.3f, 0.4f,
+                                                    0.5f, 0.6f,
+                                                    0.7f, 0.8f,
+                                                    0.9f, 1.f,
+                                                    -0.0f, -0.1f,
+                                                    -0.2f, -0.3f,
+                                                    -0.4f, -0.5f,
+                                                    -0.6f, -0.7f,
+                                                    -0.8f, -0.9f,
+                                                    -1.f, 0.0000152587890625f,
+                                                    57344.f},
+                       std::vector<ov::float8_e5m2>{4.75f, 4.5f,
+                                                    -5.25f, 0.0f,
+                                                    0.09375f, 0.1875f,
+                                                    0.3125f, 0.375f,
+                                                    0.5f, 0.625f,
+                                                    0.75f, 0.75f,
+                                                    0.875f, 1.f,
+                                                    -0.f, -0.09375f,
+                                                    -0.1875f, -0.3125f,
+                                                    -0.375f, -0.5f,
+                                                    -0.625f, -0.75f,
+                                                    -0.75f, -0.875f,
+                                                    -1.f, 0.0000152587890625f,
+                                                    57344.f},
+                       "tensor_constant_f8e5m2"),
     };
     return constantParams;
 }
diff --git a/src/plugins/template/tests/functional/op_reference/convert.cpp b/src/plugins/template/tests/functional/op_reference/convert.cpp
index b6195744c9c6f3..461daa56d80b14 100644
--- a/src/plugins/template/tests/functional/op_reference/convert.cpp
+++ b/src/plugins/template/tests/functional/op_reference/convert.cpp
@@ -57,6 +57,163 @@ INSTANTIATE_TEST_SUITE_P(
                                          std::numeric_limits<float>::infinity(),
                                          -std::numeric_limits<float>::infinity()},
                       std::vector<char>{0, 1, 1, 0, 1, 1, 1, 1, 1}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3, 7},
+                      ov::element::f32,
+                      ov::element::f8e5m2,
+                      std::vector<float>{0.017578125f, 0.021484375f, 0.025390625f, 0.029296875f, 0.03515625f,
+                                         0.0703125f, 0.140625f, 0.28125f, 0.5625f, 1.125f, 1.625f, 1.875f,
+                                         2.25f, 3.75f, 4.5f, 9.f, 18.f, 36.f, 72.f, 144.f, 288.f},
+                      std::vector<ov::float8_e5m2>{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f,
+                                                   0.125f, 0.25f, 0.5f, 1.f, 1.5, 2.f,
+                                                   2.f, 4.f, 4.f, 8.f, 16.f, 32.f,
+                                                   64.f, 128.f, 256.f}),
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3, 7},
+                      ov::element::f8e5m2,
+                      ov::element::f32,
+                      std::vector<ov::float8_e5m2>{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f,
+                                                   0.125f, 0.25f, 0.5f, 1.f, 1.5, 2.f,
+                                                   2.f, 4.f, 4.f, 8.f, 16.f, 32.f,
+                                                   64.f, 128.f, 256.f},
+                      std::vector<float>{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, 0.125f,
+                                         0.25f, 0.5f, 1.f, 1.5, 2.f, 2.f, 4.f,
+                                         4.f, 8.f, 16.f, 32.f, 64.f, 128.f, 256.f}),
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 7},
+                      ov::element::f16,
+                      ov::element::f8e5m2,
+                      std::vector<ov::float16>{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f},
+                      std::vector<ov::float8_e5m2>{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}),
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 7},
+                      ov::element::f8e5m2,
+                      ov::element::f16,
+                      std::vector<ov::float8_e5m2>{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f},
+                      std::vector<ov::float16>{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 3, 5},
+                      ov::element::f16,
+                      ov::element::f8e4m3,
+                      std::vector<ov::float16>{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f,
+                                               0.8f, 0.9f, 1.f, 1.5f, 2.5f, 1.5f, 3.5f},
+                      std::vector<ov::float8_e4m3>{0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, 0.625f,
+                                                   0.6875f, 0.8125f, 0.875f, 1.f, 1.5f, 2.5f, 1.5f, 3.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3},
+                      ov::element::f8e4m3,
+                      ov::element::f16,
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f, 0.f},
+                      std::vector<ov::float16>{0.5f, 1.5f, 0.f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3},
+                      ov::element::f8e4m3,
+                      ov::element::f8e4m3,
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f, 0.f},
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f, 0.f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 2},
+                      ov::element::f8e5m2,
+                      ov::element::f8e5m2,
+                      std::vector<ov::float8_e5m2>{0.5f, 1.5f},
+                      std::vector<ov::float8_e5m2>{0.5f, 1.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 2},
+                      ov::element::f32,
+                      ov::element::f32,
+                      std::vector<float>{0.5f, 1.5f},
+                      std::vector<float>{0.5f, 1.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 2},
+                      ov::element::f8e4m3,
+                      ov::element::f32,
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f},
+                      std::vector<float>{0.5f, 1.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 2},
+                      ov::element::f8e4m3,
+                      ov::element::f16,
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f},
+                      std::vector<ov::float16>{0.5f, 1.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3, 2},
+                      ov::element::f8e4m3,
+                      ov::element::f32,
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 3.5f},
+                      std::vector<float>{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 3.5f}),
+
+        ConvertParams(ConversionTypes::CONVERT,
+                      ov::PartialShape{1, 1, 3, 5},
+                      ov::element::f32,
+                      ov::element::f8e4m3,
+                      std::vector<float>{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f,
+                                         0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+                      std::vector<ov::float8_e4m3>{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f,
+                                                   0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+
         // destination bf16
         ConvertParams(
             ConversionTypes::CONVERT,
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
index 4f318ef98b3f03..28a26f4d754a12 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
@@ -525,6 +525,14 @@ inline ov::float16 ie_abs(const ov::float16& val) {
     return ov::float16::from_bits(val.to_bits() & 0x7FFF);
 }
 
+inline ov::float8_e4m3 ie_abs(const ov::float8_e4m3& val) {
+    return ov::float8_e4m3::from_bits(val.to_bits() & 0x7F);
+}
+
+inline ov::float8_e5m2 ie_abs(const ov::float8_e5m2& val) {
+    return ov::float8_e5m2::from_bits(val.to_bits() & 0x7F);
+}
+
 }  // namespace utils
 }  // namespace test
 }  // namespace ov

From 5048299d9752022c0925beab7e58ad1700a3a6f6 Mon Sep 17 00:00:00 2001
From: Anastasia Kuporosova
Date: Tue, 16 Jan 2024 18:58:06 +0100
Subject: [PATCH 026/122] Update year in src/bindings/python folder (#22186)

* Update year in src/bindings/python folder

* fix ci
---
 src/bindings/python/CMakeLists.txt | 2 +-
 src/bindings/python/docs/examples/custom_module/__init__.py | 2 +-
 .../python/docs/examples/custom_module/custom_helpers.py | 2 +-
 src/bindings/python/docs/examples/openvino/__init__.py | 2 +-
 src/bindings/python/docs/examples/openvino/mymodule/__init__.py | 2 +-
 src/bindings/python/docs/examples/openvino/mymodule/myclass.py | 2 +-
 src/bindings/python/src/openvino/__init__.py | 2 +-
 .../python/src/openvino/_offline_transformations/__init__.py | 2 +-
 src/bindings/python/src/openvino/frontend/__init__.py | 2 +-
 src/bindings/python/src/openvino/frontend/frontend.py | 2 +-
 src/bindings/python/src/openvino/frontend/onnx/__init__.py | 2 +-
 src/bindings/python/src/openvino/frontend/paddle/__init__.py | 2 +-
 src/bindings/python/src/openvino/frontend/pytorch/__init__.py | 2 +-
 src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py | 2 +-
 src/bindings/python/src/openvino/frontend/pytorch/gptq.py | 2 +-
 .../python/src/openvino/frontend/pytorch/torchdynamo/backend.py | 2 +-
 .../src/openvino/frontend/pytorch/torchdynamo/backend_utils.py | 2 +-
 .../python/src/openvino/frontend/pytorch/torchdynamo/compile.py | 2 +-
 .../python/src/openvino/frontend/pytorch/torchdynamo/execute.py | 2 +-
 .../src/openvino/frontend/pytorch/torchdynamo/op_support.py | 2 +-
 .../src/openvino/frontend/pytorch/torchdynamo/partition.py | 2 +-
 src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py | 2 +-
src/bindings/python/src/openvino/frontend/pytorch/utils.py | 2 +- .../python/src/openvino/frontend/tensorflow/__init__.py | 2 +- .../python/src/openvino/frontend/tensorflow/graph_iterator.py | 2 +- .../python/src/openvino/frontend/tensorflow/node_decoder.py | 2 +- src/bindings/python/src/openvino/frontend/tensorflow/utils.py | 2 +- src/bindings/python/src/openvino/helpers/__init__.py | 2 +- src/bindings/python/src/openvino/helpers/packing.py | 2 +- src/bindings/python/src/openvino/preprocess/__init__.py | 2 +- .../python/src/openvino/preprocess/torchvision/__init__.py | 2 +- .../src/openvino/preprocess/torchvision/preprocess_converter.py | 2 +- .../preprocess/torchvision/torchvision_preprocessing.py | 2 +- src/bindings/python/src/openvino/properties/__init__.py | 2 +- src/bindings/python/src/openvino/properties/_properties.py | 2 +- src/bindings/python/src/openvino/properties/device/__init__.py | 2 +- src/bindings/python/src/openvino/properties/hint/__init__.py | 2 +- .../python/src/openvino/properties/intel_auto/__init__.py | 2 +- .../python/src/openvino/properties/intel_cpu/__init__.py | 2 +- .../python/src/openvino/properties/intel_gpu/__init__.py | 2 +- .../python/src/openvino/properties/intel_gpu/hint/__init__.py | 2 +- src/bindings/python/src/openvino/properties/log/__init__.py | 2 +- src/bindings/python/src/openvino/properties/streams/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/exceptions.py | 2 +- src/bindings/python/src/openvino/runtime/ie_api.py | 2 +- src/bindings/python/src/openvino/runtime/op/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/op/util/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset1/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset1/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset10/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset10/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset11/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset11/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset12/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset12/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset13/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset13/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset2/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset2/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset3/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset3/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset4/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset4/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset5/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset5/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset6/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset6/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset7/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset7/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset8/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset8/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset9/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/opset9/ops.py | 2 +- src/bindings/python/src/openvino/runtime/opset_utils.py | 2 +- src/bindings/python/src/openvino/runtime/passes/__init__.py | 2 +- 
.../python/src/openvino/runtime/passes/graph_rewrite.py | 2 +- src/bindings/python/src/openvino/runtime/passes/manager.py | 2 +- src/bindings/python/src/openvino/runtime/properties/__init__.py | 2 +- .../python/src/openvino/runtime/properties/hint/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/utils/__init__.py | 2 +- src/bindings/python/src/openvino/runtime/utils/broadcasting.py | 2 +- .../python/src/openvino/runtime/utils/data_helpers/__init__.py | 2 +- .../src/openvino/runtime/utils/data_helpers/data_dispatcher.py | 2 +- .../python/src/openvino/runtime/utils/data_helpers/wrappers.py | 2 +- src/bindings/python/src/openvino/runtime/utils/decorators.py | 2 +- .../python/src/openvino/runtime/utils/input_validation.py | 2 +- src/bindings/python/src/openvino/runtime/utils/node_factory.py | 2 +- src/bindings/python/src/openvino/runtime/utils/reduction.py | 2 +- src/bindings/python/src/openvino/runtime/utils/types.py | 2 +- src/bindings/python/src/openvino/test_utils/__init__.py | 2 +- src/bindings/python/src/openvino/torch/__init__.py | 2 +- src/bindings/python/src/openvino/utils.py | 2 +- src/bindings/python/src/pyopenvino/CMakeLists.txt | 2 +- src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp | 2 +- src/bindings/python/src/pyopenvino/core/async_infer_queue.hpp | 2 +- src/bindings/python/src/pyopenvino/core/common.cpp | 2 +- src/bindings/python/src/pyopenvino/core/common.hpp | 2 +- src/bindings/python/src/pyopenvino/core/compiled_model.cpp | 2 +- src/bindings/python/src/pyopenvino/core/compiled_model.hpp | 2 +- src/bindings/python/src/pyopenvino/core/core.cpp | 2 +- src/bindings/python/src/pyopenvino/core/core.hpp | 2 +- src/bindings/python/src/pyopenvino/core/extension.cpp | 2 +- src/bindings/python/src/pyopenvino/core/extension.hpp | 2 +- src/bindings/python/src/pyopenvino/core/infer_request.cpp | 2 +- src/bindings/python/src/pyopenvino/core/infer_request.hpp | 2 +- .../python/src/pyopenvino/core/offline_transformations.cpp | 2 +- .../python/src/pyopenvino/core/offline_transformations.hpp | 2 +- src/bindings/python/src/pyopenvino/core/profiling_info.cpp | 2 +- src/bindings/python/src/pyopenvino/core/profiling_info.hpp | 2 +- .../python/src/pyopenvino/core/properties/properties.cpp | 2 +- .../python/src/pyopenvino/core/properties/properties.hpp | 2 +- src/bindings/python/src/pyopenvino/core/tensor.cpp | 2 +- src/bindings/python/src/pyopenvino/core/tensor.hpp | 2 +- src/bindings/python/src/pyopenvino/core/variable_state.cpp | 2 +- src/bindings/python/src/pyopenvino/core/variable_state.hpp | 2 +- src/bindings/python/src/pyopenvino/core/version.cpp | 2 +- src/bindings/python/src/pyopenvino/core/version.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/decoder.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/decoder.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/extension.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/extension.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/frontend.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/frontend.hpp | 2 +- .../python/src/pyopenvino/frontend/frontend_module.cmake | 2 +- src/bindings/python/src/pyopenvino/frontend/input_model.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/input_model.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/manager.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/manager.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/node_context.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/node_context.hpp | 2 +- 
src/bindings/python/src/pyopenvino/frontend/onnx/CMakeLists.txt | 2 +- src/bindings/python/src/pyopenvino/frontend/onnx/extension.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/onnx/extension.hpp | 2 +- src/bindings/python/src/pyopenvino/frontend/onnx/py_module.cpp | 2 +- .../python/src/pyopenvino/frontend/paddle/CMakeLists.txt | 2 +- .../python/src/pyopenvino/frontend/paddle/extension.cpp | 2 +- .../python/src/pyopenvino/frontend/paddle/extension.hpp | 2 +- .../python/src/pyopenvino/frontend/paddle/py_module.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/place.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/place.hpp | 2 +- .../python/src/pyopenvino/frontend/pytorch/CMakeLists.txt | 2 +- src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp | 2 +- src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp | 2 +- .../python/src/pyopenvino/frontend/pytorch/extension.cpp | 2 +- .../python/src/pyopenvino/frontend/pytorch/extension.hpp | 2 +- .../python/src/pyopenvino/frontend/pytorch/py_module.cpp | 2 +- .../python/src/pyopenvino/frontend/tensorflow/CMakeLists.txt | 2 +- .../python/src/pyopenvino/frontend/tensorflow/decoder_base.cpp | 2 +- .../python/src/pyopenvino/frontend/tensorflow/decoder_base.hpp | 2 +- .../python/src/pyopenvino/frontend/tensorflow/extension.cpp | 2 +- .../python/src/pyopenvino/frontend/tensorflow/extension.hpp | 2 +- .../src/pyopenvino/frontend/tensorflow/graph_iterator.cpp | 2 +- .../src/pyopenvino/frontend/tensorflow/graph_iterator.hpp | 2 +- .../python/src/pyopenvino/frontend/tensorflow/py_module.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/any.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/any.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/axis_set.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/axis_set.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/axis_vector.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/axis_vector.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/coordinate.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/coordinate.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/descriptors/tensor.hpp | 2 +- .../python/src/pyopenvino/graph/dict_attribute_visitor.cpp | 2 +- .../python/src/pyopenvino/graph/dict_attribute_visitor.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/dimension.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/dimension.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/discrete_type_info.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/layout.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/layout.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/layout_helpers.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/model.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/model.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/node.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/node.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/node_factory.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/node_factory.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/node_input.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/node_input.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/node_output.cpp | 
2 +- src/bindings/python/src/pyopenvino/graph/node_output.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/assign.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/assign.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/constant.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/constant.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/if.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/if.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/loop.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/loop.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/parameter.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/result.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/result.hpp | 2 +- .../python/src/pyopenvino/graph/ops/tensor_iterator.cpp | 2 +- .../python/src/pyopenvino/graph/ops/tensor_iterator.hpp | 2 +- .../src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp | 2 +- .../src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_logical.cpp | 2 +- .../pyopenvino/graph/ops/util/binary_elementwise_logical.hpp | 2 +- .../python/src/pyopenvino/graph/ops/util/index_reduction.cpp | 2 +- .../python/src/pyopenvino/graph/ops/util/index_reduction.hpp | 2 +- .../python/src/pyopenvino/graph/ops/util/multisubgraph.cpp | 2 +- .../python/src/pyopenvino/graph/ops/util/multisubgraph.hpp | 2 +- .../src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp | 2 +- .../src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp | 2 +- .../pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp | 2 +- .../pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/ops/util/variable.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/partial_shape.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/partial_shape.hpp | 2 +- .../python/src/pyopenvino/graph/passes/graph_rewrite.cpp | 2 +- .../python/src/pyopenvino/graph/passes/graph_rewrite.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/manager.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/manager.hpp | 2 +- .../python/src/pyopenvino/graph/passes/matcher_pass.cpp | 2 +- .../python/src/pyopenvino/graph/passes/matcher_pass.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/model_pass.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/pass_base.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.hpp | 2 +- .../src/pyopenvino/graph/passes/regmodule_graph_passes.cpp | 2 +- .../src/pyopenvino/graph/passes/regmodule_graph_passes.hpp | 2 +- .../python/src/pyopenvino/graph/passes/transformations.cpp | 2 +- .../python/src/pyopenvino/graph/passes/transformations.hpp | 2 +- .../python/src/pyopenvino/graph/preprocess/pre_post_process.cpp | 2 +- .../python/src/pyopenvino/graph/preprocess/pre_post_process.hpp | 2 +- 
src/bindings/python/src/pyopenvino/graph/rt_map.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/rt_map.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/shape.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/shape.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/strides.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/strides.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/types/element_type.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/types/element_type.hpp | 2 +- .../python/src/pyopenvino/graph/types/regmodule_graph_types.cpp | 2 +- .../python/src/pyopenvino/graph/types/regmodule_graph_types.hpp | 2 +- src/bindings/python/src/pyopenvino/graph/util.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/util.hpp | 2 +- src/bindings/python/src/pyopenvino/pyopenvino.cpp | 2 +- src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt | 2 +- src/bindings/python/src/pyopenvino/test_utils/test_utils.cpp | 2 +- src/bindings/python/src/pyopenvino/utils/utils.cpp | 2 +- src/bindings/python/src/pyopenvino/utils/utils.hpp | 2 +- src/bindings/python/tests/__init__.py | 2 +- src/bindings/python/tests/conftest.py | 2 +- src/bindings/python/tests/mock/mock_py_frontend/CMakeLists.txt | 2 +- .../include/mock_py_frontend/frontend_wrappers.hpp | 2 +- .../include/mock_py_frontend/mock_py_frontend.hpp | 2 +- .../mock_py_frontend/include/mock_py_frontend/visibility.hpp | 2 +- .../python/tests/mock/mock_py_frontend/src/frontend_wrapper.cpp | 2 +- .../python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp | 2 +- .../python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt | 2 +- .../mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp | 2 +- src/bindings/python/tests/test_graph/__init__.py | 2 +- src/bindings/python/tests/test_graph/test_any.py | 2 +- src/bindings/python/tests/test_graph/test_basic.py | 2 +- src/bindings/python/tests/test_graph/test_constant.py | 2 +- src/bindings/python/tests/test_graph/test_convolution.py | 2 +- src/bindings/python/tests/test_graph/test_core.py | 2 +- src/bindings/python/tests/test_graph/test_create_op.py | 2 +- src/bindings/python/tests/test_graph/test_ctc_loss.py | 2 +- src/bindings/python/tests/test_graph/test_data_movement.py | 2 +- src/bindings/python/tests/test_graph/test_detection_output.py | 2 +- src/bindings/python/tests/test_graph/test_dft.py | 2 +- src/bindings/python/tests/test_graph/test_dyn_attributes.py | 2 +- src/bindings/python/tests/test_graph/test_einsum.py | 2 +- src/bindings/python/tests/test_graph/test_eye.py | 2 +- src/bindings/python/tests/test_graph/test_fake_convert.py | 2 +- src/bindings/python/tests/test_graph/test_gather.py | 2 +- src/bindings/python/tests/test_graph/test_idft.py | 2 +- src/bindings/python/tests/test_graph/test_if.py | 2 +- src/bindings/python/tests/test_graph/test_input_validation.py | 2 +- src/bindings/python/tests/test_graph/test_log_softmax.py | 2 +- src/bindings/python/tests/test_graph/test_loop.py | 2 +- src/bindings/python/tests/test_graph/test_manager.py | 2 +- src/bindings/python/tests/test_graph/test_multinomial.py | 2 +- src/bindings/python/tests/test_graph/test_nms_rotated.py | 2 +- src/bindings/python/tests/test_graph/test_node_factory.py | 2 +- src/bindings/python/tests/test_graph/test_normalization.py | 2 +- src/bindings/python/tests/test_graph/test_ops.py | 2 +- src/bindings/python/tests/test_graph/test_ops_binary.py | 2 +- src/bindings/python/tests/test_graph/test_ops_fused.py | 2 +- src/bindings/python/tests/test_graph/test_ops_matmul.py | 2 +- 
src/bindings/python/tests/test_graph/test_ops_multioutput.py | 2 +- src/bindings/python/tests/test_graph/test_ops_reshape.py | 2 +- src/bindings/python/tests/test_graph/test_ops_result.py | 2 +- src/bindings/python/tests/test_graph/test_ops_scatter.py | 2 +- .../python/tests/test_graph/test_ops_scatter_nd_update.py | 2 +- src/bindings/python/tests/test_graph/test_ops_unary.py | 2 +- src/bindings/python/tests/test_graph/test_ops_util_variable.py | 2 +- src/bindings/python/tests/test_graph/test_pad.py | 2 +- src/bindings/python/tests/test_graph/test_pooling.py | 2 +- src/bindings/python/tests/test_graph/test_preprocess.py | 2 +- src/bindings/python/tests/test_graph/test_proposal.py | 2 +- src/bindings/python/tests/test_graph/test_random_uniform.py | 2 +- src/bindings/python/tests/test_graph/test_rdft.py | 2 +- src/bindings/python/tests/test_graph/test_reduction.py | 2 +- src/bindings/python/tests/test_graph/test_roll.py | 2 +- .../tests/test_graph/test_scaled_dot_product_attention.py | 2 +- .../python/tests/test_graph/test_sequence_processing.py | 2 +- src/bindings/python/tests/test_graph/test_swish.py | 2 +- src/bindings/python/tests/test_graph/test_tensor_iterator.py | 2 +- src/bindings/python/tests/test_graph/test_utils.py | 2 +- src/bindings/python/tests/test_graph/util.py | 2 +- src/bindings/python/tests/test_package_versions.py | 2 +- src/bindings/python/tests/test_runtime/__init__.py | 2 +- .../python/tests/test_runtime/subprocess_test_tensor.py | 2 +- .../python/tests/test_runtime/test_async_infer_request.py | 2 +- src/bindings/python/tests/test_runtime/test_compiled_model.py | 2 +- src/bindings/python/tests/test_runtime/test_core.py | 2 +- src/bindings/python/tests/test_runtime/test_dimension.py | 2 +- src/bindings/python/tests/test_runtime/test_input_node.py | 2 +- src/bindings/python/tests/test_runtime/test_memory_modes.py | 2 +- src/bindings/python/tests/test_runtime/test_model.py | 2 +- src/bindings/python/tests/test_runtime/test_nogil.py | 2 +- .../python/tests/test_runtime/test_output_const_node.py | 2 +- src/bindings/python/tests/test_runtime/test_output_node.py | 2 +- src/bindings/python/tests/test_runtime/test_ovdict.py | 2 +- src/bindings/python/tests/test_runtime/test_properties.py | 2 +- .../python/tests/test_runtime/test_sync_infer_request.py | 2 +- src/bindings/python/tests/test_runtime/test_tensor.py | 2 +- src/bindings/python/tests/test_runtime/test_tensor_string.py | 2 +- src/bindings/python/tests/test_runtime/test_type.py | 2 +- .../python/tests/test_torchvision_to_ov/test_preprocessor.py | 2 +- src/bindings/python/tests/test_transformations/__init__.py | 2 +- .../python/tests/test_transformations/test_compression.py | 2 +- .../python/tests/test_transformations/test_compression_4bit.py | 2 +- .../python/tests/test_transformations/test_graph_rewrite.py | 2 +- src/bindings/python/tests/test_transformations/test_manager.py | 2 +- .../python/tests/test_transformations/test_matcher_pass.py | 2 +- .../python/tests/test_transformations/test_model_pass.py | 2 +- .../python/tests/test_transformations/test_offline_api.py | 2 +- .../python/tests/test_transformations/test_pattern_ops.py | 2 +- .../tests/test_transformations/test_public_transformations.py | 2 +- .../python/tests/test_transformations/test_replacement_api.py | 2 +- src/bindings/python/tests/test_transformations/utils/utils.py | 2 +- src/bindings/python/tests/test_utils/test_data_dispatch.py | 2 +- src/bindings/python/tests/test_utils/test_utils.py | 2 +- src/bindings/python/tests/utils/helpers.py | 2 +- 
src/bindings/python/wheel/CMakeLists.txt | 2 +- src/bindings/python/wheel/fdupes_check.cmake | 2 +- src/bindings/python/wheel/setup.py | 2 +- tools/benchmark_tool/openvino/__init__.py | 2 +- tools/mo/openvino/__init__.py | 2 +- tools/openvino_dev/src/openvino/__init__.py | 2 +- tools/ovc/openvino/__init__.py | 2 +- 359 files changed, 359 insertions(+), 359 deletions(-) diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index ec4661e2a73cc0..afd5842bb8f9ae 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/docs/examples/custom_module/__init__.py b/src/bindings/python/docs/examples/custom_module/__init__.py index b3922258fe3562..d2bdb0cfb06ca8 100644 --- a/src/bindings/python/docs/examples/custom_module/__init__.py +++ b/src/bindings/python/docs/examples/custom_module/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.helpers.custom_module.custom_helpers import top1_index diff --git a/src/bindings/python/docs/examples/custom_module/custom_helpers.py b/src/bindings/python/docs/examples/custom_module/custom_helpers.py index 9d0779fbf76d52..a2d39cf7a1a3ad 100644 --- a/src/bindings/python/docs/examples/custom_module/custom_helpers.py +++ b/src/bindings/python/docs/examples/custom_module/custom_helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 def top1_index(results: list) -> int: diff --git a/src/bindings/python/docs/examples/openvino/__init__.py b/src/bindings/python/docs/examples/openvino/__init__.py index 61d737397cfd87..70f97bea2153f1 100644 --- a/src/bindings/python/docs/examples/openvino/__init__.py +++ b/src/bindings/python/docs/examples/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore # mypy issue #1422 diff --git a/src/bindings/python/docs/examples/openvino/mymodule/__init__.py b/src/bindings/python/docs/examples/openvino/mymodule/__init__.py index 191234fecbb73e..02975c2817b772 100644 --- a/src/bindings/python/docs/examples/openvino/mymodule/__init__.py +++ b/src/bindings/python/docs/examples/openvino/mymodule/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from .myclass import MyClass diff --git a/src/bindings/python/docs/examples/openvino/mymodule/myclass.py b/src/bindings/python/docs/examples/openvino/mymodule/myclass.py index 38da658601d8b7..808c5c3d4bfe60 100644 --- a/src/bindings/python/docs/examples/openvino/mymodule/myclass.py +++ b/src/bindings/python/docs/examples/openvino/mymodule/myclass.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 class MyClass(): diff --git a/src/bindings/python/src/openvino/__init__.py b/src/bindings/python/src/openvino/__init__.py index b7dc434f3148cc..1d75589bd2eceb 
100644 --- a/src/bindings/python/src/openvino/__init__.py +++ b/src/bindings/python/src/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/src/bindings/python/src/openvino/_offline_transformations/__init__.py b/src/bindings/python/src/openvino/_offline_transformations/__init__.py index 62adcc64cfa359..2cfe8cec521524 100644 --- a/src/bindings/python/src/openvino/_offline_transformations/__init__.py +++ b/src/bindings/python/src/openvino/_offline_transformations/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/__init__.py b/src/bindings/python/src/openvino/frontend/__init__.py index 06cdbf432d7040..93abf226dca827 100644 --- a/src/bindings/python/src/openvino/frontend/__init__.py +++ b/src/bindings/python/src/openvino/frontend/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/frontend/frontend.py b/src/bindings/python/src/openvino/frontend/frontend.py index 8552b819e13312..4d549d24b4ef7c 100644 --- a/src/bindings/python/src/openvino/frontend/frontend.py +++ b/src/bindings/python/src/openvino/frontend/frontend.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import Union diff --git a/src/bindings/python/src/openvino/frontend/onnx/__init__.py b/src/bindings/python/src/openvino/frontend/onnx/__init__.py index 85f5ad00389ca4..dbe58fc4cd67dc 100644 --- a/src/bindings/python/src/openvino/frontend/onnx/__init__.py +++ b/src/bindings/python/src/openvino/frontend/onnx/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/frontend/paddle/__init__.py b/src/bindings/python/src/openvino/frontend/paddle/__init__.py index 36648a3a0d054a..a801719ab99729 100644 --- a/src/bindings/python/src/openvino/frontend/paddle/__init__.py +++ b/src/bindings/python/src/openvino/frontend/paddle/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/frontend/pytorch/__init__.py b/src/bindings/python/src/openvino/frontend/pytorch/__init__.py index 18045bdd67c3a3..7b1cbb471ca5c3 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/__init__.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py index 479e1a5cb1c622..09375329cb7770 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel 
Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/gptq.py b/src/bindings/python/src/openvino/frontend/pytorch/gptq.py index b4bd06552b2a1e..e29bbd17c8d11d 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/gptq.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/gptq.py @@ -1,5 +1,5 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py index 3468225ac13098..fff781fa88fad7 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py index 4288cf435a74fe..56be57e01aa229 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py index 5db1798627475c..7b486d6a1f6648 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py index 609a7b73ea1f21..7531d4976e9546 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # mypy: ignore-errors diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py index a6fb4de094d3eb..d7cb590a6dc80d 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/partition.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/partition.py index cc3381783b00ee..468eed28d91a56 100644 --- 
a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/partition.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/partition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # mypy: ignore-errors diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 404601643feadd..2ea7eb6d68b74b 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index eb0f4c1e0b9a4a..0d47227c7c6f1d 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -1,5 +1,5 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/__init__.py b/src/bindings/python/src/openvino/frontend/tensorflow/__init__.py index 14f5bd7ebd7bbd..98c4dae861f1e6 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/__init__.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py b/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py index 29dc6b1ad58973..83dd7ea25a63fb 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py index 623e0b42f88134..ca830b67c193b3 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py index 2a464016fc2f03..cf59d24c7fb77f 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/helpers/__init__.py b/src/bindings/python/src/openvino/helpers/__init__.py index 69500b97be106c..7af9af93065f8c 100644 --- a/src/bindings/python/src/openvino/helpers/__init__.py +++ 
b/src/bindings/python/src/openvino/helpers/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/helpers/packing.py b/src/bindings/python/src/openvino/helpers/packing.py index 523c356ce5bf6d..c09aa3ba677860 100644 --- a/src/bindings/python/src/openvino/helpers/packing.py +++ b/src/bindings/python/src/openvino/helpers/packing.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/src/openvino/preprocess/__init__.py b/src/bindings/python/src/openvino/preprocess/__init__.py index 3fe713c941431f..9b37f1f328dfca 100644 --- a/src/bindings/python/src/openvino/preprocess/__init__.py +++ b/src/bindings/python/src/openvino/preprocess/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/__init__.py b/src/bindings/python/src/openvino/preprocess/torchvision/__init__.py index e66ed1fb94f16e..5643930cddbbf1 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/__init__.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py index ff0f89fe366155..c14635cc118208 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import Callable, Any, Union diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py index 43968fb4fa4fc5..af0f5bb24b385d 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # mypy: disable-error-code="no-redef" diff --git a/src/bindings/python/src/openvino/properties/__init__.py b/src/bindings/python/src/openvino/properties/__init__.py index 237d7c0d7dd8e3..05c5a260d65882 100644 --- a/src/bindings/python/src/openvino/properties/__init__.py +++ b/src/bindings/python/src/openvino/properties/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/properties/_properties.py b/src/bindings/python/src/openvino/properties/_properties.py index dfabe6a7b4178e..a3d9e2076ad072 100644 --- a/src/bindings/python/src/openvino/properties/_properties.py +++ 
b/src/bindings/python/src/openvino/properties/_properties.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import sys diff --git a/src/bindings/python/src/openvino/properties/device/__init__.py b/src/bindings/python/src/openvino/properties/device/__init__.py index 3fd42834197b24..a1dc2c89e0a044 100644 --- a/src/bindings/python/src/openvino/properties/device/__init__.py +++ b/src/bindings/python/src/openvino/properties/device/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/properties/hint/__init__.py b/src/bindings/python/src/openvino/properties/hint/__init__.py index 2c40e2534f8e72..5ff211301f9c74 100644 --- a/src/bindings/python/src/openvino/properties/hint/__init__.py +++ b/src/bindings/python/src/openvino/properties/hint/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/properties/intel_auto/__init__.py b/src/bindings/python/src/openvino/properties/intel_auto/__init__.py index 23486becc306b4..f4e369e6d27339 100644 --- a/src/bindings/python/src/openvino/properties/intel_auto/__init__.py +++ b/src/bindings/python/src/openvino/properties/intel_auto/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/properties/intel_cpu/__init__.py b/src/bindings/python/src/openvino/properties/intel_cpu/__init__.py index 7b13195261ed65..9752b69d124046 100644 --- a/src/bindings/python/src/openvino/properties/intel_cpu/__init__.py +++ b/src/bindings/python/src/openvino/properties/intel_cpu/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Properties diff --git a/src/bindings/python/src/openvino/properties/intel_gpu/__init__.py b/src/bindings/python/src/openvino/properties/intel_gpu/__init__.py index 6cb43927241a40..6c2a8f7b0f18f5 100644 --- a/src/bindings/python/src/openvino/properties/intel_gpu/__init__.py +++ b/src/bindings/python/src/openvino/properties/intel_gpu/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Properties diff --git a/src/bindings/python/src/openvino/properties/intel_gpu/hint/__init__.py b/src/bindings/python/src/openvino/properties/intel_gpu/hint/__init__.py index af54a90c6e69f9..cd60893741bab2 100644 --- a/src/bindings/python/src/openvino/properties/intel_gpu/hint/__init__.py +++ b/src/bindings/python/src/openvino/properties/intel_gpu/hint/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Properties diff --git a/src/bindings/python/src/openvino/properties/log/__init__.py b/src/bindings/python/src/openvino/properties/log/__init__.py index 9295f5b11fa5f3..1a26361d3a2004 100644 --- 
a/src/bindings/python/src/openvino/properties/log/__init__.py +++ b/src/bindings/python/src/openvino/properties/log/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/properties/streams/__init__.py b/src/bindings/python/src/openvino/properties/streams/__init__.py index 457d6c88f706be..33ff3197950618 100644 --- a/src/bindings/python/src/openvino/properties/streams/__init__.py +++ b/src/bindings/python/src/openvino/properties/streams/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Classes diff --git a/src/bindings/python/src/openvino/runtime/__init__.py b/src/bindings/python/src/openvino/runtime/__init__.py index 3e3d9972ff2590..da109f8b02bf85 100644 --- a/src/bindings/python/src/openvino/runtime/__init__.py +++ b/src/bindings/python/src/openvino/runtime/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """openvino module namespace, exposing factory functions for all ops and other classes.""" diff --git a/src/bindings/python/src/openvino/runtime/exceptions.py b/src/bindings/python/src/openvino/runtime/exceptions.py index 41e272824bb3f2..8ad77403900423 100644 --- a/src/bindings/python/src/openvino/runtime/exceptions.py +++ b/src/bindings/python/src/openvino/runtime/exceptions.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """openvino exceptions hierarchy. 
All exceptions are descendants of OVError.""" diff --git a/src/bindings/python/src/openvino/runtime/ie_api.py b/src/bindings/python/src/openvino/runtime/ie_api.py index 271a042a5db3de..1dd5405e843cac 100644 --- a/src/bindings/python/src/openvino/runtime/ie_api.py +++ b/src/bindings/python/src/openvino/runtime/ie_api.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import Any, Iterable, Union, Optional, Dict diff --git a/src/bindings/python/src/openvino/runtime/op/__init__.py b/src/bindings/python/src/openvino/runtime/op/__init__.py index 32ec2e8b909344..a5ae58ad365a20 100644 --- a/src/bindings/python/src/openvino/runtime/op/__init__.py +++ b/src/bindings/python/src/openvino/runtime/op/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/runtime/op/util/__init__.py b/src/bindings/python/src/openvino/runtime/op/util/__init__.py index cf5406f0f8e2be..0c946b115f451e 100644 --- a/src/bindings/python/src/openvino/runtime/op/util/__init__.py +++ b/src/bindings/python/src/openvino/runtime/op/util/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/src/bindings/python/src/openvino/runtime/opset1/__init__.py b/src/bindings/python/src/openvino/runtime/opset1/__init__.py index 0b15e05447630a..5bfa42f43f26b9 100644 --- a/src/bindings/python/src/openvino/runtime/opset1/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset1/ops.py b/src/bindings/python/src/openvino/runtime/opset1/ops.py index b5e8b63ec21dcb..24cffa7f852478 100644 --- a/src/bindings/python/src/openvino/runtime/opset1/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset1/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset10/__init__.py b/src/bindings/python/src/openvino/runtime/opset10/__init__.py index ade2b0dc555289..29ebcb27039abc 100644 --- a/src/bindings/python/src/openvino/runtime/opset10/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset10/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset10/ops.py b/src/bindings/python/src/openvino/runtime/opset10/ops.py index 32cc7ac5363ca6..b10cf357d61461 100644 --- a/src/bindings/python/src/openvino/runtime/opset10/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset10/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git 
a/src/bindings/python/src/openvino/runtime/opset11/__init__.py b/src/bindings/python/src/openvino/runtime/opset11/__init__.py index 50513a812c0ccb..b692741257d435 100644 --- a/src/bindings/python/src/openvino/runtime/opset11/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset11/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset11/ops.py b/src/bindings/python/src/openvino/runtime/opset11/ops.py index 235b0e0ef37af5..fc168a25b21d03 100644 --- a/src/bindings/python/src/openvino/runtime/opset11/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset11/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset12/__init__.py b/src/bindings/python/src/openvino/runtime/opset12/__init__.py index b864996e044629..381d82ce8c9be1 100644 --- a/src/bindings/python/src/openvino/runtime/opset12/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset12/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset12/ops.py b/src/bindings/python/src/openvino/runtime/opset12/ops.py index 881c6f0f1466c4..3d69bdc5f3f713 100644 --- a/src/bindings/python/src/openvino/runtime/opset12/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset12/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all ngraph ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py index 1abac3e07a7f2f..032d55ce841cd2 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index f42efa55048d98..302e0e8b5df870 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for ops added to openvino opset13.""" diff --git a/src/bindings/python/src/openvino/runtime/opset2/__init__.py b/src/bindings/python/src/openvino/runtime/opset2/__init__.py index aa0d44f77e45cc..34d0d9b6737709 100644 --- a/src/bindings/python/src/openvino/runtime/opset2/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel 
Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset2/ops.py b/src/bindings/python/src/openvino/runtime/opset2/ops.py index 68e82ea8973e08..5670ec22acc0dd 100644 --- a/src/bindings/python/src/openvino/runtime/opset2/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset2/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset3/__init__.py b/src/bindings/python/src/openvino/runtime/opset3/__init__.py index aa8f0ac7f7befd..964acb15b34bbc 100644 --- a/src/bindings/python/src/openvino/runtime/opset3/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset3/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset3/ops.py b/src/bindings/python/src/openvino/runtime/opset3/ops.py index 8a1d81d9703ffb..96d11b1ad43d0a 100644 --- a/src/bindings/python/src/openvino/runtime/opset3/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset3/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset4/__init__.py b/src/bindings/python/src/openvino/runtime/opset4/__init__.py index d84f4ad6e181ad..bf57172bed40e2 100644 --- a/src/bindings/python/src/openvino/runtime/opset4/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset4/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset4/ops.py b/src/bindings/python/src/openvino/runtime/opset4/ops.py index 4056053a692de4..a077f745268ae4 100644 --- a/src/bindings/python/src/openvino/runtime/opset4/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset4/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset5/__init__.py b/src/bindings/python/src/openvino/runtime/opset5/__init__.py index 0651265b756b01..6d68b3e8d9f9cb 100644 --- a/src/bindings/python/src/openvino/runtime/opset5/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset5/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset5/ops.py b/src/bindings/python/src/openvino/runtime/opset5/ops.py index d32526e3be4853..6ca3d707570412 100644 --- a/src/bindings/python/src/openvino/runtime/opset5/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset5/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 
2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset6/__init__.py b/src/bindings/python/src/openvino/runtime/opset6/__init__.py index d22fe8c4f2d4dd..2b2babb00c5021 100644 --- a/src/bindings/python/src/openvino/runtime/opset6/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset6/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset6/ops.py b/src/bindings/python/src/openvino/runtime/opset6/ops.py index d0cd8c2e8b0a72..64ed7aa82c0535 100644 --- a/src/bindings/python/src/openvino/runtime/opset6/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset6/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset7/__init__.py b/src/bindings/python/src/openvino/runtime/opset7/__init__.py index fce9b001f7800b..9ee692cea75f59 100644 --- a/src/bindings/python/src/openvino/runtime/opset7/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset7/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset7/ops.py b/src/bindings/python/src/openvino/runtime/opset7/ops.py index a6fcb697de4c40..5e11d41b1e778c 100644 --- a/src/bindings/python/src/openvino/runtime/opset7/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset7/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset8/__init__.py b/src/bindings/python/src/openvino/runtime/opset8/__init__.py index b30cde97be9cbc..bf6be68ca0cbc6 100644 --- a/src/bindings/python/src/openvino/runtime/opset8/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset8/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset8/ops.py b/src/bindings/python/src/openvino/runtime/opset8/ops.py index 3b9236ae91a09f..f01ff8d6dd3c71 100644 --- a/src/bindings/python/src/openvino/runtime/opset8/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset8/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset9/__init__.py b/src/bindings/python/src/openvino/runtime/opset9/__init__.py index d08b873e0cab92..138ecdbdcd5d33 100644 --- a/src/bindings/python/src/openvino/runtime/opset9/__init__.py +++ 
b/src/bindings/python/src/openvino/runtime/opset9/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.opset1.ops import absolute diff --git a/src/bindings/python/src/openvino/runtime/opset9/ops.py b/src/bindings/python/src/openvino/runtime/opset9/ops.py index 918058ed8187dd..72a6e905e1d659 100644 --- a/src/bindings/python/src/openvino/runtime/opset9/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset9/ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" diff --git a/src/bindings/python/src/openvino/runtime/opset_utils.py b/src/bindings/python/src/openvino/runtime/opset_utils.py index 33ac195b53a23d..475750e71f87c5 100644 --- a/src/bindings/python/src/openvino/runtime/opset_utils.py +++ b/src/bindings/python/src/openvino/runtime/opset_utils.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import Optional diff --git a/src/bindings/python/src/openvino/runtime/passes/__init__.py b/src/bindings/python/src/openvino/runtime/passes/__init__.py index 6a3dfd63039261..5155379a1a2485 100644 --- a/src/bindings/python/src/openvino/runtime/passes/__init__.py +++ b/src/bindings/python/src/openvino/runtime/passes/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # type: ignore # flake8: noqa diff --git a/src/bindings/python/src/openvino/runtime/passes/graph_rewrite.py b/src/bindings/python/src/openvino/runtime/passes/graph_rewrite.py index f320cf40e36beb..317c81b180b1da 100644 --- a/src/bindings/python/src/openvino/runtime/passes/graph_rewrite.py +++ b/src/bindings/python/src/openvino/runtime/passes/graph_rewrite.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # type: ignore from openvino._pyopenvino.passes import MatcherPass diff --git a/src/bindings/python/src/openvino/runtime/passes/manager.py b/src/bindings/python/src/openvino/runtime/passes/manager.py index a59e0a7b69a261..7ad59e774ccff2 100644 --- a/src/bindings/python/src/openvino/runtime/passes/manager.py +++ b/src/bindings/python/src/openvino/runtime/passes/manager.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # type: ignore from openvino._pyopenvino.passes import Manager as ManagerBase diff --git a/src/bindings/python/src/openvino/runtime/properties/__init__.py b/src/bindings/python/src/openvino/runtime/properties/__init__.py index c70b24882849d4..2a6b7a8fa416a9 100644 --- a/src/bindings/python/src/openvino/runtime/properties/__init__.py +++ b/src/bindings/python/src/openvino/runtime/properties/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/runtime/properties/hint/__init__.py b/src/bindings/python/src/openvino/runtime/properties/hint/__init__.py index 
42476cc52496b6..986d5ef3d9b0eb 100644 --- a/src/bindings/python/src/openvino/runtime/properties/hint/__init__.py +++ b/src/bindings/python/src/openvino/runtime/properties/hint/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Enums diff --git a/src/bindings/python/src/openvino/runtime/utils/__init__.py b/src/bindings/python/src/openvino/runtime/utils/__init__.py index a89ea26a52257e..73399ccbed2598 100644 --- a/src/bindings/python/src/openvino/runtime/utils/__init__.py +++ b/src/bindings/python/src/openvino/runtime/utils/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Generic utilities. Factor related functions out to separate files.""" diff --git a/src/bindings/python/src/openvino/runtime/utils/broadcasting.py b/src/bindings/python/src/openvino/runtime/utils/broadcasting.py index ed6d59f1f07163..9fd13da7728e29 100644 --- a/src/bindings/python/src/openvino/runtime/utils/broadcasting.py +++ b/src/bindings/python/src/openvino/runtime/utils/broadcasting.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import logging diff --git a/src/bindings/python/src/openvino/runtime/utils/data_helpers/__init__.py b/src/bindings/python/src/openvino/runtime/utils/data_helpers/__init__.py index 829a77af96a04c..a46105efaaeadb 100644 --- a/src/bindings/python/src/openvino/runtime/utils/data_helpers/__init__.py +++ b/src/bindings/python/src/openvino/runtime/utils/data_helpers/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.utils.data_helpers.data_dispatcher import _data_dispatch diff --git a/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py b/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py index d90bc15bd92562..1cf35a133eb9b2 100644 --- a/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py +++ b/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from functools import singledispatch diff --git a/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py b/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py index 7506faf3abef23..61c00fa094924f 100644 --- a/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py +++ b/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/src/openvino/runtime/utils/decorators.py b/src/bindings/python/src/openvino/runtime/utils/decorators.py index b980c4c7e50f06..4cef82ac03d1e7 100644 --- a/src/bindings/python/src/openvino/runtime/utils/decorators.py +++ b/src/bindings/python/src/openvino/runtime/utils/decorators.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel 
Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from functools import wraps diff --git a/src/bindings/python/src/openvino/runtime/utils/input_validation.py b/src/bindings/python/src/openvino/runtime/utils/input_validation.py index d2a0febe01feb1..e79a16c48581b1 100644 --- a/src/bindings/python/src/openvino/runtime/utils/input_validation.py +++ b/src/bindings/python/src/openvino/runtime/utils/input_validation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Helper functions for validating user input.""" diff --git a/src/bindings/python/src/openvino/runtime/utils/node_factory.py b/src/bindings/python/src/openvino/runtime/utils/node_factory.py index 31826d659ac53d..b029325268d467 100644 --- a/src/bindings/python/src/openvino/runtime/utils/node_factory.py +++ b/src/bindings/python/src/openvino/runtime/utils/node_factory.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import logging as log diff --git a/src/bindings/python/src/openvino/runtime/utils/reduction.py b/src/bindings/python/src/openvino/runtime/utils/reduction.py index 34454a2aeb450e..71d0af8de7376e 100644 --- a/src/bindings/python/src/openvino/runtime/utils/reduction.py +++ b/src/bindings/python/src/openvino/runtime/utils/reduction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import Iterable, Optional diff --git a/src/bindings/python/src/openvino/runtime/utils/types.py b/src/bindings/python/src/openvino/runtime/utils/types.py index d493f2ff825d8d..61214a386d3738 100644 --- a/src/bindings/python/src/openvino/runtime/utils/types.py +++ b/src/bindings/python/src/openvino/runtime/utils/types.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Functions related to converting between Python and numpy types and openvino types.""" diff --git a/src/bindings/python/src/openvino/test_utils/__init__.py b/src/bindings/python/src/openvino/test_utils/__init__.py index 121b0972ed730f..e25fa9e67be800 100644 --- a/src/bindings/python/src/openvino/test_utils/__init__.py +++ b/src/bindings/python/src/openvino/test_utils/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from .test_utils_api import compare_functions diff --git a/src/bindings/python/src/openvino/torch/__init__.py b/src/bindings/python/src/openvino/torch/__init__.py index f2c246fd827246..41f7eca178ccc1 100644 --- a/src/bindings/python/src/openvino/torch/__init__.py +++ b/src/bindings/python/src/openvino/torch/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.frontend.pytorch.torchdynamo import backend diff --git a/src/bindings/python/src/openvino/utils.py b/src/bindings/python/src/openvino/utils.py index d2c646ef986a9f..ced05e771e8944 100644 --- a/src/bindings/python/src/openvino/utils.py +++ b/src/bindings/python/src/openvino/utils.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# 
Copyright (C) 2021 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/src/pyopenvino/CMakeLists.txt b/src/bindings/python/src/pyopenvino/CMakeLists.txt index b3d360f71ecd87..6e11c915c7baf2 100644 --- a/src/bindings/python/src/pyopenvino/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 project (pyopenvino) diff --git a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 188cbe263edf30..52ba997e6ac2c5 100644 --- a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #include "pyopenvino/core/async_infer_queue.hpp" diff --git a/src/bindings/python/src/pyopenvino/core/async_infer_queue.hpp b/src/bindings/python/src/pyopenvino/core/async_infer_queue.hpp index 176d4deaf6694a..4d13d7f6f2cf06 100644 --- a/src/bindings/python/src/pyopenvino/core/async_infer_queue.hpp +++ b/src/bindings/python/src/pyopenvino/core/async_infer_queue.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #pragma once diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp index 945a0c5f777e89..12ee912caed5f4 100644 --- a/src/bindings/python/src/pyopenvino/core/common.cpp +++ b/src/bindings/python/src/pyopenvino/core/common.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/common.hpp b/src/bindings/python/src/pyopenvino/core/common.hpp index 9d504aff5da36d..d783d89aa5d5c8 100644 --- a/src/bindings/python/src/pyopenvino/core/common.hpp +++ b/src/bindings/python/src/pyopenvino/core/common.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp index e14e82a55f3c7f..a6b849e1a85c13 100644 --- a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp +++ b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/compiled_model.hpp b/src/bindings/python/src/pyopenvino/core/compiled_model.hpp index 2c01f6e1c0c3df..e838f799d602de 100644 --- a/src/bindings/python/src/pyopenvino/core/compiled_model.hpp +++ b/src/bindings/python/src/pyopenvino/core/compiled_model.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index 6cfc8703a472ff..5252660efe4c98 100644 --- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ 
b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/core.hpp b/src/bindings/python/src/pyopenvino/core/core.hpp index 18e98939362bc8..706e8c53b91e8c 100644 --- a/src/bindings/python/src/pyopenvino/core/core.hpp +++ b/src/bindings/python/src/pyopenvino/core/core.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/bindings/python/src/pyopenvino/core/extension.cpp b/src/bindings/python/src/pyopenvino/core/extension.cpp index e7e9850354b851..651c2ed7131ea6 100644 --- a/src/bindings/python/src/pyopenvino/core/extension.cpp +++ b/src/bindings/python/src/pyopenvino/core/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/extension.hpp b/src/bindings/python/src/pyopenvino/core/extension.hpp index 7c4827e6dcb80f..309aeb2126fea4 100644 --- a/src/bindings/python/src/pyopenvino/core/extension.hpp +++ b/src/bindings/python/src/pyopenvino/core/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/infer_request.cpp b/src/bindings/python/src/pyopenvino/core/infer_request.cpp index 1b7b75b968b834..4e94de9a725dee 100644 --- a/src/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/src/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #include "pyopenvino/core/infer_request.hpp" diff --git a/src/bindings/python/src/pyopenvino/core/infer_request.hpp b/src/bindings/python/src/pyopenvino/core/infer_request.hpp index 7269784ac32fe8..69f0412a1745c9 100644 --- a/src/bindings/python/src/pyopenvino/core/infer_request.hpp +++ b/src/bindings/python/src/pyopenvino/core/infer_request.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp index ce061bca9a0a36..d2bd4e6df66447 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/offline_transformations.hpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.hpp index ebc93f91dd4fff..e706a07a033a7f 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.hpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/profiling_info.cpp 
b/src/bindings/python/src/pyopenvino/core/profiling_info.cpp index 30e3110ba917a7..8c709a5bce137b 100644 --- a/src/bindings/python/src/pyopenvino/core/profiling_info.cpp +++ b/src/bindings/python/src/pyopenvino/core/profiling_info.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/profiling_info.hpp b/src/bindings/python/src/pyopenvino/core/profiling_info.hpp index 1ac2ff49469550..6823b190be625d 100644 --- a/src/bindings/python/src/pyopenvino/core/profiling_info.hpp +++ b/src/bindings/python/src/pyopenvino/core/profiling_info.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp index 2491afcf606ba6..b077be3d19ac91 100644 --- a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp +++ b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/properties/properties.hpp b/src/bindings/python/src/pyopenvino/core/properties/properties.hpp index 7437ba8073f019..a7c20c6f8707ba 100644 --- a/src/bindings/python/src/pyopenvino/core/properties/properties.hpp +++ b/src/bindings/python/src/pyopenvino/core/properties/properties.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/tensor.cpp b/src/bindings/python/src/pyopenvino/core/tensor.cpp index e7b94ffece0335..fad7d36f6dc0ea 100644 --- a/src/bindings/python/src/pyopenvino/core/tensor.cpp +++ b/src/bindings/python/src/pyopenvino/core/tensor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/tensor.hpp b/src/bindings/python/src/pyopenvino/core/tensor.hpp index 036047c71743bb..e588c498956839 100644 --- a/src/bindings/python/src/pyopenvino/core/tensor.hpp +++ b/src/bindings/python/src/pyopenvino/core/tensor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/variable_state.cpp b/src/bindings/python/src/pyopenvino/core/variable_state.cpp index 743fa67d34ae65..32edfe1fa7e074 100644 --- a/src/bindings/python/src/pyopenvino/core/variable_state.cpp +++ b/src/bindings/python/src/pyopenvino/core/variable_state.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/variable_state.hpp b/src/bindings/python/src/pyopenvino/core/variable_state.hpp index e8736c8ec8862a..e2b81b344e3891 100644 --- a/src/bindings/python/src/pyopenvino/core/variable_state.hpp +++ b/src/bindings/python/src/pyopenvino/core/variable_state.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/version.cpp b/src/bindings/python/src/pyopenvino/core/version.cpp index 85bcd16490a787..5b55de85fcb3fe 100644 --- a/src/bindings/python/src/pyopenvino/core/version.cpp +++ b/src/bindings/python/src/pyopenvino/core/version.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/core/version.hpp b/src/bindings/python/src/pyopenvino/core/version.hpp index aff7d42208f28f..6d996cbabe8377 100644 --- a/src/bindings/python/src/pyopenvino/core/version.hpp +++ b/src/bindings/python/src/pyopenvino/core/version.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/decoder.cpp b/src/bindings/python/src/pyopenvino/frontend/decoder.cpp index b0560e361135b8..2504341ab4a39b 100644 --- a/src/bindings/python/src/pyopenvino/frontend/decoder.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/decoder.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/decoder.hpp b/src/bindings/python/src/pyopenvino/frontend/decoder.hpp index 541c0081779faf..41f0d5e7cdce8f 100644 --- a/src/bindings/python/src/pyopenvino/frontend/decoder.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/decoder.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/extension.cpp b/src/bindings/python/src/pyopenvino/frontend/extension.cpp index 20078b62082241..a4f2e9cae1ca0c 100644 --- a/src/bindings/python/src/pyopenvino/frontend/extension.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/extension.hpp b/src/bindings/python/src/pyopenvino/frontend/extension.hpp index be7869b36a9b4a..e2109a6fd634d8 100644 --- a/src/bindings/python/src/pyopenvino/frontend/extension.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 085ec1e2b8313c..1600e57413dc62 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.hpp b/src/bindings/python/src/pyopenvino/frontend/frontend.hpp index 9f30acd1680c74..12519803209bf7 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake b/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake index 966bd419218df8..6bdaa8a3f5b0a6 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake +++ b/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/frontend/input_model.cpp b/src/bindings/python/src/pyopenvino/frontend/input_model.cpp index 8e47b02bb7508a..20315a74338316 100644 --- a/src/bindings/python/src/pyopenvino/frontend/input_model.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/input_model.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/input_model.hpp b/src/bindings/python/src/pyopenvino/frontend/input_model.hpp index ea545a0bd63832..2c8c3e6791c36f 100644 --- a/src/bindings/python/src/pyopenvino/frontend/input_model.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/input_model.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/manager.cpp b/src/bindings/python/src/pyopenvino/frontend/manager.cpp index 924c48898724ee..70a39aa9ce77b3 100644 --- a/src/bindings/python/src/pyopenvino/frontend/manager.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/manager.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/manager.hpp b/src/bindings/python/src/pyopenvino/frontend/manager.hpp index 9b4a5953b18c3c..5a3a8abe556f17 100644 --- a/src/bindings/python/src/pyopenvino/frontend/manager.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/manager.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/node_context.cpp b/src/bindings/python/src/pyopenvino/frontend/node_context.cpp index 1f0904070ea60d..c6e90967f688dd 100644 --- a/src/bindings/python/src/pyopenvino/frontend/node_context.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/node_context.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/node_context.hpp b/src/bindings/python/src/pyopenvino/frontend/node_context.hpp index e16ee3bf05c095..7bd470f3c47169 100644 --- a/src/bindings/python/src/pyopenvino/frontend/node_context.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/node_context.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/onnx/CMakeLists.txt b/src/bindings/python/src/pyopenvino/frontend/onnx/CMakeLists.txt index 4e5e25c4dae600..ceadd5f01b17ab 100644 --- a/src/bindings/python/src/pyopenvino/frontend/onnx/CMakeLists.txt +++ 
b/src/bindings/python/src/pyopenvino/frontend/onnx/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/frontend/onnx/extension.cpp b/src/bindings/python/src/pyopenvino/frontend/onnx/extension.cpp index 38556621986263..7c1678ed562c04 100644 --- a/src/bindings/python/src/pyopenvino/frontend/onnx/extension.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/onnx/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/onnx/extension.hpp b/src/bindings/python/src/pyopenvino/frontend/onnx/extension.hpp index 1f6dc2bed8d1ea..3266229110e202 100644 --- a/src/bindings/python/src/pyopenvino/frontend/onnx/extension.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/onnx/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/onnx/py_module.cpp b/src/bindings/python/src/pyopenvino/frontend/onnx/py_module.cpp index b5be81bfd114b0..efff46e7b11d94 100644 --- a/src/bindings/python/src/pyopenvino/frontend/onnx/py_module.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/onnx/py_module.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/paddle/CMakeLists.txt b/src/bindings/python/src/pyopenvino/frontend/paddle/CMakeLists.txt index 7394004f6b6039..93782b040a931f 100644 --- a/src/bindings/python/src/pyopenvino/frontend/paddle/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/frontend/paddle/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/frontend/paddle/extension.cpp b/src/bindings/python/src/pyopenvino/frontend/paddle/extension.cpp index 9140aacd1e1845..47f4ef8e489c29 100644 --- a/src/bindings/python/src/pyopenvino/frontend/paddle/extension.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/paddle/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/paddle/extension.hpp b/src/bindings/python/src/pyopenvino/frontend/paddle/extension.hpp index 4c322ec0358f00..4818f5fe9e86a0 100644 --- a/src/bindings/python/src/pyopenvino/frontend/paddle/extension.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/paddle/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/paddle/py_module.cpp b/src/bindings/python/src/pyopenvino/frontend/paddle/py_module.cpp index f63cf7b33b13cd..a802f9e936f3da 100644 --- a/src/bindings/python/src/pyopenvino/frontend/paddle/py_module.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/paddle/py_module.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff 
--git a/src/bindings/python/src/pyopenvino/frontend/place.cpp b/src/bindings/python/src/pyopenvino/frontend/place.cpp index 57c51969a3c40c..defea32b70aadf 100644 --- a/src/bindings/python/src/pyopenvino/frontend/place.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/place.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/place.hpp b/src/bindings/python/src/pyopenvino/frontend/place.hpp index 336b308b02d067..ef0628205479a9 100644 --- a/src/bindings/python/src/pyopenvino/frontend/place.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/place.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt b/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt index 6f7669ad21bf46..f12460c85bd17f 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp index d0ea6780d8208d..260c0ba69c3a0f 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp index 024b03b2ff4cd9..9e372203158ed8 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.cpp index 6f64da9b1b6dc5..e206ef198c0fa6 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.hpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.hpp index 797d6d81793440..ac92aa75500499 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp index a2e3a2c7639342..80160c722db12a 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp +++ 
b/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/CMakeLists.txt b/src/bindings/python/src/pyopenvino/frontend/tensorflow/CMakeLists.txt index 8a30dff1c5486c..f8a25ba405c59d 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.cpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.cpp index 23ee840557459e..c1b739c3ce0bb5 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.hpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.hpp index e2fc698d1abe1b..ee850d84ffacd7 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/decoder_base.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.cpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.cpp index 96326859b199cc..52120357bbda5a 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.hpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.hpp index 0f0c38dba21a03..11839771bdc29d 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.cpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.cpp index 2476f54c146f4f..9360c3fe2ca18c 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.hpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.hpp index 46f042a650493f..8d09d9c6832e74 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.hpp +++ 
b/src/bindings/python/src/pyopenvino/frontend/tensorflow/graph_iterator.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/frontend/tensorflow/py_module.cpp b/src/bindings/python/src/pyopenvino/frontend/tensorflow/py_module.cpp index 66f03d7421f848..9f4963b6ac01d4 100644 --- a/src/bindings/python/src/pyopenvino/frontend/tensorflow/py_module.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/tensorflow/py_module.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/any.cpp b/src/bindings/python/src/pyopenvino/graph/any.cpp index 1c52e5c3387aa5..1d92c88923023d 100644 --- a/src/bindings/python/src/pyopenvino/graph/any.cpp +++ b/src/bindings/python/src/pyopenvino/graph/any.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/any.hpp b/src/bindings/python/src/pyopenvino/graph/any.hpp index 0ee83346426ad9..30f4d2c13e37bf 100644 --- a/src/bindings/python/src/pyopenvino/graph/any.hpp +++ b/src/bindings/python/src/pyopenvino/graph/any.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/axis_set.cpp b/src/bindings/python/src/pyopenvino/graph/axis_set.cpp index c7a31c93358f97..9252bff652a39c 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_set.cpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_set.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/axis_set.hpp b/src/bindings/python/src/pyopenvino/graph/axis_set.hpp index a8abd327bbb3f0..5c0a46e4977824 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_set.hpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_set.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp b/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp index 9056b65b4221b2..cb8e4dd1fcaf40 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/axis_vector.hpp b/src/bindings/python/src/pyopenvino/graph/axis_vector.hpp index ec6f68818fd8d0..25f52fb5cda932 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_vector.hpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_vector.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate.cpp b/src/bindings/python/src/pyopenvino/graph/coordinate.cpp index b4714cc53346ed..673649cf144d75 100644 --- 
a/src/bindings/python/src/pyopenvino/graph/coordinate.cpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate.hpp b/src/bindings/python/src/pyopenvino/graph/coordinate.hpp index 9278629071939b..1e04b19730e0dd 100644 --- a/src/bindings/python/src/pyopenvino/graph/coordinate.hpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp index 38fed3b430cde7..4e67fe3effaffa 100644 --- a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp index 7579860ee60ad0..ca64b606fe0e54 100644 --- a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp index 646976881cb509..284aba75ff6989 100644 --- a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.hpp b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.hpp index 4623786c177e2a..2c3f0831e05792 100644 --- a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.hpp +++ b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp b/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp index 2d8e36e64faf42..710ad70d0b6cfa 100644 --- a/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp b/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp index 719302c34ba3dc..4672dfb1e6dc72 100644 --- a/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp +++ b/src/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/dimension.cpp b/src/bindings/python/src/pyopenvino/graph/dimension.cpp index 22722653f90d28..be622a7ab6737c 100644 --- a/src/bindings/python/src/pyopenvino/graph/dimension.cpp +++ b/src/bindings/python/src/pyopenvino/graph/dimension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/dimension.hpp b/src/bindings/python/src/pyopenvino/graph/dimension.hpp index 4cb5cd902b7b84..df45a25d1f7f10 100644 --- a/src/bindings/python/src/pyopenvino/graph/dimension.hpp +++ b/src/bindings/python/src/pyopenvino/graph/dimension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp index fb4ce7833cd778..ee99409ea411d4 100644 --- a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp +++ b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.hpp b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.hpp index 1b447820f0f983..a94ed020cc983c 100644 --- a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.hpp +++ b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/layout.cpp b/src/bindings/python/src/pyopenvino/graph/layout.cpp index 0a70dfc23fff5d..f634c4d0b3ff87 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout.cpp +++ b/src/bindings/python/src/pyopenvino/graph/layout.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/layout.hpp b/src/bindings/python/src/pyopenvino/graph/layout.hpp index 228acdd8366bd5..71520cb18e3b76 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout.hpp +++ b/src/bindings/python/src/pyopenvino/graph/layout.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp b/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp index 925f173342d837..c22bc56d1f7d25 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp +++ b/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/layout_helpers.hpp b/src/bindings/python/src/pyopenvino/graph/layout_helpers.hpp index 4b2dc8e8e6eac5..90c3b0f8a73d0b 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout_helpers.hpp +++ b/src/bindings/python/src/pyopenvino/graph/layout_helpers.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel 
Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index 374bbe8704eae6..498ee992173457 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/model.hpp b/src/bindings/python/src/pyopenvino/graph/model.hpp index 1fe96aace63c3d..d9040b2dd4cf70 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.hpp +++ b/src/bindings/python/src/pyopenvino/graph/model.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node.cpp b/src/bindings/python/src/pyopenvino/graph/node.cpp index 2a276b47dcaf59..d82e8e5b271174 100644 --- a/src/bindings/python/src/pyopenvino/graph/node.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node.hpp b/src/bindings/python/src/pyopenvino/graph/node.hpp index 7fc7823b64a271..38b4296b98ad75 100644 --- a/src/bindings/python/src/pyopenvino/graph/node.hpp +++ b/src/bindings/python/src/pyopenvino/graph/node.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp index 6bcbb28b590574..6aec4cf32f7c85 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_factory.hpp b/src/bindings/python/src/pyopenvino/graph/node_factory.hpp index 3b16d135abe9d2..d6dbac82a9f37a 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_factory.hpp +++ b/src/bindings/python/src/pyopenvino/graph/node_factory.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_input.cpp b/src/bindings/python/src/pyopenvino/graph/node_input.cpp index 393281f8f14c55..275b13aadf91ed 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_input.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_input.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_input.hpp b/src/bindings/python/src/pyopenvino/graph/node_input.hpp index 50f213749ff941..02b159cb83ad02 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_input.hpp +++ b/src/bindings/python/src/pyopenvino/graph/node_input.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_output.cpp b/src/bindings/python/src/pyopenvino/graph/node_output.cpp index f9fa92f3e6f043..47160381324538 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_output.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_output.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/node_output.hpp b/src/bindings/python/src/pyopenvino/graph/node_output.hpp index 4786e25d4e8f23..0e9429cc1981c2 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_output.hpp +++ b/src/bindings/python/src/pyopenvino/graph/node_output.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp b/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp index d739f4842e99b3..394f48bde3b607 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/assign.hpp b/src/bindings/python/src/pyopenvino/graph/ops/assign.hpp index 84de7294f41bd8..d29bf7ac949f6f 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/assign.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/assign.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp index f0d1dc2d177ce5..b8fe5cfbbe04b4 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/constant.hpp b/src/bindings/python/src/pyopenvino/graph/ops/constant.hpp index cb7d457b1296ad..cde9351861d377 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/constant.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/constant.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index 62f30ff8b8d02c..8e74aeaf8d03c1 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.hpp b/src/bindings/python/src/pyopenvino/graph/ops/if.hpp index 0460cff28aa0c6..88ff5857e4da72 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp index 8cf9e61e7cdf43..536d97d17273ab 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/loop.hpp b/src/bindings/python/src/pyopenvino/graph/ops/loop.hpp index 8910a0a347a0fa..e7bc20b0a61cfa 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/loop.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/loop.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp b/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp index 5c9e4af036f6a5..7d1a9752d9f7fb 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/parameter.hpp b/src/bindings/python/src/pyopenvino/graph/ops/parameter.hpp index 5e542bebb489b2..65658cd5e58e4e 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/parameter.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/parameter.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp index c8576c8ecaf2ab..921b38a7aee4bf 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/result.hpp b/src/bindings/python/src/pyopenvino/graph/ops/result.hpp index e72ed9d43e54ee..470e01d00b7052 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/result.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/result.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp index 1b2b159bb7e51f..5932656c3eccb9 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.hpp b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.hpp index 631a72499c5bd2..84481bba40ea6c 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.hpp @@ -1,4 +1,4 @@ -// 
Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp index e44f58f9309661..08df35513af1f7 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp index dbb4327ee6430b..776b03a78120ac 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp index b9dbceac5cecd5..758ea8570e8176 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp index 5d9f217ddaf780..9ccf73bfbf2ef9 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp index ebc44c14153b8b..27b1ac598c874c 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp index 494c02135cf750..70b9fca08592b0 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp 
b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp index 0c0b6b0faeeca8..7658da1ad69aea 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp index 00289d9e076455..3e80bab695c1c7 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp index 8a61c899edd77f..9a38ad40b4fc8c 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp index d57b5cb784aaa5..5c2a4282b6c6da 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.cpp index ec1b1fda8fff1a..5703adf2ed1531 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.hpp index 5e0fde113201d7..e329dc33236f7c 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/multisubgraph.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp index 68edcccc6e0253..ee80f6c22fd30e 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp index 7b9d1ad1709b89..b445f722dd8aeb 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp index 1531d2f65a285c..37dd348615b60d 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp index d2b91d51a88466..151ceead254675 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp index 5c833884f9ce76..7a5fe95860df7e 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.hpp b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.hpp index 0b5fe438cfd350..788120d159ed62 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.hpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp b/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp index 6e9c2654ad5c74..de79384193ca6d 100644 --- a/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp +++ b/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/partial_shape.hpp b/src/bindings/python/src/pyopenvino/graph/partial_shape.hpp index 758a81d726b94e..1fb0c14eebfccb 100644 --- a/src/bindings/python/src/pyopenvino/graph/partial_shape.hpp +++ b/src/bindings/python/src/pyopenvino/graph/partial_shape.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp index 0a861b92bf35f8..be013a00a76fde 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.hpp b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.hpp index 430b0bc319aca0..689455e06115b2 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp index 113ce28c97755e..9bd2833308db41 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/manager.hpp b/src/bindings/python/src/pyopenvino/graph/passes/manager.hpp index 71493461c58f6f..5adcebe9320f88 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/manager.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/manager.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp index 54f24166c651e4..b73c1d7ef2a741 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.hpp b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.hpp index bc33f8e1752a53..4f6e8df40c2d0a 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp index c4238c0ea10951..a94b92b13109aa 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.hpp b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.hpp index d738cfdd7ddc3c..d3e7d6adae1b12 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.hpp +++ 
b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp index eb669de3378e51..afddde366aaecb 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.hpp b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.hpp index 878951236ad906..ec29189d380a5f 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp index feac3cb995ac68..3578f5faf69d58 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.hpp b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.hpp index beef6f7dfdc814..561b60acb4d51c 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp index 9654cbc3f2e3ce..ea82eaf28a5367 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp index 635b81cad87afb..24c105abaf7a7f 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp b/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp index cf0186c4e5bae9..dce6db6a90f948 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 
Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/passes/transformations.hpp b/src/bindings/python/src/pyopenvino/graph/passes/transformations.hpp index b285a9bd4eafcf..0abec9988e702f 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/transformations.hpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/transformations.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index a6ddf6c8ebe311..2cc33c8a2e4f69 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.hpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.hpp index 44e74460369cfd..477093df6822da 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.hpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp index 3e7323300e7016..bcdfaed6d26195 100644 --- a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp +++ b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/rt_map.hpp b/src/bindings/python/src/pyopenvino/graph/rt_map.hpp index 9dff6461d66b41..c3c726164f227f 100644 --- a/src/bindings/python/src/pyopenvino/graph/rt_map.hpp +++ b/src/bindings/python/src/pyopenvino/graph/rt_map.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/shape.cpp b/src/bindings/python/src/pyopenvino/graph/shape.cpp index 65c2e723abc2a2..0f0ccf8b886eb7 100644 --- a/src/bindings/python/src/pyopenvino/graph/shape.cpp +++ b/src/bindings/python/src/pyopenvino/graph/shape.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/shape.hpp b/src/bindings/python/src/pyopenvino/graph/shape.hpp index a74048630a20d8..bc50e20faa7113 100644 --- a/src/bindings/python/src/pyopenvino/graph/shape.hpp +++ b/src/bindings/python/src/pyopenvino/graph/shape.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/strides.cpp b/src/bindings/python/src/pyopenvino/graph/strides.cpp index 34bbf7cd393fef..daecb6e9e3a312 100644 --- a/src/bindings/python/src/pyopenvino/graph/strides.cpp +++ 
b/src/bindings/python/src/pyopenvino/graph/strides.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/strides.hpp b/src/bindings/python/src/pyopenvino/graph/strides.hpp index a30b1e93c77b1b..75df04fee2c756 100644 --- a/src/bindings/python/src/pyopenvino/graph/strides.hpp +++ b/src/bindings/python/src/pyopenvino/graph/strides.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp index dc7c484012c53c..e65590a4a04947 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp +++ b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/types/element_type.hpp b/src/bindings/python/src/pyopenvino/graph/types/element_type.hpp index 83ad530e1e17c2..8fab33a3b554e2 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/element_type.hpp +++ b/src/bindings/python/src/pyopenvino/graph/types/element_type.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp b/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp index 1ef39c4cdcf33d..fdd736d5ce8393 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp +++ b/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp b/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp index 6d1a346efef34f..0301a30e530d51 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp +++ b/src/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/util.cpp b/src/bindings/python/src/pyopenvino/graph/util.cpp index 9a606cace77895..96c9dc5500257c 100644 --- a/src/bindings/python/src/pyopenvino/graph/util.cpp +++ b/src/bindings/python/src/pyopenvino/graph/util.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/util.hpp b/src/bindings/python/src/pyopenvino/graph/util.hpp index 28e958894a73e7..cfd25fa729b27d 100644 --- a/src/bindings/python/src/pyopenvino/graph/util.hpp +++ b/src/bindings/python/src/pyopenvino/graph/util.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp 
b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index 5d8257aefbbc4f..bb6447a60357fa 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #include diff --git a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt index c63a30a30e7fef..94a1e62b7e1809 100644 --- a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/src/pyopenvino/test_utils/test_utils.cpp b/src/bindings/python/src/pyopenvino/test_utils/test_utils.cpp index 4011d271ed9a6d..af03d67be4ce84 100644 --- a/src/bindings/python/src/pyopenvino/test_utils/test_utils.cpp +++ b/src/bindings/python/src/pyopenvino/test_utils/test_utils.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index 7f5cf58b8ff45c..906c19ae820864 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index 0e7aa6055e0143..6d65bce29acac6 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/__init__.py b/src/bindings/python/tests/__init__.py index d45966d9fd42b5..3624da9d5d07a2 100644 --- a/src/bindings/python/tests/__init__.py +++ b/src/bindings/python/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/conftest.py b/src/bindings/python/tests/conftest.py index 6438783669dc68..772b87b73ccad3 100644 --- a/src/bindings/python/tests/conftest.py +++ b/src/bindings/python/tests/conftest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/mock/mock_py_frontend/CMakeLists.txt b/src/bindings/python/tests/mock/mock_py_frontend/CMakeLists.txt index af638f15f20d62..84463ef5df2428 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/CMakeLists.txt +++ b/src/bindings/python/tests/mock/mock_py_frontend/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/frontend_wrappers.hpp 
b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/frontend_wrappers.hpp index fea88960babfda..1130ac71862828 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/frontend_wrappers.hpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/frontend_wrappers.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/mock_py_frontend.hpp b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/mock_py_frontend.hpp index 00260ba16af0c6..6604467a7dbe88 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/mock_py_frontend.hpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/mock_py_frontend.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/visibility.hpp b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/visibility.hpp index 7d56323a55da9f..0626573dbe674a 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/visibility.hpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/include/mock_py_frontend/visibility.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/mock/mock_py_frontend/src/frontend_wrapper.cpp b/src/bindings/python/tests/mock/mock_py_frontend/src/frontend_wrapper.cpp index b1a0de50f60ec0..80c2afc39aae8d 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/src/frontend_wrapper.cpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/src/frontend_wrapper.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp index 1c8da7cca4e07c..b45b1a9a9842dd 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt index 0eafc5000b6ad2..322b021e6301a6 100644 --- a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt +++ b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp index a3ececfdfc736d..41b60a7e9c62f1 100644 --- a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp +++ 
b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/tests/test_graph/__init__.py b/src/bindings/python/tests/test_graph/__init__.py index 2949d70c3b3227..3562914947fc02 100644 --- a/src/bindings/python/tests/test_graph/__init__.py +++ b/src/bindings/python/tests/test_graph/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # openvino.dll directory path visibility is needed to use _pyopenvino module diff --git a/src/bindings/python/tests/test_graph/test_any.py b/src/bindings/python/tests/test_graph/test_any.py index bd892ff23f7ef3..0fd9635fa1baf7 100644 --- a/src/bindings/python/tests/test_graph/test_any.py +++ b/src/bindings/python/tests/test_graph/test_any.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import OVAny diff --git a/src/bindings/python/tests/test_graph/test_basic.py b/src/bindings/python/tests/test_graph/test_basic.py index ad16bc2f1d51c8..20101f79493cbf 100644 --- a/src/bindings/python/tests/test_graph/test_basic.py +++ b/src/bindings/python/tests/test_graph/test_basic.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_graph/test_constant.py b/src/bindings/python/tests/test_graph/test_constant.py index 6452084e377f09..6f13d0911b0c1a 100644 --- a/src/bindings/python/tests/test_graph/test_constant.py +++ b/src/bindings/python/tests/test_graph/test_constant.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_convolution.py b/src/bindings/python/tests/test_graph/test_convolution.py index 761ae2f90bf8dd..637b46f905435e 100644 --- a/src/bindings/python/tests/test_graph/test_convolution.py +++ b/src/bindings/python/tests/test_graph/test_convolution.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_core.py b/src/bindings/python/tests/test_graph/test_core.py index 985cd68284672e..9c44d035ffa401 100644 --- a/src/bindings/python/tests/test_graph/test_core.py +++ b/src/bindings/python/tests/test_graph/test_core.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import copy diff --git a/src/bindings/python/tests/test_graph/test_create_op.py b/src/bindings/python/tests/test_graph/test_create_op.py index 5f173b6a3bde47..355de9ab151383 100644 --- a/src/bindings/python/tests/test_graph/test_create_op.py +++ b/src/bindings/python/tests/test_graph/test_create_op.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git 
a/src/bindings/python/tests/test_graph/test_ctc_loss.py b/src/bindings/python/tests/test_graph/test_ctc_loss.py index 60429cf1b990a8..b7f3415eb56f82 100644 --- a/src/bindings/python/tests/test_graph/test_ctc_loss.py +++ b/src/bindings/python/tests/test_graph/test_ctc_loss.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_data_movement.py b/src/bindings/python/tests/test_graph/test_data_movement.py index ae088fd153249f..2e1bc1193baaf8 100644 --- a/src/bindings/python/tests/test_graph/test_data_movement.py +++ b/src/bindings/python/tests/test_graph/test_data_movement.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_detection_output.py b/src/bindings/python/tests/test_graph/test_detection_output.py index a6f40c5618c31e..0e9d2448d8efe8 100644 --- a/src/bindings/python/tests/test_graph/test_detection_output.py +++ b/src/bindings/python/tests/test_graph/test_detection_output.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_dft.py b/src/bindings/python/tests/test_graph/test_dft.py index 48dd7ae66c9a29..e1285897d8ba59 100644 --- a/src/bindings/python/tests/test_graph/test_dft.py +++ b/src/bindings/python/tests/test_graph/test_dft.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Type diff --git a/src/bindings/python/tests/test_graph/test_dyn_attributes.py b/src/bindings/python/tests/test_graph/test_dyn_attributes.py index 1b5cfb0d13e414..240cba915d35d5 100644 --- a/src/bindings/python/tests/test_graph/test_dyn_attributes.py +++ b/src/bindings/python/tests/test_graph/test_dyn_attributes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_einsum.py b/src/bindings/python/tests/test_graph/test_einsum.py index 08f09611fba590..7841e545db0ee4 100644 --- a/src/bindings/python/tests/test_graph/test_einsum.py +++ b/src/bindings/python/tests/test_graph/test_einsum.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset8 as ov diff --git a/src/bindings/python/tests/test_graph/test_eye.py b/src/bindings/python/tests/test_graph/test_eye.py index 50009236a6a8e2..a312570957c7fe 100644 --- a/src/bindings/python/tests/test_graph/test_eye.py +++ b/src/bindings/python/tests/test_graph/test_eye.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset10 as ov diff --git a/src/bindings/python/tests/test_graph/test_fake_convert.py b/src/bindings/python/tests/test_graph/test_fake_convert.py index 5f3643c072ccef..d0375d8cbedf52 100644 
--- a/src/bindings/python/tests/test_graph/test_fake_convert.py +++ b/src/bindings/python/tests/test_graph/test_fake_convert.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_gather.py b/src/bindings/python/tests/test_graph/test_gather.py index 03199ebd14d35f..3431bcb540e7a7 100644 --- a/src/bindings/python/tests/test_graph/test_gather.py +++ b/src/bindings/python/tests/test_graph/test_gather.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Type diff --git a/src/bindings/python/tests/test_graph/test_idft.py b/src/bindings/python/tests/test_graph/test_idft.py index 4d273bfe1bb735..521e3b1ad78267 100644 --- a/src/bindings/python/tests/test_graph/test_idft.py +++ b/src/bindings/python/tests/test_graph/test_idft.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Type diff --git a/src/bindings/python/tests/test_graph/test_if.py b/src/bindings/python/tests/test_graph/test_if.py index 77a59580af72bd..9cab9f75d48e7e 100644 --- a/src/bindings/python/tests/test_graph/test_if.py +++ b/src/bindings/python/tests/test_graph/test_if.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_graph/test_input_validation.py b/src/bindings/python/tests/test_graph/test_input_validation.py index eaaa8b1687e012..96ca726b36c704 100644 --- a/src/bindings/python/tests/test_graph/test_input_validation.py +++ b/src/bindings/python/tests/test_graph/test_input_validation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_log_softmax.py b/src/bindings/python/tests/test_graph/test_log_softmax.py index d467ba402a95ef..7613a2ad04395b 100644 --- a/src/bindings/python/tests/test_graph/test_log_softmax.py +++ b/src/bindings/python/tests/test_graph/test_log_softmax.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_loop.py b/src/bindings/python/tests/test_graph/test_loop.py index 2bb3f5356ebec1..8be0dcd966d2ad 100644 --- a/src/bindings/python/tests/test_graph/test_loop.py +++ b/src/bindings/python/tests/test_graph/test_loop.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_manager.py b/src/bindings/python/tests/test_graph/test_manager.py index 95be2a1df2c7ac..77e70b5316ee09 100644 --- a/src/bindings/python/tests/test_graph/test_manager.py +++ b/src/bindings/python/tests/test_graph/test_manager.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel 
Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_graph/test_multinomial.py b/src/bindings/python/tests/test_graph/test_multinomial.py index b0da48737c3cea..610fa0279f9223 100644 --- a/src/bindings/python/tests/test_graph/test_multinomial.py +++ b/src/bindings/python/tests/test_graph/test_multinomial.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_nms_rotated.py b/src/bindings/python/tests/test_graph/test_nms_rotated.py index 37661216108892..14f075b9079c4b 100644 --- a/src/bindings/python/tests/test_graph/test_nms_rotated.py +++ b/src/bindings/python/tests/test_graph/test_nms_rotated.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_node_factory.py b/src/bindings/python/tests/test_graph/test_node_factory.py index 45eb280e934c4b..8c3dd41e123ab3 100644 --- a/src/bindings/python/tests/test_graph/test_node_factory.py +++ b/src/bindings/python/tests/test_graph/test_node_factory.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_normalization.py b/src/bindings/python/tests/test_graph/test_normalization.py index 8d2d0b3913fa6a..5ee8799a5a774e 100644 --- a/src/bindings/python/tests/test_graph/test_normalization.py +++ b/src/bindings/python/tests/test_graph/test_normalization.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops.py b/src/bindings/python/tests/test_graph/test_ops.py index 433b798dec0829..e29f8e286f38ba 100644 --- a/src/bindings/python/tests/test_graph/test_ops.py +++ b/src/bindings/python/tests/test_graph/test_ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # flake8: noqa diff --git a/src/bindings/python/tests/test_graph/test_ops_binary.py b/src/bindings/python/tests/test_graph/test_ops_binary.py index a234e93407b762..9831e219a8eacf 100644 --- a/src/bindings/python/tests/test_graph/test_ops_binary.py +++ b/src/bindings/python/tests/test_graph/test_ops_binary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import operator diff --git a/src/bindings/python/tests/test_graph/test_ops_fused.py b/src/bindings/python/tests/test_graph/test_ops_fused.py index a2d1a158b2c58a..bdbf4a1a9f1f9c 100644 --- a/src/bindings/python/tests/test_graph/test_ops_fused.py +++ b/src/bindings/python/tests/test_graph/test_ops_fused.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_matmul.py b/src/bindings/python/tests/test_graph/test_ops_matmul.py 
index 0917bb0d59b869..ccc8a54a83b777 100644 --- a/src/bindings/python/tests/test_graph/test_ops_matmul.py +++ b/src/bindings/python/tests/test_graph/test_ops_matmul.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_multioutput.py b/src/bindings/python/tests/test_graph/test_ops_multioutput.py index 366e873b2a33dd..9eef485035d2ab 100644 --- a/src/bindings/python/tests/test_graph/test_ops_multioutput.py +++ b/src/bindings/python/tests/test_graph/test_ops_multioutput.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_reshape.py b/src/bindings/python/tests/test_graph/test_ops_reshape.py index a680f076625883..470807b053e01c 100644 --- a/src/bindings/python/tests/test_graph/test_ops_reshape.py +++ b/src/bindings/python/tests/test_graph/test_ops_reshape.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset8 as ov diff --git a/src/bindings/python/tests/test_graph/test_ops_result.py b/src/bindings/python/tests/test_graph/test_ops_result.py index 105ce81849ac86..3ae9086a508d21 100644 --- a/src/bindings/python/tests/test_graph/test_ops_result.py +++ b/src/bindings/python/tests/test_graph/test_ops_result.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_scatter.py b/src/bindings/python/tests/test_graph/test_ops_scatter.py index b0f9bb38735574..accfac295fbffd 100644 --- a/src/bindings/python/tests/test_graph/test_ops_scatter.py +++ b/src/bindings/python/tests/test_graph/test_ops_scatter.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_scatter_nd_update.py b/src/bindings/python/tests/test_graph/test_ops_scatter_nd_update.py index 0c8f6ca34388d8..2c33856219aa91 100644 --- a/src/bindings/python/tests/test_graph/test_ops_scatter_nd_update.py +++ b/src/bindings/python/tests/test_graph/test_ops_scatter_nd_update.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_unary.py b/src/bindings/python/tests/test_graph/test_ops_unary.py index a15c37a4540a11..f83994192c21db 100644 --- a/src/bindings/python/tests/test_graph/test_ops_unary.py +++ b/src/bindings/python/tests/test_graph/test_ops_unary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_ops_util_variable.py b/src/bindings/python/tests/test_graph/test_ops_util_variable.py index 1e777a30925e94..1440c890bdad85 100644 --- 
a/src/bindings/python/tests/test_graph/test_ops_util_variable.py +++ b/src/bindings/python/tests/test_graph/test_ops_util_variable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import PartialShape, Type diff --git a/src/bindings/python/tests/test_graph/test_pad.py b/src/bindings/python/tests/test_graph/test_pad.py index 1a6b96c0c964cb..eeeb2cd3e365f2 100644 --- a/src/bindings/python/tests/test_graph/test_pad.py +++ b/src/bindings/python/tests/test_graph/test_pad.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_graph/test_pooling.py b/src/bindings/python/tests/test_graph/test_pooling.py index f543f528cfa259..746c580a4ea00a 100644 --- a/src/bindings/python/tests/test_graph/test_pooling.py +++ b/src/bindings/python/tests/test_graph/test_pooling.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_preprocess.py b/src/bindings/python/tests/test_graph/test_preprocess.py index 6fc302a351e583..f3f93fd2095339 100644 --- a/src/bindings/python/tests/test_graph/test_preprocess.py +++ b/src/bindings/python/tests/test_graph/test_preprocess.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_proposal.py b/src/bindings/python/tests/test_graph/test_proposal.py index 5ee5078e00f068..d24e3efcc67501 100644 --- a/src/bindings/python/tests/test_graph/test_proposal.py +++ b/src/bindings/python/tests/test_graph/test_proposal.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_random_uniform.py b/src/bindings/python/tests/test_graph/test_random_uniform.py index ab210c965e7828..fcf5249508ed67 100644 --- a/src/bindings/python/tests/test_graph/test_random_uniform.py +++ b/src/bindings/python/tests/test_graph/test_random_uniform.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_rdft.py b/src/bindings/python/tests/test_graph/test_rdft.py index 615deba744bb51..9f0aff0f726b72 100644 --- a/src/bindings/python/tests/test_graph/test_rdft.py +++ b/src/bindings/python/tests/test_graph/test_rdft.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset10 as ov diff --git a/src/bindings/python/tests/test_graph/test_reduction.py b/src/bindings/python/tests/test_graph/test_reduction.py index 9747af0b926834..4dab302938d683 100644 --- a/src/bindings/python/tests/test_graph/test_reduction.py +++ b/src/bindings/python/tests/test_graph/test_reduction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation 
+# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_roll.py b/src/bindings/python/tests/test_graph/test_roll.py index 17e4c3f3489217..44b86e50e93537 100644 --- a/src/bindings/python/tests/test_graph/test_roll.py +++ b/src/bindings/python/tests/test_graph/test_roll.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset8 as ov diff --git a/src/bindings/python/tests/test_graph/test_scaled_dot_product_attention.py b/src/bindings/python/tests/test_graph/test_scaled_dot_product_attention.py index aea046b3cc9ff3..d0733925664794 100644 --- a/src/bindings/python/tests/test_graph/test_scaled_dot_product_attention.py +++ b/src/bindings/python/tests/test_graph/test_scaled_dot_product_attention.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_sequence_processing.py b/src/bindings/python/tests/test_graph/test_sequence_processing.py index b159a069e79c05..d12151758ef669 100644 --- a/src/bindings/python/tests/test_graph/test_sequence_processing.py +++ b/src/bindings/python/tests/test_graph/test_sequence_processing.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_swish.py b/src/bindings/python/tests/test_graph/test_swish.py index 6c0ac2f415a78c..92f4ccadb8f9dc 100644 --- a/src/bindings/python/tests/test_graph/test_swish.py +++ b/src/bindings/python/tests/test_graph/test_swish.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_tensor_iterator.py b/src/bindings/python/tests/test_graph/test_tensor_iterator.py index 8cbc6121bce2ae..199e9a6bed3061 100644 --- a/src/bindings/python/tests/test_graph/test_tensor_iterator.py +++ b/src/bindings/python/tests/test_graph/test_tensor_iterator.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_graph/test_utils.py b/src/bindings/python/tests/test_graph/test_utils.py index 3c2af2e54f629c..d9aa9ad1ffc3c4 100644 --- a/src/bindings/python/tests/test_graph/test_utils.py +++ b/src/bindings/python/tests/test_graph/test_utils.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_graph/util.py b/src/bindings/python/tests/test_graph/util.py index d048dc57797043..76dea528dce598 100644 --- a/src/bindings/python/tests/test_graph/util.py +++ b/src/bindings/python/tests/test_graph/util.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_package_versions.py 
b/src/bindings/python/tests/test_package_versions.py index fef0c7b524907c..075eb8c5bebe0d 100644 --- a/src/bindings/python/tests/test_package_versions.py +++ b/src/bindings/python/tests/test_package_versions.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.preprocess as ov_pre diff --git a/src/bindings/python/tests/test_runtime/__init__.py b/src/bindings/python/tests/test_runtime/__init__.py index 7d403600aa67e6..aaeca21462e541 100644 --- a/src/bindings/python/tests/test_runtime/__init__.py +++ b/src/bindings/python/tests/test_runtime/__init__.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_runtime/subprocess_test_tensor.py b/src/bindings/python/tests/test_runtime/subprocess_test_tensor.py index 97a35a07ad7be1..922f17c6e3051b 100644 --- a/src/bindings/python/tests/test_runtime/subprocess_test_tensor.py +++ b/src/bindings/python/tests/test_runtime/subprocess_test_tensor.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_runtime/test_async_infer_request.py b/src/bindings/python/tests/test_runtime/test_async_infer_request.py index 8fb5b3325db1a6..ea1bf1a9f2a4af 100644 --- a/src/bindings/python/tests/test_runtime/test_async_infer_request.py +++ b/src/bindings/python/tests/test_runtime/test_async_infer_request.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from collections.abc import Iterable diff --git a/src/bindings/python/tests/test_runtime/test_compiled_model.py b/src/bindings/python/tests/test_runtime/test_compiled_model.py index 7f0e5a02baf878..b86b061dc973f3 100644 --- a/src/bindings/python/tests/test_runtime/test_compiled_model.py +++ b/src/bindings/python/tests/test_runtime/test_compiled_model.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_runtime/test_core.py b/src/bindings/python/tests/test_runtime/test_core.py index 86700ea7804cbd..61cb3cdcc6755b 100644 --- a/src/bindings/python/tests/test_runtime/test_core.py +++ b/src/bindings/python/tests/test_runtime/test_core.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_runtime/test_dimension.py b/src/bindings/python/tests/test_runtime/test_dimension.py index 93a77431d10c45..de46af5fbe7d2f 100644 --- a/src/bindings/python/tests/test_runtime/test_dimension.py +++ b/src/bindings/python/tests/test_runtime/test_dimension.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Dimension diff --git a/src/bindings/python/tests/test_runtime/test_input_node.py b/src/bindings/python/tests/test_runtime/test_input_node.py index 3d98525d223617..9b5beb587d1e94 
100644 --- a/src/bindings/python/tests/test_runtime/test_input_node.py +++ b/src/bindings/python/tests/test_runtime/test_input_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_runtime/test_memory_modes.py b/src/bindings/python/tests/test_runtime/test_memory_modes.py index cfe1b3cb0913d4..f4746036bea178 100644 --- a/src/bindings/python/tests/test_runtime/test_memory_modes.py +++ b/src/bindings/python/tests/test_runtime/test_memory_modes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 0a3dc6edf59414..6d545f7760cf3d 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_runtime/test_nogil.py b/src/bindings/python/tests/test_runtime/test_nogil.py index d846ffdfade8cf..d291c530d2e890 100644 --- a/src/bindings/python/tests/test_runtime/test_nogil.py +++ b/src/bindings/python/tests/test_runtime/test_nogil.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import time diff --git a/src/bindings/python/tests/test_runtime/test_output_const_node.py b/src/bindings/python/tests/test_runtime/test_output_const_node.py index 2ae2ecad78b853..d3f431d6c6b0f7 100644 --- a/src/bindings/python/tests/test_runtime/test_output_const_node.py +++ b/src/bindings/python/tests/test_runtime/test_output_const_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_runtime/test_output_node.py b/src/bindings/python/tests/test_runtime/test_output_node.py index d10f7e46afeef1..fecf6b5ca8421d 100644 --- a/src/bindings/python/tests/test_runtime/test_output_node.py +++ b/src/bindings/python/tests/test_runtime/test_output_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_runtime/test_ovdict.py b/src/bindings/python/tests/test_runtime/test_ovdict.py index d5713732c7967d..e7a5854d66d072 100644 --- a/src/bindings/python/tests/test_runtime/test_ovdict.py +++ b/src/bindings/python/tests/test_runtime/test_ovdict.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from collections.abc import Mapping diff --git a/src/bindings/python/tests/test_runtime/test_properties.py b/src/bindings/python/tests/test_runtime/test_properties.py index ca280bd33b7144..12fc10f51668bb 100644 --- a/src/bindings/python/tests/test_runtime/test_properties.py +++ b/src/bindings/python/tests/test_runtime/test_properties.py @@ -1,5 +1,5 @@ # -*- coding: 
utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_runtime/test_sync_infer_request.py b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py index c31f7e40653e4a..09a99df6ead1a3 100644 --- a/src/bindings/python/tests/test_runtime/test_sync_infer_request.py +++ b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from contextlib import nullcontext as does_not_raise diff --git a/src/bindings/python/tests/test_runtime/test_tensor.py b/src/bindings/python/tests/test_runtime/test_tensor.py index bc8d3beaddf5a1..fca622c7654e7c 100644 --- a/src/bindings/python/tests/test_runtime/test_tensor.py +++ b/src/bindings/python/tests/test_runtime/test_tensor.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_runtime/test_tensor_string.py b/src/bindings/python/tests/test_runtime/test_tensor_string.py index ff6c13d69c9fe4..763ff6f4c9dcbe 100644 --- a/src/bindings/python/tests/test_runtime/test_tensor_string.py +++ b/src/bindings/python/tests/test_runtime/test_tensor_string.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_runtime/test_type.py b/src/bindings/python/tests/test_runtime/test_type.py index 6a00ecb40c2b6a..831052ac81d448 100644 --- a/src/bindings/python/tests/test_runtime/test_type.py +++ b/src/bindings/python/tests/test_runtime/test_type.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/test_torchvision_to_ov/test_preprocessor.py b/src/bindings/python/tests/test_torchvision_to_ov/test_preprocessor.py index 041f2afcf55f5a..1cb91acb60c756 100644 --- a/src/bindings/python/tests/test_torchvision_to_ov/test_preprocessor.py +++ b/src/bindings/python/tests/test_torchvision_to_ov/test_preprocessor.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_transformations/__init__.py b/src/bindings/python/tests/test_transformations/__init__.py index 7d403600aa67e6..aaeca21462e541 100644 --- a/src/bindings/python/tests/test_transformations/__init__.py +++ b/src/bindings/python/tests/test_transformations/__init__.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_transformations/test_compression.py b/src/bindings/python/tests/test_transformations/test_compression.py index db15d24592cf98..fa46b6d227f1e3 100644 --- a/src/bindings/python/tests/test_transformations/test_compression.py +++ b/src/bindings/python/tests/test_transformations/test_compression.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 
Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from typing import List diff --git a/src/bindings/python/tests/test_transformations/test_compression_4bit.py b/src/bindings/python/tests/test_transformations/test_compression_4bit.py index 58d1d2d122d36f..c72818b0339f7d 100644 --- a/src/bindings/python/tests/test_transformations/test_compression_4bit.py +++ b/src/bindings/python/tests/test_transformations/test_compression_4bit.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests/test_transformations/test_graph_rewrite.py b/src/bindings/python/tests/test_transformations/test_graph_rewrite.py index 1b8705784f1cf3..4821dad33dff0a 100644 --- a/src/bindings/python/tests/test_transformations/test_graph_rewrite.py +++ b/src/bindings/python/tests/test_transformations/test_graph_rewrite.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime import opset8 from openvino.runtime.passes import Manager, GraphRewrite, MatcherPass, WrapType, Matcher diff --git a/src/bindings/python/tests/test_transformations/test_manager.py b/src/bindings/python/tests/test_transformations/test_manager.py index af58aaf79b17c5..e78c62d8c1a5c4 100644 --- a/src/bindings/python/tests/test_transformations/test_manager.py +++ b/src/bindings/python/tests/test_transformations/test_manager.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.passes import Manager, GraphRewrite, BackwardGraphRewrite, Serialize diff --git a/src/bindings/python/tests/test_transformations/test_matcher_pass.py b/src/bindings/python/tests/test_transformations/test_matcher_pass.py index 4735e840747983..c32483be316658 100644 --- a/src/bindings/python/tests/test_transformations/test_matcher_pass.py +++ b/src/bindings/python/tests/test_transformations/test_matcher_pass.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime import opset8 from openvino.runtime.passes import Manager, Matcher, MatcherPass, WrapType diff --git a/src/bindings/python/tests/test_transformations/test_model_pass.py b/src/bindings/python/tests/test_transformations/test_model_pass.py index 8b1486342ec044..5df3d0a9024dc2 100644 --- a/src/bindings/python/tests/test_transformations/test_model_pass.py +++ b/src/bindings/python/tests/test_transformations/test_model_pass.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.runtime.passes import Manager diff --git a/src/bindings/python/tests/test_transformations/test_offline_api.py b/src/bindings/python/tests/test_transformations/test_offline_api.py index 75b7239037b8a4..cd336493b58246 100644 --- a/src/bindings/python/tests/test_transformations/test_offline_api.py +++ b/src/bindings/python/tests/test_transformations/test_offline_api.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 
import os diff --git a/src/bindings/python/tests/test_transformations/test_pattern_ops.py b/src/bindings/python/tests/test_transformations/test_pattern_ops.py index 44adc6292dfcac..ad8b9c0afc04c9 100644 --- a/src/bindings/python/tests/test_transformations/test_pattern_ops.py +++ b/src/bindings/python/tests/test_transformations/test_pattern_ops.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/src/bindings/python/tests/test_transformations/test_public_transformations.py b/src/bindings/python/tests/test_transformations/test_public_transformations.py index 6464ff7ebfd51b..429bc6c192acc4 100644 --- a/src/bindings/python/tests/test_transformations/test_public_transformations.py +++ b/src/bindings/python/tests/test_transformations/test_public_transformations.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os import pytest diff --git a/src/bindings/python/tests/test_transformations/test_replacement_api.py b/src/bindings/python/tests/test_transformations/test_replacement_api.py index 01f44d123471fa..b4560d7f5fdf66 100644 --- a/src/bindings/python/tests/test_transformations/test_replacement_api.py +++ b/src/bindings/python/tests/test_transformations/test_replacement_api.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Model, PartialShape diff --git a/src/bindings/python/tests/test_transformations/utils/utils.py b/src/bindings/python/tests/test_transformations/utils/utils.py index 288163987680b5..e0239ce05fdc9d 100644 --- a/src/bindings/python/tests/test_transformations/utils/utils.py +++ b/src/bindings/python/tests/test_transformations/utils/utils.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino import Model, PartialShape diff --git a/src/bindings/python/tests/test_utils/test_data_dispatch.py b/src/bindings/python/tests/test_utils/test_data_dispatch.py index 73605850f787eb..3fadf354d7604f 100644 --- a/src/bindings/python/tests/test_utils/test_data_dispatch.py +++ b/src/bindings/python/tests/test_utils/test_data_dispatch.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os diff --git a/src/bindings/python/tests/test_utils/test_utils.py b/src/bindings/python/tests/test_utils/test_utils.py index a7db0ff7d27a2e..cc263f80eb7da0 100644 --- a/src/bindings/python/tests/test_utils/test_utils.py +++ b/src/bindings/python/tests/test_utils/test_utils.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/src/bindings/python/tests/utils/helpers.py b/src/bindings/python/tests/utils/helpers.py index d70907d888f6f1..ab3e2fc6abc079 100644 --- a/src/bindings/python/tests/utils/helpers.py +++ b/src/bindings/python/tests/utils/helpers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: 
Apache-2.0 from typing import Tuple, Union, List diff --git a/src/bindings/python/wheel/CMakeLists.txt b/src/bindings/python/wheel/CMakeLists.txt index d2cc6f348ab679..e131277e8626a8 100644 --- a/src/bindings/python/wheel/CMakeLists.txt +++ b/src/bindings/python/wheel/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/wheel/fdupes_check.cmake b/src/bindings/python/wheel/fdupes_check.cmake index da9e1711dc5d05..68611f960085c9 100644 --- a/src/bindings/python/wheel/fdupes_check.cmake +++ b/src/bindings/python/wheel/fdupes_check.cmake @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index cdd813f60c1bcc..74a71da3b3013d 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os.path diff --git a/tools/benchmark_tool/openvino/__init__.py b/tools/benchmark_tool/openvino/__init__.py index b7dc434f3148cc..1d75589bd2eceb 100644 --- a/tools/benchmark_tool/openvino/__init__.py +++ b/tools/benchmark_tool/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/tools/mo/openvino/__init__.py b/tools/mo/openvino/__init__.py index 90552e0befed68..635bae28670cc6 100644 --- a/tools/mo/openvino/__init__.py +++ b/tools/mo/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/tools/openvino_dev/src/openvino/__init__.py b/tools/openvino_dev/src/openvino/__init__.py index 90552e0befed68..635bae28670cc6 100644 --- a/tools/openvino_dev/src/openvino/__init__.py +++ b/tools/openvino_dev/src/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/tools/ovc/openvino/__init__.py b/tools/ovc/openvino/__init__.py index b7dc434f3148cc..1d75589bd2eceb 100644 --- a/tools/ovc/openvino/__init__.py +++ b/tools/ovc/openvino/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 __path__ = __import__("pkgutil").extend_path(__path__, __name__) From e35de073399edadc1554067776b7a70b4b18a893 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Tue, 16 Jan 2024 19:22:23 +0100 Subject: [PATCH 027/122] [DOCS] fix section on api (#22167) --- .../articles_en/documentation/openvino_ir.rst | 19 +- .../openvino_legacy_features.rst | 12 +- .../api_2_0_transition_guide.rst | 118 --- .../common_inference_pipeline.rst | 683 ------------------ .../configure_devices.rst | 302 -------- .../deployment_migration.rst | 246 ------- .../graph_construction.rst | 40 - 
.../preprocessing.rst | 203 ------ docs/snippets/ie_common.cpp | 114 --- docs/snippets/ov_common.c | 146 ---- docs/snippets/ov_common.cpp | 133 ---- docs/snippets/ov_common.py | 114 --- docs/snippets/ov_graph.cpp | 46 -- docs/snippets/ov_graph.py | 47 -- docs/snippets/ov_preprocessing_migration.c | 147 ---- docs/snippets/ov_preprocessing_migration.cpp | 125 ---- docs/snippets/ov_properties_migration.c | 58 -- docs/snippets/ov_properties_migration.cpp | 105 --- .../_static/images/tf_openvino.svg | 3 - docs/sphinx_setup/api/api_reference.rst | 25 +- 20 files changed, 36 insertions(+), 2650 deletions(-) delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst delete mode 100644 docs/snippets/ie_common.cpp delete mode 100644 docs/snippets/ov_common.c delete mode 100644 docs/snippets/ov_common.cpp delete mode 100644 docs/snippets/ov_common.py delete mode 100644 docs/snippets/ov_graph.cpp delete mode 100644 docs/snippets/ov_graph.py delete mode 100644 docs/snippets/ov_preprocessing_migration.c delete mode 100644 docs/snippets/ov_preprocessing_migration.cpp delete mode 100644 docs/snippets/ov_properties_migration.c delete mode 100644 docs/snippets/ov_properties_migration.cpp delete mode 100644 docs/sphinx_setup/_static/images/tf_openvino.svg diff --git a/docs/articles_en/documentation/openvino_ir.rst b/docs/articles_en/documentation/openvino_ir.rst index ab8e8d320e4310..94a11b06e899a0 100644 --- a/docs/articles_en/documentation/openvino_ir.rst +++ b/docs/articles_en/documentation/openvino_ir.rst @@ -5,8 +5,8 @@ OpenVINO IR format .. meta:: - :description: OpenVINO IR, known as Intermediate Representation, is the result - of model conversion in OpenVINO and is represented by two files: + :description: OpenVINO IR, known as Intermediate Representation, is the result + of model conversion in OpenVINO and is represented by two files: an XML and a binary file. .. toctree:: @@ -19,13 +19,21 @@ OpenVINO IR format openvino_docs_ops_broadcast_rules openvino_docs_MO_DG_prepare_model_convert_model_IR_suitable_for_INT8_inference -The models, built and trained using various frameworks, can be large and architecture-dependent. To successfully run inference from any device and maximize the benefits of OpenVINO tools, you can convert the model to the OpenVINO Intermediate Representation (IR) format. -OpenVINO IR is the proprietary model format of OpenVINO. It is produced after converting a model with model conversion API. Model conversion API translates the frequently used deep learning operations to their respective similar representation in OpenVINO and tunes them with the associated weights and biases from the trained model. The resulting IR contains two files: +OpenVINO IR is the proprietary model format of OpenVINO, benefiting from the full extent +of its features. 
It is obtained by converting a model from one of the +:doc:`supported formats ` +using the model conversion API or OpenVINO Converter. The process translates common +deep learning operations of the original network to their counterpart representations in +OpenVINO and tunes them with the associated weights and biases. +The resulting OpenVINO IR format contains two files: * ``.xml`` - Describes the model topology. * ``.bin`` - Contains the weights and binary data. +:doc:`See why converting to OpenVINO IR is recommended ` + + IR Structure ############ @@ -148,11 +156,10 @@ Here is an example of a small IR XML file that corresponds to a graph from the p -The IR does not use explicit data nodes described in the previous section. In contrast, properties of data such as tensor dimensions and their data types are described as properties of input and output ports of operations. +The IR does not use explicit data nodes described in the previous section. In contrast, properties of data such as tensor dimensions and their data types are described as properties of input and output ports of operations. Additional Resources #################### * :doc:`IR and Operation Sets ` -* :doc:`OpenVINO API 2.0 transition guide ` diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst index 3ee12f934522b3..d6953d26f59cf9 100644 --- a/docs/articles_en/documentation/openvino_legacy_features.rst +++ b/docs/articles_en/documentation/openvino_legacy_features.rst @@ -11,11 +11,10 @@ Legacy Features and Components OpenVINO Development Tools package Model Optimizer / Conversion API Deploy Application with Deployment Manager - OpenVINO API 2.0 transition Open Model ZOO -Since OpenVINO has grown very rapidly in recent years, some of its features +Since OpenVINO has grown very rapidly in recent years, a number of its features and components have been replaced by other solutions. Some of them are still supported to assure OpenVINO users are given enough time to adjust their projects, before the features are fully discontinued. @@ -90,14 +89,11 @@ offering. | `Check the NNCF GitHub project, including documentation `__ -| **Old Inference API 1.0** +| **Inference API 1.0** | *New solution:* API 2.0 launched in OpenVINO 2022.1 -| *Old solution:* discontinuation planned for OpenVINO 2024.0 +| *Old solution:* discontinued with OpenVINO 2024.0 | -| API 1.0 (Inference Engine and nGraph) is now deprecated. It can still be - used but is not recommended. Its discontinuation is planned for 2024. -| :doc:`See how to transition to API 2.0 ` - +| `The last version supporting API 1.0 `__ | **Compile tool** | *New solution:* the tool is no longer needed diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst deleted file mode 100644 index 56938c54b151e2..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. {#openvino_2_0_transition_guide} - -OpenVINO™ API 2.0 Transition Guide -==================================== - - -.. meta:: - :description: A detailed information on a new version of OpenVINO™ API 2.0, - as well as the new OpenVINO IR model format: IR v11. - - -.. 
toctree:: - :maxdepth: 1 - :hidden: - - openvino_2_0_deployment - openvino_2_0_inference_pipeline - openvino_2_0_configure_devices - openvino_2_0_preprocessing - openvino_2_0_model_creation - - -This guide introduces the new OpenVINO™ API: API 2.0, as well as the new OpenVINO IR model format: IR v11. Here, you will find comparisons of their "old" and "new" versions. - -Introduction of API 2.0 -####################### - -Versions of OpenVINO prior to 2022.1 required changes in the application logic when migrating an app from other frameworks, such as TensorFlow, ONNX Runtime, PyTorch, PaddlePaddle, etc. The changes were required because: - -- Model conversion API changed input precisions for some inputs. For example, neural language processing models with ``I64`` inputs were changed to include ``I32`` ones. -- Model conversion API changed layouts for TensorFlow models (see the :doc:`Layouts in OpenVINO `). It lead to unusual requirement of using the input data with a different layout than that of the framework: - -.. image:: _static/images/tf_openvino.svg - :alt: tf_openvino - -- Inference Engine API (`InferenceEngine::CNNNetwork `__) applied some conversion rules for input and output precisions due to limitations in device plugins. -- Users needed to specify input shapes during model conversions in model conversion API, and work with static shapes in the application. - -OpenVINO™ 2022.1 has introduced API 2.0 (also called OpenVINO API v2) to align the logic of working with models as it is done in their origin frameworks - no layout and precision changes, operating with tensor names and indices to address inputs and outputs. OpenVINO Runtime has combined Inference Engine API used for inference and nGraph API targeted to work with models and operations. API 2.0 has a common structure, naming convention styles, namespaces, and removes duplicated structures. For more details, see the :doc:`Changes to Inference Pipeline in OpenVINO API v2 `. - -.. note:: - - Your existing applications will continue to work with OpenVINO Runtime 2022.1, as normal. Although, migration to API 2.0 is strongly recommended. This will allow you to use additional features, such as :doc:`Preprocessing ` and :doc:`Dynamic shapes support `. - - -The New OpenVINO IR v11 -####################### - -To support these features, OpenVINO has introduced OpenVINO IR v11, which is now the default version for model conversion API. The model represented in OpenVINO IR v11 fully matches the original model in the original framework format in terms of inputs and outputs. It is also not required to specify input shapes during conversion, which results in OpenVINO IR v11 containing ``-1`` to denote undefined dimensions. For more details on how to fully utilize this feature, see :doc:`Working with dynamic shapes `. For information on how to reshape to static shapes in application, see :doc:`Changing input shapes `. - -OpenVINO IR v11 is fully compatible with applications written with the Inference Engine API used by older versions of OpenVINO. This backward compatibility is allowed thanks to additional runtime information included in OpenVINO IR v11. This means that when OpenVINO IR v11 is read by an application based on Inference Engine, it is internally converted to OpenVINO IR v10. - -OpenVINO IR v11 is supported by all OpenVINO Development tools including Post-Training Optimization Tool, Benchmark app, etc. 
- -Backward Compatibility for OpenVINO IR v10 -########################################## - -API 2.0 also supports backward compatibility for models of OpenVINO IR v10. If you have OpenVINO IR v10 files, they can also be fed to OpenVINO Runtime. For more details, see the :doc:`migration steps `. - -Some of the OpenVINO Development Tools also support both OpenVINO IR v10 and v11 as an input: - -- Accuracy checker uses API 2.0 for model accuracy measurement by default. It also supports switching to the old API by using the ``--use_new_api False`` command-line parameter. Both launchers accept OpenVINO IR v10 and v11, but in some cases configuration files should be updated. For more details, see the `Accuracy Checker documentation `__. -- :doc:`Compile tool ` compiles the model to be used in API 2.0 by default. To use the resulting compiled blob under the Inference Engine API, the additional ``ov_api_1_0`` option should be passed. - -However, Post-Training Optimization Tool of OpenVINO 2022.1 does not support OpenVINO IR v10. They require the latest version of model conversion API to generate OpenVINO IR v11 files. - -.. note:: - - To quantize your OpenVINO IR v10 models to run with OpenVINO 2022.1, download and use Post-Training Optimization Tool of OpenVINO 2021.4. - - -.. _differences_api20_ie: - -Differences in API 2.0 and Inference Engine API Behaviors -######################################################### - -Inference Engine and nGraph APIs do not become deprecated with the introduction of the new API, and they can still be used in applications. However, it is highly recommended to migrate to API 2.0, as it offers more features (further extended in future releases), such as: - -- :doc:`Working with dynamic shapes `, which increases performance when working with compatible models such as NLP (Neural Language Processing) and super-resolution models. -- :doc:`Preprocessing of the model `, which adds preprocessing operations to inference models and fully occupies the accelerator, freeing CPU resources. - -To understand the differences between Inference Engine API and API 2.0, see the definitions of two types of behaviors first: - -- **Old behavior** of OpenVINO assumes that: - - - Model Conversion API can change input element types and order of dimensions (layouts) for the model from the original framework. - - Inference Engine can override input and output element types. - - Inference Engine API uses operation names to address inputs and outputs (e.g. `InferenceEngine::InferRequest::GetBlob `__). - - Inference Engine API does not support compiling of models with dynamic input shapes. - -- **New behavior** implemented in 2022.1 assumes full model alignment with the framework: - - - Model Conversion API preserves input element types and order of dimensions (layouts), and stores tensor names from the original models. - - OpenVINO Runtime 2022.1 reads models in any format (OpenVINO IR v10, OpenVINO IR v11, TensorFlow, ONNX, PaddlePaddle, etc.). - - API 2.0 uses tensor names for addressing, which is the standard approach among the compatible model frameworks. - - API 2.0 can also address input and output tensors by the index. Some model formats like ONNX are sensitive to the input and output order, which is preserved by OpenVINO 2022.1. - -The table below demonstrates which behavior, **old** or **new**, is used for models based on the two APIs. 
- -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ -| API | OpenVINO IR v10 | OpenVINO IR v11 | ONNX Files | Models Created in Code | -+================================+=================+=================+=================+========================+ -| Inference Engine / nGraph APIs | Old | Old | Old | Old | -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ -| API 2.0 | Old | New | New | New | -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ - -More Information -#################### - -See the following pages to understand how to migrate Inference Engine-based applications to API 2.0: - -- :doc:`Installation & Deployment ` -- :doc:`OpenVINO™ Common Inference pipeline ` -- :doc:`Preprocess your model ` -- :doc:`Configure device ` -- :doc:`OpenVINO™ Model Creation ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst deleted file mode 100644 index f31bcfdd83cea1..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst +++ /dev/null @@ -1,683 +0,0 @@ -.. {#openvino_2_0_inference_pipeline} - -Inference Pipeline -================== - - -.. meta:: - :description: The inference pipeline is a set of steps to be - performed in a specific order to infer models with OpenVINO™ - Runtime. - - -To infer models with OpenVINO™ Runtime, you usually need to perform the following steps in the application pipeline: - -1. `Create a Core object <#create-a-core-object>`__. - - * 1.1. `(Optional) Load extensions <#optional-load-extensions>`__ - -2. `Read a model from a drive <#read-a-model-from-a-drive>`__. - - * 2.1. `(Optional) Perform model preprocessing <#optional-perform-model-preprocessing>`__. - -3. `Load the model to the device <#load-the-model-to-the-device>`__. -4. `Create an inference request <#create-an-inference-request>`__. -5. `Fill input tensors with data <#fill-input-tensors-with-data>`__. -6. `Start inference <#start-inference>`__. -7. `Process the inference results <#process-the-inference-results>`__. - -Based on the steps, the following code demonstrates how to change the application code to migrate to API 2.0. - -1. Create a Core Object -####################### - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:create_core - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:create_core - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:create_core - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:create_core - - -1.1 (Optional) Load Extensions -++++++++++++++++++++++++++++++ - -To load a model with custom operations, you need to add extensions for these operations. -It is highly recommended to use :doc:`OpenVINO Extensibility API ` -to write extensions. However, you can also load the old extensions to the new OpenVINO™ Runtime: - -**Inference Engine API** - -.. 
tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:load_old_extension - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:load_old_extension - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:load_old_extension - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:load_old_extension - - -2. Read a Model from a Drive -############################ - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:read_model - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:read_model - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:read_model - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:read_model - - -Reading a model has the same structure as the example in the :doc:`model creation migration guide `. - -You can combine reading and compiling a model into a single call ``ov::Core::compile_model(filename, devicename)``. - - -2.1 (Optional) Perform Model Preprocessing -++++++++++++++++++++++++++++++++++++++++++ - -When the application input data does not perfectly match the model input format, -preprocessing may be necessary. See :doc:`preprocessing in API 2.0 ` for more details. - - -3. Load the Model to the Device -############################### - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:compile_model - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:compile_model - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:compile_model - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:compile_model - - -If you need to configure devices with additional parameters for OpenVINO Runtime, refer to :doc:`Configuring Devices `. - - -4. Create an Inference Request -############################## - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:create_infer_request - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:create_infer_request - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:create_infer_request - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:create_infer_request - - -5. 
Fill Input Tensors with Data -############################### - -**Inference Engine API** - -The Inference Engine API fills inputs with data of the ``I32`` precision (**not** aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - - .. tab-item:: Model created in code - :sync: model - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - -**API 2.0** - -API 2.0 fills inputs with data of the ``I64`` precision (aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - - .. tab-item:: Model created in code - :sync: model-created-in-code - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - -6. Start Inference -################## - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: Sync - :sync: sync - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:inference - - .. tab-item:: Async - :sync: async - - .. tab-set:: - - .. 
tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:start_async_and_wait - - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Sync - :sync: sync - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:inference - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:inference - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:inference - - .. tab-item:: Async - :sync: async - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:start_async_and_wait - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:start_async_and_wait - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:start_async_and_wait - - -7. Process the Inference Results -################################ - -**Inference Engine API** - -The Inference Engine API processes outputs as they are of the ``I32`` precision (**not** aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - - .. tab-item:: Model created in code - :sync: model - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - -**API 2.0** - -API 2.0 processes outputs as they are of: - -* the ``I32`` precision (**not** aligned with the original model) for OpenVINO IR v10 models, to match the :ref:`old behavior `. -* the ``I64`` precision (aligned with the original model) for OpenVINO IR v11, ONNX, ov::Model, PaddlePaddle and TensorFlow models, to match the :ref:`new behavior `. - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. 
doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - - .. tab-item:: Model created in code - :sync: model-created-in-code - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst deleted file mode 100644 index 4108c23b03763e..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst +++ /dev/null @@ -1,302 +0,0 @@ -.. {#openvino_2_0_configure_devices} - -Configuring Devices -=================== - - -.. meta:: - :description: OpenVINO Runtime API 2.0 has introduced properties that unify - metrics and configuration key concepts, which simplifies the - configuration of inference devices. - - -The Inference Engine API provides the ability to configure devices with configuration keys and obtain device-specific metrics. The values retrieved from `InferenceEngine::Core::GetConfig `__ are requested by the string name, while the return type is `InferenceEngine::Parameter `__ , which results in users not knowing which type is actually stored in the parameter. - -API 2.0 solves these issues by introducing :doc:`properties `, which unify metrics and configuration key concepts. The main advantage is that they have the C++ type: - -.. code-block:: sh - - static constexpr Property<std::string, PropertyMutability::RO> full_name{"FULL_DEVICE_NAME"}; - - -where the property can be requested from an inference device as: - - -.. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_ro_property - - -The snippets in the following sections demonstrate the device configurations for migrating from Inference Engine to API 2.0. - -.. note:: - - The Inference Engine API is a **legacy solution** and it is recommended to use API 2.0 instead. If you want to learn more about Inference Engine API, its configuration and how to obtain device-specific metrics from it, check the following `article `__ from the 2021.4 version of OpenVINO documentation. - -Setting Configuration Values -############################ - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_set_config - - .. tab-item:: Model Loading - :sync: model-loading - - .. 
doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_load_network - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_set_config - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_set_config - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_load_network - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_set_config - - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_set_property - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_compile_model - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_set_property - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_set_property - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_compile_model - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_set_property - - -Getting Information -################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_config - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_metric - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_set_config - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_get_metric - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_config - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_metric - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_set_config - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. 
doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_get_metric - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_rw_property - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_ro_property - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_get_rw_property - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_get_ro_property - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_rw_property - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_ro_property - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_get_rw_property - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_get_ro_property - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst deleted file mode 100644 index 8801091f9a4b3b..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst +++ /dev/null @@ -1,246 +0,0 @@ -.. {#openvino_2_0_deployment} - -Installation & Deployment -========================= - - -.. meta:: - :description: OpenVINO™ API 2.0 focuses on the use of development tools and - deployment of applications, it also simplifies migration from - different frameworks to OpenVINO. - - -One of the main concepts for OpenVINO™ API 2.0 is being "easy to use", which includes: - -* Simplification of migration from different frameworks to OpenVINO. -* Organization of OpenVINO. -* Usage of development tools. -* Development and deployment of OpenVINO-based applications. - - -To accomplish that, the 2022.1 release OpenVINO introduced significant changes to the installation -and deployment processes. Further changes were implemented in 2023.1, aiming at making the installation -process even simpler. - -.. tip:: - - These instructions are largely deprecated and should be used for versions prior to 2023.1. - - The OpenVINO Development Tools package is being deprecated and will be discontinued entirely in 2025. - With this change, the OpenVINO Runtime package has become the default choice for installing the - software. It now includes all components necessary to utilize OpenVINO's functionality. 
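For example, under the current approach a single package installation is expected to cover both the runtime and the tooling (an illustrative command; the ``openvino`` package name is the one published on PyPI, but check the installation guides of your release for the exact set of packages):

.. code-block:: sh

   $ python3 -m pip install openvino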
- - - -The Installer Package Contains OpenVINO™ Runtime Only -##################################################### - -Since OpenVINO 2022.1, development tools have been distributed only via `PyPI `__, and are no longer included in the OpenVINO installer package. For a list of these components, refer to the :doc:`installation overview ` guide. Benefits of this approach include: - -* simplification of the user experience - in previous versions, installation and usage of OpenVINO Development Tools differed from one distribution type to another (the OpenVINO installer vs. PyPI), -* ensuring that dependencies are handled properly via the PIP package manager, and support virtual environments of development tools. - -The structure of the OpenVINO 2022.1 installer package has been organized as follows: - -* The ``runtime`` folder includes headers, libraries and CMake interfaces. -* The ``tools`` folder contains :doc:`the compile tool `, :doc:`deployment manager `, and a set of ``requirements.txt`` files with links to the corresponding versions of the ``openvino-dev`` package. -* The ``python`` folder contains the Python version for OpenVINO Runtime. - -Installing OpenVINO Development Tools via PyPI -############################################## - -Since OpenVINO Development Tools is no longer in the installer package, the installation process has also changed. This section describes it through a comparison with previous versions. - -For Versions Prior to 2022.1 -++++++++++++++++++++++++++++ - -In previous versions, OpenVINO Development Tools was a part of the main package. After the package was installed, to convert models (for example, TensorFlow), you needed to install additional dependencies by using the requirement files, such as ``requirements_tf.txt``, install Post-Training Optimization tool and Accuracy Checker tool via the ``setup.py`` scripts, and then use the ``setupvars`` scripts to make the tools available to the following command: - -.. code-block:: sh - - $ mo.py -h - - -For 2022.1 and After (prior to 2023.1) -++++++++++++++++++++++++++++++++++++++++++ - -In OpenVINO 2022.1 and later, you can install the development tools only from a `PyPI `__ repository, using the following command (taking TensorFlow as an example): - -.. code-block:: sh - - $ python3 -m pip install -r /tools/requirements_tf.txt - - -This will install all the development tools and additional components necessary to work with TensorFlow via the ``openvino-dev`` package (see **Step 4. Install the Package** on the `PyPI page `__ for parameters of other frameworks). - -Then, the tools can be used by commands like: - -.. code-block:: sh - - $ mo -h - $ pot -h - - -Installation of any other dependencies is not required. For more details on the installation steps, see the -`Install OpenVINO Development Tools `__ prior to OpenVINO 2023.1. - -Interface Changes for Building C/C++ Applications -################################################# - -The new OpenVINO Runtime with its API 2.0 has also brought some changes for building C/C++ applications. - -CMake Interface -++++++++++++++++++++ - -The CMake interface has been changed as follows: - -**With Inference Engine of previous versions**: - -.. code-block:: cmake - - find_package(InferenceEngine REQUIRED) - find_package(ngraph REQUIRED) - add_executable(ie_ngraph_app main.cpp) - target_link_libraries(ie_ngraph_app PRIVATE ${InferenceEngine_LIBRARIES} ${NGRAPH_LIBRARIES}) - - -**With OpenVINO Runtime 2022.1 (API 2.0)**: - -.. 
code-block:: cmake - - find_package(OpenVINO REQUIRED) - add_executable(ov_app main.cpp) - target_link_libraries(ov_app PRIVATE openvino::runtime) - - add_executable(ov_c_app main.c) - target_link_libraries(ov_c_app PRIVATE openvino::runtime::c) - - -Native Interfaces -++++++++++++++++++++ - -It is possible to build applications without the CMake interface by using: MSVC IDE, UNIX makefiles, and any other interface, which has been changed as shown here: - -**With Inference Engine of previous versions**: - -.. tab-set:: - - .. tab-item:: Include dirs - :sync: include-dirs - - .. code-block:: sh - - /deployment_tools/inference_engine/include - /deployment_tools/ngraph/include - - .. tab-item:: Path to libs - :sync: path-libs - - .. code-block:: sh - - /deployment_tools/inference_engine/lib/intel64/Release - /deployment_tools/ngraph/lib/ - - .. tab-item:: Shared libs - :sync: shared-libs - - .. code-block:: sh - - // UNIX systems - inference_engine.so ngraph.so - - // Windows - inference_engine.dll ngraph.dll - - .. tab-item:: (Windows) .lib files - :sync: windows-lib-files - - .. code-block:: sh - - ngraph.lib - inference_engine.lib - -**With OpenVINO Runtime 2022.1 (API 2.0)**: - -.. tab-set:: - - .. tab-item:: Include dirs - :sync: include-dirs - - .. code-block:: sh - - /runtime/include - - .. tab-item:: Path to libs - :sync: path-libs - - .. code-block:: sh - - /runtime/lib/intel64/Release - - .. tab-item:: Shared libs - :sync: shared-libs - - .. code-block:: sh - - // UNIX systems - openvino.so - - // Windows - openvino.dll - - .. tab-item:: (Windows) .lib files - :sync: windows-lib-files - - .. code-block:: sh - - openvino.lib - - -Clearer Library Structure for Deployment -######################################## - -OpenVINO 2022.1 introduced a reorganization of the libraries, to make deployment easier. In the previous versions, it was required to use several libraries to perform deployment steps. Now you can just use ``openvino`` or ``openvino_c`` based on your developing language, with the necessary plugins to complete your task. For example, ``openvino_intel_cpu_plugin`` and ``openvino_ir_frontend`` plugins will enable loading OpenVINO IRs and performing inference on the CPU device (for more details, see the :doc:`Local distribution with OpenVINO `). - -Below are detailed comparisons of the library structure between OpenVINO 2022.1 and the previous versions: - -* Starting with 2022.1 release, a single core library with all the functionalities (``openvino`` for C++ Runtime, ``openvino_c`` for Inference Engine API C interface) is used, instead of the previous core libraries which contained ``inference_engine``, ``ngraph``, ``inference_engine_transformations`` and ``inference_engine_lp_transformations``. - -* The libraries of plugins have been renamed as follows: - - * ``openvino_intel_cpu_plugin`` is used for :doc:`CPU ` device instead of ``MKLDNNPlugin``. - * ``openvino_intel_gpu_plugin`` is used for :doc:`GPU ` device instead of ``clDNNPlugin``. - * ``openvino_auto_plugin`` is used for :doc:`Auto-Device Plugin `. - -* The plugins for reading and converting models have been changed as follows: - - * ``openvino_ir_frontend`` is used to read IRs instead of ``inference_engine_ir_reader``. - * ``openvino_onnx_frontend`` is used to read ONNX models instead of ``inference_engine_onnx_reader`` (with its dependencies). - * ``openvino_paddle_frontend`` is added in 2022.1 to read PaddlePaddle models. 
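As an illustrative sketch of what this reorganization means in practice (Linux library names are assumed here; the exact file set and paths depend on the platform and release), a minimal local distribution that reads OpenVINO IR models and runs them on CPU could ship with just:

.. code-block:: sh

   libopenvino.so                    # single core runtime library
   libopenvino_ir_frontend.so       # frontend that reads OpenVINO IR files
   libopenvino_intel_cpu_plugin.so  # plugin that executes inference on CPU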
- - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst deleted file mode 100644 index e25951582bc976..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. {#openvino_2_0_model_creation} - -Model Creation in OpenVINO™ Runtime -===================================== - - -.. meta:: - :description: Model creation in OpenVINO™ Runtime API 2.0 is performed with - nGraph engine that has been preserved in the new API and its - namespace has been changed to 'ov'. - - -OpenVINO™ Runtime with API 2.0 includes the nGraph engine as a common part. The ``ngraph`` namespace has been changed to ``ov``, but all other parts of the ngraph API have been preserved. - - -API 2.0 -#################### - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_graph.py - :language: Python - :fragment: ov:graph - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_graph.cpp - :language: cpp - :fragment: ov:graph - - -Additional Resources -#################### - -* :doc:`Hello Model Creation Sample ` diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst deleted file mode 100644 index 263e6bc32b171b..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst +++ /dev/null @@ -1,203 +0,0 @@ -.. {#openvino_2_0_preprocessing} - -Preprocessing -============= - - -.. meta:: - :description: In OpenVINO™ API 2.0 each preprocessing or post-processing - operation is integrated directly into the model and compiled - together with the inference graph. - - -This guide introduces how preprocessing works in API 2.0 by a comparison with preprocessing in the previous Inference Engine API. It also demonstrates how to migrate preprocessing scenarios from Inference Engine to API 2.0 via code samples. - -How Preprocessing Works in API 2.0 -################################## - -Inference Engine API contains preprocessing capabilities in the `InferenceEngine::CNNNetwork `__ class. Such preprocessing information is not a part of the main inference graph executed by :doc:`OpenVINO devices `. Therefore, it is stored and executed separately before the inference stage: - -* Preprocessing operations are executed on the CPU for most OpenVINO inference plugins. Thus, instead of occupying accelerators, they keep the CPU busy with computational tasks. -* Preprocessing information stored in `InferenceEngine::CNNNetwork `__ is lost when saving back to the OpenVINO IR file format. - -API 2.0 introduces a :doc:`new way of adding preprocessing operations to the model ` - each preprocessing or post-processing operation is integrated directly into the model and compiled together with the inference graph: - -* API 2.0 first adds preprocessing operations by using `ov::preprocess::PrePostProcessor `__, -* and then compiles the model on the target by using `ov::Core::compile_model `__. - -Having preprocessing operations as a part of an OpenVINO opset makes it possible to read and serialize a preprocessed model as the OpenVINO™ IR file format. 
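As a minimal C++ sketch of these two steps (the model path, layouts, and element types below are illustrative assumptions, not values taken from this guide; a single-input model is assumed for brevity):

.. code-block:: cpp

   #include <openvino/core/preprocess/pre_post_process.hpp>
   #include <openvino/openvino.hpp>

   int main() {
       ov::Core core;
       std::shared_ptr<ov::Model> model = core.read_model("model.xml");

       // Step 1: integrate preprocessing into the model itself
       ov::preprocess::PrePostProcessor ppp(model);
       ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC");
       ppp.input().model().set_layout("NCHW");
       ppp.input().preprocess().convert_element_type(ov::element::f32);
       model = ppp.build();

       // The preprocessing is now a part of the graph, so the model can be
       // serialized as regular OpenVINO IR ...
       ov::serialize(model, "model_with_preprocessing.xml");

       // Step 2: ... and compiled, preprocessing included, for the target device
       ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
       return 0;
   }

Because the inserted operations are ordinary opset nodes, the serialized model performs the same preprocessing on any device, without extra application code.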
- -More importantly, API 2.0 does not assume any default layouts as Inference Engine did. For example, Inference Engine assumed that both the ``{ 1, 224, 224, 3 }`` and ``{ 1, 3, 224, 224 }`` shapes are in the ``NCHW`` layout, while only the latter actually is. Therefore, some preprocessing capabilities in the API require layouts to be set explicitly. To learn how to do it, refer to the :doc:`Layout overview `. For example, to perform image scaling by partial dimensions ``H`` and ``W``, preprocessing needs to know what dimensions ``H`` and ``W`` are. - -.. note:: - - Use model conversion API preprocessing capabilities to insert preprocessing operations in your model for optimization. Thus, the application does not need to read the model and set preprocessing repeatedly. You can use the :doc:`model caching feature ` to improve the time-to-inference. - -The following sections demonstrate how to migrate preprocessing scenarios from Inference Engine API to API 2.0. -The snippets assume that you need to preprocess a model input with the ``tensor_name`` in API 2.0, while in Inference Engine API the ``operation_name`` is used to address the data. - -Preparation: Import Preprocessing in Python -########################################### - -| There are two different namespaces: -| * ``runtime``, which contains API 2.0 classes; -| * and ``preprocess``, which provides Preprocessing API. - -Using Mean and Scale Values -########################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: mean_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_mean_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_mean_scale - - -Converting Precision and Layout -############################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: conversions - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_conversions - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_conversions - - -Using Image Scaling -#################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: image_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_image_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_image_scale - - -Converting Color Space -++++++++++++++++++++++ - -**API 2.0** - - -.. 
tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_color_space - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_color_space - - -Additional Resources -#################### - -- :doc:`Preprocessing details ` -- :doc:`NV12 classification sample ` - diff --git a/docs/snippets/ie_common.cpp b/docs/snippets/ie_common.cpp deleted file mode 100644 index e45e956fe25a55..00000000000000 --- a/docs/snippets/ie_common.cpp +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef IN_OV_COMPONENT -# define IN_OV_COMPONENT -# define WAS_OV_LIBRARY_DEFINED -#endif - -#include <inference_engine.hpp> - -#ifdef WAS_OV_LIBRARY_DEFINED -# undef IN_OV_COMPONENT -# undef WAS_OV_LIBRARY_DEFINED -#endif - -int main() { - //! [ie:create_core] - InferenceEngine::Core core; - //! [ie:create_core] - - //! [ie:read_model] - InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml"); - //! [ie:read_model] - - InferenceEngine::InputsDataMap inputs = network.getInputsInfo(); - InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo(); - - //! [ie:compile_model] - InferenceEngine::ExecutableNetwork exec_network = core.LoadNetwork(network, "CPU"); - //! [ie:compile_model] - - //! [ie:create_infer_request] - InferenceEngine::InferRequest infer_request = exec_network.CreateInferRequest(); - //! [ie:create_infer_request] - - //! [ie:get_input_tensor] - InferenceEngine::Blob::Ptr input_blob1 = infer_request.GetBlob(inputs.begin()->first); - // fill first blob - InferenceEngine::MemoryBlob::Ptr minput1 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob1); - if (minput1) { - // the locked memory holder must remain alive while its - // buffer is being accessed - auto minputHolder = minput1->wmap(); - // Original I64 precision was converted to I32 - auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>(); - // Fill data ... - } - - InferenceEngine::Blob::Ptr input_blob2 = infer_request.GetBlob("data2"); - // fill second blob - InferenceEngine::MemoryBlob::Ptr minput2 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob2); - if (minput2) { - // the locked memory holder must remain alive while its - // buffer is being accessed - auto minputHolder = minput2->wmap(); - // Original I64 precision was converted to I32 - auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>(); - // Fill data ... - } - //! [ie:get_input_tensor] - - //! [ie:inference] - infer_request.Infer(); - //! [ie:inference] - - //! 
[ie:start_async_and_wait] - // NOTE: For demonstration purposes we are trying to set callback - // which restarts inference inside one more time, so two inferences happen here - - // Start inference without blocking current thread - auto restart_once = true; - infer_request.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>( - [&, restart_once](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) mutable { - if (status != InferenceEngine::OK) { - // Process error code - } else { - // Extract inference result - InferenceEngine::Blob::Ptr output_blob = request.GetBlob(outputs.begin()->first); - // Restart inference if needed - if (restart_once) { - request.StartAsync(); - restart_once = false; - } - } - }); - infer_request.StartAsync(); - // Get inference status immediately - InferenceEngine::StatusCode status = infer_request.Wait(InferenceEngine::InferRequest::STATUS_ONLY); - // Wait for 1 millisecond - status = infer_request.Wait(1); - // Wait for inference completion - infer_request.Wait(InferenceEngine::InferRequest::RESULT_READY); - //! [ie:start_async_and_wait] - - //! [ie:get_output_tensor] - InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first); - InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob); - if (moutput) { - // the locked memory holder must remain alive while its - // buffer is being accessed - auto minputHolder = moutput->rmap(); - // Original I64 precision was converted to I32 - auto data = - minputHolder.as<const InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>(); - // process output data - } - //! [ie:get_output_tensor] - //! [ie:load_old_extension] - core.AddExtension(std::make_shared<InferenceEngine::Extension>("path_to_extension_library.so")); - //! [ie:load_old_extension] - (void)status; - return 0; -} diff --git a/docs/snippets/ov_common.c b/docs/snippets/ov_common.c deleted file mode 100644 index 45717cc50b5db6..00000000000000 --- a/docs/snippets/ov_common.c +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include <openvino/c/openvino.h> - -void infer_request_callback(void* args) { - // Operations after infer -} -void inputs_v10(ov_infer_request_t* infer_request) { - //! [ov_api_2_0:get_input_tensor_v10] - ov_tensor_t* input_tensor1 = NULL; - ov_tensor_t* input_tensor2 = NULL; - void* data = NULL; - - { - // Get input tensor by index - ov_infer_request_get_input_tensor_by_index(infer_request, 0, &input_tensor1); - // IR v10 works with converted precisions (i64 -> i32) - ov_tensor_data(input_tensor1, &data); - int32_t* data1 = (int32_t*)data; - // Fill first data ... - } - - { - // Get input tensor by tensor name - ov_infer_request_get_tensor(infer_request, "data2_t", &input_tensor2); - // IR v10 works with converted precisions (i64 -> i32) - ov_tensor_data(input_tensor2, &data); - int32_t* data2 = (int32_t*)data; - // Fill second data ... - } - - ov_tensor_free(input_tensor1); - ov_tensor_free(input_tensor2); - //! [ov_api_2_0:get_input_tensor_v10] -} - -void inputs_aligned(ov_infer_request_t* infer_request) { - //! [ov_api_2_0:get_input_tensor_aligned] - ov_tensor_t* input_tensor1 = NULL; - ov_tensor_t* input_tensor2 = NULL; - void* data = NULL; - { - // Get input tensor by index - ov_infer_request_get_input_tensor_by_index(infer_request, 0, &input_tensor1); - // Element types, names and layouts are aligned with framework - ov_tensor_data(input_tensor1, &data); - // Fill first data ... 
- } - - { - // Get input tensor by tensor name - ov_infer_request_get_tensor(infer_request, "data2_t", &input_tensor2); - // Element types, names and layouts are aligned with framework - ov_tensor_data(input_tensor2, &data); - // Fill second data ... - } - - ov_tensor_free(input_tensor1); - ov_tensor_free(input_tensor2); - //! [ov_api_2_0:get_input_tensor_aligned] -} - -void outputs_v10(ov_infer_request_t* infer_request) { - //! [ov_api_2_0:get_output_tensor_v10] - ov_tensor_t* output_tensor = NULL; - void* data = NULL; - - // model has only one output - ov_infer_request_get_output_tensor(infer_request, &output_tensor); - // IR v10 works with converted precisions (i64 -> i32) - ov_tensor_data(output_tensor, &data); - int32_t* out_data = (int32_t*)data; - // process output data - - ov_tensor_free(output_tensor); - //! [ov_api_2_0:get_output_tensor_v10] -} - -void outputs_aligned(ov_infer_request_t* infer_request) { - //! [ov_api_2_0:get_output_tensor_aligned] - ov_tensor_t* output_tensor = NULL; - void* out_data = NULL; - - // model has only one output - ov_infer_request_get_output_tensor(infer_request, &output_tensor); - // Element types, names and layouts are aligned with framework - ov_tensor_data(output_tensor, &out_data); - // process output data - - ov_tensor_free(output_tensor); - //! [ov_api_2_0:get_output_tensor_aligned] -} - -int main() { - //! [ov_api_2_0:create_core] - ov_core_t* core = NULL; - ov_core_create(&core); - //! [ov_api_2_0:create_core] - - //! [ov_api_2_0:read_model] - ov_model_t* model = NULL; - ov_core_read_model(core, "model.xml", NULL, &model); - //! [ov_api_2_0:read_model] - - //! [ov_api_2_0:compile_model] - ov_compiled_model_t* compiled_model = NULL; - ov_core_compile_model(core, model, "CPU", 0, &compiled_model); - //! [ov_api_2_0:compile_model] - - //! [ov_api_2_0:create_infer_request] - ov_infer_request_t* infer_request = NULL; - ov_compiled_model_create_infer_request(compiled_model, &infer_request); - //! [ov_api_2_0:create_infer_request] - - inputs_aligned(infer_request); - - //! [ov_api_2_0:inference] - ov_infer_request_infer(infer_request); - //! [ov_api_2_0:inference] - - //! [ov_api_2_0:start_async_and_wait] - // NOTE: For demonstration purposes we are trying to set callback - ov_callback_t callback; - callback.callback_func = infer_request_callback; - callback.args = infer_request; - ov_infer_request_set_callback(infer_request, &callback); - // Start inference without blocking current thread - ov_infer_request_start_async(infer_request); - // Wait for inference completion - ov_infer_request_wait(infer_request); - // Wait for 10 milliseconds - ov_infer_request_wait_for(infer_request, 10); - //! [ov_api_2_0:start_async_and_wait] - - outputs_aligned(infer_request); - - //! [ov_api_2_0:load_old_extension] - // For C API 2.0 "add_extension()" is not supported for now - //! 
[ov_api_2_0:load_old_extension] - ov_infer_request_free(infer_request); - ov_compiled_model_free(compiled_model); - ov_model_free(model); - ov_core_free(core); - return 0; -} diff --git a/docs/snippets/ov_common.cpp b/docs/snippets/ov_common.cpp deleted file mode 100644 index b21ec30aef31cf..00000000000000 --- a/docs/snippets/ov_common.cpp +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#ifndef IN_OV_COMPONENT -# define IN_OV_COMPONENT -# define WAS_OV_LIBRARY_DEFINED -#endif - -#include <ie_extension.h> - -#ifdef WAS_OV_LIBRARY_DEFINED -# undef IN_OV_COMPONENT -# undef WAS_OV_LIBRARY_DEFINED -#endif - -#include <openvino/openvino.hpp> -#include <openvino/runtime/infer_request.hpp> - -void inputs_v10(ov::InferRequest& infer_request) { - //! [ov_api_2_0:get_input_tensor_v10] - // Get input tensor by index - ov::Tensor input_tensor1 = infer_request.get_input_tensor(0); - // IR v10 works with converted precisions (i64 -> i32) - auto data1 = input_tensor1.data<int32_t>(); - // Fill first data ... - - // Get input tensor by tensor name - ov::Tensor input_tensor2 = infer_request.get_tensor("data2_t"); - // IR v10 works with converted precisions (i64 -> i32) - auto data2 = input_tensor2.data<int32_t>(); - // Fill second data ... - //! [ov_api_2_0:get_input_tensor_v10] -} - -void inputs_aligned(ov::InferRequest& infer_request) { - //! [ov_api_2_0:get_input_tensor_aligned] - // Get input tensor by index - ov::Tensor input_tensor1 = infer_request.get_input_tensor(0); - // Element types, names and layouts are aligned with framework - auto data1 = input_tensor1.data<int64_t>(); - // Fill first data ... - - // Get input tensor by tensor name - ov::Tensor input_tensor2 = infer_request.get_tensor("data2_t"); - // Element types, names and layouts are aligned with framework - auto data2 = input_tensor2.data<int64_t>(); - // Fill second data ... - //! [ov_api_2_0:get_input_tensor_aligned] -} - -void outputs_v10(ov::InferRequest& infer_request) { - //! [ov_api_2_0:get_output_tensor_v10] - // model has only one output - ov::Tensor output_tensor = infer_request.get_output_tensor(); - // IR v10 works with converted precisions (i64 -> i32) - auto out_data = output_tensor.data<int32_t>(); - // process output data - //! [ov_api_2_0:get_output_tensor_v10] -} - -void outputs_aligned(ov::InferRequest& infer_request) { - //! [ov_api_2_0:get_output_tensor_aligned] - // model has only one output - ov::Tensor output_tensor = infer_request.get_output_tensor(); - // Element types, names and layouts are aligned with framework - auto out_data = output_tensor.data<int64_t>(); - // process output data - //! [ov_api_2_0:get_output_tensor_aligned] -} - -int main() { - //! [ov_api_2_0:create_core] - ov::Core core; - //! [ov_api_2_0:create_core] - - //! [ov_api_2_0:read_model] - std::shared_ptr<ov::Model> model = core.read_model("model.xml"); - //! [ov_api_2_0:read_model] - - //! [ov_api_2_0:compile_model] - ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); - //! [ov_api_2_0:compile_model] - - //! [ov_api_2_0:create_infer_request] - ov::InferRequest infer_request = compiled_model.create_infer_request(); - //! [ov_api_2_0:create_infer_request] - - inputs_aligned(infer_request); - - //! [ov_api_2_0:inference] - infer_request.infer(); - //! [ov_api_2_0:inference] - - //! 
[ov_api_2_0:start_async_and_wait] - // NOTE: For demonstration purposes we are trying to set callback - // which restarts inference inside one more time, so two inferences happen here - - auto restart_once = true; - infer_request.set_callback([&, restart_once](std::exception_ptr exception_ptr) mutable { - if (exception_ptr) { - // process the exception or rethrow it. - std::rethrow_exception(exception_ptr); - } else { - // Extract inference result - ov::Tensor output_tensor = infer_request.get_output_tensor(); - // Restart inference if needed - if (restart_once) { - infer_request.start_async(); - restart_once = false; - } - } - }); - // Start inference without blocking current thread - infer_request.start_async(); - // Get inference status immediately - bool status = infer_request.wait_for(std::chrono::milliseconds{0}); - // Wait for one millisecond - status = infer_request.wait_for(std::chrono::milliseconds{1}); - // Wait for inference completion - infer_request.wait(); - //! [ov_api_2_0:start_async_and_wait] - - outputs_aligned(infer_request); - - OPENVINO_SUPPRESS_DEPRECATED_START - //! [ov_api_2_0:load_old_extension] - core.add_extension(std::make_shared<InferenceEngine::Extension>("path_to_extension_library.so")); - //! [ov_api_2_0:load_old_extension] - OPENVINO_SUPPRESS_DEPRECATED_END - (void)status; - - return 0; -} diff --git a/docs/snippets/ov_common.py b/docs/snippets/ov_common.py deleted file mode 100644 index b3d07524391da3..00000000000000 --- a/docs/snippets/ov_common.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - - -import numpy as np -from utils import get_image, get_path_to_extension_library, get_path_to_model - -#! [ov_api_2_0:create_core] -import openvino as ov -core = ov.Core() -#! [ov_api_2_0:create_core] - -model_path = get_path_to_model() - -#! [ov_api_2_0:read_model] -model = core.read_model(model_path) -#! [ov_api_2_0:read_model] - -#! [ov_api_2_0:compile_model] -compiled_model = core.compile_model(model, "CPU") -#! [ov_api_2_0:compile_model] - -#! [ov_api_2_0:create_infer_request] -infer_request = compiled_model.create_infer_request() -#! [ov_api_2_0:create_infer_request] - -#! [ov_api_2_0:get_input_tensor_aligned] -# Get input tensor by index -input_tensor1 = infer_request.get_input_tensor(0) -# Element types, names and layouts are aligned with framework -assert input_tensor1.data.dtype == np.int64 -# Fill the first data ... - -# Get input tensor by tensor name -input_tensor2 = infer_request.get_tensor("input") -assert input_tensor2.data.dtype == np.int64 -# Fill the second data ... -#! [ov_api_2_0:get_input_tensor_aligned] - -#! [ov_api_2_0:get_input_tensor_v10] -# Get input tensor by index -input_tensor1 = infer_request.get_input_tensor(0) -# IR v10 works with converted precisions (i64 -> i32) -assert input_tensor1.data.dtype == np.int32 -# Fill the first data ... - -# Get input tensor by tensor name -input_tensor2 = infer_request.get_tensor("input") -# IR v10 works with converted precisions (i64 -> i32) -assert input_tensor2.data.dtype == np.int32 -# Fill the second data ... -#! [ov_api_2_0:get_input_tensor_v10] - -#! [ov_api_2_0:inference] -results = infer_request.infer() -#! [ov_api_2_0:inference] - -input_data = get_image() - -def process_results(results, frame_id): - pass - -#! 
[ov_api_2_0:start_async_and_wait] -# Start async inference on a single infer request -infer_request.start_async() -# Wait for 1 millisecond -infer_request.wait_for(1) -# Wait for inference completion -infer_request.wait() - -# Demonstrates async pipeline using AsyncInferQueue - -results = [] - -def callback(request, frame_id): - # Copy the data from output tensors to numpy array and process it - results_copy = {output: data[:] for output, data in request.results.items()} - results.append(process_results(results_copy, frame_id)) - -# Create AsyncInferQueue with 4 infer requests -infer_queue = ov.AsyncInferQueue(compiled_model, jobs=4) -# Set callback for each infer request in the queue -infer_queue.set_callback(callback) - -total_frames = 100 -for i in range(total_frames): - # Wait for at least one available infer request and start asynchronous inference - infer_queue.start_async(input_data, userdata=i) -# Wait for all requests to complete -infer_queue.wait_all() -#! [ov_api_2_0:start_async_and_wait] - -#! [ov_api_2_0:get_output_tensor_aligned] -# Model has only one output -output_tensor = infer_request.get_output_tensor() -# Element types, names and layouts are aligned with framework -assert output_tensor.data.dtype == np.int64 -# process output data ... -#! [ov_api_2_0:get_output_tensor_aligned] - -#! [ov_api_2_0:get_output_tensor_v10] -# Model has only one output -output_tensor = infer_request.get_output_tensor() -# IR v10 works with converted precisions (i64 -> i32) -assert output_tensor.data.dtype == np.int32 -# process output data ... -#! [ov_api_2_0:get_output_tensor_v10] - -path_to_extension_library = get_path_to_extension_library() - -#! [ov_api_2_0:load_old_extension] -core.add_extension(path_to_extension_library) -#! [ov_api_2_0:load_old_extension] diff --git a/docs/snippets/ov_graph.cpp b/docs/snippets/ov_graph.cpp deleted file mode 100644 index 4e3566caad10e5..00000000000000 --- a/docs/snippets/ov_graph.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include <openvino/core/model.hpp> -#include <openvino/op/ops.hpp> - -int main() { - //! [ov:graph] - // _____________ _____________ - // | Parameter | | Parameter | - // | data1 | | data2 | - // |___________| |___________| - // | | - // data1_t | | data2_t - // \ / - // \ / - // \ / - // ____\____/____ - // | Concat | - // | concat | - // |____________| - // | - // | concat_t - // | - // _______|_______ - // | Result | - // | result | - // |_____________| - auto data1 = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2}); - data1->set_friendly_name("data1"); // operation name - data1->output(0).set_names({"data1_t"}); // tensor names - auto data2 = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{1, 2, 2, 2}); - data2->set_friendly_name("data2"); // operation name - data2->output(0).set_names({"data2_t"}); // tensor names - - auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{data1, data2}, 1); - concat->set_friendly_name("concat"); // operation name - concat->output(0).set_names({"concat_t"}); // tensor name - - auto result = std::make_shared<ov::op::v0::Result>(concat); - result->set_friendly_name("result"); // operation name - - auto f = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data1, data2}, "function_name"); - //! 
[ov:graph] - return 0; -} diff --git a/docs/snippets/ov_graph.py b/docs/snippets/ov_graph.py deleted file mode 100644 index f0d86336391b3b..00000000000000 --- a/docs/snippets/ov_graph.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - - -#! [ov:graph] -# _____________ _____________ -# | Parameter | | Parameter | -# | data1 | | data2 | -# |___________| |___________| -# | | -# data1_t | | data2_t -# \ / -# \ / -# \ / -# ____\____/____ -# | Concat | -# | concat | -# |____________| -# | -# | concat_t -# | -# _______|_______ -# | Result | -# | result | -# |_____________| - -import openvino as ov -import openvino.runtime.opset12 as ops - - -data1 = ops.parameter([1, 3, 2, 2], ov.Type.i64) -data1.friendly_name = "data1" # operation name -data1.output(0).name = "data1_t" # tensor name -data2 = ops.parameter([1, 2, 2, 2], ov.Type.i64) -data2.friendly_name = "data2" # operation name -data2.output(0).name = "data2_t" # tensor name - -concat = ops.concat([data1, data2], 1) -concat.friendly_name = "concat" # operation name -concat.output(0).name = "concat_t" # tensor name - -result = ops.result(concat) -result.friendly_name = "result" # operation name - -model = ov.Model(result, [data1, data2], "model_name") -#! [ov:graph] diff --git a/docs/snippets/ov_preprocessing_migration.c b/docs/snippets/ov_preprocessing_migration.c deleted file mode 100644 index e34b892823c780..00000000000000 --- a/docs/snippets/ov_preprocessing_migration.c +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include - -int main_new() { - char* model_path = NULL; - char* tensor_name = NULL; - - ov_core_t* core = NULL; - ov_core_create(&core); - - ov_model_t* model = NULL; - ov_core_read_model(core, model_path, NULL, &model); - - { - //! [ov_mean_scale] - ov_preprocess_prepostprocessor_t* preprocess = NULL; - ov_preprocess_input_info_t* input_info = NULL; - ov_preprocess_input_model_info_t* input_model = NULL; - ov_preprocess_preprocess_steps_t* input_process = NULL; - ov_layout_t* layout = NULL; - ov_model_t* new_model = NULL; - - ov_preprocess_prepostprocessor_create(model, &preprocess); - ov_preprocess_prepostprocessor_get_input_info_by_index(preprocess, 0, &input_info); - ov_preprocess_input_info_get_model_info(input_info, &input_model); - // we only need to know where is C dimension - ov_layout_create("...C", &layout); - ov_preprocess_input_model_info_set_layout(input_model, layout); - // specify scale and mean values, order of operations is important - ov_preprocess_input_info_get_preprocess_steps(input_info, &input_process); - ov_preprocess_preprocess_steps_mean(input_process, 116.78f); - ov_preprocess_preprocess_steps_scale(input_process, 57.21f); - // insert preprocessing operations to the 'model' - ov_preprocess_prepostprocessor_build(preprocess, &new_model); - - ov_layout_free(layout); - ov_model_free(new_model); - ov_preprocess_input_model_info_free(input_model); - ov_preprocess_preprocess_steps_free(input_process); - ov_preprocess_input_info_free(input_info); - ov_preprocess_prepostprocessor_free(preprocess); - //! [ov_mean_scale] - } - - { - //! 
[ov_conversions] - ov_preprocess_prepostprocessor_t* preprocess = NULL; - ov_preprocess_input_info_t* input_info = NULL; - ov_preprocess_input_tensor_info_t* input_tensor_info = NULL; - ov_layout_t* layout_nhwc = NULL; - ov_preprocess_input_model_info_t* input_model = NULL; - ov_layout_t* layout_nchw = NULL; - ov_model_t* new_model = NULL; - - ov_preprocess_prepostprocessor_create(model, &preprocess); - ov_preprocess_prepostprocessor_get_input_info_by_name(preprocess, tensor_name, &input_info); - ov_preprocess_input_info_get_tensor_info(input_info, &input_tensor_info); - - ov_layout_create("NHWC", &layout_nhwc); - ov_preprocess_input_tensor_info_set_layout(input_tensor_info, layout_nhwc); - ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, ov_element_type_e::U8); - - ov_preprocess_input_info_get_model_info(input_info, &input_model); - ov_layout_create("NCHW", &layout_nchw); - ov_preprocess_input_model_info_set_layout(input_model, layout_nchw); - // layout and precision conversion is inserted automatically, - // because tensor format != model input format - ov_preprocess_prepostprocessor_build(preprocess, &new_model); - - ov_layout_free(layout_nchw); - ov_layout_free(layout_nhwc); - ov_model_free(new_model); - ov_preprocess_input_model_info_free(input_model); - ov_preprocess_input_tensor_info_free(input_tensor_info); - ov_preprocess_input_info_free(input_info); - ov_preprocess_prepostprocessor_free(preprocess); - //! [ov_conversions] - } - - { - //! [ov_color_space] - ov_preprocess_prepostprocessor_t* preprocess = NULL; - ov_preprocess_input_info_t* input_info = NULL; - ov_preprocess_input_tensor_info_t* input_tensor_info = NULL; - ov_preprocess_preprocess_steps_t* input_process = NULL; - ov_model_t* new_model = NULL; - - ov_preprocess_prepostprocessor_create(model, &preprocess); - ov_preprocess_prepostprocessor_get_input_info_by_name(preprocess, tensor_name, &input_info); - ov_preprocess_input_info_get_tensor_info(input_info, &input_tensor_info); - ov_preprocess_input_tensor_info_set_color_format(input_tensor_info, ov_color_format_e::NV12_TWO_PLANES); - // add NV12 to BGR conversion - ov_preprocess_input_info_get_preprocess_steps(input_info, &input_process); - ov_preprocess_preprocess_steps_convert_color(input_process, ov_color_format_e::BGR); - // and insert operations to the model - ov_preprocess_prepostprocessor_build(preprocess, &new_model); - - ov_preprocess_input_tensor_info_free(input_tensor_info); - ov_preprocess_preprocess_steps_free(input_process); - ov_preprocess_input_info_free(input_info); - ov_preprocess_prepostprocessor_free(preprocess); - ov_model_free(new_model); - //! [ov_color_space] - } - - { - //! 
[ov_image_scale] - ov_preprocess_prepostprocessor_t* preprocess = NULL; - ov_preprocess_input_info_t* input_info = NULL; - ov_preprocess_input_tensor_info_t* input_tensor_info = NULL; - ov_preprocess_input_model_info_t* input_model = NULL; - ov_layout_t* layout = NULL; - ov_preprocess_preprocess_steps_t* input_process = NULL; - ov_model_t* new_model = NULL; - - ov_preprocess_prepostprocessor_create(model, &preprocess); - ov_preprocess_prepostprocessor_get_input_info_by_name(preprocess, tensor_name, &input_info); - ov_preprocess_input_info_get_tensor_info(input_info, &input_tensor_info); - // scale from the specified tensor size - ov_preprocess_input_tensor_info_set_spatial_static_shape(input_tensor_info, 448, 448); - // need to specify H and W dimensions in model, others are not important - ov_preprocess_input_info_get_model_info(input_info, &input_model); - ov_layout_create("??HW", &layout); - ov_preprocess_input_model_info_set_layout(input_model, layout); - // scale to model shape - ov_preprocess_input_info_get_preprocess_steps(input_info, &input_process); - ov_preprocess_preprocess_steps_resize(input_process, ov_preprocess_resize_algorithm_e::RESIZE_LINEAR); - // and insert operations to the model - ov_preprocess_prepostprocessor_build(preprocess, &new_model); - - ov_layout_free(layout); - ov_preprocess_preprocess_steps_free(input_process); - ov_preprocess_input_model_info_free(input_model); - ov_preprocess_input_tensor_info_free(input_tensor_info); - ov_preprocess_input_info_free(input_info); - ov_model_free(new_model); - ov_preprocess_prepostprocessor_free(preprocess); - //! [ov_image_scale] - ov_model_free(model); - ov_core_free(core); - } - - return 0; -} - diff --git a/docs/snippets/ov_preprocessing_migration.cpp b/docs/snippets/ov_preprocessing_migration.cpp deleted file mode 100644 index f240659c8b422f..00000000000000 --- a/docs/snippets/ov_preprocessing_migration.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include -#include -#include - -#ifndef IN_OV_COMPONENT -# define IN_OV_COMPONENT -# define WAS_OV_LIBRARY_DEFINED -#endif - -#include "inference_engine.hpp" - -#ifdef WAS_OV_LIBRARY_DEFINED -# undef IN_OV_COMPONENT -# undef WAS_OV_LIBRARY_DEFINED -#endif - -int main_new() { - std::string model_path; - std::string tensor_name; - - ov::Core core; - std::shared_ptr model = core.read_model(model_path); - ov::preprocess::PrePostProcessor ppp(model); - - { - //! [ov_mean_scale] -ov::preprocess::PrePostProcessor ppp(model); -ov::preprocess::InputInfo& input = ppp.input(tensor_name); -// we only need to know where is C dimension -input.model().set_layout("...C"); -// specify scale and mean values, order of operations is important -input.preprocess().mean(116.78f).scale({ 57.21f, 57.45f, 57.73f }); -// insert preprocessing operations to the 'model' -model = ppp.build(); - //! [ov_mean_scale] - } - - { - //! [ov_conversions] -ov::preprocess::PrePostProcessor ppp(model); -ov::preprocess::InputInfo& input = ppp.input(tensor_name); -input.tensor().set_layout("NHWC").set_element_type(ov::element::u8); -input.model().set_layout("NCHW"); -// layout and precision conversion is inserted automatically, -// because tensor format != model input format -model = ppp.build(); - //! [ov_conversions] - } - - { - //! 
[ov_color_space] -ov::preprocess::PrePostProcessor ppp(model); -ov::preprocess::InputInfo& input = ppp.input(tensor_name); -input.tensor().set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES); -// add NV12 to BGR conversion -input.preprocess().convert_color(ov::preprocess::ColorFormat::BGR); -// and insert operations to the model -model = ppp.build(); - //! [ov_color_space] - } - - { - //! [ov_image_scale] -ov::preprocess::PrePostProcessor ppp(model); -ov::preprocess::InputInfo& input = ppp.input(tensor_name); -// scale from the specified tensor size -input.tensor().set_spatial_static_shape(448, 448); -// need to specify H and W dimensions in model, others are not important -input.model().set_layout("??HW"); -// scale to model shape -input.preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); -// and insert operations to the model -model = ppp.build(); - //! [ov_image_scale] - } - -return 0; -} - -int main_old() { - std::string model_path; - std::string operation_name; - - InferenceEngine::Core core; - InferenceEngine::CNNNetwork network = core.ReadNetwork(model_path); - - { - //! [mean_scale] -auto preProcess = network.getInputsInfo()[operation_name]->getPreProcess(); -preProcess.init(3); -preProcess[0]->meanValue = 116.78f; -preProcess[1]->meanValue = 116.78f; -preProcess[2]->meanValue = 116.78f; -preProcess[0]->stdScale = 57.21f; -preProcess[1]->stdScale = 57.45f; -preProcess[2]->stdScale = 57.73f; -preProcess.setVariant(InferenceEngine::MEAN_VALUE); - //! [mean_scale] - } - - { - //! [conversions] -auto inputInfo = network.getInputsInfo()[operation_name]; -inputInfo->setPrecision(InferenceEngine::Precision::U8); -inputInfo->setLayout(InferenceEngine::Layout::NHWC); -// model input layout is always NCHW in Inference Engine -// for shapes with 4 dimensions - //! [conversions] - } - - { - //! [image_scale] -auto preProcess = network.getInputsInfo()[operation_name]->getPreProcess(); -// Inference Engine supposes input for resize is always in NCHW layout -// while for OpenVINO Runtime API 2.0 `H` and `W` dimensions must be specified -// Also, current code snippet supposed resize from dynamic shapes -preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - //! [image_scale] - } - - return 0; -} diff --git a/docs/snippets/ov_properties_migration.c b/docs/snippets/ov_properties_migration.c deleted file mode 100644 index 745d7bf8bbeb95..00000000000000 --- a/docs/snippets/ov_properties_migration.c +++ /dev/null @@ -1,58 +0,0 @@ -#include - -int main_new() { -ov_core_t* core = NULL; -ov_core_create(&core); - -//! [core_get_ro_property] -char* full_device_name = NULL; -ov_core_get_property(core, "CPU", ov_property_key_device_full_name, &full_device_name); -ov_free(full_device_name); -//! [core_get_ro_property] - -//! [core_get_rw_property] -char* num_streams = NULL; -ov_core_get_property(core, "CPU", ov_property_key_num_streams, &num_streams); -ov_free(num_streams); -//! [core_get_rw_property] - -//! [core_set_property] -ov_core_set_property(core, "CPU", ov_property_key_enable_profiling, "TRUE"); -//! [core_set_property] - -ov_model_t* model = NULL; -ov_core_read_model(core, "sample.xml", NULL, &model); -//! [core_compile_model] -ov_compiled_model_t* compiled_model = NULL; -ov_core_compile_model(core, model, "MULTI", 6, &compiled_model, - ov_property_key_device_priorities, "CPU, CPU", - ov_property_key_hint_performance_mode, "THROUGHPUT", - ov_property_key_hint_inference_precision, "f32"); -//! [core_compile_model] - -//! 
[compiled_model_set_property] -// turn CPU off for multi-device execution -ov_compiled_model_set_property(compiled_model, ov_property_key_device_priorities, "GPU"); -//! [compiled_model_set_property] - -{ -//! [compiled_model_get_ro_property] -char* nireq = NULL; -ov_compiled_model_get_property(compiled_model, ov_property_key_hint_num_requests, &nireq); -ov_free(nireq); -//! [compiled_model_get_ro_property] -} - -{ -//! [compiled_model_get_rw_property] -char* perf_mode = NULL; -ov_compiled_model_get_property(compiled_model, ov_property_key_hint_performance_mode, &perf_mode); -ov_free(perf_mode); -//! [compiled_model_get_rw_property] -} -ov_compiled_model_free(compiled_model); -ov_model_free(model); -ov_core_free(core); -return 0; -} - diff --git a/docs/snippets/ov_properties_migration.cpp b/docs/snippets/ov_properties_migration.cpp deleted file mode 100644 index 47bb0c5ec994f8..00000000000000 --- a/docs/snippets/ov_properties_migration.cpp +++ /dev/null @@ -1,105 +0,0 @@ -#include -#ifndef IN_OV_COMPONENT -# define IN_OV_COMPONENT -# define WAS_OV_LIBRARY_DEFINED -#endif - -#include - -#ifdef WAS_OV_LIBRARY_DEFINED -# undef IN_OV_COMPONENT -# undef WAS_OV_LIBRARY_DEFINED -#endif - -int main_new() { - ov::Core core; - -//! [core_get_ro_property] -// 'auto' is automatically deduced as std::string -// since the type is stored in the property -auto full_device_name = core.get_property("CPU", ov::device::full_name); -//! [core_get_ro_property] - -//! [core_get_rw_property] -// 'auto' is automatically deduced as ov::streams::Num -// since the type is stored in the property -auto num_streams = core.get_property("CPU", ov::streams::num); -//! [core_get_rw_property] - -//! [core_set_property] -core.set_property("CPU", ov::enable_profiling(true)); -//! [core_set_property] - -auto model = core.read_model("sample.xml"); -//! [core_compile_model] -auto compiled_model = core.compile_model(model, "MULTI", - ov::device::priorities("GPU", "CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::inference_precision(ov::element::f32)); -//! [core_compile_model] - -//! [compiled_model_set_property] -// turn CPU off for multi-device execution -compiled_model.set_property(ov::device::priorities("GPU")); -//! [compiled_model_set_property] - -{ -//! [compiled_model_get_ro_property] -// 'auto' is deduced to 'uint32_t' -auto nireq = compiled_model.get_property(ov::optimal_number_of_infer_requests); -//! [compiled_model_get_ro_property] -} - -{ -//! [compiled_model_get_rw_property] -ov::hint::PerformanceMode perf_mode = compiled_model.get_property(ov::hint::performance_mode); -//! [compiled_model_get_rw_property] -} - - -return 0; -} - - -int main_old() { - InferenceEngine::Core core; -//! [core_get_metric] -auto full_device_name = core.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as(); -//! [core_get_metric] - -//! [core_get_config] -// a user has to parse std::string after -auto num_streams = core.GetConfig("CPU", CONFIG_KEY(CPU_THROUGHPUT_STREAMS)).as(); -//! [core_get_config] - -//! [core_set_config] -core.SetConfig({ { CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) } }, "CPU"); -//! [core_set_config] - -auto model = core.ReadNetwork("sample.xml"); -//! [core_load_network] -auto exec_network = core.LoadNetwork(model, "MULTI", { - { MULTI_CONFIG_KEY(DEVICE_PRIORITIES), "CPU, GPU" }, - { CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(THROUGHPUT) }, - { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }); -//! [core_load_network] - -//! 
[executable_network_set_config] -// turn CPU off for multi-device execution -exec_network.SetConfig({ { MULTI_CONFIG_KEY(DEVICE_PRIORITIES), "GPU" } }); -//! [executable_network_set_config] - -{ -//! [executable_network_get_metric] -auto nireq = exec_network.GetMetric(EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); -//! [executable_network_get_metric] -} - -{ -//! [executable_network_get_config] -std::string perf_model = exec_network.GetConfig(CONFIG_KEY(PERFORMANCE_HINT)).as(); -//! [executable_network_get_config] -} - -return 0; -} diff --git a/docs/sphinx_setup/_static/images/tf_openvino.svg b/docs/sphinx_setup/_static/images/tf_openvino.svg deleted file mode 100644 index fdf0fb6555e601..00000000000000 --- a/docs/sphinx_setup/_static/images/tf_openvino.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e6ffe6ef81a6590a3746a6e198211dfec7636d76fcde8ad06c9d984a0b6599dc -size 60397 diff --git a/docs/sphinx_setup/api/api_reference.rst b/docs/sphinx_setup/api/api_reference.rst index a17420042eb461..e1debd5cfaf84e 100644 --- a/docs/sphinx_setup/api/api_reference.rst +++ b/docs/sphinx_setup/api/api_reference.rst @@ -6,15 +6,28 @@ API Reference .. meta:: :description: Explore the features of Python, C, C++ APIs in Intel® Distribution of OpenVINO™ Toolkit. - -The OpenVINO toolkit has APIs available for Python, C, and C++. Most features exist for all APIs, but there are some differences. The C++ API is the most comprehensive. - -API references available: - - .. toctree:: :maxdepth: 2 + :hidden: c_cpp_api/group__ov__cpp__api c_cpp_api/group__ov__c__api ie_python_api/api + + + +The OpenVINO toolkit offers **APIs for Python, C, and C++**, which share most features (C++ being the +most comprehensive), follow a common structure and naming conventions, use common namespaces, +and contain no duplicate structures. + +OpenVINO API 2.0 can be characterized as follows: + +- Preserves input element types and order of dimensions (layouts), and stores tensor names from the + original models (Model Conversion API). +- Uses tensor names for addressing, which is the standard approach among the compatible model + frameworks. +- Can also address input and output tensors by index. Some model formats, such as ONNX, are sensitive + to the input and output order, which is preserved by OpenVINO. +- Includes :doc:`properties <../openvino_docs_OV_UG_query_api>`, unifying the metric and configuration key concepts. + The main advantage is that they have the C++ type: ``static constexpr Property<std::string, PropertyMutability::RO> full_name{"FULL_DEVICE_NAME"};`` + From 9cf2ab080de9485d1c441556dc8250ec30613100 Mon Sep 17 00:00:00 2001 From: Santhosh Mamidisetti <92091342+SANTHOSH-MAMIDISETTI@users.noreply.github.com> Date: Tue, 16 Jan 2024 23:54:24 +0530 Subject: [PATCH 028/122] fixed broken doc links (#22088) Co-authored-by: Przemyslaw Wysocki --- CONTRIBUTING.md | 6 +++--- CONTRIBUTING_PR.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f91846d8f0c78a..7e2636f9097cf8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,7 +61,7 @@ product better. [./docs/dev](https://github.com/openvinotoolkit/openvino/tree/master/docs/dev) folder. * **User documentation** is built from several sources and published at - [docs.openvino.ai](docs.openvino.ai), which is the recommended place for reading + [docs.openvino.ai](https://docs.openvino.ai/), which is the recommended place for reading these documents. Use the files maintained in this repository only for editing purposes.
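As a point of reference for the `api_reference.rst` hunk above, here is a minimal C++ sketch of the typed-property access and tensor name/index addressing it describes. This sketch is not part of the patch: the model path "sample.xml" and the input tensor name "data" are placeholders, and a CPU plugin is assumed to be available.

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;

        // 'auto' is deduced to std::string because ov::device::full_name
        // is a typed Property, as the bullet above notes.
        auto full_name = core.get_property("CPU", ov::device::full_name);
        core.set_property("CPU", ov::enable_profiling(true));

        // Inputs can be addressed by tensor name or by index; index order
        // is preserved for order-sensitive formats such as ONNX.
        auto model = core.read_model("sample.xml");
        auto by_name = model->input("data");
        auto by_index = model->input(0);
        return 0;
    }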
* The easiest way to help with documentation is to review it and provide feedback on the @@ -69,7 +69,7 @@ product better. or think more information should be added, you can reach out to any of the documentation contributors to discuss the potential changes. - You can also create a Pull Request directly, following the [editor's guide](./docs/CONTRIBUTING_DOCS.md). + You can also create a Pull Request directly, following the [editor's guide](./CONTRIBUTING_DOCS.md). ### Promote and Support OpenVINO @@ -152,4 +152,4 @@ We'll make sure to review your Pull Request as soon as possible and provide you ## License By contributing to the OpenVINO project, you agree that your contributions will be -licensed under the terms stated in the [LICENSE](./LICENSE.md) file. +licensed under the terms stated in the [LICENSE](./LICENSE) file. diff --git a/CONTRIBUTING_PR.md b/CONTRIBUTING_PR.md index df0d4ec87bd248..a6717b5336e60d 100644 --- a/CONTRIBUTING_PR.md +++ b/CONTRIBUTING_PR.md @@ -56,7 +56,7 @@ Regardless of the automated tests, you should ensure the quality of your changes ## Need Additional Help? Check these Articles -* [How to create a fork](https://help.github.com/articles/fork-a-rep) +* [How to create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) * [Install Git](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) * If you want to add a new sample, please have a look at the Guide for contributing to C++/C/Python IE samples and add the license statement at the top of new files for From e5dad5437f67ba09e3c057652c8fa8f04989f511 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 16 Jan 2024 23:18:53 +0400 Subject: [PATCH 029/122] Remove deprecated API 1.0: part 1 (#22105) * Removed CompoundBlob * Removed BlobIterator, SOPointer * Removed old RemoteContext and RemoteBlob * Removed VPU headers, IE parallel * Removed preprocessing --- src/inference/dev_api/blob_factory.hpp | 14 +- src/inference/dev_api/caseless.hpp | 85 -- ...nfer_async_request_thread_safe_default.hpp | 14 - .../ie_iexecutable_network_internal.hpp | 8 - .../interface/ie_iinfer_request_internal.hpp | 82 -- .../interface/ie_iplugin_internal.hpp | 66 - src/inference/dev_api/ie_icore.hpp | 33 - .../dev_api/openvino/runtime/make_tensor.hpp | 2 - src/inference/dev_api/remote_utils.hpp | 160 -- .../include/ie/cpp/ie_executable_network.hpp | 8 - .../include/ie/cpp/ie_infer_request.hpp | 7 - .../include/ie/details/ie_blob_iterator.hpp | 100 -- .../include/ie/details/ie_so_loader.h | 82 -- .../include/ie/details/ie_so_pointer.hpp | 207 --- src/inference/include/ie/ie_blob.h | 92 +- src/inference/include/ie/ie_compound_blob.h | 173 --- src/inference/include/ie/ie_core.hpp | 43 - .../include/ie/ie_iexecutable_network.hpp | 10 - .../include/ie/ie_iinfer_request.hpp | 10 - src/inference/include/ie/ie_input_info.hpp | 22 - src/inference/include/ie/ie_parallel.hpp | 58 - src/inference/include/ie/ie_plugin_config.hpp | 3 - src/inference/include/ie/ie_preprocess.hpp | 236 --- src/inference/include/ie/ie_remote_blob.hpp | 87 -- .../include/ie/ie_remote_context.hpp | 173 --- .../include/ie/ie_transformations.hpp | 57 - src/inference/include/ie/inference_engine.hpp | 2 - src/inference/include/ie/vpu/hddl_config.hpp | 188 --- .../include/ie/vpu/myriad_config.hpp | 77 - src/inference/include/ie/vpu/vpu_config.hpp | 60 - src/inference/src/blob_factory.cpp | 4 - src/inference/src/cnn_network_ngraph_impl.cpp | 2 - src/inference/src/compilation_context.cpp | 18 -
.../src/cpp/ie_executable_network.cpp | 5 - .../src/cpp/ie_executable_network_base.hpp | 5 - .../src/cpp/ie_infer_async_request_base.hpp | 5 - src/inference/src/cpp/ie_infer_request.cpp | 8 +- .../ie_iexecutable_network_internal.cpp | 4 - .../interface/ie_iinfer_request_internal.cpp | 211 +-- .../interface/ie_iplugin_internal.cpp | 55 +- src/inference/src/dev/converter_utils.cpp | 178 +-- src/inference/src/dev/converter_utils.hpp | 4 - src/inference/src/dev/core_impl.cpp | 1 + src/inference/src/dev/core_impl.hpp | 23 +- src/inference/src/dev/core_impl_ie.cpp | 40 +- src/inference/src/dev/icompiled_model.cpp | 3 - src/inference/src/dev/iplugin_wrapper.cpp | 16 +- src/inference/src/dev/make_tensor.cpp | 42 +- .../src/dev/preprocessing/mean_image.cpp | 47 - .../src/dev/preprocessing/mean_image.hpp | 28 - .../src/dev/preprocessing/preprocessing.cpp | 81 +- .../src/dev/preprocessing/preprocessing.hpp | 3 - .../src/dev/remote_context_wrapper.hpp | 51 - src/inference/src/ie_blob_common.cpp | 16 - src/inference/src/ie_compound_blob.cpp | 183 --- src/inference/src/ie_core.cpp | 66 - src/inference/src/ie_remote_context.cpp | 52 - src/inference/src/ie_transformations.cpp | 17 - ..._remote_context.cpp => remote_context.cpp} | 4 +- src/inference/src/shared_object_loader.cpp | 47 - .../functional/async_infer_request_test.cpp | 5 - .../tests/functional/blob_copy_test.cpp | 486 ------- .../tests/functional/caseless_tests.cpp | 55 - .../tests/functional/cnn_network_test.cpp | 18 - .../tests/functional/executable_network.cpp | 5 - .../tests/functional/ngraph_reshape_tests.cpp | 1282 ----------------- .../tests/functional/preprocess_test.cpp | 44 - .../tests/functional/response_buffer_test.cpp | 102 -- .../tests/functional/task_executor_tests.cpp | 1 - src/inference/tests/unit/ie_blob_test.cpp | 71 - .../tests/unit/ie_compound_blob_test.cpp | 245 ---- .../tests/unit/ie_executable_network_test.cpp | 1 - .../functional/behavior/auto_func_test.cpp | 2 +- .../infer_request/set_blob_by_type.cpp | 33 - .../skip_tests_config.cpp | 3 +- .../auto/tests/unit/auto_unit_test.cpp | 2 +- .../tests/unit/key_network_priority_test.cpp | 2 +- .../auto/tests/unit/select_device_test.cpp | 6 +- .../single_layer_tests/rdft.cpp | 4 +- .../dynamic/convolution_backprop_data.cpp | 4 +- .../dynamic/detection_output.cpp | 1 - .../group_convolution_backprop_data.cpp | 6 +- .../single_layer_tests/dynamic/matmul.cpp | 2 +- .../dynamic/non_max_suppression.cpp | 2 +- src/plugins/proxy/src/remote_tensor.cpp | 10 - .../subgraph_reference/preprocess_legacy.cpp | 156 -- .../infer_request/set_blob_by_type.hpp | 94 -- .../behavior/plugin/core_integration.hpp | 7 - .../include/behavior/plugin/version.hpp | 1 - .../multi_remote_blob_multidevice_test.hpp | 82 -- .../include/multi/multi_remote_blob_tests.hpp | 42 - .../runtime_precision.cpp | 6 +- .../plugin/shared/src/snippets/add.cpp | 3 +- .../plugin/shared/src/snippets/convert.cpp | 4 +- .../src/base/layer_test_utils.cpp | 12 +- .../src/single_layer/memory.cpp | 1 - .../quantized_convolution_backprop_data.cpp | 1 + .../quantized_convolution_batch_norm.cpp | 2 +- .../common_test_utils/test_assertions.hpp | 80 - .../functional_test_utils/blob_utils.hpp | 32 - .../mock_iexecutable_network_internal.hpp | 1 - .../mock_iinfer_request_internal.hpp | 1 - .../interface/mock_iinference_plugin.hpp | 12 - .../mocks/mock_iexecutable_network.hpp | 1 - .../mocks/mock_iinfer_request.hpp | 1 - 105 files changed, 53 insertions(+), 6256 deletions(-) delete mode 100644 src/inference/dev_api/caseless.hpp delete mode 
100644 src/inference/include/ie/details/ie_blob_iterator.hpp delete mode 100644 src/inference/include/ie/details/ie_so_loader.h delete mode 100644 src/inference/include/ie/details/ie_so_pointer.hpp delete mode 100644 src/inference/include/ie/ie_compound_blob.h delete mode 100644 src/inference/include/ie/ie_parallel.hpp delete mode 100644 src/inference/include/ie/ie_preprocess.hpp delete mode 100644 src/inference/include/ie/ie_remote_blob.hpp delete mode 100644 src/inference/include/ie/ie_remote_context.hpp delete mode 100644 src/inference/include/ie/ie_transformations.hpp delete mode 100644 src/inference/include/ie/vpu/hddl_config.hpp delete mode 100644 src/inference/include/ie/vpu/myriad_config.hpp delete mode 100644 src/inference/include/ie/vpu/vpu_config.hpp delete mode 100644 src/inference/src/dev/preprocessing/mean_image.cpp delete mode 100644 src/inference/src/dev/preprocessing/mean_image.hpp delete mode 100644 src/inference/src/dev/remote_context_wrapper.hpp delete mode 100644 src/inference/src/ie_compound_blob.cpp delete mode 100644 src/inference/src/ie_remote_context.cpp delete mode 100644 src/inference/src/ie_transformations.cpp rename src/inference/src/{cpp/ie_remote_context.cpp => remote_context.cpp} (98%) delete mode 100644 src/inference/src/shared_object_loader.cpp delete mode 100644 src/inference/tests/functional/blob_copy_test.cpp delete mode 100644 src/inference/tests/functional/caseless_tests.cpp delete mode 100644 src/inference/tests/functional/ngraph_reshape_tests.cpp delete mode 100644 src/inference/tests/functional/preprocess_test.cpp delete mode 100644 src/inference/tests/functional/response_buffer_test.cpp delete mode 100644 src/inference/tests/unit/ie_compound_blob_test.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp delete mode 100644 src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp delete mode 100644 src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp delete mode 100644 src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp delete mode 100644 src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp diff --git a/src/inference/dev_api/blob_factory.hpp b/src/inference/dev_api/blob_factory.hpp index 3ba23868b92159..9258e781fa690e 100644 --- a/src/inference/dev_api/blob_factory.hpp +++ b/src/inference/dev_api/blob_factory.hpp @@ -4,7 +4,7 @@ /** * @brief A file with helper functions to uniformly create Blob objects - * @file blob_transform.hpp + * @file blob_factory.hpp */ #pragma once @@ -16,7 +16,6 @@ #include "ie_blob.h" #include "ie_data.h" #include "ie_memcpy.h" -#include "ie_preprocess.hpp" IE_SUPPRESS_DEPRECATED_START /** @@ -83,17 +82,6 @@ INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) make_blob_with_precision(const InferenceEngine::TensorDesc& desc, const std::shared_ptr& alloc); -/** - * @brief Creates a plain Blob::Ptr - * @ingroup ie_dev_api_memory - * - * @param[in] prec The Precision value - * @param[in] dims The dims - * @return A Blob::Ptr pointer - */ -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) -make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVector dims); - /** * @brief Creates Blob::Ptr with precision * @ingroup ie_dev_api_memory diff --git a/src/inference/dev_api/caseless.hpp b/src/inference/dev_api/caseless.hpp deleted file mode 100644 index fde83b4b51f5c2..00000000000000 --- a/src/inference/dev_api/caseless.hpp 
+++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @file caseless.hpp - * @brief A header file with caseless containers - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -namespace InferenceEngine { -namespace details { - -/** - * @brief Provides caseless comparison for STL algorithms - * - * @tparam Key type, usually std::string - */ -template -class CaselessLess { -public: - bool operator()(const Key& a, const Key& b) const noexcept { - return std::lexicographical_compare(std::begin(a), - std::end(a), - std::begin(b), - std::end(b), - [](const char& cha, const char& chb) { - return std::tolower(cha) < std::tolower(chb); - }); - } -}; - -/** - * provides caseless eq for stl algorithms - * @tparam Key - */ -template -class CaselessEq { -public: - bool operator()(const Key& a, const Key& b) const noexcept { - return a.size() == b.size() && - std::equal(std::begin(a), std::end(a), std::begin(b), [](const char& cha, const char& chb) { - return std::tolower(cha) == std::tolower(chb); - }); - } -}; - -/** - * To hash caseless - */ -template -class CaselessHash : public std::hash { -public: - size_t operator()(T __val) const noexcept { - T lc; - std::transform(std::begin(__val), std::end(__val), std::back_inserter(lc), [](typename T::value_type ch) { - return std::tolower(ch); - }); - return std::hash()(lc); - } -}; - -template -using caseless_unordered_map = std::unordered_map, CaselessEq>; - -template -using caseless_unordered_multimap = std::unordered_multimap, CaselessEq>; - -template -using caseless_map = std::map>; - -template -using caseless_set = std::set>; - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp index 5378dcfe525345..687a87a3364388 100644 --- a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp +++ b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp @@ -243,25 +243,11 @@ class INFERENCE_ENGINE_1_0_DEPRECATED AsyncInferRequestThreadSafeDefault : publi _syncRequest->SetBlob(name, data); } - void SetBlobs(const std::string& name, const std::vector& blobs) override { - CheckState(); - _syncRequest->SetBlobs(name, blobs); - } - - BatchedBlob::Ptr GetBlobs(const std::string& name) override { - CheckState(); - return _syncRequest->GetBlobs(name); - } - Blob::Ptr GetBlob(const std::string& name) override { CheckState(); return _syncRequest->GetBlob(name); } - const PreProcessInfo& GetPreProcess(const std::string& name) const override { - return _syncRequest->GetPreProcess(name); - } - void SetCallback(Callback callback) override { CheckState(); _callback = std::move(callback); diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp index e488cd406c8c82..ba9420bc51bdf6 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp @@ -12,7 +12,6 @@ #include "cpp/ie_cnn_network.h" #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" #include "so_ptr.hpp" namespace ov { @@ 
-29,7 +28,6 @@ namespace InferenceEngine { class IInferencePlugin; class IPluginWrapper; class IInferRequestInternal; -class RemoteContext; class IVariableStateInternal; class ICompiledModelWrapper; @@ -151,12 +149,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IExecutableNetw */ virtual Parameter GetMetric(const std::string& name) const; - /** - * @brief Gets the remote context. - * @return A reference to a context - */ - virtual std::shared_ptr GetContext() const; - /** * @brief Raises the flag that model was loaded from cache */ diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp index 4b22faa5339aeb..1c8f2b5ec786ef 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp @@ -4,14 +4,12 @@ #pragma once -#include #include #include #include #include "cpp/ie_infer_request.hpp" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_input_info.hpp" #include "openvino/core/node_output.hpp" #include "so_ptr.hpp" @@ -89,31 +87,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ virtual void SetBlob(const std::string& name, const Blob::Ptr& data); - /** - * @brief Set batch of input data to infer. Default implementation performs basic validation and checks that all - * tensors are not remote. Plugin-specific implementations may override this behavior to handle remote tensors case. - * If plugin expects only memory blobs (not remote blobs), consider to override only SetBlobsImpl and reuse basic - * existing implementation - * @param name - an operation name of input or output blob. - * @param blobs - input blobs. The type of Blob must correspond to the model's input - * precision and size. - */ - virtual void SetBlobs(const std::string& name, const std::vector& blobs); - - /** - * @brief Set batch of input data to infer. Default implementation throws "Not implemented" exception - * To support 'set_input_tensors'/'set_tensors' plugin-specific implementations shall: - * - Inside SetBlobsImpl: update 'InferenceEngine::IInferRequestInternal::batched_inputs' map - * - Inside 'SetBlob': erase appropriate 'InferenceEngine::IInferRequestInternal::_batched_inputs[name]' item - * - Inside 'InferImpl': call 'convertBatchedInputBlobs' on the beginning to convert many user blobs into single - * one - * - If needed, override 'convertBatchedInputBlob' to perform custom concatenation and data copy to input blob - * @param name - an operation name of input or output blob. - * @param batched_blob - input blobs combined in batched blob. Called only if number of blobs > 1 - * precision and size. - */ - virtual void SetBlobsImpl(const std::string& name, const BatchedBlob::Ptr& batched_blob); - /** * @brief Get input/output data to infer * @note Memory allocation doesn't happen @@ -123,21 +96,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ virtual Blob::Ptr GetBlob(const std::string& name); - /** - * @brief Get input/output data to infer - * @note Memory allocation doesn't happen - * @param name - a name of input or output blob. - * @return data - a reference to input batched blob. - */ - virtual BatchedBlob::Ptr GetBlobs(const std::string& name); - - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. 
- * @param info pointer to a pointer to PreProcessInfo structure - */ - virtual const PreProcessInfo& GetPreProcess(const std::string& name) const; - /** * @brief Queries memory states. * @return Returns memory states @@ -283,13 +241,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ std::shared_ptr findOutputByNodeName(const std::string& name) const; - /** - * @brief Concatenates _batched_inputs into single blob before inference - * It is expected that _batched_inputs map contains only valid BatchedBlob blobs with 2 or more blobs inside - * @throws Exception if error occurs - */ - void convertBatchedInputBlobs(); - /** * @brief Checks whether pre-processing step is required for a given input * @param info InputInfo corresponding to input blob @@ -303,24 +254,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn void addInputPreProcessingFor(const std::string& name, const Blob::Ptr& from, const Blob::Ptr& to); - /** - * @brief Performs actual concatenation of blobs into single tensor - * Default implementation may allocate memory for new blob containing user's input data - * Plugin is allowed to override this behavior - * @throws Exception if error occurs - */ - virtual void convertBatchedInputBlob(const std::string& name, - const InferenceEngine::BatchedBlob::Ptr& batched_blob); - - /** - * @brief Performs basic validation of user's blobs set via SetBlobs - * @note Plugin-specific implementations may call this function to performs basic validation inside 'SetBlobs' - * @param name - input name. - * @param blobs - input blobs. The type of Blob must correspond to the network input - * precision and size. - */ - virtual void checkBlobsForBatch(const std::string& name, const std::vector& blobs); - InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data InferenceEngine::BlobMap _inputs; //!< A map of user passed blobs for network inputs @@ -328,21 +261,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn InferenceEngine::BlobMap _outputs; //!< A map of user passed blobs for network outputs std::vector> _parameters; //!< A vector of function inputs std::vector> _results; //!< A vector of function outputs - std::map _batched_inputs; //!< A map of user passed blobs for network inputs - - class PreProcessDataPlugin { - public: - void setRoiBlob(const Blob::Ptr& blob) {} - - Blob::Ptr getRoiBlob() const { - return nullptr; - } - - void execute(Blob::Ptr& preprocessedBlob, const PreProcessInfo& info, bool serial, int batchSize = -1) {} - - void isApplicable(const Blob::Ptr& src, const Blob::Ptr& dst) {} - }; - std::map> _preProcData; //!< A map of pre-process data per input /** * @brief A shared pointer to IInferRequestInternal diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index eb0e8d38c46f87..9ae8659be3db42 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -30,18 +30,9 @@ namespace InferenceEngine { class ExecutorManager; class IExecutableNetworkInternal; -class RemoteContext; class IExtension; class ICore; -/** - * @brief Copies preprocess info - * - * @param[in] from PreProcessInfo to copy from - * @return copy of preprocess info - */ 
-INFERENCE_ENGINE_API_CPP(PreProcessInfo) copyPreProcess(const PreProcessInfo& from); - /** * @brief Copies the values of `std::string` indexed map and apply const cast * @@ -180,18 +171,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, const std::map& config); - /** - * @brief Creates an executable network from network object, on specified remote context - * @param network A network object acquired from InferenceEngine::Core::ReadNetwork - * @param config string-string map of config parameters relevant only for this load operation - * @param context A pointer to plugin context derived from RemoteContext class used to - * execute the network - * @return Created Executable Network object - */ - virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, - const std::map& config, - const std::shared_ptr& context); - /** * @brief Creates an executable network from model file path * @param modelPath A path to model @@ -235,20 +214,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi */ virtual Parameter GetMetric(const std::string& name, const std::map& options) const; - /** - * @brief Creates a remote context instance based on a map of parameters - * @param[in] params The map of parameters - * @return A remote context object - */ - virtual std::shared_ptr CreateContext(const ParamMap& params); - - /** - * @brief Provides a default remote context instance if supported by a plugin - * @param[in] params The map of parameters - * @return The default context. - */ - virtual std::shared_ptr GetDefaultContext(const ParamMap& params); - /** * @deprecated Use ImportNetwork(std::istream& networkModel, const std::map& config) * @brief Creates an executable network from an previously exported network @@ -269,19 +234,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi virtual std::shared_ptr ImportNetwork(std::istream& networkModel, const std::map& config); - /** - * @brief Creates an executable network from an previously exported network using plugin implementation - * and removes Inference Engine magic and plugin name - * @param networkModel Reference to network model output stream - * @param context A pointer to plugin context derived from RemoteContext class used to - * execute the network - * @param config A string -> string map of parameters - * @return An Executable network - */ - virtual std::shared_ptr ImportNetwork(std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config); - /** * @brief Sets pointer to ICore interface * @param core Pointer to Core interface @@ -333,24 +285,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi const CNNNetwork& network, const std::map& config); - /** - * @brief Creates an executable network using remote context from a parsed network object, - * users can create as many networks as they need and use them simultaneously (up to the limitation of the HW - * resources) - * @note The function is used in - * InferencePluginInternal::LoadNetwork(const CNNNetwork&, const std::map&, - * RemoteContext::Ptr) which performs common steps first and calls this plugin-dependent method implementation - * after. 
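The hunks above drop the context-aware LoadNetwork/LoadExeNetworkImpl overloads from the deprecated 1.0 plugin interface. For orientation, a hedged sketch of the API 2.0 path covering the same use case — compiling a model on a device-provided remote context — follows; it assumes a device with remote-memory support (GPU is used only as an example) and a placeholder model path:

    ov::Core core;
    auto model = core.read_model("sample.xml");  // placeholder path
    // Plugins with remote-memory support expose a default context.
    ov::RemoteContext context = core.get_default_context("GPU");
    ov::CompiledModel compiled = core.compile_model(model, context);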
- * @param network A network object - * @param context A remote context - * @param config string-string map of config parameters relevant only for this load operation - * @return Shared pointer to the ExecutableNetwork object - */ - virtual std::shared_ptr LoadExeNetworkImpl( - const CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config); - /** * @brief Set input and output information to executable network. This method is used to * set addtional information to InferenceEngine::IExecutableNetworkInternal create by device plugin. diff --git a/src/inference/dev_api/ie_icore.hpp b/src/inference/dev_api/ie_icore.hpp index 700be69f9f3691..be2b85118de466 100644 --- a/src/inference/dev_api/ie_icore.hpp +++ b/src/inference/dev_api/ie_icore.hpp @@ -16,7 +16,6 @@ #include "cpp/ie_cnn_network.h" #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" #include "openvino/runtime/icore.hpp" #include "openvino/runtime/properties.hpp" @@ -60,22 +59,6 @@ class ICore : public ov::ICore { const std::string& deviceName, const std::map& config = {}) = 0; - /** - * @brief Creates an executable network from a network object. - * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param remoteCtx "Remote" (non-CPU) accelerator device-specific execution context to use - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network reference - */ - virtual SoExecutableNetworkInternal LoadNetwork(const CNNNetwork& network, - const RemoteContext::Ptr& remoteCtx, - const std::map& config = {}) = 0; - /** * @brief Creates an executable network from a model memory. * @@ -181,15 +164,6 @@ class ICore : public ov::ICore { */ virtual bool DeviceSupportsModelCaching(const std::string& deviceName) const = 0; - /** - * @brief Create a new shared context object on specified accelerator device - * using specified plugin-specific low level device API parameters (device handle, pointer, etc.) - * @param deviceName Name of a device to create new shared context on. - * @param params Map of device-specific shared context parameters. - * @return A shared pointer to a created remote context. - */ - virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, const ov::AnyMap&) = 0; - /** * @brief Get only configs that are supported by device * @param deviceName Name of a device @@ -200,13 +174,6 @@ class ICore : public ov::ICore { const std::map& config) = 0; virtual bool isNewAPI() const = 0; - - /** - * @brief Get a pointer to default shared context object for the specified device. - * @param deviceName - A name of a device to get create shared context from. - * @return A shared pointer to a default remote context. 
- */ - virtual RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) = 0; }; } // namespace InferenceEngine diff --git a/src/inference/dev_api/openvino/runtime/make_tensor.hpp b/src/inference/dev_api/openvino/runtime/make_tensor.hpp index 2e5d771c7d98e7..fb80e7ab5d84eb 100644 --- a/src/inference/dev_api/openvino/runtime/make_tensor.hpp +++ b/src/inference/dev_api/openvino/runtime/make_tensor.hpp @@ -68,8 +68,6 @@ OPENVINO_RUNTIME_API ov::SoPtr get_tensor_impl(const ov::Tensor& te IE_SUPPRESS_DEPRECATED_START /** @cond INTERNAL */ ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); -const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob); -InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob); OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, bool unwrap = true, diff --git a/src/inference/dev_api/remote_utils.hpp b/src/inference/dev_api/remote_utils.hpp index 95dbeb48191b9a..d37f5ec8612150 100644 --- a/src/inference/dev_api/remote_utils.hpp +++ b/src/inference/dev_api/remote_utils.hpp @@ -5,17 +5,11 @@ #pragma once #include "ie_ngraph_utils.hpp" -#include "ie_remote_blob.hpp" -#include "ie_remote_context.hpp" #include "openvino/runtime/iremote_context.hpp" namespace ov { namespace legacy_convert { -INFERENCE_ENGINE_API_CPP(ov::SoPtr) -convert_remote_context(const std::shared_ptr& context); -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob*) get_hardware_blob(InferenceEngine::Blob* blob); - class INFERENCE_ENGINE_API_CLASS(TensorHolder) { public: TensorHolder(ov::SoPtr tensor) : _tensor(tensor) {} @@ -29,158 +23,4 @@ class INFERENCE_ENGINE_API_CLASS(TensorHolder) { }; } // namespace legacy_convert - -/** - * @brief Tensor what contains InferenceEngine::RemoteBlob inside - * Blob owns the memory - */ -class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor { - mutable element::Type m_type; - mutable Shape m_shape; - mutable Strides m_strides; - mutable ov::AnyMap m_properties; - mutable std::string m_dev_name; - -public: - std::shared_ptr blob; - - RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} { - OPENVINO_ASSERT(blob); - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - } - - const element::Type& get_element_type() const override { - m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision()); - return m_type; - } - - void set_shape(ov::Shape shape) override { - blob->setShape({shape.begin(), shape.end()}); - } - - const Shape& get_shape() const override { - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - return m_shape; - } - - const Strides& get_strides() const override { - OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, - "Could not get strides for types with bitwidths less then 8 bit. 
Tensor type: ", - get_element_type()); - const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides(); - const size_t elem_size = get_element_type().size(); - m_strides.clear(); - m_strides.resize(element_strides.size()); - std::transform(element_strides.begin(), element_strides.end(), m_strides.begin(), [&elem_size](size_t stride) { - return stride * elem_size; - }); - return m_strides; - } - - size_t get_size() const override { - return blob->size(); - } - - size_t get_byte_size() const override { - return blob->byteSize(); - } - - const AnyMap& get_properties() const override { - m_properties = blob->getParams(); - return m_properties; - } - - const std::string& get_device_name() const override { - m_dev_name = blob->getDeviceName(); - return m_dev_name; - } -}; - -/** - * @brief Create InferenceEngine::RemoteBlob from the Tensor - */ -class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) - : public InferenceEngine::RemoteBlob, - public ov::legacy_convert::TensorHolder { -public: - TensorRemoteBlob(const ov::SoPtr& tensor, InferenceEngine::TensorDesc desc) - : InferenceEngine::RemoteBlob{desc}, - ov::legacy_convert::TensorHolder(tensor) { - OPENVINO_ASSERT(this->get_tensor()); - } - std::shared_ptr cast_tensor() const { - auto remote = std::dynamic_pointer_cast(get_tensor()._ptr); - OPENVINO_ASSERT(remote); - return remote; - } - AnyMap getParams() const override { - return cast_tensor()->get_properties(); - } - std::string getDeviceName() const noexcept override { - try { - return cast_tensor()->get_device_name(); - } catch (...) { - return {}; - } - } - std::shared_ptr getContext() const noexcept override { - return {}; - } - - void allocate() noexcept override {} - bool deallocate() noexcept override { - return true; - } - InferenceEngine::LockedMemory buffer() noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory cbuffer() const noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory rwmap() noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory rmap() const noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory wmap() noexcept override { - return {nullptr, nullptr, 0}; - } - const std::shared_ptr& getAllocator() const noexcept override { - return m_allocator; - } - void* getHandle() const noexcept override { - return nullptr; - } - - using TensorHolder::get_tensor; - -private: - std::shared_ptr m_allocator; -}; - } // namespace ov - -namespace InferenceEngine { - -class INFERENCE_ENGINE_API_CLASS(IRemoteContextWrapper) : public ov::IRemoteContext { -private: - std::shared_ptr m_context; - mutable std::string m_name; - mutable ov::AnyMap m_params; - -public: - IRemoteContextWrapper(const std::shared_ptr& context) : m_context(context) {} - virtual ~IRemoteContextWrapper() = default; - const std::shared_ptr& get_context(); - const std::string& get_device_name() const override; - - const ov::AnyMap& get_property() const override; - - ov::SoPtr create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params = {}) override; - ov::SoPtr create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override; -}; - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/cpp/ie_executable_network.hpp b/src/inference/include/ie/cpp/ie_executable_network.hpp index bcdfd013000ad2..80ee0cbab45338 100644 --- a/src/inference/include/ie/cpp/ie_executable_network.hpp +++ 
b/src/inference/include/ie/cpp/ie_executable_network.hpp @@ -30,7 +30,6 @@ #include "cpp/ie_infer_request.hpp" #include "ie_iexecutable_network.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" namespace ov { class Core; @@ -171,13 +170,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExecutableNetwo */ Parameter GetMetric(const std::string& name) const; - /** - * @brief Returns pointer to plugin-specific shared context - * on remote accelerator device that was used to create this ExecutableNetwork - * @return A context - */ - RemoteContext::Ptr GetContext() const; - /** * @brief Checks if current ExecutableNetwork object is not initialized * @return true if current ExecutableNetwork object is not initialized, false - otherwise diff --git a/src/inference/include/ie/cpp/ie_infer_request.hpp b/src/inference/include/ie/cpp/ie_infer_request.hpp index b42a35779abccb..69702bb5cccd1f 100644 --- a/src/inference/include/ie/cpp/ie_infer_request.hpp +++ b/src/inference/include/ie/cpp/ie_infer_request.hpp @@ -118,13 +118,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) { */ Blob::Ptr GetBlob(const std::string& name); - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. - * @return pointer to pre-process info of blob with name - */ - const PreProcessInfo& GetPreProcess(const std::string& name) const; - /** * @brief Infers specified input(s) in synchronous mode * diff --git a/src/inference/include/ie/details/ie_blob_iterator.hpp b/src/inference/include/ie/details/ie_blob_iterator.hpp deleted file mode 100644 index e6f92e46798561..00000000000000 --- a/src/inference/include/ie/details/ie_blob_iterator.hpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for the BlobIterator class - * - * @file ie_blob_iterator.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" -#include "ie_locked_memory.hpp" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -namespace details { -/** - * @brief This class provides range loops support for TBlob objects - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED BlobIterator { - LockedMemory _mem; - size_t _offset; - -public: - /** - * @brief A move constructor to create a BlobIterator instance from a LockedMemory instance. - * Explicitly rejects implicit conversions. 
- * @param lk Rvalue of the memory instance to move from - * @param offset Size of offset in memory - */ - explicit BlobIterator(LockedMemory&& lk, size_t offset = 0) : _mem(std::move(lk)), _offset(offset) {} - - /** - * @brief Increments an offset of the current BlobIterator instance - * @return The current BlobIterator instance - */ - BlobIterator& operator++() { - _offset++; - return *this; - } - - /** - * @brief An overloaded postfix incrementation operator - * Implementation does not follow std interface since only move semantics is used - */ - void operator++(int) { - _offset++; - } - - /** - * @brief Checks if the given iterator is not equal to the current one - * @param that Iterator to compare with - * @return true if the given iterator is not equal to the current one, false - otherwise - */ - bool operator!=(const BlobIterator& that) const { - return !operator==(that); - } - - /** - * @brief Gets a value by the pointer to the current iterator - * @return The value stored in memory for the current offset value - */ - const T& operator*() const { - return *(_mem.template as() + _offset); - } - - /** - * @brief Gets a value by the pointer to the current iterator - * @return The value stored in memory for the current offset value - */ - T& operator*() { - return *(_mem.template as() + _offset); - } - /** - * @brief Compares the given iterator with the current one - * @param that Iterator to compare with - * @return true if the given iterator is equal to the current one, false - otherwise - */ - bool operator==(const BlobIterator& that) const { - return &operator*() == &that.operator*(); - } -}; -} // namespace details -} // namespace InferenceEngine -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/details/ie_so_loader.h b/src/inference/include/ie/details/ie_so_loader.h deleted file mode 100644 index d6209ccbd6b0af..00000000000000 --- a/src/inference/include/ie/details/ie_so_loader.h +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for definition of abstraction over platform specific shared objects - * - * @file ie_so_loader.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" - -namespace InferenceEngine { -namespace details { - -/** - * @deprecated This is internal stuff. Use Inference Engine Plugin API - * @brief This class provides an OS shared module abstraction - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) { - std::shared_ptr _so; - -public: - /** - * @brief Constructs from existing object - */ - SharedObjectLoader(const std::shared_ptr& so); - - /** - * @brief Default constructor - */ - SharedObjectLoader() = default; - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - /** - * @brief Loads a library with the wide char name specified. 
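With ie_blob_iterator.hpp deleted above, element-wise traversal in API 2.0 goes through raw tensor data instead. A short hedged sketch (not part of this patch; shape and values are illustrative only):

    #include <openvino/runtime/tensor.hpp>

    void fill_with_zeros() {
        ov::Tensor tensor(ov::element::f32, ov::Shape{2, 3});
        float* data = tensor.data<float>();  // typed access to the raw buffer
        for (size_t i = 0; i < tensor.get_size(); ++i)
            data[i] = 0.0f;  // a plain pointer loop replaces BlobIterator
    }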
- * @param pluginName Full or relative path to the plugin library - */ - explicit SharedObjectLoader(const wchar_t* pluginName); -#endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - - /** - * @brief Loads a library with the name specified. - * @param pluginName Full or relative path to the plugin library - */ - explicit SharedObjectLoader(const char* pluginName); - - /** - * @brief A destructor - */ - ~SharedObjectLoader(); - - /** - * @brief Searches for a function symbol in the loaded module - * @param symbolName Name of function to find - * @return A pointer to the function if found - * @throws Exception if the function is not found - */ - void* get_symbol(const char* symbolName) const; - - /** - * @brief Retruns reference to type erased implementation - * @throws Exception if the function is not found - */ - std::shared_ptr get() const; -}; - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/include/ie/details/ie_so_pointer.hpp b/src/inference/include/ie/details/ie_so_pointer.hpp deleted file mode 100644 index e3cbda07c50d7f..00000000000000 --- a/src/inference/include/ie/details/ie_so_pointer.hpp +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a wrapper class for handling plugin instantiation and releasing resources - * @file ie_so_pointer.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ie_common.h" -#include "ie_so_loader.h" - -namespace InferenceEngine { -namespace details { -/** - * @brief This class is a trait class that provides a creator with a function name corresponding to the templated class - * parameter - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED SOCreatorTrait {}; - -/** - * @brief Enables only `char` or `wchar_t` template specializations - * @tparam C A char type - */ -template -using enableIfSupportedChar = - typename std::enable_if<(std::is_same::value || std::is_same::value)>::type; - -/** - * @deprecated This is internal stuff. 
Use Inference Engine Plugin API - * @brief This class instantiate object using shared library - * @tparam T An type of object SOPointer can hold - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED SOPointer { - template - friend class SOPointer; - - IE_SUPPRESS_DEPRECATED_START - struct HasRelease { - template - static char test(decltype(&C::Release)); - template - static long test(...); - constexpr static const bool value = sizeof(test(nullptr)) == sizeof(char); - }; - IE_SUPPRESS_DEPRECATED_END - -public: - /** - * @brief Default constructor - */ - SOPointer() = default; - - /** - * @brief The main constructor - * @param name Name of a shared library file - */ - template > - SOPointer(const std::basic_string& name) { - try { - _so = SharedObjectLoader(name.c_str()); - Load(std::integral_constant{}); - } catch (const std::runtime_error& ex) { - IE_THROW() << ex.what(); - } catch (...) { - details::Rethrow(); - } - } - - /** - * @brief Constructs an object with existing reference - * @param so Existing pointer to a library loader - * @param ptr Existing reference to an object - */ - SOPointer(const SharedObjectLoader& so, const std::shared_ptr& ptr) : _so{so}, _ptr{ptr} {} - - /** - * @brief Constructs an object with existing loader - * @param so Existing pointer to a library loader - */ - explicit SOPointer(const SharedObjectLoader& so) : _so(so) { - Load(std::integral_constant{}); - } - - /** - * @brief The copy-like constructor, can create So Pointer that dereferenced into child type if T is derived of U - * @param that copied SOPointer object - */ - template - SOPointer(const SOPointer& that) : _so(that._so), - _ptr(std::dynamic_pointer_cast(that._ptr)) { - IE_ASSERT(_ptr != nullptr); - } - - /** - * @brief Standard pointer operator - * @return underlined interface with disabled Release method - */ - T* operator->() const noexcept { - return _ptr.get(); - } - - explicit operator bool() const noexcept { - return _ptr != nullptr; - } - - friend bool operator==(std::nullptr_t, const SOPointer& ptr) noexcept { - return !ptr; - } - friend bool operator==(const SOPointer& ptr, std::nullptr_t) noexcept { - return !ptr; - } - friend bool operator!=(std::nullptr_t, const SOPointer& ptr) noexcept { - return static_cast(ptr); - } - friend bool operator!=(const SOPointer& ptr, std::nullptr_t) noexcept { - return static_cast(ptr); - } - - operator const SharedObjectLoader&() const noexcept { - return _so; - } - - operator std::shared_ptr&() noexcept { - return _ptr; - } - -protected: - /** - * @brief Implements load of object from library if Release method is presented - */ - void Load(std::true_type) { - try { - void* create = nullptr; - try { - create = _so.get_symbol((SOCreatorTrait::name + std::string("Shared")).c_str()); - } catch (const NotFound&) { - } - if (create == nullptr) { - create = _so.get_symbol(SOCreatorTrait::name); - using CreateF = StatusCode(T*&, ResponseDesc*); - T* object = nullptr; - ResponseDesc desc; - StatusCode sts = reinterpret_cast(create)(object, &desc); - if (sts != OK) { - IE_EXCEPTION_SWITCH(sts, - ExceptionType, - InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM} <<= - std::stringstream{} << desc.msg) - } - IE_SUPPRESS_DEPRECATED_START - _ptr = std::shared_ptr(object, [](T* ptr) { - ptr->Release(); - }); - IE_SUPPRESS_DEPRECATED_END - } else { - using CreateF = void(std::shared_ptr&); - reinterpret_cast(create)(_ptr); - } - } catch (...) 
{ - details::Rethrow(); - } - } - - /** - * @brief Implements load of object from library - */ - void Load(std::false_type) { - try { - using CreateF = void(std::shared_ptr&); - reinterpret_cast(_so.get_symbol(SOCreatorTrait::name))(_ptr); - } catch (...) { - details::Rethrow(); - } - } - - /** - * @brief The DLL - */ - SharedObjectLoader _so; - - /** - * @brief Gets a smart pointer to the custom object - */ - std::shared_ptr _ptr; -}; -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_blob.h b/src/inference/include/ie/ie_blob.h index f8e116ddfc5b3d..4436136c5832e8 100644 --- a/src/inference/include/ie/ie_blob.h +++ b/src/inference/include/ie/ie_blob.h @@ -29,7 +29,6 @@ #include #include -#include "details/ie_blob_iterator.hpp" #include "details/ie_pre_allocator.hpp" #include "ie_allocator.hpp" #include "ie_common.h" @@ -40,8 +39,6 @@ namespace InferenceEngine { IE_SUPPRESS_DEPRECATED_START -class RemoteBlob; - /** * @brief This class represents a universal container in the Inference Engine * @@ -82,7 +79,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { typename std::enable_if::value && !std::is_reference::value, int>::type = 0, typename std::enable_if::value, int>::type = 0> bool is() noexcept { - return dynamic_cast(getHardwareBlob()) != nullptr; + return dynamic_cast(this) != nullptr; } /** @@ -95,7 +92,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { typename std::enable_if::value && !std::is_reference::value, int>::type = 0, typename std::enable_if::value, int>::type = 0> bool is() const noexcept { - return dynamic_cast(getHardwareBlob()) != nullptr; + return dynamic_cast(this) != nullptr; } /** @@ -106,25 +103,9 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @tparam T Type to cast to. Must represent a class derived from the Blob * @return Raw pointer to the object of the type T or nullptr on error */ - template < - typename T, - typename std::enable_if::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value && !std::is_same::value, int>::type = 0> - T* as() noexcept { - return dynamic_cast(getHardwareBlob()); - } - - /** - * @brief Casts this Blob object to the type RemoteBlob. - * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. Must represent a class derived from the Blob - * @return Raw pointer to the object of the type T or nullptr on error - */ template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> + typename std::enable_if::value, int>::type = 0> T* as() noexcept { return dynamic_cast(this); } @@ -137,27 +118,11 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @tparam T Type to cast to. Must represent a class derived from the Blob * @return Raw pointer to the object of the type const T or nullptr on error */ - template < - typename T, - typename std::enable_if::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value && !std::is_same::value, int>::type = 0> - const T* as() const noexcept { - return dynamic_cast(getHardwareBlob()); - } - - /** - * @brief Casts this Blob object to the type RemoteBlob. - * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. 
Must represent a class derived from the Blob - * @return Raw pointer to the object of the type T or nullptr on error - */ template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> + typename std::enable_if::value, int>::type = 0> const T* as() const noexcept { - return dynamic_cast(this); + return dynamic_cast(this); } /** @@ -320,9 +285,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @return The allocator for allocator-based blobs or nullptr if there is none */ virtual const std::shared_ptr& getAllocator() const noexcept = 0; - - const Blob* getHardwareBlob() const; - Blob* getHardwareBlob(); }; /** @@ -713,50 +675,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED TBlob : public MemoryBlob { return Blob::Ptr(new TBlob(*this, begin, end)); } - /** - * @brief Gets BlobIterator for the data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type T - */ - details::BlobIterator begin() { - return details::BlobIterator(data()); - } - - /** - * @brief Gets BlobIterator for the end of data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type T representing end of the data - */ - details::BlobIterator end() { - return details::BlobIterator(data(), size()); - } - - /** - * @brief Gets a const BlobIterator for the read-only data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type const T - */ - details::BlobIterator begin() const { - return details::BlobIterator(readOnly()); - } - - /** - * @brief Gets a const BlobIterator for the end of read-only data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type const T representing end of data - */ - details::BlobIterator end() const { - return details::BlobIterator(readOnly(), size()); - } - protected: /** * @brief Local instance of IAllocator to manipulate memory. diff --git a/src/inference/include/ie/ie_compound_blob.h b/src/inference/include/ie/ie_compound_blob.h deleted file mode 100644 index 5dce8d82eee7b1..00000000000000 --- a/src/inference/include/ie/ie_compound_blob.h +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for CompoundBlob - * - * @file ie_compound_blob.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -/** - * @brief This class represents a blob that contains other blobs - * - * Compound blob is a wrapper blob over references to underlying blobs. These blobs should share - * some properties and can be grouped into a single entity. 
- */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(CompoundBlob) : public Blob { -public: - /** - * @brief A smart pointer to the CompoundBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const CompoundBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief Constructs a compound blob from a vector of blobs - * - * @param blobs A vector of blobs that is copied to this object - */ - explicit CompoundBlob(const std::vector& blobs); - - /** - * @brief Constructs a compound blob from a vector of blobs - * - * @param blobs A vector of blobs that is moved to this object - */ - explicit CompoundBlob(std::vector&& blobs); - - /** - * @brief Always returns `0` - * @return Returns `0` - */ - size_t byteSize() const override; - - /** - * @brief Always returns `0` - * @return Returns `0` - */ - size_t element_size() const override; - - /** - * @brief No operation is performed. Compound blob does not allocate/deallocate any data - */ - void allocate() noexcept override; - - /** - * @brief No operation is performed. Compound blob does not allocate/deallocate any data - * @return Returns `false` - */ - bool deallocate() noexcept override; - - /** - * @brief Always returns an empty LockedMemory object - * @return Empty locked memory - */ - LockedMemory buffer() noexcept override; - - /** - * @brief Always returns an empty LockedMemory object - * @return Empty locked memory - */ - LockedMemory cbuffer() const noexcept override; - - /** - * @brief Returns the number of underlying blobs in the compound blob - * @return A number of underlying blobs - */ - size_t size() const noexcept override; - - /** - * @brief Returns an underlying blob at index i - * - * @param i the index of the underlying Blob object - * @return A smart pointer to the underlying Blob object or nullptr in case of an error - */ - virtual Blob::Ptr getBlob(size_t i) const noexcept; - - Blob::Ptr createROI(const ROI& roi) const override; - -protected: - /** - * @brief Constructs a compound blob with specified descriptor - * - * @param tensorDesc A tensor descriptor for the compound blob - */ - explicit CompoundBlob(const TensorDesc& tensorDesc); - - /** - * @brief Compound blob container for underlying blobs - */ - std::vector _blobs; - - const std::shared_ptr& getAllocator() const noexcept override; -}; - -/** - * @brief This class represents a blob that contains other blobs - one per batch - * @details Plugin which supports BatchedBlob input should report BATCHED_BLOB - * in the OPTIMIZATION_CAPABILITIES metric. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob { -public: - /** - * @brief A smart pointer to the BatchedBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const BatchedBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief Constructs a batched blob from a vector of blobs - * @details All passed blobs should meet following requirements: - * - all blobs have equal tensor descriptors, - * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW, HWC - * - batch dimensions should be equal to 1 or not defined (C, CHW, HWC). 
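// Editorial illustration: the BatchedBlob removed here is superseded in API 2.0 by
// passing one tensor per batch item straight to the request. A minimal sketch, assuming
// `request` targets a model whose input "data" has a layout with a batch dimension:
#include <vector>
#include <openvino/openvino.hpp>

void infer_batched(ov::InferRequest& request, const std::vector<ov::Tensor>& items) {
    request.set_tensors("data", items);  // each tensor is one item of the batch
    request.infer();
}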
- * Resulting blob's tensor descriptor is constructed using tensor descriptors - * of passed blobs by setting batch dimension to blobs.size() - * - * @param blobs A vector of blobs that is copied to this object - */ - explicit BatchedBlob(const std::vector& blobs); - - /** - * @brief Constructs a batched blob from a vector of blobs - * @details All passed blobs should meet following requirements: - * - all blobs have equal tensor descriptors, - * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW, HWC - * - batch dimensions should be equal to 1 or not defined (C, CHW, HWC). - * Resulting blob's tensor descriptor is constructed using tensor descriptors - * of passed blobs by setting batch dimension to blobs.size() - * - * @param blobs A vector of blobs that is moved to this object - */ - explicit BatchedBlob(std::vector&& blobs); -}; -} // namespace InferenceEngine -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/ie_core.hpp b/src/inference/include/ie/ie_core.hpp index 5c1608d45c6103..0a25fae4444343 100644 --- a/src/inference/include/ie/ie_core.hpp +++ b/src/inference/include/ie/ie_core.hpp @@ -28,7 +28,6 @@ #include "cpp/ie_executable_network.hpp" #include "ie_extension.h" #include "ie_plugin_config.hpp" -#include "ie_remote_context.hpp" #include "ie_version.hpp" namespace InferenceEngine { @@ -178,18 +177,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { */ void AddExtension(const IExtensionPtr& extension); - /** - * @brief Creates an executable network from a network object within a specified remote context. - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param context Pointer to RemoteContext object - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network object - */ - ExecutableNetwork LoadNetwork(const CNNNetwork& network, - RemoteContext::Ptr context, - const std::map& config = {}); - /** * @brief Registers extension for the specified plugin * @@ -232,20 +219,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { INFERENCE_ENGINE_DEPRECATED("Use Core::ImportNetwork with explicit device name") ExecutableNetwork ImportNetwork(std::istream& networkModel); - /** - * @brief Creates an executable network from a previously exported network within a specified - * remote context. - * - * @param networkModel Network model stream - * @param context Pointer to RemoteContext object - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network reference - */ - ExecutableNetwork ImportNetwork(std::istream& networkModel, - const RemoteContext::Ptr& context, - const std::map& config = {}); - /** * @brief Query device if it supports specified network with specified configuration * @@ -348,22 +321,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { * @param xmlConfigFile A path to .xml file with plugins to register. */ void RegisterPlugins(const std::string& xmlConfigFile); - - /** - * @brief Create a new shared context object on specified accelerator device - * using specified plugin-specific low level device API parameters (device handle, pointer, etc.) - * @param deviceName Name of a device to create new shared context on. - * @param params Map of device-specific shared context parameters. - * @return A shared pointer to a created remote context. 
- */ - RemoteContext::Ptr CreateContext(const std::string& deviceName, const ParamMap& params); - - /** - * @brief Get a pointer to default(plugin-supplied) shared context object for specified accelerator device. - * @param deviceName - A name of a device to get create shared context from. - * @return A shared pointer to a default remote context. - */ - RemoteContext::Ptr GetDefaultContext(const std::string& deviceName); }; /** diff --git a/src/inference/include/ie/ie_iexecutable_network.hpp b/src/inference/include/ie/ie_iexecutable_network.hpp index be826d107aa602..989167885c5d9d 100644 --- a/src/inference/include/ie/ie_iexecutable_network.hpp +++ b/src/inference/include/ie/ie_iexecutable_network.hpp @@ -30,7 +30,6 @@ #include "ie_iinfer_request.hpp" #include "ie_input_info.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" namespace InferenceEngine { @@ -158,15 +157,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IExecutableNetwork : public std::enable_sh */ virtual StatusCode GetMetric(const std::string& name, Parameter& result, ResponseDesc* resp) const noexcept = 0; - /** - * @brief Gets shared context used to create an executable network. - * - * @param pContext Reference to a pointer that will receive resulting shared context object ptr - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return code of the operation. InferenceEngine::OK if succeeded - */ - virtual StatusCode GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const noexcept = 0; - protected: virtual ~IExecutableNetwork() = default; }; diff --git a/src/inference/include/ie/ie_iinfer_request.hpp b/src/inference/include/ie/ie_iinfer_request.hpp index 896d33aaed12ce..73c7570abf6d76 100644 --- a/src/inference/include/ie/ie_iinfer_request.hpp +++ b/src/inference/include/ie/ie_iinfer_request.hpp @@ -26,7 +26,6 @@ #include "ie_blob.h" #include "ie_common.h" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -85,15 +84,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_ */ virtual StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept = 0; - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. 
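// Editorial illustration: the Core::CreateContext/GetDefaultContext methods removed
// above map onto ov::Core in API 2.0. A minimal sketch; the "GPU" device name is a
// placeholder:
#include <openvino/openvino.hpp>

ov::RemoteContext default_context_example() {
    ov::Core core;
    return core.get_default_context("GPU");  // plugin-supplied default context
}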
- * @param info pointer to a pointer to PreProcessInfo structure - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: OK (0) for success - */ - virtual StatusCode GetPreProcess(const char* name, const PreProcessInfo** info, ResponseDesc* resp) const - noexcept = 0; /** * @brief Infers specified input(s) in synchronous mode * diff --git a/src/inference/include/ie/ie_input_info.hpp b/src/inference/include/ie/ie_input_info.hpp index 841e3d95316fb4..ec7092d42e5c62 100644 --- a/src/inference/include/ie/ie_input_info.hpp +++ b/src/inference/include/ie/ie_input_info.hpp @@ -28,7 +28,6 @@ #include "ie_common.h" #include "ie_data.h" #include "ie_precision.hpp" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -163,28 +162,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED InputInfo { return _inputData->getTensorDesc(); } - /** - * @brief Gets pre-process info for the input - * @return A reference to the PreProcessInfo instance that contains pre-process info for this input - */ - PreProcessInfo& getPreProcess() { - return _preProcessInfo; - } - - /** - * @brief Gets pre-process info for the input - * @return A reference to the PreProcessInfo instance that contains pre-process info for this input - */ - const PreProcessInfo& getPreProcess() const { - return _preProcessInfo; - } - protected: - /** - * @brief Pre-process info for the input - */ - PreProcessInfo _preProcessInfo; - /** * @brief A smart pointer to the input data */ diff --git a/src/inference/include/ie/ie_parallel.hpp b/src/inference/include/ie/ie_parallel.hpp deleted file mode 100644 index 21dfc6d0e1c91f..00000000000000 --- a/src/inference/include/ie/ie_parallel.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Contains declarations and definitions for sequential and multi-threading implementations. - * - * Multi-threading support is implemented in two variants: using the Threading Building Blocks library and OpenMP* - * product. To build a particular implementation, use the corresponding identifier: IE_THREAD_TBB, IE_THREAD_TBB_AUTO, - * IE_THREAD_OMP or IE_THREAD_SEQ. - * - * @file ie_parallel.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/core/parallel.hpp" - -#define IE_THREAD_TBB OV_THREAD_TBB -#define IE_THREAD_OMP OV_THREAD_OMP -#define IE_THREAD_SEQ OV_THREAD_SEQ -#define IE_THREAD_TBB_AUTO OV_THREAD_TBB_AUTO - -namespace InferenceEngine { - -using ov::for_1d; -using ov::for_2d; -using ov::for_3d; -using ov::for_4d; -using ov::for_5d; -using ov::for_6d; -using ov::parallel_for; -using ov::parallel_for2d; -using ov::parallel_for3d; -using ov::parallel_for4d; -using ov::parallel_for5d; -using ov::parallel_for6d; -using ov::parallel_it_init; -using ov::parallel_it_step; -using ov::parallel_nt; -using ov::parallel_nt_static; -using ov::parallel_sort; -using ov::parallel_sum; -using ov::parallel_sum2d; -using ov::parallel_sum3d; -using ov::splitter; - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_plugin_config.hpp b/src/inference/include/ie/ie_plugin_config.hpp index c96334a399806a..2d3bf9eafa1985 100644 --- a/src/inference/include/ie/ie_plugin_config.hpp +++ b/src/inference/include/ie/ie_plugin_config.hpp @@ -112,7 +112,6 @@ DECLARE_METRIC_KEY(FULL_DEVICE_NAME, std::string); * - "INT8" - device can support models with INT8 layers * - "BIN" - device can support models with BIN layers * - "WINOGRAD" - device can support models where convolution implemented via Winograd transformations - * - "BATCHED_BLOB" - device can support BatchedBlob */ INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(OPTIMIZATION_CAPABILITIES, std::vector); @@ -129,8 +128,6 @@ INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_VALUE(BIN); INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_VALUE(WINOGRAD); -INFERENCE_ENGINE_1_0_DEPRECATED -DECLARE_METRIC_VALUE(BATCHED_BLOB); /** * @brief Metric to provide information about a range for streams on platforms where streams are supported. diff --git a/src/inference/include/ie/ie_preprocess.hpp b/src/inference/include/ie/ie_preprocess.hpp deleted file mode 100644 index 1b895962f5140f..00000000000000 --- a/src/inference/include/ie/ie_preprocess.hpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This header file provides structures to store info about pre-processing of - * network inputs (scale, mean image, ...) - * - * @file ie_preprocess.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ie_blob.h" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This structure stores info about pre-processing of network inputs (scale, mean image, ...) 
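// Editorial illustration: the ie_parallel.hpp wrappers deleted above were thin aliases
// over openvino/core/parallel.hpp, so callers can switch namespaces directly. A sketch
// of the replacement primitive:
#include <vector>
#include "openvino/core/parallel.hpp"

void scale_in_parallel(std::vector<float>& v, float s) {
    // Splits [0, v.size()) across the configured backend (TBB, OpenMP or sequential).
    ov::parallel_for(v.size(), [&](size_t i) {
        v[i] *= s;
    });
}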
- */ -struct INFERENCE_ENGINE_1_0_DEPRECATED PreProcessChannel { - /** @brief Scale parameter for a channel */ - float stdScale = 1; - - /** @brief Mean value for a channel */ - float meanValue = 0; - - /** @brief Mean data for a channel */ - Blob::Ptr meanData; - - /** @brief Smart pointer to an instance */ - using Ptr = std::shared_ptr; -}; - -/** - * @brief Defines available types of mean - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED MeanVariant { - MEAN_IMAGE, /**< mean value is specified for each input pixel */ - MEAN_VALUE, /**< mean value is specified for each input channel */ - NONE, /**< no mean value specified */ -}; - -/** - * @enum ResizeAlgorithm - * @brief Represents the list of supported resize algorithms. - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED ResizeAlgorithm { NO_RESIZE = 0, RESIZE_BILINEAR, RESIZE_AREA }; - -/** - * @brief This class stores pre-process information for the input - */ -class INFERENCE_ENGINE_1_0_DEPRECATED PreProcessInfo { - // Channel data - std::vector _channelsInfo; - MeanVariant _variant = NONE; - - // Resize Algorithm to be applied for input before inference if needed. - ResizeAlgorithm _resizeAlg = NO_RESIZE; - - // Color format to be used in on-demand color conversions applied to input before inference - ColorFormat _colorFormat = ColorFormat::RAW; - -public: - /** - * @brief Overloaded [] operator to safely get the channel by an index - * - * Throws an exception if channels are empty - * - * @param index Index of the channel to get - * @return The pre-process channel instance - */ - PreProcessChannel::Ptr& operator[](size_t index) { - if (_channelsInfo.empty()) { - IE_THROW() << "accessing pre-process when nothing was set."; - } - if (index >= _channelsInfo.size()) { - IE_THROW() << "pre process index " << index << " is out of bounds."; - } - return _channelsInfo[index]; - } - - /** - * @brief operator [] to safely get the channel preprocessing information by index. - * - * Throws exception if channels are empty or index is out of border - * - * @param index Index of the channel to get - * @return The const preprocess channel instance - */ - const PreProcessChannel::Ptr& operator[](size_t index) const { - if (_channelsInfo.empty()) { - IE_THROW() << "accessing pre-process when nothing was set."; - } - if (index >= _channelsInfo.size()) { - IE_THROW() << "pre process index " << index << " is out of bounds."; - } - return _channelsInfo[index]; - } - - /** - * @brief Returns a number of channels to preprocess - * - * @return The number of channels - */ - size_t getNumberOfChannels() const { - return _channelsInfo.size(); - } - - /** - * @brief Initializes with given number of channels - * - * @param numberOfChannels Number of channels to initialize - */ - void init(const size_t numberOfChannels) { - _channelsInfo.resize(numberOfChannels); - for (auto& channelInfo : _channelsInfo) { - channelInfo = std::make_shared(); - } - } - - /** - * @brief Sets mean image values if operation is applicable. 
- * - * Also sets the mean type to MEAN_IMAGE for all channels - * - * @param meanImage Blob with a mean image - */ - void setMeanImage(const Blob::Ptr& meanImage) { - if (meanImage.get() == nullptr) { - IE_THROW() << "Failed to set invalid mean image: nullptr"; - } else if (meanImage.get()->getTensorDesc().getLayout() != Layout::CHW) { - IE_THROW() << "Mean image layout should be CHW"; - } else if (meanImage.get()->getTensorDesc().getDims().size() != 3) { - IE_THROW() << "Failed to set invalid mean image: number of dimensions != 3"; - } else if (meanImage.get()->getTensorDesc().getDims()[0] != getNumberOfChannels()) { - IE_THROW() << "Failed to set invalid mean image: number of channels != " << getNumberOfChannels(); - } - _variant = MEAN_IMAGE; - } - - /** - * @brief Sets mean image values if operation is applicable. - * - * Also sets the mean type to MEAN_IMAGE for a particular channel - * - * @param meanImage Blob with a mean image - * @param channel Index of a particular channel - */ - void setMeanImageForChannel(const Blob::Ptr& meanImage, const size_t channel) { - if (meanImage.get() == nullptr) { - IE_THROW() << "Failed to set invalid mean image for channel: nullptr"; - } else if (meanImage.get()->getTensorDesc().getDims().size() != 2) { - IE_THROW() << "Failed to set invalid mean image for channel: number of dimensions != 2"; - } else if (channel >= _channelsInfo.size()) { - IE_THROW() << "Channel " << channel << " exceed number of PreProcess channels: " << _channelsInfo.size(); - } - _variant = MEAN_IMAGE; - _channelsInfo[channel]->meanData = meanImage; - } - - /** - * @brief Sets a type of mean operation - * - * @param variant Type of mean operation to set - */ - void setVariant(const MeanVariant& variant) { - _variant = variant; - } - - /** - * @brief Gets a type of mean operation - * - * @return The type of mean operation - */ - MeanVariant getMeanVariant() const { - return _variant; - } - - /** - * @brief Sets resize algorithm to be used during pre-processing - * - * @param alg Resize algorithm - */ - void setResizeAlgorithm(const ResizeAlgorithm& alg) { - _resizeAlg = alg; - } - - /** - * @brief Gets preconfigured resize algorithm - * - * @return Resize algorithm - */ - ResizeAlgorithm getResizeAlgorithm() const { - return _resizeAlg; - } - - /** - * @brief Changes the color format of the input data provided by the user - * - * This function should be called before loading the network to the plugin - * Setting color format different from ColorFormat::RAW enables automatic color conversion - * (as a part of built-in preprocessing routine) - * - * @param fmt A new color format associated with the input - */ - void setColorFormat(ColorFormat fmt) { - _colorFormat = fmt; - } - - /** - * @brief Gets a color format associated with the input - * - * @details By default, the color format is ColorFormat::RAW meaning - * there is no particular color format assigned to the input - * @return Color format. 
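// Editorial illustration: the PreProcessInfo machinery deleted here (mean/scale, resize,
// color conversion) is covered in API 2.0 by ov::preprocess::PrePostProcessor. A sketch;
// the layouts, mean/scale values and BGR input format are example assumptions:
#include "openvino/core/preprocess/pre_post_process.hpp"

std::shared_ptr<ov::Model> add_preprocessing(std::shared_ptr<ov::Model> model) {
    ov::preprocess::PrePostProcessor ppp(model);
    ppp.input().tensor()
        .set_element_type(ov::element::u8)
        .set_layout("NHWC")
        .set_color_format(ov::preprocess::ColorFormat::BGR)
        .set_spatial_dynamic_shape();
    ppp.input().preprocess()
        .convert_color(ov::preprocess::ColorFormat::RGB)
        .convert_element_type(ov::element::f32)
        .mean({123.675f, 116.28f, 103.53f})
        .scale({58.395f, 57.12f, 57.375f})
        .resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
    ppp.input().model().set_layout("NCHW");
    return ppp.build();  // returns the model with preprocessing baked in
}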
- */ - ColorFormat getColorFormat() const { - return _colorFormat; - } -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_remote_blob.hpp b/src/inference/include/ie/ie_remote_blob.hpp deleted file mode 100644 index ad241b256ead29..00000000000000 --- a/src/inference/include/ie/ie_remote_blob.hpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the IE RemoteContext and RemoteBlob classes - * - * @file ie_remote_context.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" -#include "ie_parameter.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -class RemoteContext; - -/** - * @brief This class represents an Inference Engine abstraction to the memory allocated - * on the remote (non-CPU) accelerator device - */ -class INFERENCE_ENGINE_1_0_DEPRECATED RemoteBlob : public MemoryBlob { -public: - /** - * @brief A smart pointer to the RemoteBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const RemoteBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief RemoteBlob virtual destructor - */ - virtual ~RemoteBlob() = default; - - /** - * @brief Constructor. Creates an empty RemoteBlob object with the specified precision. - * @param tensorDesc Defines the layout and dims of the blob - */ - explicit RemoteBlob(const TensorDesc& tensorDesc) : MemoryBlob(tensorDesc) {} - - /** - * @brief Returns a map of device-specific parameters required for low-level - * operations with underlying object. - * Parameters include device/context/surface/buffer handles, access flags, - * etc. Contents of the map returned depend on remote execution context that is - * currently set on the device (working scenario). - * Abstract method. - * @return A map of name/parameter elements. - */ - virtual ParamMap getParams() const = 0; - - /** - * @brief Returns name of the device on which underlying object is allocated. - * Abstract method. - * @return A device name string in the same format as that in plugin metric. - */ - virtual std::string getDeviceName() const noexcept = 0; - - /** - * @brief Returns device context which underlying object belongs to. - * Abstract method. - * @return Pointer to plugin-specific context class object, which is derived from RemoteContext. - * Dynamic casting should be used if it is necessary to retrieve a pointer to original class. 
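// Editorial illustration: the RemoteBlob removed here corresponds to ov::RemoteTensor in
// API 2.0, with device memory obtained through a remote context. A sketch; "GPU" and the
// shape are examples:
#include <openvino/openvino.hpp>

void remote_tensor_example(ov::Core& core) {
    ov::RemoteContext ctx = core.get_default_context("GPU");
    ov::RemoteTensor rt = ctx.create_tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
    ov::AnyMap params = rt.get_params();      // the getParams() counterpart
    std::string dev = rt.get_device_name();   // the getDeviceName() counterpart
    (void)params;
    (void)dev;
}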
- */ - virtual std::shared_ptr getContext() const noexcept = 0; -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_remote_context.hpp b/src/inference/include/ie/ie_remote_context.hpp deleted file mode 100644 index 7e74e8c48ecaa6..00000000000000 --- a/src/inference/include/ie/ie_remote_context.hpp +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the IE RemoteContext and RemoteBlob classes - * - * @file ie_remote_context.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_api.h" -#include "ie_parameter.hpp" -#include "ie_remote_blob.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -/** - * @brief This class represents an Inference Engine abstraction - * for remote (non-CPU) accelerator device-specific execution context. - * Such context represents a scope on the device within which executable - * networks and remote memory blobs can exist, function and exchange data. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(RemoteContext) - : public std::enable_shared_from_this { -public: - /** - * @brief A smart pointer to the RemoteContext object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const RemoteContext object - */ - using CPtr = std::shared_ptr; - - /** - * @brief RemoteContext virtual destructor - */ - virtual ~RemoteContext() = default; - - /** - * @brief Checks if the RemoteContext object can be cast to the type T* - * - * @tparam T Type to be checked. Must represent a class derived from the RemoteContext - * @return true if this object can be dynamically cast to the type T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() noexcept { - return dynamic_cast(GetHardwareContext().get()) != nullptr; - } - - /** - * @brief Checks if the RemoteContext object can be cast to the type const T* - * - * @tparam T Type to be checked. Must represent a class derived from the RemoteContext - * @return true if this object can be dynamically cast to the type const T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() const noexcept { - return dynamic_cast(GetHardwareContext().get()) != nullptr; - } - - /** - * @brief Casts this RemoteContext object to the type T*. - * - * @tparam T Type to cast to. 
Must represent a class derived from the RemoteContext - * @return Raw pointer to the object of the type T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - T* as() noexcept { - return dynamic_cast(GetHardwareContext().get()); - } - - /** - * @brief Casts this RemoteContext object to the type const T*. - * - * @tparam T Type to cast to. Must represent a class derived from the RemoteContext - * @return Raw pointer to the object of the type const T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - const T* as() const noexcept { - return dynamic_cast(GetHardwareContext().get()); - } - - /** - * @brief Returns name of the device on which underlying object is allocated. - * Abstract method. - * @return A device name string in the same format as that in plugin metric. - */ - virtual std::string getDeviceName() const noexcept = 0; - - /** - * @brief Allocates memory blob in device memory or wraps user-supplied memory handle - * using the specified tensor description and low-level device-specific parameters. - * Returns a pointer to the object which implements RemoteBlob interface. - * @param tensorDesc Defines the layout and dims of the blob - * @param params Map of the low-level blob object parameters. - * Abstract method. - * @return A pointer to plugin object that implements RemoteBlob interface. - */ - virtual RemoteBlob::Ptr CreateBlob(const TensorDesc& tensorDesc, const ParamMap& params = {}) = 0; - - /** - * @brief Allocates host accessible memory blob friendly for the device in current context - * Returns a pointer to the object which implements MemoryBlob interface. - * @param tensorDesc Defines the layout and dims of the blob - * @return A pointer to host accessible MemoryBlob object - */ - virtual MemoryBlob::Ptr CreateHostBlob(const TensorDesc& tensorDesc); - - /** - * @brief Returns a map of device-specific parameters required for low-level - * operations with underlying object. - * Parameters include device/context handles, access flags, - * etc. Contents of the map returned depend on remote execution context that is - * currently set on the device (working scenario). - * Abstract method. - * @return A map of name/parameter elements. - */ - virtual ParamMap getParams() const = 0; - - /** - * @brief Unwrap hardware remote context - * - * @return shared pointer to plugin specific remote context - */ - const std::shared_ptr GetHardwareContext(); - - /** - * @brief Unwrap hardware remote context - * - * @return shared pointer to plugin specific remote context - */ - const std::shared_ptr GetHardwareContext() const; -}; - -/** - * @brief A wrapper of CreateBlob method of RemoteContext to keep consistency with - * plugin-specific wrappers. - * @param desc Defines the layout and dims of the blob - * @param ctx Pointer to the plugin object derived from RemoteContext. - * @return A pointer to plugin object that implements RemoteBlob interface. 
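// Editorial illustration: RemoteContext::CreateHostBlob maps onto
// ov::RemoteContext::create_host_tensor in API 2.0, i.e. host memory that the device
// behind the context can access efficiently. A minimal sketch; the shape is an example:
#include <openvino/openvino.hpp>

ov::Tensor host_tensor_example(ov::RemoteContext& ctx) {
    // Returns an ordinary ov::Tensor backed by device-friendly host memory.
    return ctx.create_host_tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
}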
- */ -inline INFERENCE_ENGINE_1_0_DEPRECATED RemoteBlob::Ptr make_shared_blob(const TensorDesc& desc, - RemoteContext::Ptr ctx) { - return ctx->CreateBlob(desc); -} - -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_transformations.hpp b/src/inference/include/ie/ie_transformations.hpp deleted file mode 100644 index 3b3df4c92502f4..00000000000000 --- a/src/inference/include/ie/ie_transformations.hpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This header file defines the list of public transformations. - * - * @file ie_transformations.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "cpp/ie_cnn_network.h" -#include "ie_api.h" - -namespace InferenceEngine { - -/** - * @brief The transformation finds all TensorIterator/Loop layers in the network, - * processes all back edges that describe a connection between Result and Parameter - * of the TensorIterator/Loop bodies,and inserts ReadValue and Assign layers at the - * input and output corresponding to this back edge. - * Supported platform: CPU. - * - * The example below describes the changes made by the transformation - * [] - TensorIterator body - * () - new layer - * BE - back-edge - * - * before applying the transformation: - * -> input1[BE_1 -> Parameter -> Layers ... -> Result -> BE_1 ]output1-> - * - * after applying the transformation: - * ->(ReadValue)-> input1[BE_1 ->Parameter->Layers ...->Result->BE_1]output1 ->(Assign) - * \ - * ->... - * After applying the transformation, the resulting network can be inferred - * step by step, the states will store between inferences. - * @param network A network to apply LowLatency transformation - * @param use_const_initializer Changes the type of the initializing subgraph for ReadValue operations. - If "true", then the transformation inserts Constant before ReadValue operation. - If "false, then the transformation leaves existed initializing subgraph for ReadValue operation. - * Loop operation by a given number. Does not affect TensorIterators. 
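// Editorial illustration: the lowLatency2() helper removed from ie_transformations.hpp
// here is a thin wrapper over ov::pass::LowLatency2, which remains available in API 2.0.
// A minimal sketch:
#include <memory>
#include "openvino/pass/low_latency.hpp"
#include "openvino/pass/manager.hpp"

void apply_low_latency(const std::shared_ptr<ov::Model>& model, bool use_const_initializer = true) {
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::LowLatency2>(use_const_initializer);
    manager.run_passes(model);
}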
- */ -INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(void) - lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer = true); -} // namespace InferenceEngine diff --git a/src/inference/include/ie/inference_engine.hpp b/src/inference/include/ie/inference_engine.hpp index 682dd3fe4b37be..5638013e294af2 100644 --- a/src/inference/include/ie/inference_engine.hpp +++ b/src/inference/include/ie/inference_engine.hpp @@ -18,9 +18,7 @@ # endif #endif -#include "ie_compound_blob.h" #include "ie_core.hpp" -#include "ie_transformations.hpp" // remove in 2022.1 major release #include diff --git a/src/inference/include/ie/vpu/hddl_config.hpp b/src/inference/include/ie/vpu/hddl_config.hpp deleted file mode 100644 index 777c1f5e1d63c0..00000000000000 --- a/src/inference/include/ie/vpu/hddl_config.hpp +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines advanced related properties for HDDL plugin. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file hddl_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "vpu_config.hpp" - -namespace InferenceEngine { - -namespace Metrics { - -/** - * @brief Metric to get a int of the device number, String value is METRIC_HDDL_DEVICE_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NUM, int); - -/** - * @brief Metric to get a std::vector of device names, String value is METRIC_HDDL_DEVICE_NAME - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NAME, std::vector); - -/** - * @brief Metric to get a std::vector of device thermal, String value is METRIC_HDDL_DEVICE_THERMAL - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_THERMAL, std::vector); - -/** - * @brief Metric to get a std::vector of device ids, String value is METRIC_HDDL_DEVICE_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_ID, std::vector); - -/** - * @brief Metric to get a std::vector of device subclasses, String value is METRIC_HDDL_DEVICE_SUBCLASS - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_SUBCLASS, std::vector); - -/** - * @brief Metric to get a std::vector of device total memory, String value is METRIC_HDDL_MEMORY_TOTAL - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_TOTAL, std::vector); - -/** - * @brief Metric to get a std::vector of device used memory, String value is METRIC_HDDL_DEVICE_MEMORY_USED - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_USED, std::vector); - -/** - * @brief Metric to get a std::vector of device utilization, String value is METRIC_HDDL_DEVICE_UTILIZATION - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_UTILIZATION, std::vector); - -/** - * @brief Metric to get a std::vector of stream ids, String value is 
METRIC_HDDL_DEVICE_STREAM_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_STREAM_ID, std::vector); - -/** - * @brief Metric to get a std::vector of device tags, String value is METRIC_HDDL_DEVICE_TAG - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TAG, std::vector); - -/** - * @brief Metric to get a std::vector of group ids, String value is METRIC_HDDL_GROUP_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_GROUP_ID, std::vector); - -/** - * @brief Metric to get a int number of device be using for group, String value is METRIC_HDDL_DEVICE_GROUP_USING_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_GROUP_USING_NUM, int); - -/** - * @brief Metric to get a int number of total device, String value is METRIC_HDDL_DEVICE_TOTAL_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int); - -} // namespace Metrics - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This option allows to specify the number of MYX devices used for inference a specific Executable network. - * Note: Only one network would be allocated to one device. - * The number of devices for the tag is specified in the hddl_service.config file. - * Example: - * "service_settings": - * { - * "graph_tag_map": - * { - * "tagA":3 - * } - * } - * It means that an executable network marked with tagA will be executed on 3 devices - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This config makes the executable networks to be allocated on one certain device (instead of multiple devices). - * And all inference through this executable network, will be done on this device. - * Note: Only one network would be allocated to one device. - * The number of devices which will be used for stream-affinity must be specified in hddl_service.config file. - * Example: - * "service_settings": - * { - * "stream_device_number":5 - * } - * It means that 5 device will be used for stream-affinity - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_STREAM_ID); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This config allows user to control device flexibly. This config gives a "tag" for a certain device while - * allocating a network to it. Afterward, user can allocating/deallocating networks to this device with this "tag". - * Devices used for such use case is controlled by a so-called "Bypass Scheduler" in HDDL backend, and the number - * of such device need to be specified in hddl_service.config file. - * Example: - * "service_settings": - * { - * "bypass_device_number": 5 - * } - * It means that 5 device will be used for Bypass scheduler. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: "YES/NO", default is "NO". - * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set. After a user load a - * network, the user got a handle for the network. - * If "YES", the network allocated is bind to the device (with the specified "DEVICE_TAG"), which means all afterwards - * inference through this network handle will be executed on this device only. 
- * If "NO", the network allocated is not bind to the device (with the specified "DEVICE_TAG"). If the same network - * is allocated on multiple other devices (also set BIND_DEVICE to "False"), then inference through any handle of these - * networks may be executed on any of these devices those have the network loaded. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: A signed int wrapped in a string, default is "0". - * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False". - * When there are multiple devices running a certain network (a same network running on multiple devices in Bypass - * Scheduler), the device with a larger number has a higher priority, and more inference tasks will be fed to it with - * priority. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: "YES/NO", default is "NO". - * SGAD is short for "Single Graph All Device". With this scheduler, once application allocates 1 network, all devices - * (managed by SGAD scheduler) will be loaded with this graph. The number of network that can be loaded to one device - * can exceed one. Once application deallocates 1 network from device, all devices will unload the network from them. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_USE_SGAD); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: A signed int wrapped in a string, default is "0". - * This config gives a "group id" for a certain device when this device has been reserved for certain client, client - * can use this device grouped by calling this group id while other client can't use this device - * Each device has their own group id. Device in one group shares same group id. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GROUP_DEVICE); - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/myriad_config.hpp b/src/inference/include/ie/vpu/myriad_config.hpp deleted file mode 100644 index 52d490b87d1b86..00000000000000 --- a/src/inference/include/ie/vpu/myriad_config.hpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines advanced related properties for Myriad plugin. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file myriad_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "vpu_config.hpp" - -namespace InferenceEngine { - -/** - * @brief The flag to reset stalled devices. 
- * This is a plugin scope option and must be used with the plugin's SetConfig method - * The only possible values are: - * CONFIG_VALUE(YES) - * CONFIG_VALUE(NO) (default value) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_FORCE_RESET); - -/** - * @brief This option allows to specify device memory type. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_TYPE); - -/** - * @brief Supported keys definition for InferenceEngine::MYRIAD_DDR_TYPE option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_AUTO); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_SAMSUNG_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_HYNIX_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_1GB); - -/** - * @brief This option allows to specify protocol. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PROTOCOL); - -/** - * @brief Supported keys definition for InferenceEngine::MYRIAD_PROTOCOL option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PCIE); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_USB); - -/** - * @brief Optimize MYRIAD plugin execution to maximize throughput. - * This option should be used with integer value which is the requested number of streams. - * The only possible values are: - * 1 - * 2 - * 3 - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS); - -/** - * @brief Default key definition for InferenceEngine::MYRIAD_THROUGHPUT_STREAMS option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS_AUTO); - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/vpu_config.hpp b/src/inference/include/ie/vpu/vpu_config.hpp deleted file mode 100644 index 1755ed4d9fff0c..00000000000000 --- a/src/inference/include/ie/vpu/vpu_config.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines common config subset for VPU plugins. - * Include myriad_config.hpp or hddl_config.hpp directly. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file vpu_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" -#include "ie_plugin_config.hpp" - -#define DECLARE_VPU_CONFIG(name) static constexpr auto name = #name - -namespace InferenceEngine { - -// -// Common options -// - -/** - * @brief Turn on HW stages usage (applicable for MyriadX devices only). - * The only possible values are: - * CONFIG_VALUE(YES) (default value) - * CONFIG_VALUE(NO) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION); - -/** - * @brief The flag for adding to the profiling information the time of obtaining a tensor. 
- * The only possible values are: - * CONFIG_VALUE(YES) - * CONFIG_VALUE(NO) (default value) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_RECEIVING_TENSOR_TIME); - -/** - * @brief This option allows to pass custom layers binding xml. - * If layer is present in such an xml, it would be used during inference even if the layer is natively supported - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_CUSTOM_LAYERS); - -} // namespace InferenceEngine diff --git a/src/inference/src/blob_factory.cpp b/src/inference/src/blob_factory.cpp index 8408158970202a..dcf7181944502e 100644 --- a/src/inference/src/blob_factory.cpp +++ b/src/inference/src/blob_factory.cpp @@ -20,7 +20,3 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::Tenso const std::shared_ptr& alloc) { return make_blob_with_precision(desc.getPrecision(), desc, alloc); } - -InferenceEngine::Blob::Ptr make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVector dims) { - return make_blob_with_precision({prec, dims, InferenceEngine::TensorDesc::getLayoutByDims(dims)}); -} diff --git a/src/inference/src/cnn_network_ngraph_impl.cpp b/src/inference/src/cnn_network_ngraph_impl.cpp index 74f80f39e2f641..e742ad888dccbf 100644 --- a/src/inference/src/cnn_network_ngraph_impl.cpp +++ b/src/inference/src/cnn_network_ngraph_impl.cpp @@ -16,7 +16,6 @@ #include "blob_factory.hpp" #include "cpp/ie_cnn_network.h" #include "ie_common.h" -#include "ie_memcpy.h" #include "ie_ngraph_utils.hpp" #include "itt.hpp" #include "ngraph/graph_util.hpp" @@ -216,7 +215,6 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) { DataPtr input = std::make_shared(name, inData->getTensorDesc()); _data[name] = input; info->setInputData(input); - info->getPreProcess() = inputInfo.second->getPreProcess(); info->setPrecision(inputInfo.second->getPrecision()); info->setLayout(inputInfo.second->getLayout()); _inputData[name] = info; diff --git a/src/inference/src/compilation_context.cpp b/src/inference/src/compilation_context.cpp index 5c9b789b883518..72c83811b74aa7 100644 --- a/src/inference/src/compilation_context.cpp +++ b/src/inference/src/compilation_context.cpp @@ -111,24 +111,6 @@ std::string ModelCache::compute_hash(const std::shared_ptr& mod if (it != rt_info.end()) { seed = calculate_td(it->second.as(), seed); } - - it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - auto preproc = it->second.as(); - - seed = ov::hash_combine(seed, ov::as_int32_t(preproc.getMeanVariant())); - - if (preproc.getMeanVariant() == InferenceEngine::MeanVariant::MEAN_VALUE) { - seed = ov::hash_combine(seed, preproc.getNumberOfChannels()); - for (size_t c = 0; c < preproc.getNumberOfChannels(); ++c) { - const InferenceEngine::PreProcessChannel::Ptr& channelInfo = preproc[c]; - seed = ov::hash_combine(seed, channelInfo->stdScale); - seed = ov::hash_combine(seed, channelInfo->meanValue); - } - } else if (preproc.getMeanVariant() == InferenceEngine::MeanVariant::MEAN_IMAGE) { - // TODO: think if we need to compute hash for mean image if it exists - } - } } for (auto&& output : model->outputs()) { auto& rt_info = output.get_rt_info(); diff --git a/src/inference/src/cpp/ie_executable_network.cpp b/src/inference/src/cpp/ie_executable_network.cpp index ede5dface96c7e..63b74987546228 100644 --- a/src/inference/src/cpp/ie_executable_network.cpp +++ b/src/inference/src/cpp/ie_executable_network.cpp @@ -10,7 +10,6 @@ #include "ie_common.h" #include "ie_executable_network_base.hpp" 
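// Editorial illustration: ExecutableNetwork::GetContext(), removed in the hunks that
// follow, corresponds to ov::CompiledModel::get_context() in API 2.0. A minimal sketch:
#include <openvino/openvino.hpp>

std::string context_device_example(const ov::CompiledModel& compiled) {
    ov::RemoteContext ctx = compiled.get_context();
    return ctx.get_device_name();
}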
#include "ie_plugin_config.hpp" -#include "ie_remote_context.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/compiled_model.hpp" @@ -93,10 +92,6 @@ Parameter ExecutableNetwork::GetMetric(const std::string& name) const { EXEC_NET_CALL_STATEMENT(return {_impl->GetMetric(name), {_so}}); } -RemoteContext::Ptr ExecutableNetwork::GetContext() const { - EXEC_NET_CALL_STATEMENT(return _impl->GetContext()); -} - bool ExecutableNetwork::operator!() const noexcept { return !_impl; } diff --git a/src/inference/src/cpp/ie_executable_network_base.hpp b/src/inference/src/cpp/ie_executable_network_base.hpp index 7afe0b8d901f8c..f1a5cffa38b0f6 100644 --- a/src/inference/src/cpp/ie_executable_network_base.hpp +++ b/src/inference/src/cpp/ie_executable_network_base.hpp @@ -17,7 +17,6 @@ #include "cpp/exception2status.hpp" #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" -#include "ie_remote_context.hpp" #include "ie_iexecutable_network.hpp" #include "ie_infer_async_request_base.hpp" @@ -81,10 +80,6 @@ class ExecutableNetworkBase : public IExecutableNetwork { TO_STATUS(result = _impl->GetMetric(name)); } - StatusCode GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const noexcept override { - TO_STATUS(pContext = _impl->GetContext()); - } - std::shared_ptr GetImpl() const { return _impl; } diff --git a/src/inference/src/cpp/ie_infer_async_request_base.hpp b/src/inference/src/cpp/ie_infer_async_request_base.hpp index 679aa892b3efa2..79415ba6cb34b5 100644 --- a/src/inference/src/cpp/ie_infer_async_request_base.hpp +++ b/src/inference/src/cpp/ie_infer_async_request_base.hpp @@ -12,7 +12,6 @@ #include "cpp_interfaces/plugin_itt.hpp" #include #include "ie_iinfer_request.hpp" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -138,10 +137,6 @@ class InferRequestBase : public IInferRequest { TO_STATUS(data = _impl->GetBlob(name)); } - StatusCode GetPreProcess(const char* name, const PreProcessInfo** info, ResponseDesc *resp) const noexcept override { - TO_STATUS(*info = &(_impl->GetPreProcess(name))); - } - StatusCode StartAsync(ResponseDesc* resp) noexcept override { OV_ITT_SCOPED_TASK(itt::domains::Plugin, "StartAsync"); TO_STATUS(_impl->StartAsync()); diff --git a/src/inference/src/cpp/ie_infer_request.cpp b/src/inference/src/cpp/ie_infer_request.cpp index 374273b8d743d5..4b384b3df69e20 100644 --- a/src/inference/src/cpp/ie_infer_request.cpp +++ b/src/inference/src/cpp/ie_infer_request.cpp @@ -13,7 +13,6 @@ #include "dev/converter_utils.hpp" #include "ie_infer_async_request_base.hpp" #include "ie_ngraph_utils.hpp" -#include "ie_remote_context.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/exception.hpp" #include "openvino/runtime/infer_request.hpp" @@ -52,16 +51,11 @@ Blob::Ptr InferRequest::GetBlob(const std::string& name) { std::string error = "Internal error: blob with name `" + name + "` is not allocated!"; if (blobPtr == nullptr) IE_THROW() << error; - const bool remoteBlobPassed = blobPtr->is(); - if (!remoteBlobPassed && blobPtr->buffer() == nullptr) + if (blobPtr->buffer() == nullptr) IE_THROW() << error; return blobPtr; } -const PreProcessInfo& InferRequest::GetPreProcess(const std::string& name) const { - INFER_REQ_CALL_STATEMENT(return _impl->GetPreProcess(name);) -} - void InferRequest::Infer() { INFER_REQ_CALL_STATEMENT(_impl->Infer();) } diff --git a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp 
b/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp index 965fcf5200c9aa..88d84f6e5fe719 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp @@ -108,10 +108,6 @@ Parameter IExecutableNetworkInternal::GetMetric(const std::string&) const { IE_THROW(NotImplemented); } -std::shared_ptr IExecutableNetworkInternal::GetContext() const { - IE_THROW(NotImplemented); -} - std::shared_ptr IExecutableNetworkInternal::CreateInferRequestImpl( InputsDataMap networkInputs, OutputsDataMap networkOutputs) { diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp index adf7a0a7dbda04..bf1ffd10ac4acf 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp @@ -4,10 +4,8 @@ #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" -#include #include #include -#include #include #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" @@ -17,10 +15,8 @@ #include "ie_algorithm.hpp" #include "ie_blob.h" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_ngraph_utils.hpp" -#include "ie_preprocess.hpp" -#include "ie_remote_context.hpp" +#include "openvino/core/partial_shape.hpp" #include "transformations/utils/utils.hpp" namespace InferenceEngine { @@ -122,9 +118,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us const auto input = findInputByNodeName(name); const auto output = findOutputByNodeName(name); - const bool compoundBlobPassed = userBlob->is(); - const bool remoteBlobPassed = userBlob->is(); - if (!compoundBlobPassed && !remoteBlobPassed && userBlob->buffer() == nullptr) + if (userBlob->buffer() == nullptr) IE_THROW(NotAllocated) << "Input data was not allocated. Input name: \'" << name << "\'"; if (userBlob->size() == 0 && !((input && input->get_output_partial_shape(0).is_dynamic()) || (output && output->get_output_partial_shape(0).is_dynamic()))) { @@ -143,9 +137,6 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } auto& devBlob = _deviceInputs[name]; - if (compoundBlobPassed) { - IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; - } size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR ? InferenceEngine::details::product(foundInput->getTensorDesc().getDims()) @@ -156,11 +147,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } _inputs[name] = userBlob; devBlob = userBlob; - _batched_inputs.erase(name); } else { - if (compoundBlobPassed) { - IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; - } size_t outputSize = foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR ? 
details::product(foundOutput->getTensorDesc().getDims()) : 1; @@ -181,180 +168,6 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } } -void IInferRequestInternal::SetBlobs(const std::string& name, const std::vector& blobs) { - if (blobs.size() == 1) { - SetBlob(name, blobs[0]); - return; - } - - bool all_memory = std::all_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& item) { - return item && item->is() && !item->is(); - }); - OPENVINO_ASSERT(all_memory, - "set_input_tensors/set_tensors error. Default implementation support only local memory tensors"); - - checkBlobsForBatch(name, blobs); - - SetBlobsImpl(name, std::make_shared(blobs)); -} - -void IInferRequestInternal::SetBlobsImpl(const std::string& name, const BatchedBlob::Ptr& batched_blob) { - IE_THROW(NotImplemented) << "set_input_tensors/set_tensors are not supported by this plugin"; -} - -void IInferRequestInternal::checkBlobsForBatch(const std::string& name, const std::vector& blobs) { - OPENVINO_ASSERT(!blobs.empty(), - "set_input_tensors/set_tensors can't be called with empty blobs for input '", - name, - "'"); - OPENVINO_ASSERT(blobs.size() != 1, - "Internal error (plugin): checkBlobsForBatch is not allowed to have only one blob inside batch " - "for input '", - name, - "'"); - - std::shared_ptr param; - const auto& inputs = GetInputs(); - for (const auto& input : inputs) { - if (auto p = std::dynamic_pointer_cast(input)) { - if (name == p->get_friendly_name()) { - param = p; - break; - } - } - } - OPENVINO_ASSERT(param, "set_input_tensors/set_tensors error. Parameter '", name, "' is not found"); - OPENVINO_ASSERT(ov::layout::has_batch(param->get_layout()), - "set_input_tensors/set_tensors can be used only for inputs with N(batch) dimension" - " 'layout' defined. Current layout for '", - name, - "' is ", - param->get_layout().to_string()); - auto batch_idx = ov::layout::batch_idx(param->get_layout()); - if (batch_idx < 0) { - batch_idx += static_cast(blobs[0]->getTensorDesc().getDims().size()); - } - OPENVINO_ASSERT(batch_idx == 0, - "set_input_tensors/set_tensors is not currently supported for batch dimension index ", - batch_idx, - " != 0"); - std::for_each(blobs.begin(), blobs.end(), [&batch_idx](const Blob::Ptr& item) { - OPENVINO_ASSERT(item->getTensorDesc().getDims()[batch_idx] == 1, - "set_input_tensors/set_tensors. Tensors shall represent one item in a batch, ", - item->getTensorDesc().getDims()[batch_idx], - " provided"); - }); - auto blobs_size = static_cast(blobs.size()); - if (param->get_partial_shape().rank().is_static()) { - OPENVINO_ASSERT(batch_idx >= 0 && batch_idx < param->get_partial_shape().rank().get_length(), - "set_input_tensors/set_tensors error. Layout ", - param->get_layout().to_string(), - " is incorrect for operation with name '", - name, - "' with shape ", - param->get_partial_shape()); - auto batch = param->get_partial_shape()[batch_idx]; - - OPENVINO_ASSERT(batch.is_dynamic() || batch.get_length() == blobs_size, - "set_input_tensors/set_tensors error. 
Input shape ", - param->get_partial_shape(), - "batch ", - batch, - "doesn't match with total blobs count: ", - blobs_size); - } - - // In future consider checking if blobs point to contiguous range of memory and use single 'SetBlob' instead - auto tmp_desc = blobs[0]->getTensorDesc(); - tmp_desc.getDims()[batch_idx] = blobs_size; - auto blockingDims = tmp_desc.getBlockingDesc().getBlockDims(); - blockingDims[batch_idx] = blobs_size; - auto blockingDesc = BlockingDesc(blockingDims, tmp_desc.getBlockingDesc().getOrder()); - auto batched_desc = InferenceEngine::TensorDesc(tmp_desc.getPrecision(), tmp_desc.getDims(), blockingDesc); - auto desc_to_string = [](const TensorDesc& desc) { - std::stringstream s; - s << "{ " << desc.getLayout() << " " << desc.getPrecision().name(); - s << "dim=("; - for (const auto& d : desc.getDims()) { - s << " " << d; - } - s << " ) }"; - return s.str(); - }; - for (const auto& item : blobs) { - auto item_desc = item->getTensorDesc(); - item_desc.getDims()[batch_idx] = batched_desc.getDims()[batch_idx]; - OPENVINO_ASSERT(item_desc.getDims() == batched_desc.getDims() && - item_desc.getLayout() == batched_desc.getLayout() && - item_desc.getPrecision() == batched_desc.getPrecision() && - item_desc.getBlockingDesc().getOrder() == batched_desc.getBlockingDesc().getOrder(), - "set_input_tensors/set_tensors error. Blob ", - desc_to_string(item_desc), - " is not compatible with batched blob ", - desc_to_string(batched_desc)); - } -} - -void IInferRequestInternal::convertBatchedInputBlob(const std::string& name, const BatchedBlob::Ptr& batched_blob) { - auto tmp_desc = batched_blob->getBlob(0)->getTensorDesc(); - tmp_desc.getDims()[0] = batched_blob->size(); - auto blockingDims = tmp_desc.getBlockingDesc().getBlockDims(); - blockingDims[0] = batched_blob->size(); - auto blockingDesc = BlockingDesc(blockingDims, tmp_desc.getBlockingDesc().getOrder()); - auto batched_desc = InferenceEngine::TensorDesc(tmp_desc.getPrecision(), tmp_desc.getDims(), blockingDesc); - std::shared_ptr remote_context; - MemoryBlob::Ptr mem_blob; - try { - auto net = getPointerToExecutableNetworkInternal(); - if (net) { - remote_context = net->GetContext(); - } - } catch (const InferenceEngine::NotImplemented&) { - } - if (remote_context) { - mem_blob = remote_context->CreateHostBlob(batched_desc); - } else { - mem_blob = std::dynamic_pointer_cast(make_blob_with_precision(batched_desc)); - } - OPENVINO_ASSERT(mem_blob, "Internal error - can't create host memory blob"); - mem_blob->allocate(); - auto ptr = mem_blob->wmap(); - - // Perform memory copy - InferenceEngine::parallel_for(batched_blob->size(), [&](size_t i) { - const auto& blob = as(batched_blob->getBlob(i)); - OPENVINO_ASSERT(mem_blob, "Internal error - can't cast blob ", i, " to MemoryBlob"); - const auto& blob_desc = blob->getTensorDesc().getBlockingDesc(); - bool offsets_0 = std::all_of(blob_desc.getOffsetPaddingToData().begin(), - blob_desc.getOffsetPaddingToData().end(), - [](size_t dim) { - return dim == 0; - }); - OPENVINO_ASSERT(offsets_0, - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. All tensors offsets shall be 0"); - OPENVINO_ASSERT(mem_blob->getTensorDesc().getBlockingDesc().getOrder() == blob_desc.getOrder(), - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. 
Axis order shall be default"); - OPENVINO_ASSERT(mem_blob->getTensorDesc().getBlockingDesc().getStrides() == blob_desc.getStrides(), - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. Input blobs shall have default strides set"); - memcpy(ptr.as() + i * blob->byteSize(), - blob->rmap().as() + - blob->getTensorDesc().getBlockingDesc().getOffsetPadding() * blob->element_size(), - blob->byteSize()); - }); - SetBlob(name, mem_blob); -} - -void IInferRequestInternal::convertBatchedInputBlobs() { - auto batched_copy = _batched_inputs; - for (const auto& item : batched_copy) { - convertBatchedInputBlob(item.first, item.second); - } - _batched_inputs = batched_copy; -} - Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { OV_ITT_SCOPED_TASK(itt::domains::Plugin, "GetBlob"); Blob::Ptr data; @@ -383,23 +196,6 @@ Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { return data; } -BatchedBlob::Ptr IInferRequestInternal::GetBlobs(const std::string& name) { - if (_batched_inputs.count(name)) { - return _batched_inputs.at(name); - } - return nullptr; -} - -const PreProcessInfo& IInferRequestInternal::GetPreProcess(const std::string& name) const { - InputInfo::Ptr foundInput; - DataPtr foundOutput; - if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { - return foundInput->getPreProcess(); - } else { - IE_THROW() << "Output blob can't have pre-processing"; - } -} - std::vector> IInferRequestInternal::QueryState() { IE_THROW(NotImplemented); } @@ -512,8 +308,7 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, if (!isDynamic && refSize != blob->size()) { IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize; } - const bool remoteBlobPassed = blob->is(); - if (!remoteBlobPassed && blob->buffer() == nullptr) + if (blob->buffer() == nullptr) IE_THROW() << strNotAllocated; } diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index a4fe56fb58ab9b..dbe91e775cb855 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -29,6 +29,7 @@ #include "ie_icore.hpp" #include "ie_iextension.h" #include "ie_input_info.hpp" +#include "ie_memcpy.h" #include "ie_ngraph_utils.hpp" #include "ie_parameter.hpp" #include "openvino/core/deprecated.hpp" @@ -42,28 +43,12 @@ namespace InferenceEngine { -PreProcessInfo copyPreProcess(const PreProcessInfo& from) { - PreProcessInfo to = from; - if (from.getMeanVariant() == MEAN_IMAGE) { - for (size_t i = 0; i < from.getNumberOfChannels(); i++) { - auto& from_blob = from[i]->meanData; - auto to_blob = make_blob_with_precision(from[i]->meanData->getTensorDesc()); - to_blob->allocate(); - ie_memcpy(to_blob->buffer(), to_blob->byteSize(), from_blob->cbuffer(), from_blob->byteSize()); - - to.setMeanImageForChannel(to_blob, i); - } - } - return to; -} - InputsDataMap copyInfo(const InputsDataMap& networkInputs) { InputsDataMap _networkInputs; for (const auto& it : networkInputs) { InputInfo::Ptr newPtr; if (it.second) { newPtr = std::make_shared(); - newPtr->getPreProcess() = it.second->getPreProcess(); newPtr->setInputData(std::make_shared(*it.second->getInputData())); } _networkInputs.emplace(it.first, newPtr); @@ -118,12 +103,6 @@ void IInferencePlugin::SetName(const std::string& pluginName) noexcept { _pluginName = pluginName; } -std::shared_ptr 
IInferencePlugin::LoadNetwork( - const CNNNetwork& network, - const std::map& config) { - return LoadNetwork(network, config, nullptr); -} - template std::map> const_map_cast(const std::map>& map) { std::map> res; @@ -134,8 +113,7 @@ std::map> const_map_cast(const std::map IInferencePlugin::LoadNetwork( const CNNNetwork& orig_network, - const std::map& config, - const std::shared_ptr& context) { + const std::map& config) { std::shared_ptr impl; // if IR `version` is not set, suppose it's IR v10 for old API @@ -170,7 +148,6 @@ std::shared_ptr IInferencePlugin::LoadNetwork( auto toInfo = network.getInputsInfo().at(inputInfo.first); toInfo->setPrecision(inputInfo.second->getPrecision()); toInfo->setLayout(inputInfo.second->getLayout()); - toInfo->getPreProcess() = inputInfo.second->getPreProcess(); } for (const auto& outputInfo : orig_network.getOutputsInfo()) { auto toInfo = network.getOutputsInfo().at(outputInfo.first); @@ -181,11 +158,7 @@ std::shared_ptr IInferencePlugin::LoadNetwork( } } - if (nullptr == context) { - impl = LoadExeNetworkImpl(network, config); - } else { - impl = LoadExeNetworkImpl(network, context, config); - } + impl = LoadExeNetworkImpl(network, config); SetExeNetworkInfo(impl, const_map_cast(network.getInputsInfo()), const_map_cast(network.getOutputsInfo())); if (function) { @@ -221,14 +194,6 @@ Parameter IInferencePlugin::GetMetric(const std::string&, const std::map IInferencePlugin::CreateContext(const ParamMap&) { - IE_THROW(NotImplemented); -} - -std::shared_ptr IInferencePlugin::GetDefaultContext(const ParamMap&) { - IE_THROW(NotImplemented); -} - std::shared_ptr IInferencePlugin::ImportNetwork( const std::string& modelFileName, const std::map& config) { @@ -247,13 +212,6 @@ std::shared_ptr IInferencePlugin::ImportNetwork( IE_THROW(NotImplemented); } -std::shared_ptr IInferencePlugin::ImportNetwork( - std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config) { - IE_THROW(NotImplemented); -} - void IInferencePlugin::SetCore(std::weak_ptr core) { IE_ASSERT(!core.expired()); _core = core; @@ -285,13 +243,6 @@ std::shared_ptr IInferencePlugin::LoadExeNetworkImpl IE_THROW(NotImplemented); } -std::shared_ptr IInferencePlugin::LoadExeNetworkImpl( - const CNNNetwork&, - const std::shared_ptr&, - const std::map&) { - IE_THROW(NotImplemented); -} - void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr& exeNetwork, const ConstInputsDataMap& inputs, const ConstOutputsDataMap& outputs) { diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp index e5c4e4c332448c..e4824e2d7d6f67 100644 --- a/src/inference/src/dev/converter_utils.cpp +++ b/src/inference/src/dev/converter_utils.cpp @@ -16,7 +16,6 @@ #include "icompiled_model_wrapper.hpp" #include "ie_blob.h" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_icore.hpp" #include "ie_input_info.hpp" #include "ie_layouts.h" @@ -31,17 +30,14 @@ #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/iinfer_request.hpp" #include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/ivariable_state.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/profiling_info.hpp" -#include "openvino/runtime/remote_context.hpp" #include "openvino/runtime/so_ptr.hpp" #include "openvino/runtime/tensor.hpp" #include "openvino/runtime/threading/executor_manager.hpp" #include "openvino/runtime/variable_state.hpp" 
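// A minimal sketch of the API 2.0 flow that the legacy conversion layer
// removed here gives way to. This is illustrative only, not code from this
// patch: the model path and tensor shape are hypothetical, and a plain CPU
// device is assumed so that no remote context is involved.
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    // read_model/compile_model replace CNNNetwork + IInferencePlugin::LoadNetwork
    auto model = core.read_model("model.xml");        // hypothetical model path
    auto compiled = core.compile_model(model, "CPU"); // no RemoteContext overload needed
    auto request = compiled.create_infer_request();
    // ov::Tensor replaces Blob; set_/get_tensor replace SetBlob/GetBlob
    ov::Tensor input(ov::element::f32, ov::Shape{1, 3, 224, 224}); // shape is illustrative
    request.set_input_tensor(input);
    request.infer();
    ov::Tensor output = request.get_output_tensor();
    (void)output;
    return 0;
}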
-#include "remote_context_wrapper.hpp" #include "transformations/utils/utils.hpp" #ifdef PROXY_PLUGIN_ENABLED @@ -62,11 +58,7 @@ void fill_input_info(ov::Output& input, InferenceEngine::InputInfo::Pt const ov::Output const_input(input.get_node(), input.get_index()); ov::legacy_convert::fill_input_info(const_input, input_info); auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - rt_info.erase(it); - } - it = rt_info.find("ie_legacy_td"); + auto it = rt_info.find("ie_legacy_td"); if (it != rt_info.end()) { rt_info.erase(it); } @@ -106,11 +98,7 @@ void ov::legacy_convert::fill_input_info(const ov::Output& input input_info->setInputData(data); } auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - input_info->getPreProcess() = it->second.as(); - } - it = rt_info.find("ie_legacy_td"); + auto it = rt_info.find("ie_legacy_td"); if (it != rt_info.end()) { auto td = it->second.as(); input_info->getInputData()->reshape(td.getDims(), td.getLayout()); @@ -176,7 +164,6 @@ std::shared_ptr ov::legacy_convert::convert_model(const Inferen auto input_info = network.getInputsInfo().at(param_name); auto& rt_info = input.get_rt_info(); - rt_info["ie_legacy_preproc"] = input_info->getPreProcess(); rt_info["ie_legacy_td"] = input_info->getTensorDesc(); } for (auto&& result : cloned_model->get_results()) { @@ -253,17 +240,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { m_plugin._so}); } - std::shared_ptr LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::map& config, - const std::shared_ptr& context) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()), - ov::any_copy(config), - ov::legacy_convert::convert_remote_context(context)), - m_plugin._so}); - } - ov::SoPtr LoadNetwork( const std::string& modelPath, const std::map& config) override { @@ -297,15 +273,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { return m_plugin->get_property(name, options); } - std::shared_ptr CreateContext(const InferenceEngine::ParamMap& params) override { - return ov::legacy_convert::convert_remote_context(m_plugin->create_context(params)); - } - - std::shared_ptr GetDefaultContext( - const InferenceEngine::ParamMap& params) override { - return ov::legacy_convert::convert_remote_context(m_plugin->get_default_context(params)); - } - std::shared_ptr ImportNetwork( const std::string& modelFileName, const std::map& config) override { @@ -321,17 +288,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { {m_plugin->import_model(networkModel, ov::any_copy(config)), m_plugin._so}); } - std::shared_ptr ImportNetwork( - std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->import_model(networkModel, - ov::legacy_convert::convert_remote_context(context), - ov::any_copy(config)), - m_plugin._so}); - } - void SetCore(std::weak_ptr core) override { return m_plugin->set_core(std::dynamic_pointer_cast(core.lock())); } @@ -453,10 +409,6 @@ class IExecutableNetworkWrapper : public InferenceEngine::IExecutableNetworkInte return m_model->get_property(name); } - std::shared_ptr GetContext() const override { - return ov::legacy_convert::convert_remote_context(m_model->get_context()); - } - ov::SoPtr 
get_compiled_model() { return m_model; } @@ -551,18 +503,6 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern } } - void SetBlobs(const std::string& name, const std::vector& blobs) override { - try { - std::vector> tensors; - for (const auto& blob : blobs) { - tensors.emplace_back(ov::make_tensor(blob, true)); - } - m_request->set_tensors(find_port(name), tensors); - } catch (const ov::Exception& ex) { - IE_THROW(GeneralError) << ex.what(); - } - } - InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override { auto port = find_port(name); auto& rt_info = port.get_rt_info(); @@ -574,38 +514,6 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern return tensor_to_blob(m_request->get_tensor(port), true, desc); } - InferenceEngine::BatchedBlob::Ptr GetBlobs(const std::string& name) override { - auto port = find_port(name); - auto& rt_info = port.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - InferenceEngine::TensorDesc desc; - if (it != rt_info.end()) { - desc = it->second.as(); - } - auto tensors = m_request->get_tensors(port); - std::vector blobs; - for (const auto& tensor : tensors) { - blobs.emplace_back(tensor_to_blob(tensor, true, desc)); - } - return std::make_shared(blobs); - } - - const InferenceEngine::PreProcessInfo& GetPreProcess(const std::string& name) const override { -#ifdef PROXY_PLUGIN_ENABLED - if (auto proxy_request = std::dynamic_pointer_cast(m_request._ptr)) { - return ov::legacy_convert::convert_infer_request(proxy_request->get_hardware_request()) - ->GetPreProcess(name); - } -#endif - auto port = find_port(name); - auto& rt_info = port.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - return it->second.as(); - } - OPENVINO_THROW("Cannot find PreProcess info."); - } - std::vector> QueryState() override { auto res = m_request->query_state(); std::vector> ret; @@ -759,43 +667,17 @@ class IAsyncInferRequestWrapper : public ov::IAsyncInferRequest { ov::SoPtr get_tensor(const ov::Output& port) const override { const auto& name = get_legacy_name_from_port(port); - OPENVINO_ASSERT(!m_request->GetBlobs(name), - "get_tensor shall not be used together with batched " - "set_tensors/set_input_tensors for name '", - name, - "'"); auto blob = m_request->GetBlob(name); ov::SoPtr tensor = ov::make_tensor(blob); if (!tensor._so) tensor._so = m_request->getPointerToSo(); return tensor; } + void set_tensor(const ov::Output& port, const ov::SoPtr& tensor) override { m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor, m_unwrap_tensor)); } - std::vector> get_tensors(const ov::Output& port) const override { - auto blobs = m_request->GetBlobs(get_legacy_name_from_port(port)); - std::vector> ret; - if (!blobs) - return ret; - for (size_t i = 0; i < blobs->size(); i++) { - ov::SoPtr tensor = ov::make_tensor(blobs->getBlob(i)); - if (!tensor._so) - tensor._so = m_request->getPointerToSo(); - ret.emplace_back(tensor); - } - return ret; - } - void set_tensors(const ov::Output& port, - const std::vector>& tensors) override { - std::vector blobs; - for (const auto& tensor : tensors) { - blobs.emplace_back(ov::tensor_to_blob(tensor, m_unwrap_tensor)); - } - m_request->SetBlobs(get_legacy_name_from_port(port), blobs); - } - std::vector> query_state() const override { std::vector> variable_states; for (auto&& state : m_request->QueryState()) { @@ -855,60 +737,6 @@ ov::SoPtr<::ov::IAsyncInferRequest> ov::legacy_convert::convert_infer_request( 
request->getPointerToSo()}; } -namespace InferenceEngine { -const std::shared_ptr& IRemoteContextWrapper::get_context() { - return m_context; -} - -const std::string& IRemoteContextWrapper::get_device_name() const { - m_name = m_context->getDeviceName(); - return m_name; -} - -const ov::AnyMap& IRemoteContextWrapper::get_property() const { - m_params = m_context->getParams(); - return m_params; -} - -ov::SoPtr IRemoteContextWrapper::create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params) { - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type), - shape, - InferenceEngine::TensorDesc::getLayoutByDims(shape)); - auto blob = m_context->CreateBlob(desc, params); - blob->allocate(); - auto tensor = ov::make_tensor(blob); - return {std::dynamic_pointer_cast(tensor._ptr), tensor._so}; -} - -ov::SoPtr IRemoteContextWrapper::create_host_tensor(const ov::element::Type type, const ov::Shape& shape) { - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type), - shape, - InferenceEngine::TensorDesc::getLayoutByDims(shape)); - auto blob = m_context->CreateHostBlob(desc); - blob->allocate(); - return ov::make_tensor(blob); -} - -} // namespace InferenceEngine - -std::shared_ptr ov::legacy_convert::convert_remote_context( - const ov::SoPtr& context) { - if (auto ctx = std::dynamic_pointer_cast(context._ptr)) { - return ctx->get_context(); - } - return std::make_shared(context); -} - -ov::SoPtr ov::legacy_convert::convert_remote_context( - const std::shared_ptr& context) { - if (auto ctx = std::dynamic_pointer_cast(context)) { - return ctx->get_context(); - } - return {std::make_shared(context)}; -} - namespace ov { /* diff --git a/src/inference/src/dev/converter_utils.hpp b/src/inference/src/dev/converter_utils.hpp index d121f5a4fa9ac3..ea2de550a51d10 100644 --- a/src/inference/src/dev/converter_utils.hpp +++ b/src/inference/src/dev/converter_utils.hpp @@ -8,14 +8,12 @@ #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "ie_iextension.h" -#include "ie_remote_blob.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/model.hpp" #include "openvino/runtime/iasync_infer_request.hpp" #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/iremote_context.hpp" -#include "remote_utils.hpp" namespace ov { namespace legacy_convert { @@ -40,8 +38,6 @@ ov::SoPtr<::ov::IAsyncInferRequest> convert_infer_request( const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request, const std::string& plugin_name = ""); -std::shared_ptr convert_remote_context(const ov::SoPtr& context); - std::vector convert_extension(const std::vector& exts); std::vector convert_extension(const std::vector& exts); diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 1e5ed283adb0d3..514dd798149249 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -11,6 +11,7 @@ #include "dev/converter_utils.hpp" #include "dev/icompiled_model_wrapper.hpp" #include "dev/iplugin_wrapper.hpp" +#include "ie_plugin_config.hpp" #include "itt.hpp" #include "model_reader.hpp" #include "openvino/core/any.hpp" diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 2a4415ad941bd4..6aa1db3fa9929b 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ 
-6,8 +6,6 @@ #include -#include - #include "any_copy.hpp" #include "cache_guard.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" @@ -15,7 +13,6 @@ #include "ie_cache_manager.hpp" #include "ie_extension.h" #include "ie_icore.hpp" -#include "multi-device/multi_device_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/so_extension.hpp" @@ -212,8 +209,7 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t ov::SoPtr LoadNetworkImpl( const InferenceEngine::CNNNetwork& model, ov::Plugin& plugin, - const std::map& parsedConfig, - const InferenceEngine::RemoteContext::Ptr& context); + const std::map& parsedConfig); public: CoreImpl(bool _newAPI); @@ -249,13 +245,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t bool isNewAPI() const override; - InferenceEngine::RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) override; - - ov::SoPtr LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config) override; - InferenceEngine::SoExecutableNetworkInternal LoadNetwork(const InferenceEngine::CNNNetwork& network, const std::string& deviceNameOrig, const std::map& config) override; @@ -294,16 +283,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t */ std::vector GetAvailableDevices() const override; - /** - * @brief Create a new shared context object on specified accelerator device - * using specified plugin-specific low level device API parameters (device handle, pointer, etc.) - * @param deviceName Name of a device to create new shared context on. - * @param params Map of device-specific shared context parameters. - * @return A shared pointer to a created remote context. - */ - InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, - const InferenceEngine::ParamMap& params) override; - std::map GetSupportedConfig(const std::string& deviceName, const std::map& configs) override; diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index 88dd55a595f17a..89445449b17de6 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -31,21 +31,13 @@ bool ov::CoreImpl::isNewAPI() const { ov::SoPtr ov::CoreImpl::LoadNetworkImpl( const InferenceEngine::CNNNetwork& network, ov::Plugin& plugin, - const std::map& parsedConfig, - const InferenceEngine::RemoteContext::Ptr& context) { + const std::map& parsedConfig) { OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CoreImpl::LoadNetworkImpl"); ov::SoPtr execNetwork; auto wrapper = std::dynamic_pointer_cast(plugin.m_ptr); OPENVINO_ASSERT(wrapper); auto old_plugin = wrapper->get_plugin(); - execNetwork = {context ? 
old_plugin->LoadNetwork(network, parsedConfig, context) - : old_plugin->LoadNetwork(network, parsedConfig), - plugin.m_so}; - return execNetwork; -} - -InferenceEngine::RemoteContext::Ptr ov::CoreImpl::GetDefaultContext(const std::string& deviceName) { - return ov::legacy_convert::convert_remote_context(get_default_context(deviceName)); + return {old_plugin->LoadNetwork(network, parsedConfig), plugin.m_so}; } InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& modelPath, const std::string& binPath) const { @@ -64,27 +56,6 @@ InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& model, return InferenceEngine::details::ReadNetwork(model, weights, extensions, isNewAPI(), frontendMode); } -ov::SoPtr ov::CoreImpl::LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config) { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::LoadNetwork::RemoteContext"); - if (network.getFunction()) { - auto ctx = ov::legacy_convert::convert_remote_context(context); - auto compiled_model = - compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), ctx, any_copy(config)); - return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; - } - if (context == nullptr) { - IE_THROW() << "Remote context is null"; - } - // have to deduce the device name/config from the context first - auto parsed = parseDeviceNameIntoConfig(context->getDeviceName(), any_copy(config)); - auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), context); - return res; -} - InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( const InferenceEngine::CNNNetwork& network, const std::string& deviceName, @@ -97,7 +68,7 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( } auto parsed = parseDeviceNameIntoConfig(deviceName, any_copy(config)); auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), nullptr); + auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config)); return {res._ptr, res._so}; } @@ -205,11 +176,6 @@ std::vector ov::CoreImpl::GetAvailableDevices() const { return get_available_devices(); } -InferenceEngine::RemoteContext::Ptr ov::CoreImpl::CreateContext(const std::string& deviceName, - const InferenceEngine::ParamMap& params) { - return ov::legacy_convert::convert_remote_context(create_context(deviceName, params)); -} - /** * @brief Registers the extension in a Core object * Such extensions can be used for both CNNNetwork readers and device plugins diff --git a/src/inference/src/dev/icompiled_model.cpp b/src/inference/src/dev/icompiled_model.cpp index 6e81c091719686..49d60e5268cb49 100644 --- a/src/inference/src/dev/icompiled_model.cpp +++ b/src/inference/src/dev/icompiled_model.cpp @@ -137,9 +137,6 @@ void ov::ICompiledModel::set_callback_executor(const std::shared_ptr ov::ICompiledModel::get_context() const { - if (auto wrapper = dynamic_cast(this)) { - return ov::legacy_convert::convert_remote_context(wrapper->get_executable_network()->GetContext()); - } if (m_context) return m_context; return m_plugin->get_default_context({}); diff --git a/src/inference/src/dev/iplugin_wrapper.cpp b/src/inference/src/dev/iplugin_wrapper.cpp index 5b25dcfb6aac1d..d51dbb767c43a9 100644 --- a/src/inference/src/dev/iplugin_wrapper.cpp +++ b/src/inference/src/dev/iplugin_wrapper.cpp @@ -50,11 +50,7 @@ std::shared_ptr 
IPluginWrapper::compile_model(const std::str std::shared_ptr IPluginWrapper::compile_model(const std::shared_ptr& model, const ov::AnyMap& properties, const ov::SoPtr& context) const { - return ov::legacy_convert::convert_compiled_model( - update_exec_network(m_old_plugin->LoadNetwork(ov::legacy_convert::convert_model(model, is_new_api()), - any_copy(properties), - ov::legacy_convert::convert_remote_context(context)))) - ._ptr; + OPENVINO_NOT_IMPLEMENTED; } void IPluginWrapper::set_property(const ov::AnyMap& properties) { @@ -70,11 +66,11 @@ ov::Any IPluginWrapper::get_property(const std::string& name, const ov::AnyMap& } ov::SoPtr IPluginWrapper::create_context(const ov::AnyMap& remote_properties) const { - return ov::legacy_convert::convert_remote_context(m_old_plugin->CreateContext(remote_properties)); + OPENVINO_NOT_IMPLEMENTED; } ov::SoPtr IPluginWrapper::get_default_context(const ov::AnyMap& remote_properties) const { - return ov::legacy_convert::convert_remote_context(m_old_plugin->GetDefaultContext(remote_properties)); + OPENVINO_NOT_IMPLEMENTED; } std::shared_ptr IPluginWrapper::import_model(std::istream& model, @@ -87,11 +83,7 @@ std::shared_ptr IPluginWrapper::import_model(std::istream& m std::shared_ptr IPluginWrapper::import_model(std::istream& model, const ov::SoPtr& context, const ov::AnyMap& properties) const { - return ov::legacy_convert::convert_compiled_model( - update_exec_network(m_old_plugin->ImportNetwork(model, - ov::legacy_convert::convert_remote_context(context), - any_copy(properties)))) - ._ptr; + OPENVINO_NOT_IMPLEMENTED; } ov::SupportedOpsMap IPluginWrapper::query_model(const std::shared_ptr& model, diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 4f536c6cd24a7e..8e4eaa8f01a9a3 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -7,7 +7,7 @@ #include #include "ie_blob.h" -#include "ie_remote_blob.hpp" +#include "ie_ngraph_utils.hpp" #include "openvino/runtime/iremote_tensor.hpp" #include "openvino/runtime/properties.hpp" #include "remote_utils.hpp" @@ -354,8 +354,6 @@ class BlobTensor : public ITensor { std::shared_ptr blob; BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} { - auto remote_impl = dynamic_cast(blob.get()); - OPENVINO_ASSERT(!remote_impl); OPENVINO_ASSERT(blob); m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); update_strides(); @@ -460,10 +458,6 @@ ov::SoPtr make_tensor(const std::shared_ptr& blo return {}; } else if (unwrap && std::dynamic_pointer_cast(blob) != nullptr) { return std::dynamic_pointer_cast(blob)->get_tensor(); - } else if (auto remote_blob = std::dynamic_pointer_cast(blob)) { - return remote_blob->get_tensor(); - } else if (auto remote_blob = std::dynamic_pointer_cast(blob)) { - return {std::make_shared(remote_blob), nullptr}; } ELSE_IF(float) ELSE_IF(double) @@ -484,36 +478,6 @@ ov::SoPtr make_tensor(const std::shared_ptr& blo #undef IF } -InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob) { -#ifdef PROXY_PLUGIN_ENABLED - if (auto remote_blob = dynamic_cast(blob)) { - const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); - if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } - OPENVINO_NOT_IMPLEMENTED; - } -#endif - return blob; -} - -const InferenceEngine::Blob* get_hardware_blob(const 
InferenceEngine::Blob* blob) { -#ifdef PROXY_PLUGIN_ENABLED - if (auto remote_blob = dynamic_cast(blob)) { - const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); - if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } - OPENVINO_NOT_IMPLEMENTED; - } -#endif - return blob; -} - InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, InferenceEngine::TensorDesc desc) { @@ -559,10 +523,6 @@ InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, return {}; } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { return blob_tensor->blob; - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob; - } else if (std::dynamic_pointer_cast(tensor._ptr)) { - return std::make_shared(tensor, create_desc(tensor, desc)); } else { #define CASE(precision, T) \ case element::precision: \ diff --git a/src/inference/src/dev/preprocessing/mean_image.cpp b/src/inference/src/dev/preprocessing/mean_image.cpp deleted file mode 100644 index 639a31d5bf6bca..00000000000000 --- a/src/inference/src/dev/preprocessing/mean_image.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "mean_image.hpp" - -#include "openvino/cc/pass/itt.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -ov::pass::AddMeanImage::AddMeanImage(const MeanMap& inputInfoMap) { - MATCHER_SCOPE(AddMeanImage); - auto label = ov::pass::pattern::wrap_type(); - - ov::matcher_pass_callback callback = [=](pattern::Matcher& m) { - auto param = std::dynamic_pointer_cast(m.get_match_root()); - if (!param) { - return false; - } - - auto it = inputInfoMap.find(param->get_friendly_name()); - if (it == inputInfoMap.end()) { - return false; - } - - auto mean_const = it->second; - OPENVINO_ASSERT(mean_const->get_element_type() == ov::element::f32, - "Mean for ", - param->get_friendly_name(), - " must have f32 type"); - - auto copy_param = param->clone_with_new_inputs({}); - auto sub = std::make_shared(copy_param, mean_const); - - ov::replace_node(param, sub); - sub->set_argument(0, param); - - // Return true as the root node was changed - return true; - }; - - // Register pattern with Parameter operation as a pattern root node - auto m = std::make_shared(label, matcher_name); - // Register Matcher - register_matcher(m, callback); -} diff --git a/src/inference/src/dev/preprocessing/mean_image.hpp b/src/inference/src/dev/preprocessing/mean_image.hpp deleted file mode 100644 index 70c387c7a756b9..00000000000000 --- a/src/inference/src/dev/preprocessing/mean_image.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "openvino/op/constant.hpp" -#include "openvino/pass/graph_rewrite.hpp" - -namespace ov { -namespace pass { - -/** - * @brief Add `meanImage` preprocessing to input nodes - */ -class AddMeanImage : public ov::pass::MatcherPass { -public: - using MeanMap = std::map>; - - OPENVINO_RTTI("AddMeanImage", "0"); - explicit AddMeanImage(const MeanMap& inputInfoMap); -}; - -} // namespace pass -} // namespace ov diff --git a/src/inference/src/dev/preprocessing/preprocessing.cpp 
b/src/inference/src/dev/preprocessing/preprocessing.cpp index 69fb991da1eb32..ce19eda01efa27 100644 --- a/src/inference/src/dev/preprocessing/preprocessing.cpp +++ b/src/inference/src/dev/preprocessing/preprocessing.cpp @@ -5,8 +5,6 @@ #include "preprocessing.hpp" #include "dev/converter_utils.hpp" -#include "dev/preprocessing/mean_image.hpp" -#include "ie_common.h" #include "ie_ngraph_utils.hpp" #include "openvino/cc/pass/itt.hpp" #include "openvino/core/preprocess/color_format.hpp" @@ -19,7 +17,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& model) { RUN_ON_MODEL_SCOPE(AddPreprocessing); ov::preprocess::PrePostProcessor preproc(model); - ov::pass::AddMeanImage::MeanMap meanMap; for (size_t i = 0; i < model->inputs().size(); i++) { ov::Output const_input(model->input(i).get_node(), model->input(i).get_index()); @@ -29,8 +26,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& ov::legacy_convert::fill_input_info(const_input, input_info); OPENVINO_ASSERT(input_info); - auto& legacy_preproc = input_info->getPreProcess(); - preproc.input(i).tensor().set_element_type( InferenceEngine::details::convertPrecision(input_info->getPrecision())); @@ -41,77 +36,10 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& preproc.input(i).tensor().set_layout(ov::Layout{stream.str()}); } - // Resize - switch (legacy_preproc.getResizeAlgorithm()) { - case InferenceEngine::ResizeAlgorithm::RESIZE_AREA: - preproc.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_NEAREST); - preproc.input(i).tensor().set_spatial_dynamic_shape(); - break; - case InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR: - preproc.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); - preproc.input(i).tensor().set_spatial_dynamic_shape(); - break; - default: - // nothing to do - break; - } - - switch (legacy_preproc.getMeanVariant()) { - case InferenceEngine::MEAN_IMAGE: { - ov::Shape shape(input_info->getTensorDesc().getDims()); - std::vector scale; - std::vector meanImageData(ov::shape_size(shape)); - for (size_t c = 0, i = 0; c < legacy_preproc.getNumberOfChannels(); ++c) { - auto blob = legacy_preproc[c]->meanData; - - auto lm = blob->buffer(); - const float* data = lm.as(); - - std::memcpy(&meanImageData[i], data, blob->byteSize()); - i += blob->size(); - scale.emplace_back(legacy_preproc[c]->stdScale); - } - meanMap[input_info->name()] = ov::op::v0::Constant::create(ov::element::f32, shape, meanImageData); - preproc.input(i).preprocess().scale(scale); - break; - } - case InferenceEngine::MEAN_VALUE: { - std::vector mean, scale; - for (size_t i = 0; i < legacy_preproc.getNumberOfChannels(); i++) { - mean.emplace_back(legacy_preproc[i]->meanValue); - scale.emplace_back(legacy_preproc[i]->stdScale); - } - preproc.input(i).preprocess().mean(mean).scale(scale); - break; - } - default: - break; - } - - switch (legacy_preproc.getColorFormat()) { - case InferenceEngine::ColorFormat::BGR: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::BGR); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::RGB: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::RGB); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::RGBX: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::RGBX); - 
preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::BGRX: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::BGRX); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - default: - break; - } - if (const_input.get_partial_shape().is_static() && const_input.get_shape().size() == 4) preproc.input(i).model().set_layout("NCHW"); } + std::vector legacy_names(model->get_output_size()); for (size_t i = 0; i < model->get_output_size(); i++) { ov::Output const_output(model->output(i).get_node(), model->output(i).get_index()); @@ -137,13 +65,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& } } - ov::pass::Manager manager(get_pass_config()); - auto rewrite = manager.register_pass(); - if (!meanMap.empty()) { - rewrite->add_matcher(meanMap); - } - manager.run_passes(model); - preproc.build(); for (size_t i = 0; i < model->get_output_size(); i++) { diff --git a/src/inference/src/dev/preprocessing/preprocessing.hpp b/src/inference/src/dev/preprocessing/preprocessing.hpp index 15c2675db3e135..c0a8494f744b76 100644 --- a/src/inference/src/dev/preprocessing/preprocessing.hpp +++ b/src/inference/src/dev/preprocessing/preprocessing.hpp @@ -12,9 +12,6 @@ namespace pass { /** * @brief Converts the following preprocessing information to OpenVINO operations: - * - InferenceEngine::PreProcessInfo->PreProcessChannel::meanData -> Subtract - * - InferenceEngine::PreProcessInfo->PreProcessChannel::meanValue -> Subtract - * - InferenceEngine::PreProcessInfo->PreProcessChannel::stdScale -> Divide * * The order of operations is the following: * (x - mean) / stdScale diff --git a/src/inference/src/dev/remote_context_wrapper.hpp b/src/inference/src/dev/remote_context_wrapper.hpp deleted file mode 100644 index c061e4d38836c3..00000000000000 --- a/src/inference/src/dev/remote_context_wrapper.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "ie_ngraph_utils.hpp" -#include "ie_remote_context.hpp" -#include "openvino/runtime/iremote_context.hpp" -#include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/so_ptr.hpp" - -namespace ov { - -class RemoteContextWrapper : public InferenceEngine::RemoteContext { -private: - ov::SoPtr m_context; - -public: - RemoteContextWrapper(const ov::SoPtr& context) : m_context(context) {} - - const ov::SoPtr& get_context() const { - return m_context; - } - - std::string getDeviceName() const noexcept override { - return m_context->get_device_name(); - } - - InferenceEngine::RemoteBlob::Ptr CreateBlob(const InferenceEngine::TensorDesc& tensorDesc, - const InferenceEngine::ParamMap& params = {}) override { - return std::dynamic_pointer_cast(ov::tensor_to_blob( - m_context->create_tensor(InferenceEngine::details::convertPrecision(tensorDesc.getPrecision()), - tensorDesc.getBlockingDesc().getBlockDims(), - params), - false)); - } - - InferenceEngine::MemoryBlob::Ptr CreateHostBlob(const InferenceEngine::TensorDesc& tensorDesc) override { - return std::dynamic_pointer_cast(ov::tensor_to_blob( - m_context->create_host_tensor(InferenceEngine::details::convertPrecision(tensorDesc.getPrecision()), - tensorDesc.getBlockingDesc().getBlockDims()), - false)); - } - - InferenceEngine::ParamMap getParams() const override { - return m_context->get_property(); - } -}; - -} // namespace ov diff --git a/src/inference/src/ie_blob_common.cpp 
b/src/inference/src/ie_blob_common.cpp index e7e0d0ab040178..eb26b055a597bd 100644 --- a/src/inference/src/ie_blob_common.cpp +++ b/src/inference/src/ie_blob_common.cpp @@ -13,22 +13,6 @@ namespace InferenceEngine { IE_SUPPRESS_DEPRECATED_START -Blob* Blob::getHardwareBlob() { -#ifdef PROXY_PLUGIN_ENABLED - return ov::get_hardware_blob(this); -#else - return this; -#endif -} - -const Blob* Blob::getHardwareBlob() const { -#ifdef PROXY_PLUGIN_ENABLED - return ov::get_hardware_blob(this); -#else - return this; -#endif -} - void Blob::setShape(const SizeVector& dims) { // we don't want to allow setShape for: // 1. ROI cases diff --git a/src/inference/src/ie_compound_blob.cpp b/src/inference/src/ie_compound_blob.cpp deleted file mode 100644 index 8bafacb3d223b5..00000000000000 --- a/src/inference/src/ie_compound_blob.cpp +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief An implementation file for CompoundBlob - * @file ie_compound_blob.cpp - */ - -#include "ie_compound_blob.h" - -#include -#include -#include -#include - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { - -namespace { - -TensorDesc getBlobTensorDesc(const Blob::Ptr& blob) { - return blob->getTensorDesc(); -} - -TensorDesc verifyBatchedBlobInput(const std::vector& blobs) { - // verify invariants - if (blobs.empty()) { - IE_THROW() << "BatchedBlob cannot be created from empty vector of Blob, Please, make sure vector contains at " - "least one Blob"; - } - - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - const auto subBlobDesc = getBlobTensorDesc(blobs[0]); - - if (std::any_of(blobs.begin(), blobs.end(), [&subBlobDesc](const Blob::Ptr& blob) { - return getBlobTensorDesc(blob) != subBlobDesc; - })) { - IE_THROW() << "All blobs tensors should be equal"; - } - - auto subBlobLayout = subBlobDesc.getLayout(); - - auto blobLayout = Layout::ANY; - SizeVector blobDims = subBlobDesc.getDims(); - switch (subBlobLayout) { - case NCHW: - case NHWC: - case NCDHW: - case NDHWC: - case NC: - case CN: - blobLayout = subBlobLayout; - if (blobDims[0] != 1) { - IE_THROW() << "All blobs should be batch 1"; - } - blobDims[0] = blobs.size(); - break; - case C: - blobLayout = NC; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - case CHW: - blobLayout = NCHW; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - case HWC: - blobLayout = NHWC; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - default: - IE_THROW() << "Unsupported sub-blobs layout - to be one of: [NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW]"; - } - - return TensorDesc{subBlobDesc.getPrecision(), blobDims, blobLayout}; -} - -} // anonymous namespace - -CompoundBlob::CompoundBlob(const TensorDesc& tensorDesc) : Blob(tensorDesc) {} - -CompoundBlob::CompoundBlob(const std::vector& blobs) : CompoundBlob(TensorDesc{}) { - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - // Check that none of the blobs provided is compound. 
If at least one of them is compound, throw - // an exception because recursive behavior is not allowed - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob->is(); - })) { - IE_THROW() << "Cannot create a compound blob from other compound blobs"; - } - - this->_blobs = blobs; -} - -CompoundBlob::CompoundBlob(std::vector&& blobs) : CompoundBlob(TensorDesc{}) { - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - // Check that none of the blobs provided is compound. If at least one of them is compound, throw - // an exception because recursive behavior is not allowed - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob->is(); - })) { - IE_THROW() << "Cannot create a compound blob from other compound blobs"; - } - - this->_blobs = std::move(blobs); -} - -size_t CompoundBlob::byteSize() const { - return 0; -} - -size_t CompoundBlob::element_size() const { - return 0; -} - -void CompoundBlob::allocate() noexcept {} - -bool CompoundBlob::deallocate() noexcept { - return false; -} - -LockedMemory CompoundBlob::buffer() noexcept { - return LockedMemory(nullptr, nullptr, 0); -} - -LockedMemory CompoundBlob::cbuffer() const noexcept { - return LockedMemory(nullptr, nullptr, 0); -} - -size_t CompoundBlob::size() const noexcept { - return _blobs.size(); -} - -Blob::Ptr CompoundBlob::getBlob(size_t i) const noexcept { - if (i >= _blobs.size()) { - return nullptr; - } - return _blobs[i]; -} - -Blob::Ptr CompoundBlob::createROI(const ROI& roi) const { - std::vector roiBlobs; - roiBlobs.reserve(_blobs.size()); - - for (const auto& blob : _blobs) { - roiBlobs.push_back(blob->createROI(roi)); - } - - return std::make_shared(std::move(roiBlobs)); -} - -const std::shared_ptr& CompoundBlob::getAllocator() const noexcept { - static std::shared_ptr _allocator = nullptr; - return _allocator; -}; - -BatchedBlob::BatchedBlob(const std::vector& blobs) : CompoundBlob(verifyBatchedBlobInput(blobs)) { - this->_blobs = blobs; -} - -BatchedBlob::BatchedBlob(std::vector&& blobs) : CompoundBlob(verifyBatchedBlobInput(blobs)) { - this->_blobs = std::move(blobs); -} - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 5da90bde36e353..f81f49218b2711 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -28,7 +28,6 @@ #include "ie_network_reader.hpp" #include "ie_ngraph_utils.hpp" #include "ie_plugin_config.hpp" -#include "ie_remote_context.hpp" #include "itt.hpp" #include "openvino/core/except.hpp" #include "openvino/core/so_extension.hpp" @@ -150,22 +149,6 @@ ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, } } -ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, - RemoteContext::Ptr context, - const std::map& config) { - auto valid = ::CheckStatic(network); - try { - OPENVINO_ASSERT(std::get<0>(valid), - "InferenceEngine::Core::LoadNetwork doesn't support inputs having dynamic shapes. ", - "Use ov::Core::compile_model API instead. 
Dynamic inputs are :", - std::get<1>(valid)); - auto exec = _impl->LoadNetwork(network, std::dynamic_pointer_cast(context), config); - return {exec._ptr, exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, const std::string& deviceName, const std::map& config) { @@ -191,31 +174,6 @@ ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, const std::map } } -RemoteContext::Ptr Core::CreateContext(const std::string& deviceName, const ParamMap& params) { - try { - return _impl->CreateContext(deviceName, params); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -RemoteContext::Ptr Core::GetDefaultContext(const std::string& deviceName) { - if (deviceName.find("HETERO") == 0) { - IE_THROW() << "HETERO device does not support remote context"; - } - if (deviceName.find("MULTI") == 0) { - IE_THROW() << "MULTI device does not support remote context"; - } - if (deviceName.find("AUTO") == 0) { - IE_THROW() << "AUTO device does not support remote context"; - } - try { - return _impl->GetDefaultContext(deviceName); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - void Core::AddExtension(IExtensionPtr extension, const std::string& deviceName_) { if (deviceName_.find("HETERO") == 0) { IE_THROW() << "HETERO device does not support extensions. Please, set extensions directly to fallback devices"; @@ -296,30 +254,6 @@ ExecutableNetwork Core::ImportNetwork(std::istream& networkModel) { } } -ExecutableNetwork Core::ImportNetwork(std::istream& networkModel, - const RemoteContext::Ptr& context, - const std::map& config) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::ImportNetwork"); - - if (context == nullptr) { - IE_THROW() << "Remote context is null"; - } - - std::string deviceName_ = context->getDeviceName(); - ov::DeviceIDParser device(deviceName_); - std::string deviceName = device.get_device_name(); - - try { - auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config)); - auto exec = - _impl->get_plugin(deviceName) - .import_model(networkModel, ov::legacy_convert::convert_remote_context(context), parsed._config); - return {ov::legacy_convert::convert_compiled_model(exec), exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - QueryNetworkResult Core::QueryNetwork(const CNNNetwork& network, const std::string& deviceName, const std::map& config) const { diff --git a/src/inference/src/ie_remote_context.cpp b/src/inference/src/ie_remote_context.cpp deleted file mode 100644 index 2bab079fbaf279..00000000000000 --- a/src/inference/src/ie_remote_context.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_remote_context.hpp" - -#include -#include - -#include "blob_factory.hpp" -#include "dev/converter_utils.hpp" -#include "dev/remote_context_wrapper.hpp" -#include "openvino/runtime/remote_context.hpp" -#ifdef PROXY_PLUGIN_ENABLED -# include "openvino/proxy/plugin.hpp" -#endif - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START -MemoryBlob::Ptr RemoteContext::CreateHostBlob(const TensorDesc& tensorDesc) { - auto blob = std::dynamic_pointer_cast(make_blob_with_precision(tensorDesc)); - if (!blob) - IE_THROW(NotAllocated) << "Failed to create host blob in remote context for " << getDeviceName() << " device"; - - return blob; -} - -const std::shared_ptr RemoteContext::GetHardwareContext() { -#ifdef 
PROXY_PLUGIN_ENABLED - if (auto wrapper = dynamic_cast(this)) { - auto ov_context = wrapper->get_context(); - auto hw_context = ov::proxy::get_hardware_context(ov_context); - return ov::legacy_convert::convert_remote_context(hw_context._ptr); - } -#endif - return shared_from_this(); -} - -const std::shared_ptr RemoteContext::GetHardwareContext() const { -#ifdef PROXY_PLUGIN_ENABLED - if (auto wrapper = dynamic_cast(this)) { - auto ov_context = wrapper->get_context(); - auto hw_context = ov::proxy::get_hardware_context(ov_context); - return ov::legacy_convert::convert_remote_context(hw_context._ptr); - } -#endif - return shared_from_this(); -} -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_transformations.cpp b/src/inference/src/ie_transformations.cpp deleted file mode 100644 index 1b1ba8212ebc6b..00000000000000 --- a/src/inference/src/ie_transformations.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_transformations.hpp" - -#include "ngraph/pass/low_latency.hpp" -#include "ngraph/pass/manager.hpp" - -using namespace InferenceEngine; - -void InferenceEngine::lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer) { - auto function = network.getFunction(); - ngraph::pass::Manager manager; - manager.register_pass(use_const_initializer); - manager.run_passes(function); -} diff --git a/src/inference/src/cpp/ie_remote_context.cpp b/src/inference/src/remote_context.cpp similarity index 98% rename from src/inference/src/cpp/ie_remote_context.cpp rename to src/inference/src/remote_context.cpp index 10dde33bb6158b..e2a2bc61a0c731 100644 --- a/src/inference/src/cpp/ie_remote_context.cpp +++ b/src/inference/src/remote_context.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ie_remote_context.hpp" +#include "openvino/runtime/remote_context.hpp" #include #include "any_copy.hpp" -#include "ie_remote_blob.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/remote_context.hpp" #define OV_REMOTE_CONTEXT_STATEMENT(...) 
    OPENVINO_ASSERT(_impl != nullptr, "RemoteContext was not initialized."); \
diff --git a/src/inference/src/shared_object_loader.cpp b/src/inference/src/shared_object_loader.cpp
deleted file mode 100644
index 748b0b4864290c..00000000000000
--- a/src/inference/src/shared_object_loader.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "details/ie_so_loader.h"
-#include "ie_common.h"
-#include "openvino/util/file_util.hpp"
-#include "openvino/util/shared_object.hpp"
-
-IE_SUPPRESS_DEPRECATED_START
-
-namespace InferenceEngine {
-namespace details {
-
-SharedObjectLoader::SharedObjectLoader(const std::shared_ptr<void>& so) : _so(so) {}
-
-#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
-SharedObjectLoader::SharedObjectLoader(const wchar_t* pluginName)
-    : SharedObjectLoader(ov::util::wstring_to_string(pluginName).c_str()) {}
-#endif
-
-SharedObjectLoader::SharedObjectLoader(const char* pluginName) : _so{nullptr} {
-    try {
-        _so = ov::util::load_shared_object(pluginName);
-    } catch (const std::runtime_error& ex) {
-        IE_THROW(GeneralError) << ex.what();
-    }
-}
-
-SharedObjectLoader::~SharedObjectLoader() {}
-
-void* SharedObjectLoader::get_symbol(const char* symbolName) const {
-    try {
-        return ov::util::get_symbol(_so, symbolName);
-    } catch (const std::runtime_error& ex) {
-        IE_THROW(NotFound) << ex.what();
-    }
-}
-
-std::shared_ptr<void> SharedObjectLoader::get() const {
-    return _so;
-}
-
-}  // namespace details
-}  // namespace InferenceEngine
-
-IE_SUPPRESS_DEPRECATED_END
diff --git a/src/inference/tests/functional/async_infer_request_test.cpp b/src/inference/tests/functional/async_infer_request_test.cpp
index b6f4f4143766da..68b97e5d69bb6d 100644
--- a/src/inference/tests/functional/async_infer_request_test.cpp
+++ b/src/inference/tests/functional/async_infer_request_test.cpp
@@ -23,11 +23,6 @@ TEST(InferRequestCPPTests, throwsOnUninitializedGetBlob) {
     ASSERT_THROW(req.GetBlob({}), InferenceEngine::NotAllocated);
 }
 
-TEST(InferRequestCPPTests, throwsOnUninitializedGetPreProcess) {
-    InferRequest req;
-    ASSERT_THROW(req.GetPreProcess({}), InferenceEngine::NotAllocated);
-}
-
 TEST(InferRequestCPPTests, throwsOnUninitializedInfer) {
     InferRequest req;
     ASSERT_THROW(req.Infer(), InferenceEngine::NotAllocated);
diff --git a/src/inference/tests/functional/blob_copy_test.cpp b/src/inference/tests/functional/blob_copy_test.cpp
deleted file mode 100644
index 0807d5c24d9e79..00000000000000
--- a/src/inference/tests/functional/blob_copy_test.cpp
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-
-#include
-#include
-#include
-
-using namespace ::testing;
-using namespace InferenceEngine;
-
-IE_SUPPRESS_DEPRECATED_START
-
-using ChannelNum = size_t;
-using BatchNum = size_t;
-using PrecisionType = InferenceEngine::Precision::ePrecision;
-using IsInterleaved = bool;  // true = interleaved, false = deinterleaved.
-using Dims =
-    std::vector<size_t>;  // dimensions are in the form of (N x C x D1 x D2 ... Dn), so Dims is vector (D1 x D2 ... Dn)
-
-namespace {
-
-InferenceEngine::Layout setLayout(IsInterleaved isInterleaved, int dimsSize) {
-    if (dimsSize == 3) {
-        return (isInterleaved) ? InferenceEngine::Layout::NDHWC : InferenceEngine::Layout::NCDHW;
-    } else if (dimsSize == 2) {
-        return (isInterleaved) ? InferenceEngine::Layout::NHWC : InferenceEngine::Layout::NCHW;
-    }
-    IE_THROW() << "Can't set layout";
-}
-
-// Support only for 4d and 5d blobs
-SizeVector SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) {
-    if (dims.size() == 2) {
-        return SizeVector{batchNum, channelNum, dims[0], dims[1]};
-    } else if (dims.size() == 3) {
-        return SizeVector{batchNum, channelNum, dims[0], dims[1], dims[2]};
-    }
-    IE_THROW() << "Can't set dimVector";
-}
-
-// For FP16 and Q78 precision we use int16_t type
-InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision,
-                                      SizeVector dimsVector,
-                                      InferenceEngine::Layout layout) {
-    InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout);
-    switch (precision) {
-    case InferenceEngine::Precision::FP32:
-        return make_shared_blob<float>(tensorDesc);
-    case InferenceEngine::Precision::FP64:
-        return make_shared_blob<double>(tensorDesc);
-    case InferenceEngine::Precision::FP16:
-    case InferenceEngine::Precision::I16:
-    case InferenceEngine::Precision::Q78:
-        return make_shared_blob<int16_t>(tensorDesc);
-    case InferenceEngine::Precision::I32:
-        return make_shared_blob<int32_t>(tensorDesc);
-    case InferenceEngine::Precision::U32:
-        return make_shared_blob<uint32_t>(tensorDesc);
-    case InferenceEngine::Precision::I64:
-        return make_shared_blob<int64_t>(tensorDesc);
-    case InferenceEngine::Precision::U64:
-        return make_shared_blob<uint64_t>(tensorDesc);
-    case InferenceEngine::Precision::U16:
-        return make_shared_blob<uint16_t>(tensorDesc);
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::I8:
-    case InferenceEngine::Precision::BIN:
-        return make_shared_blob<int8_t>(tensorDesc);
-    case InferenceEngine::Precision::U4:
-    case InferenceEngine::Precision::U8:
-        return make_shared_blob<uint8_t>(tensorDesc);
-    default:
-        IE_THROW() << "Unsupported precision";
-    }
-}
-
-// returns a random value in the range [0 , elem)
-size_t GenerateRandom(size_t elem) {
-    size_t result;
-    do {
-        result = static_cast<size_t>(std::floor(std::rand() / static_cast<float>(RAND_MAX * elem)));
-    } while (result >= elem);
-    return result;
-}
-
-// returns index of random element of the blob:
-// dims is the blob shape, e.g. {1, 3, 640, 480}
-// random index[i] lays between 0 and dims[i]-1
-SizeVector GenerateRandomVector(SizeVector dims) {
-    SizeVector idx(dims.size());
-
-    for (size_t i = 0; i < dims.size(); ++i) {
-        idx[i] = GenerateRandom(dims[i]);
-    }
-    return idx;
-}
-
-void PrintParams(InferenceEngine::Layout layout,
-                 SizeVector dims,
-                 std::string blobType,
-                 InferenceEngine::Precision precision) {
-    std::cout << blobType << "Blob params: " << layout << ", precision: " << precision << ", dims: {";
-    for (size_t i = 0; i < dims.size(); i++) {
-        std::cout << (i > 0 ? ", " : "") << dims[i];
-    }
-    std::cout << "}" << std::endl;
-}
-
-// For FP16 and Q78 precision we use int16_t type
-template <typename T>
-void FillBlobRandom(Blob::Ptr& inputBlob) {
-    srand(1);
-    auto inputBlobData = inputBlob->buffer().as<T*>();
-    for (size_t i = 0; i < inputBlob->size(); i++) {
-        inputBlobData[i] = (T)(GenerateRandom(RAND_MAX) / static_cast<float>(RAND_MAX) * 100);
-    }
-}
-
-// For FP16 and Q78 precision we use int16_t type
-void FillBlob(Blob::Ptr& inputBlob) {
-    auto precision = inputBlob->getTensorDesc().getPrecision();
-    switch (precision) {
-    case InferenceEngine::Precision::FP32:
-        return FillBlobRandom<float>(inputBlob);
-    case InferenceEngine::Precision::FP64:
-        return FillBlobRandom<double>(inputBlob);
-    case InferenceEngine::Precision::FP16:
-    case InferenceEngine::Precision::I16:
-    case InferenceEngine::Precision::Q78:
-        return FillBlobRandom<int16_t>(inputBlob);
-    case InferenceEngine::Precision::I32:
-        return FillBlobRandom<int32_t>(inputBlob);
-    case InferenceEngine::Precision::U32:
-        return FillBlobRandom<uint32_t>(inputBlob);
-    case InferenceEngine::Precision::I64:
-        return FillBlobRandom<int64_t>(inputBlob);
-    case InferenceEngine::Precision::U64:
-        return FillBlobRandom<uint64_t>(inputBlob);
-    case InferenceEngine::Precision::U16:
-        return FillBlobRandom<uint16_t>(inputBlob);
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::I8:
-    case InferenceEngine::Precision::BIN:
-        return FillBlobRandom<int8_t>(inputBlob);
-    case InferenceEngine::Precision::U4:
-    case InferenceEngine::Precision::U8:
-        return FillBlobRandom<uint8_t>(inputBlob);
-    default:
-        IE_THROW() << "Cant fill blob with \"" << precision << "\" precision\n";
-    }
-}
-
-template <typename T>
-T GetElem(Blob::Ptr& blob, SizeVector idx) {
-    T* src = blob->buffer().as<T*>() + blob->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-    auto blobLayout = blob->getTensorDesc().getLayout();
-
-    SizeVector strides = blob->getTensorDesc().getBlockingDesc().getStrides();
-    if (blobLayout == NHWC || blobLayout == NDHWC) {
-        for (size_t i = 2; i < strides.size(); i++) {
-            std::swap(strides[1], strides[i]);
-        }
-    }
-
-    size_t offset = 0;
-
-    for (size_t i = 0; i < idx.size(); i++) {
-        offset += idx[i] * strides[i];
-    }
-
-    return src[offset];
-}
-
-int SetExperimentsNum(int blobSize) {
-    if (blobSize < 1000) {
-        return blobSize;
-    } else if (blobSize < 10000) {
-        return 1000;
-    } else if (blobSize < 100000) {
-        return blobSize / 10;
-    } else {
-        return blobSize / 100;
-    }
-}
-
-template <typename T>
-bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
-    EXPECT_TRUE(srcBlob->size() == dstBlob->size());
-    int experimentsNum = SetExperimentsNum(static_cast<int>(srcBlob->size()));
-    int errorsCount = 0;
-    for (; experimentsNum > 0; --experimentsNum) {
-        SizeVector randomElemIdx = GenerateRandomVector(srcBlob->getTensorDesc().getDims());
-        auto srcElem = GetElem<T>(srcBlob, randomElemIdx);
-        auto dstElem = GetElem<T>(dstBlob, randomElemIdx);
-        if (srcElem != dstElem) {
-            if (errorsCount < 10) {
-                errorsCount++;
-                std::cout << "ERROR: srcElem = " << srcElem << ", dstElem = " << dstElem << std::endl;
-            } else {
-                errorsCount++;
-            }
-        }
-    }
-    if (errorsCount > 0) {
-        std::cout << "errorsCount = " << errorsCount << std::endl;
-    }
-    return errorsCount == 0;
-}
-
-bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
-    switch (srcBlob->getTensorDesc().getPrecision()) {
-    case InferenceEngine::Precision::FP32:
-        return IsCorrectBlobCopy_Impl<float>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::FP64:
-        return IsCorrectBlobCopy_Impl<double>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::FP16:
-    case InferenceEngine::Precision::I16:
-    case InferenceEngine::Precision::Q78:
-        return IsCorrectBlobCopy_Impl<int16_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I32:
-        return IsCorrectBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U32:
-        return IsCorrectBlobCopy_Impl<uint32_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I64:
-        return IsCorrectBlobCopy_Impl<int64_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U64:
-        return IsCorrectBlobCopy_Impl<uint64_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U16:
-        return IsCorrectBlobCopy_Impl<uint16_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::I8:
-    case InferenceEngine::Precision::BIN:
-        return IsCorrectBlobCopy_Impl<int8_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U4:
-    case InferenceEngine::Precision::U8:
-        return IsCorrectBlobCopy_Impl<uint8_t>(srcBlob, dstBlob);
-    default:
-        return false;
-    }
-}
-
-}  // namespace
-
-using BlobCopyTest =
-    ::testing::TestWithParam<std::tuple<IsInterleaved, IsInterleaved, BatchNum, ChannelNum, Dims, PrecisionType>>;
-
-TEST_P(BlobCopyTest, BlobCopy) {
-    IsInterleaved srcIsInterleaved = get<0>(GetParam());
-    IsInterleaved dstIsInterleaved = get<1>(GetParam());
-    BatchNum batchNum = get<2>(GetParam());
-    ChannelNum channelNum = get<3>(GetParam());
-    Dims dims = get<4>(GetParam());
-    PrecisionType precisionType = get<5>(GetParam());
-
-    SizeVector srcDims = SetDimVector(batchNum, channelNum, dims);
-    SizeVector dstDims = SetDimVector(batchNum, channelNum, dims);
-
-    InferenceEngine::Layout srcLayout = setLayout(srcIsInterleaved, static_cast<int>(dims.size()));
-    InferenceEngine::Layout dstLayout = setLayout(dstIsInterleaved, static_cast<int>(dims.size()));
-
-    PrintParams(srcLayout, srcDims, "src", precisionType);
-    PrintParams(dstLayout, dstDims, "dst", precisionType);
-
-    Blob::Ptr srcBlob = createBlob(precisionType, srcDims, srcLayout);
-    Blob::Ptr dstBlob = createBlob(precisionType, dstDims, dstLayout);
-
-    srcBlob->allocate();
-    dstBlob->allocate();
-
-    FillBlob(srcBlob);
-
-    auto start = std::chrono::high_resolution_clock::now();
-    blob_copy(srcBlob, dstBlob);
-    auto finish = std::chrono::high_resolution_clock::now();
-
-    std::cout << "Blob_copy execution time : "
-              << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << " micros"
-              << std::endl;
-
-    ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is not correct";
-}
-
-namespace {
-
-// is interleaved srcBlob?
-std::vector<IsInterleaved> BlobCopy_srcLayoutParam = {
-    true,
-    false,
-};
-// is interleaved dstBlob?
-std::vector<IsInterleaved> BlobCopy_dstLayoutParam = {
-    false,
-    true,
-};
-
-std::vector<BatchNum> BlobCopy_BatchNum = {
-    1,
-    3,
-};
-
-std::vector<ChannelNum> BlobCopy_ChannelNum = {
-    3,
-    7,
-};
-
-std::vector<Dims> BlobCopy_Dims = {
-    {{10, 20, 30}},
-    {{60, 80}},
-};
-
-// The 'blob_copy(4/5)_d' function is a template with the parameter-list
-// FP32 is used for cases with the following accuracy: FP32, I32, U32
-// FP16 is used for cases with the following accuracy: FP16, U16, I16
-// U8 is used for cases with the following accuracy: U8, I8
-// Cases with other precision are not supported
-std::vector<PrecisionType> BlobCopy_PrecisionParams = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16,
-    InferenceEngine::Precision::U8,
-    InferenceEngine::Precision::I8,
-    InferenceEngine::Precision::U16,
-    InferenceEngine::Precision::I16,
-    InferenceEngine::Precision::U32,
-    InferenceEngine::Precision::I32,
-};
-
-}  // namespace
-
-INSTANTIATE_TEST_SUITE_P(accuracy,
-                         BlobCopyTest,
-                         ::testing::Combine(::testing::ValuesIn(BlobCopy_srcLayoutParam),
-                                            ::testing::ValuesIn(BlobCopy_dstLayoutParam),
-                                            ::testing::ValuesIn(BlobCopy_BatchNum),
-                                            ::testing::ValuesIn(BlobCopy_ChannelNum),
-                                            ::testing::ValuesIn(BlobCopy_Dims),
-                                            ::testing::ValuesIn(BlobCopy_PrecisionParams)));
-
-namespace {
-
-template <typename T>
-bool IsEqualBlobCopy_Impl(Blob::Ptr& ref, Blob::Ptr& dst) {
-    EXPECT_TRUE(ref->size() == dst->size());
-    auto refData = ref->buffer().as<T*>();
-    auto dstData = dst->buffer().as<T*>();
-    return (std::equal(dstData, dstData + dst->size(), refData, [](T left, T right) {
-        return left == right;
-    }));
-}
-
-bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
-    switch (srcBlob->getTensorDesc().getPrecision()) {
-    case InferenceEngine::Precision::FP32:
-        return IsEqualBlobCopy_Impl<float>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::FP64:
-        return IsEqualBlobCopy_Impl<double>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::FP16:
-    case InferenceEngine::Precision::I16:
-    case InferenceEngine::Precision::Q78:
-        return IsEqualBlobCopy_Impl<int16_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U32:
-        IsEqualBlobCopy_Impl<uint32_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I32:
-        IsEqualBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U64:
-        return IsEqualBlobCopy_Impl<uint64_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I64:
-        return IsEqualBlobCopy_Impl<int64_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::I8:
-    case InferenceEngine::Precision::BIN:
-        return IsEqualBlobCopy_Impl<int8_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U4:
-    case InferenceEngine::Precision::U8:
-        return IsEqualBlobCopy_Impl<uint8_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U16:
-        return IsEqualBlobCopy_Impl<uint16_t>(srcBlob, dstBlob);
-    default:
-        return false;
-    }
-}
-
-template <typename T>
-void copy3DBlobsAllBytesWithReLayout(const Blob::Ptr& srcLayoutBlob, Blob::Ptr& trgLayoutBlob) {
-    auto srcData = srcLayoutBlob->buffer().as<T*>();
-    auto dstData = trgLayoutBlob->buffer().as<T*>();
-    auto& dims = srcLayoutBlob->getTensorDesc().getDims();
-    size_t C = dims[1];
-    size_t H = dims[2];
-    size_t W = dims[3];
-    for (size_t c = 0; c < C; ++c) {
-        for (size_t h = 0; h < H; ++h) {
-            for (size_t w = 0; w < W; ++w) {
-                size_t src_idx = c * H * W + h * W + w;
-                size_t dst_idx = h * W * C + w * C + c;
-                dstData[dst_idx] = srcData[src_idx];
-            }
-        }
-    }
-}
-
-// For FP16 and Q78 precision we use int16_t type
-void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob::Ptr& trgLayoutBlob) {
-    auto precision = srcLayoutBlob->getTensorDesc().getPrecision();
-    switch (precision) {
-    case InferenceEngine::Precision::FP32:
-        return copy3DBlobsAllBytesWithReLayout<float>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::FP64:
-        return copy3DBlobsAllBytesWithReLayout<double>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::FP16:
-    case InferenceEngine::Precision::I16:
-    case InferenceEngine::Precision::Q78:
-        return copy3DBlobsAllBytesWithReLayout<int16_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::I32:
-        return copy3DBlobsAllBytesWithReLayout<int32_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::U32:
-        return copy3DBlobsAllBytesWithReLayout<uint32_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::U64:
-        return copy3DBlobsAllBytesWithReLayout<uint64_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::I64:
-        return copy3DBlobsAllBytesWithReLayout<int64_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::U16:
-        return copy3DBlobsAllBytesWithReLayout<uint16_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::I4:
-    case InferenceEngine::Precision::I8:
-    case InferenceEngine::Precision::BIN:
-        return copy3DBlobsAllBytesWithReLayout<int8_t>(srcLayoutBlob, trgLayoutBlob);
-    case InferenceEngine::Precision::U4:
-    case InferenceEngine::Precision::U8:
-        return copy3DBlobsAllBytesWithReLayout<uint8_t>(srcLayoutBlob, trgLayoutBlob);
-    default:
-        IE_THROW() << "Cant copy blob with \"" << precision << "\" precision\n";
-    }
-}
-
-std::vector<Dims> BlobCopySetLayout_Dims = {
-    {{1, 10, 10}},
-    {{2, 100, 100}},
-    {{3, 224, 224}},
-};
-
-std::vector<Precision> BlobCopySetLayout_Precisions = {
-    Precision::U8,
-    Precision::U16,
-    InferenceEngine::Precision::FP32,
-};
-
-}  // namespace
-
-using BlobCopySetLayoutTest = ::testing::TestWithParam<std::tuple<Dims, Precision>>;
-
-// test after [IE] Fix TensorDesc::setLayout method, 735d275b47c4fd0c7b0db5c8f9fe8705967270f0
-TEST_P(BlobCopySetLayoutTest, BlobCopyWithNCHW_To_NHWC_After_setLayout) {
-    const size_t C_sz = get<0>(GetParam())[0];
-    const size_t H_sz = get<0>(GetParam())[1];
-    const size_t W_sz = get<0>(GetParam())[2];
-    const Precision precision = get<1>(GetParam());
-    const Layout src_layout = Layout::NCHW, dst_layout = Layout::NHWC;
-
-    auto src = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout);
-    src->allocate();
-    src->getTensorDesc().setLayout(src_layout);
-
-    FillBlob(src);
-
-    auto dst = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout);
-    dst->allocate();
-
-    blob_copy(src, dst);
-
-    auto ref = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout);
-    ref->allocate();
-
-    copy3DBlobsAllBytesWithReLayoutWrapper(src, ref);
-
-    ASSERT_TRUE(IsEqualBlobCopy(ref, dst)) << "'blob_copy' after setLayout function is not correct";
-}
-
-INSTANTIATE_TEST_SUITE_P(accuracy,
-                         BlobCopySetLayoutTest,
-                         ::testing::Combine(::testing::ValuesIn(BlobCopySetLayout_Dims),
-                                            ::testing::ValuesIn(BlobCopySetLayout_Precisions)));
diff --git a/src/inference/tests/functional/caseless_tests.cpp b/src/inference/tests/functional/caseless_tests.cpp
deleted file mode 100644
index 0ba008f4798373..00000000000000
--- a/src/inference/tests/functional/caseless_tests.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-
-#include
-
-#include "caseless.hpp"
-#include "debug.h"
-
-using namespace std;
-using namespace InferenceEngine::details;
-
-using CaselessTests = ::testing::Test;
-
-TEST_F(CaselessTests, emptyAreEqual) {
-    ASSERT_TRUE(InferenceEngine::details::equal("", ""));
-}
-
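[Editor's aside, not part of the patch: the deleted caseless_tests.cpp exercises the case-insensitive string helpers from caseless.hpp. For readers unfamiliar with them, a minimal sketch of the idea, assuming a std::map keyed with a case-insensitive comparator; the real header's implementation may differ.]

// Hedged sketch: orders strings as if both sides were lower-cased, so
// "Abc", "ABC" and "abc" all address the same map entry, as the deleted
// canFindCaslessInMap assertions expect. Names here are illustrative only.
#include <algorithm>
#include <cctype>
#include <map>
#include <string>

struct CaselessLess {
    bool operator()(const std::string& a, const std::string& b) const {
        return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end(),
                                            [](unsigned char l, unsigned char r) {
                                                return std::tolower(l) < std::tolower(r);
                                            });
    }
};

using caseless_demo_map = std::map<std::string, int, CaselessLess>;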
-TEST_F(CaselessTests, canIgnoreCase) {
-    ASSERT_TRUE(InferenceEngine::details::equal("abc", "ABC"));
-}
-
-TEST_F(CaselessTests, emptyIsNotEqualNotEmpty) {
-    ASSERT_FALSE(InferenceEngine::details::equal("", "abc"));
-}
-
-TEST_F(CaselessTests, canFindCaslessInMap) {
-    caseless_map<std::string, int> storage = {
-        {"Abc", 1},
-        {"bC", 2},
-        {"AbcD", 3},
-    };
-    ASSERT_EQ(storage["abc"], 1);
-    ASSERT_EQ(storage["ABC"], 1);
-    ASSERT_EQ(storage["BC"], 2);
-    ASSERT_EQ(storage["aBCd"], 3);
-    ASSERT_EQ(storage.find("aBd"), storage.end());
-    ASSERT_EQ(storage.find(""), storage.end());
-}
-
-TEST_F(CaselessTests, canFindCaslessInUnordered) {
-    caseless_unordered_map<std::string, int> storage = {
-        {"Abc", 1},
-        {"bC", 2},
-        {"AbcD", 3},
-    };
-    ASSERT_EQ(storage["abc"], 1);
-    ASSERT_EQ(storage["ABC"], 1);
-    ASSERT_EQ(storage["BC"], 2);
-    ASSERT_EQ(storage["aBCd"], 3);
-    ASSERT_EQ(storage.find("aBd"), storage.end());
-    ASSERT_EQ(storage.find(""), storage.end());
-}
diff --git a/src/inference/tests/functional/cnn_network_test.cpp b/src/inference/tests/functional/cnn_network_test.cpp
index 497051aa92a88e..171a6ee0845253 100644
--- a/src/inference/tests/functional/cnn_network_test.cpp
+++ b/src/inference/tests/functional/cnn_network_test.cpp
@@ -112,24 +112,6 @@ TEST_F(CNNNetworkTests, throwsHasDynamicInputs) {
     }
 }
 
-TEST_F(CNNNetworkTests, throwsHasDynamicInputs_remoteContext) {
-    auto model = CNNNetworkTests_create_model();
-    CNNNetwork network(model);
-    InferenceEngine::Core core;
-    try {
-        core.LoadNetwork(network, InferenceEngine::RemoteContext::Ptr());
-        FAIL() << "LoadNetwork with dynamic inputs shall throw";
-    } catch (const InferenceEngine::Exception& e) {
-        EXPECT_TRUE(std::string(e.what()).find("InferenceEngine::Core::LoadNetwork") != std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p1_1") != std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p1_2") != std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p2_1") != std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p2_2") != std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p3_1") == std::string::npos) << e.what();
-        EXPECT_TRUE(std::string(e.what()).find("p3_2") == std::string::npos) << e.what();
-    }
-}
-
 TEST_F(CNNNetworkTests, throwsHasDynamicInputs_queryNetwork) {
     auto model = CNNNetworkTests_create_model();
     CNNNetwork network(model);
diff --git a/src/inference/tests/functional/executable_network.cpp b/src/inference/tests/functional/executable_network.cpp
index 84b77740826caf..5d741e876749c4 100644
--- a/src/inference/tests/functional/executable_network.cpp
+++ b/src/inference/tests/functional/executable_network.cpp
@@ -50,8 +50,3 @@ TEST(ExecutableNetworkTests, throwsOnUninitializedGetMetric) {
     ExecutableNetwork exec;
     ASSERT_THROW(exec.GetMetric({}), InferenceEngine::NotAllocated);
 }
-
-TEST(ExecutableNetworkTests, throwsOnUninitializedGetContext) {
-    ExecutableNetwork exec;
-    ASSERT_THROW(exec.GetContext(), InferenceEngine::NotAllocated);
-}
diff --git a/src/inference/tests/functional/ngraph_reshape_tests.cpp b/src/inference/tests/functional/ngraph_reshape_tests.cpp
deleted file mode 100644
index caca91383fc846..00000000000000
--- a/src/inference/tests/functional/ngraph_reshape_tests.cpp
+++ /dev/null
@@ -1,1282 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include -#include -#include -#include - -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/data_utils.hpp" -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_common.hpp" -#include "ie_common.h" -#include "openvino/core/partial_shape.hpp" -#include "openvino/core/shape.hpp" - -using namespace testing; -using namespace InferenceEngine; - -using NGraphReshapeTests = ov::test::TestsCommon; - -TEST_F(NGraphReshapeTests, getBatchSize) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - CNNNetwork cnnNetwork(ngraph); - ASSERT_EQ(1, cnnNetwork.getBatchSize()); -} - -TEST_F(NGraphReshapeTests, ReshapedDynamicShapeLayout) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({-1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - param->set_friendly_name("A"); - auto relu = std::make_shared(param); - - ngraph::ParameterVector params = {param}; - - ngraph = std::make_shared(relu, params); - } - - CNNNetwork cnnNetwork(ngraph); - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{0, 3, 22, 22})); - - ICNNNetwork::InputShapes new_shape; - new_shape["A"] = {1, 3, 22, 22}; - cnnNetwork.reshape(new_shape); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{1, 3, 22, 22})); -} - -TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLU) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - param->set_friendly_name("data"); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - CNNNetwork cnnNetwork(ngraph::clone_function(*ngraph)); - std::map shapes; - shapes["data"] = {1, 3, 25, 25}; - - ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); - - auto changedFunction = cnnNetwork.getFunction(); - ASSERT_NE(nullptr, changedFunction); - ASSERT_EQ(changedFunction->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(changedFunction->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["data"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25})); -} - -TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUWithoutCloneFunction) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - 
auto param = std::make_shared(type, shape); - param->set_friendly_name("data"); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - CNNNetwork cnnNetwork(ngraph); - std::map shapes; - shapes["data"] = {1, 3, 25, 25}; - - ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); - - auto changedFunction = cnnNetwork.getFunction(); - ASSERT_NE(nullptr, changedFunction); - ASSERT_EQ(changedFunction->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(changedFunction->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["data"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25})); -} - -class CustomTestOp : public ngraph::op::Op { -public: - OPENVINO_OP("CustomTestLayer", "test_extension"); - - CustomTestOp() = default; - CustomTestOp(const ngraph::Output& arg, bool test1, int64_t test2) - : Op({arg}), - test1(test1), - test2(test2) { - constructor_validate_and_infer_types(); - } - - void validate_and_infer_types() override { - auto input_pshape = get_input_partial_shape(0); - if (input_pshape.is_static()) { - auto input_shape = input_pshape.to_shape(); - ngraph::Shape output_shape(input_shape); - for (size_t i = 0; i < input_shape.size(); ++i) { - output_shape[i] = input_shape[i] * test2 + (test1 ? 
0 : 1); - } - set_output_type(0, get_input_element_type(0), ngraph::PartialShape(output_shape)); - } else { - set_output_type(0, get_input_element_type(0), ngraph::PartialShape::dynamic()); - } - } - - std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& new_args) const override { - if (new_args.size() != 1) { - OPENVINO_THROW("Incorrect number of new arguments"); - } - - return std::make_shared(new_args.at(0), test1, test2); - } - - bool visit_attributes(ngraph::AttributeVisitor& visitor) override { - visitor.on_attribute("test1", test1); - visitor.on_attribute("test2", test2); - return true; - } - -private: - bool test1; - int64_t test2; -}; - -class TestInPlaceExtension : public InferenceEngine::IExtension { -public: - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {} - - void Unload() noexcept override {} - - std::map getOpSets() override { - static std::map opsets; - if (opsets.empty()) { - ngraph::OpSet opset; - opset.insert(); - opsets[CustomTestOp::get_type_info_static().version_id] = opset; - } - return opsets; - } - -private: -}; - -#if defined(ENABLE_OV_IR_FRONTEND) -TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension1) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared()); - Blob::Ptr weights; - SizeVector refBeforeReshape = {1, 3, 22, 22}; - SizeVector refAfterReshape = {4, 6, 44, 44}; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 3, 22, 22}; - - ASSERT_NO_THROW(network.reshape(newShapes)); - auto output = network.getOutputsInfo(); - SizeVector outDims = output["activation"]->getTensorDesc().getDims(); - ASSERT_EQ(outDims, refAfterReshape); -} - -TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension2) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared()); - Blob::Ptr weights; - SizeVector refBeforeReshape = {1, 3, 22, 22}; - SizeVector refAfterReshape = {7, 10, 67, 67}; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 3, 22, 22}; - - ASSERT_NO_THROW(network.reshape(newShapes)); - auto output = network.getOutputsInfo(); - SizeVector outDims = output["activation"]->getTensorDesc().getDims(); - ASSERT_EQ(outDims, refAfterReshape); -} -#endif // defined(ENABLE_OV_IR_FRONTEND) - -class BadExtension : public InferenceEngine::IExtension { -public: - BadExtension() {} - - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override{}; - - void Unload() noexcept override{}; - - std::map getOpSets() override { - static std::map opsets; - if (opsets.empty()) { - ngraph::OpSet opset; - opset.insert(); - opsets["opset1"] = opset; - } - return opsets; - } -}; - -TEST_F(NGraphReshapeTests, LoadBadNewExtension) { - InferenceEngine::Core ie; - ASSERT_THROW(ie.AddExtension(std::make_shared()), InferenceEngine::Exception); -} - -TEST_F(NGraphReshapeTests, TestInterpParameters) { - auto inp = std::make_shared(ngraph::element::f32, ngraph::Shape{2, 3, 4, 5}); - inp->set_friendly_name("test"); - - ngraph::op::v0::InterpolateAttrs attrs; - attrs.pads_begin.push_back(0); - 
attrs.pads_end.push_back(0); - attrs.axes = ngraph::AxisSet{2, 3}; - attrs.align_corners = false; - attrs.mode = "nearest"; - attrs.antialias = false; - - std::vector shape = {8, 10}; - auto out_shape = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, shape); - auto interp = std::make_shared(inp, out_shape, attrs); - - auto output = std::make_shared(interp); - auto ngraph_function = - std::make_shared(ngraph::ResultVector{output}, ngraph::ParameterVector{inp}); - - CNNNetwork cnn(ngraph_function); - std::map inShape; - inShape["test"] = {1, 3, 4, 5}; - cnn.reshape(inShape); -} - -#ifdef ENABLE_OV_IR_FRONTEND -TEST_F(NGraphReshapeTests, ReshapeWithDefaultGenericOps) { - // the RNNCEll was initially marked as "experimental" operation but later was added to opset - // the test checks that IR reader properly instantiate the "experimental" RNNCell as "opset6" RNNCell - std::string model = R"V0G0N( - - - - - - - 1 - 16 - - - - - - - - 1 - 128 - - - - - - - - 128 - 16 - - - - - - - - 128 - 128 - - - - - - - - 128 - - - - - - - - 1 - 16 - - - 1 - 128 - - - 128 - 16 - - - 128 - 128 - - - 128 - - - - - 1 - 128 - - - - - - - 1 - 128 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 16}; - newShapes["in2"] = {2, 128}; - - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDDetectionOutput) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1000 - 324 - - - - - - - - 1000 - 81 - - - - - - - - 1 - 3 - - - - - - - - 1000 - 4 - - - 1000 - 324 - - - 1000 - 81 - - - 1 - 3 - - - - - 100 - 4 - - - 100 - - - 100 - - - - - - - 100 - 4 - - - - - - - 100 - - - - - - - 100 - - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {2000, 4}; - newShapes["in1"] = {2000, 324}; - newShapes["in2"] = {2000, 81}; - - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDPriorGridGenerator) { - std::string model = R"V0G0N( - - - - - - - 3 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 81 - - - - - - - - 3 - 4 - - - 1 - 256 - 200 - 336 - - - 1 - 3 - 800 - 1344 - - - - - 201600 - 4 - - - - - - - 201600 - 4 - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 256, 200, 336}; - newShapes["in2"] = {2, 3, 800, 1344}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDGenerateProposalsSingleImage) { - std::string model = R"V0G0N( - - - - - - - 3 - - - - - - - - 201600 - 4 - - - - - - - - 12 - 200 - 336 - - - - - - - - 3 - 200 - 336 - - - - - - - - 3 - - - 201600 - 4 - - - 12 - 200 - 336 - - - 3 - 200 - 336 - - - - - 1000 - 4 - - - 1000 - - - - - - - 1000 - 4 - - - - - - - 1000 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in2"] = {12, 200, 300}; - newShapes["in3"] = {2, 200, 300}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDGenerateProposalsSingleImage_opset6) { - std::string model = R"V0G0N( - - - - - - - 3 - - - - 
- - - - 201600 - 4 - - - - - - - - 12 - 200 - 336 - - - - - - - - 3 - 200 - 336 - - - - - - - - 3 - - - 201600 - 4 - - - 12 - 200 - 336 - - - 3 - 200 - 336 - - - - - 1000 - 4 - - - 1000 - - - - - - - 1000 - 4 - - - - - - - 1000 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in2"] = {12, 200, 300}; - newShapes["in3"] = {2, 200, 300}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeGenerateProposals) { - std::string model = R"V0G0N( - - - - - - - 8 - 3 - - - - - - - - 50 - 84 - 3 - 4 - - - - - - - - 8 - 12 - 50 - 84 - - - - - - - - 8 - 3 - 50 - 84 - - - - - - - - 8 - 3 - - - 50 - 84 - 3 - 4 - - - 8 - 12 - 50 - 84 - - - 8 - 3 - 50 - 84 - - - - - -1 - 4 - - - -1 - - - 8 - - - - - - - -1 - 4 - - - - - - - -1 - - - - - - - 8 - - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {100, 100, 4, 4}; - newShapes["in2"] = {8, 16, 100, 100}; - newShapes["in3"] = {8, 4, 100, 100}; - ASSERT_NO_THROW(network.reshape(newShapes)); - - InferenceEngine::ICNNNetwork::InputShapes newShapes2; - newShapes2["in0"] = {2, 4}; - newShapes2["in1"] = {100, 100, 4, 4}; - newShapes2["in2"] = {2, 16, 100, 100}; - newShapes2["in3"] = {2, 4, 100, 100}; - ASSERT_NO_THROW(network.reshape(newShapes2)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDROIFeatureExtractor) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 4 - - - 1 - 256 - 200 - 336 - - - - - 1000 - 256 - 7 - 7 - - - - - - - 1000 - 256 - 7 - 7 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {1256, 4}; - newShapes["in1"] = {1, 256, 7, 7}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDROIFeatureExtractorOpset6) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 4 - - - 1 - 256 - 200 - 336 - - - - - 1000 - 256 - 7 - 7 - - - - - - - 1000 - 256 - 7 - 7 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {1256, 4}; - newShapes["in1"] = {1, 256, 7, 7}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDTopKROIs) { - std::string model = R"V0G0N( - - - - - - - 5000 - 4 - - - - - - - - 5000 - - - - - - - - 5000 - 4 - - - 5000 - - - - - 1000 - 4 - - - - - - - 1000 - 4 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {10000, 4}; - newShapes["in1"] = {10000}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} -#endif diff --git a/src/inference/tests/functional/preprocess_test.cpp b/src/inference/tests/functional/preprocess_test.cpp deleted file mode 100644 index 8b5045a5b7bc18..00000000000000 --- a/src/inference/tests/functional/preprocess_test.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - 
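[Editor's aside, not part of the patch: the deleted preprocess_test.cpp below checks input validation in the legacy PreProcessInfo::setMeanImage(). A minimal hedged sketch of the invariant those tests encode; the function name and shape here are illustrative, not the real API.]

// Rule the tests assert: a mean image must be a non-null, three-dimensional
// (CHW) blob whose channel dimension equals the count passed to init().
#include <cstddef>
#include <vector>

bool mean_image_shape_is_valid(std::size_t init_channels, const std::vector<std::size_t>& chw_dims) {
    return chw_dims.size() == 3 && chw_dims[0] == init_channels;
}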
-#include - -#include - -using namespace std; - -IE_SUPPRESS_DEPRECATED_START -using PreProcessTests = ::testing::Test; - -TEST_F(PreProcessTests, throwsOnSettingNullMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - ASSERT_THROW(info.setMeanImage(InferenceEngine::Blob::Ptr(nullptr)), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, throwsOnSetting2DMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - InferenceEngine::Blob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {1, 1}, InferenceEngine::Layout::HW})); - ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, throwsOnSettingWrongSizeMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - InferenceEngine::TBlob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW})); - blob->allocate(); - ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, noThrowWithCorrectSizeMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(2); - InferenceEngine::TBlob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW})); - blob->allocate(); - ASSERT_NO_THROW(info.setMeanImage(blob)); -} diff --git a/src/inference/tests/functional/response_buffer_test.cpp b/src/inference/tests/functional/response_buffer_test.cpp deleted file mode 100644 index 31a63304ceee46..00000000000000 --- a/src/inference/tests/functional/response_buffer_test.cpp +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "description_buffer.hpp" - -using namespace std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -using ResponseBufferTests = ::testing::Test; - -TEST_F(ResponseBufferTests, canCreateResponseMessage) { - ResponseDesc desc; - DescriptionBuffer(&desc) << "make error: " << 1; - ASSERT_STREQ("make error: 1", desc.msg); -} - -TEST_F(ResponseBufferTests, canReportError) { - ResponseDesc desc; - DescriptionBuffer d(NETWORK_NOT_LOADED, &desc); - d << "make error: "; - ASSERT_EQ(NETWORK_NOT_LOADED, (StatusCode)d); -} - -TEST_F(ResponseBufferTests, savePreviosMessage) { - ResponseDesc desc; - desc.msg[0] = 'T'; - desc.msg[1] = 'e'; - desc.msg[2] = 's'; - desc.msg[3] = 't'; - desc.msg[4] = '\0'; - DescriptionBuffer d(&desc); - ASSERT_EQ(GENERAL_ERROR, (StatusCode)d); - ASSERT_EQ(std::string("Test"), desc.msg); -} - -TEST_F(ResponseBufferTests, canHandleBigMessage) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - - buf << bigVal; - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandleNotNullTerminatedInput) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - desc.msg[size - 1] = 'B'; - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - - buf << bigVal; - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandlePredefined) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - buf << bigVal; - - DescriptionBuffer buf2(&desc); - std::string bigVal2(size, 'B'); - buf2 << 
bigVal2; - - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandleNotNullTerminatedPredefined) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - buf << bigVal; - - desc.msg[size - 1] = 'B'; - - DescriptionBuffer buf2(&desc); - std::string bigVal2(size, 'B'); - buf2 << bigVal2; - - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} diff --git a/src/inference/tests/functional/task_executor_tests.cpp b/src/inference/tests/functional/task_executor_tests.cpp index a5beb2d027dd96..18df500dd59ccb 100644 --- a/src/inference/tests/functional/task_executor_tests.cpp +++ b/src/inference/tests/functional/task_executor_tests.cpp @@ -8,7 +8,6 @@ #include #include "openvino/core/parallel.hpp" -#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" diff --git a/src/inference/tests/unit/ie_blob_test.cpp b/src/inference/tests/unit/ie_blob_test.cpp index ecc8792d7b5f97..368ea50a9bf2d9 100644 --- a/src/inference/tests/unit/ie_blob_test.cpp +++ b/src/inference/tests/unit/ie_blob_test.cpp @@ -278,63 +278,6 @@ TEST_F(BlobTests, cannotCreateBlobWithIncorrectPrecision) { ASSERT_THROW(InferenceEngine::make_shared_blob(desc), InferenceEngine::Exception); } -TEST_F(BlobTests, canUseBlobInMoveSemantics) { - InferenceEngine::TBlob b(InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::C)); - - b.getTensorDesc().setDims({3}); - b.allocate(); - b.data()[0] = 1.0f; - b.data()[1] = 2.0f; - b.data()[2] = 3.0f; - - std::vector dump; - - for (const auto& e : b) { - dump.push_back(e); - } - - ASSERT_EQ(dump.size(), 3); - - ASSERT_EQ(dump[0], 1.0f); - ASSERT_EQ(dump[1], 2.0f); - ASSERT_EQ(dump[2], 3.0f); -} - -TEST_F(BlobTests, DISABLED_canUseLockedMemoryAsRvalueReference) { - std::vector dump; - std::vector v({1.0f, 2.0f, 3.0f}); - auto blob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::C), - &v[0], - v.size()); - for (auto e : *blob) { - dump.push_back(e); - } - - ASSERT_EQ(dump.size(), 3); - - ASSERT_EQ(dump[0], 1.0f); - ASSERT_EQ(dump[1], 2.0f); - ASSERT_EQ(dump[2], 3.0f); -} - -TEST_F(BlobTests, canCreateBlobOnExistedMemory) { - float input[] = {0.1f, 0.2f, 0.3f}; - { - auto b = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 2}, InferenceEngine::HW), - input); - auto i = b->begin(); - ASSERT_NEAR(*i, 0.1, 0.00001); - i++; - ASSERT_NEAR(*i, 0.2, 0.00001); - i++; - ASSERT_EQ(i, b->end()); - - ASSERT_EQ(&*b->begin(), input); - } -} - // SetShape TEST_F(BlobTests, canSetShape) { auto b = InferenceEngine::make_shared_blob( @@ -350,20 +293,6 @@ TEST_F(BlobTests, canSetShape) { ASSERT_EQ(newDims[2], 6); } -TEST_F(BlobTests, canModifyDataInRangedFor) { - InferenceEngine::SizeVector v = {1, 2, 3}; - InferenceEngine::TBlob blob({InferenceEngine::Precision::I32, v, InferenceEngine::CHW}); - blob.allocate(); - - for (auto& data : blob) { - data = 5; - } - - for (size_t i = 0; i < v.size(); i++) { - ASSERT_EQ(5, blob.data()[i]) << "Mismatch at" << i; - } -} - TEST_F(BlobTests, makeRoiBlobNchw) { // we create main blob with NCHW layout. We will crop ROI from this blob. 
InferenceEngine::SizeVector dims = {1, 3, 6, 5}; // RGB picture of size (WxH) = 5x6 diff --git a/src/inference/tests/unit/ie_compound_blob_test.cpp b/src/inference/tests/unit/ie_compound_blob_test.cpp deleted file mode 100644 index c6521386f22648..00000000000000 --- a/src/inference/tests/unit/ie_compound_blob_test.cpp +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -class CompoundBlobTests : public ::testing::Test { -protected: - Blob::Ptr _test_blob; - using BlobPtrs = std::vector; - using MemoryBlobPtrs = std::vector; - -public: - void verifyCompoundBlob(const Blob::Ptr& blob) { - // verify basic assumptions about a compound blob - ASSERT_NE(nullptr, blob); - ASSERT_TRUE(blob->is()); - CompoundBlob::Ptr compound_blob = as(blob); - ASSERT_NE(nullptr, compound_blob); - EXPECT_EQ(compound_blob.get(), blob->as()); // shared object == raw ptr - EXPECT_EQ(0, compound_blob->element_size()); - EXPECT_EQ(nullptr, compound_blob->buffer()); - EXPECT_EQ(nullptr, compound_blob->cbuffer()); - EXPECT_GT(compound_blob->size(), 0); - EXPECT_NE(nullptr, compound_blob->getBlob(0)); - } - - void verifyCompoundBlob(Blob::Ptr blob, const BlobPtrs& underlying_blobs) { - verifyCompoundBlob(blob); - - // check that the compound blob contains a vector of provided underlying blobs - CompoundBlob::Ptr compound_blob = as(blob); - EXPECT_EQ(compound_blob.get(), blob->as()); // shared object == raw ptr - ASSERT_EQ(underlying_blobs.size(), compound_blob->size()); - for (size_t i = 0; i < underlying_blobs.size(); ++i) { - EXPECT_EQ(underlying_blobs[i], compound_blob->getBlob(i)); - } - } -}; - -TEST(BlobConversionTests, canWorkWithMemoryBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as(blob).get(), blob->as()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstMemoryBlob) { - Blob::CPtr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as(blob).get(), blob->as()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithTBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is>()); - ASSERT_FALSE(blob->is>()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstTBlob) { - Blob::CPtr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is>()); - ASSERT_FALSE(blob->is>()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithCompoundBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); 
- Blob::Ptr cblob = make_shared_blob(std::vector({blob})); - ASSERT_TRUE(cblob->is()); - ASSERT_FALSE(cblob->is()); - ASSERT_NE(nullptr, as(cblob)); - ASSERT_EQ(nullptr, as(cblob)); - ASSERT_EQ(as(cblob).get(), cblob->as()); - ASSERT_EQ(as(cblob).get(), cblob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstCompoundBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - Blob::CPtr cblob = make_shared_blob(std::vector({blob})); - ASSERT_TRUE(cblob->is()); - ASSERT_FALSE(cblob->is()); - ASSERT_NE(nullptr, as(cblob)); - ASSERT_EQ(nullptr, as(cblob)); - ASSERT_EQ(as(cblob).get(), cblob->as()); - ASSERT_EQ(as(cblob).get(), cblob->as()); -} - -TEST(BlobConversionTests, blobSharesOwnershipOnCast) { - static constexpr const uint8_t stored_value = 123; - TBlob::Ptr tblob; - { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - ASSERT_EQ(1, blob.use_count()); - ASSERT_TRUE(blob->is>()); - tblob = as>(blob); - ASSERT_NE(nullptr, tblob); - ASSERT_EQ(2, blob.use_count()); - ASSERT_EQ(2, tblob.use_count()); - tblob->allocate(); - tblob->data()[0] = stored_value; - ASSERT_EQ(stored_value, tblob->data()[0]); - } - ASSERT_EQ(1, tblob.use_count()); - ASSERT_NE(nullptr, tblob); - ASSERT_EQ(stored_value, tblob->data()[0]); -} - -TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromNullptr) { - Blob::Ptr valid = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - EXPECT_THROW(make_shared_blob(std::vector({valid, nullptr})), InferenceEngine::Exception); -} - -TEST_F(CompoundBlobTests, canCreateEmptyCompoundBlob) { - _test_blob = make_shared_blob(std::vector()); - - ASSERT_NE(nullptr, _test_blob); - EXPECT_EQ(0, _test_blob->element_size()); - EXPECT_EQ(nullptr, _test_blob->buffer()); - EXPECT_EQ(nullptr, _test_blob->cbuffer()); - ASSERT_TRUE(_test_blob->is()); - CompoundBlob::Ptr compound_blob = as(_test_blob); - ASSERT_NE(nullptr, compound_blob); - EXPECT_EQ(0, compound_blob->size()); - EXPECT_EQ(nullptr, compound_blob->getBlob(0)); -} - -TEST_F(CompoundBlobTests, canCreateCompoundBlob) { - // Create a blob with NCHW layout and pass it to compound for test - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - BlobPtrs blobs = {blob}; - - _test_blob = make_shared_blob(blobs); - verifyCompoundBlob(_test_blob, blobs); -} - -TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromCompoundBlob) { - // Create a blob with NCHW layout and pass it to compound for test. The created compound blob - // cannot be used to construct another compound blob. 
Recursive behavior is rejected - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - - _test_blob = make_shared_blob(std::vector({blob})); - verifyCompoundBlob(_test_blob); - - EXPECT_THROW(make_shared_blob(std::vector({blob, _test_blob})), - InferenceEngine::Exception); -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsCorrectDataInCorrectOrder) { - // Create a vector of blobs with HW layout and pass it to a compound blob to test if the vector - // is stored correctly - static constexpr const uint8_t MAGIC_NUMBER = 23; - BlobPtrs blobs(5); - for (size_t i = 0; i < blobs.size(); ++i) { - blobs[i] = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blobs[i]->allocate(); - MemoryBlob::Ptr mb = as(blobs[i]); - auto lm = mb->rwmap(); - lm.as()[0] = static_cast(i + MAGIC_NUMBER); - } - - _test_blob = make_shared_blob(blobs); - - verifyCompoundBlob(_test_blob, blobs); - - CompoundBlob::Ptr compound_blob = as(_test_blob); - EXPECT_EQ(blobs.size(), compound_blob->size()); - for (size_t i = 0; i < compound_blob->size(); ++i) { - auto blob = compound_blob->getBlob(i); - ASSERT_NE(nullptr, blob); - MemoryBlob::Ptr mb = as(blob); - ASSERT_NE(nullptr, mb); - auto lm = mb->rwmap(); - EXPECT_EQ(static_cast(i + MAGIC_NUMBER), lm.as()[0]); - } -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsReferencesToBlobs) { - // Create a blob with HW layout and pass it to a compound blob to check that the compound blob - // holds references to the blob and not a copy of it - MemoryBlob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blob->allocate(); - // here is quite self to dereference address since LockedMemory would be destroyed only after assignemnt - blob->rwmap().as()[0] = 12; - _test_blob = make_shared_blob(std::vector({blob})); - - verifyCompoundBlob(_test_blob); - - CompoundBlob::Ptr compound_blob = as(_test_blob); - Blob::Ptr b0 = compound_blob->getBlob(0); - MemoryBlob::CPtr mb0 = as(b0); - EXPECT_EQ(12, mb0->rmap().as()[0]); - blob->rwmap().as()[0] = 34; - EXPECT_EQ(34, mb0->rmap().as()[0]); -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsValidDataWhenUnderlyingBlobIsDestroyed) { - // Create a scoped blob with HW layout, pass it to compound, and destroy the original scoped - // blob. Check that the compound blob, which holds a reference to the destroyed blob, still has - // a valid object - static constexpr const uint8_t stored_value = 123; - { - MemoryBlob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blob->allocate(); - blob->rwmap().as()[0] = stored_value; - _test_blob = make_shared_blob(std::vector({blob})); - } - - verifyCompoundBlob(_test_blob); - CompoundBlob::Ptr compound_blob = as(_test_blob); - ASSERT_NE(nullptr, compound_blob->getBlob(0)); - MemoryBlob::CPtr mb0 = as(compound_blob->getBlob(0)); - ASSERT_NE(nullptr, mb0); - EXPECT_EQ(stored_value, mb0->rmap().as()[0]); -} diff --git a/src/inference/tests/unit/ie_executable_network_test.cpp b/src/inference/tests/unit/ie_executable_network_test.cpp index 3db791e60a9aaa..a1bd8d9bb7bf5a 100644 --- a/src/inference/tests/unit/ie_executable_network_test.cpp +++ b/src/inference/tests/unit/ie_executable_network_test.cpp @@ -34,7 +34,6 @@ using testing::Throw; // 5. void SetConfig(const std::map& config) // 6. Parameter GetConfig(const std::string& name) const // 7. Parameter GetMetric(const std::string& name) const -// 8. 
RemoteContext::Ptr GetContext() class ExecutableNetworkTests : public ::testing::Test { protected: diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp index 123a41e3524744..f0941660d12d30 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -560,7 +560,7 @@ class MockPluginSupportBatchAndContext : public MockPluginBase { return decltype(ov::optimal_batch_size)::value_type(4); } else if (name == ov::device::capabilities.name()) { return decltype(ov::device::capabilities)::value_type( - {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); + {"FP32", "FP16", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); } else if (name == ov::device::type.name()) { return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); } else if (name == ov::loaded_from_cache.name()) { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp deleted file mode 100644 index c1037519a72f8e..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/set_blob_by_type.hpp" - -#include "common_test_utils/test_constants.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace InferenceEngine; - -const std::vector BlobTypes = { - FuncTestUtils::BlobType::Compound, - FuncTestUtils::BlobType::Batched, - FuncTestUtils::BlobType::Memory, -}; - -const std::map autoConfig{ - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, - InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, - InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp index f0854294dd5260..02548538f29862 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -7,8 +7,7 @@ #include #include -#include "openvino/core/parallel.hpp" -#include "openvino/runtime/system_conf.hpp" +#include "openvino/core/visibility.hpp" std::vector disabledTestPatterns() { std::vector retVector{ diff --git a/src/plugins/auto/tests/unit/auto_unit_test.cpp b/src/plugins/auto/tests/unit/auto_unit_test.cpp index 139533bc378bba..e4c7e8135774cc 100644 --- a/src/plugins/auto/tests/unit/auto_unit_test.cpp +++ b/src/plugins/auto/tests/unit/auto_unit_test.cpp @@ -131,7 +131,7 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() { .WillByDefault(RETURN_MOCK_VALUE(supportedProps)); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), 
_)).WillByDefault(Return(12)); std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; + std::vector gpuCability = {"FP32", "FP16", "BIN", "INT8"}; std::vector othersCability = {"FP32", "FP16"}; std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0"; std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1"; diff --git a/src/plugins/auto/tests/unit/key_network_priority_test.cpp b/src/plugins/auto/tests/unit/key_network_priority_test.cpp index 616f14040486b6..595731fe49ee52 100644 --- a/src/plugins/auto/tests/unit/key_network_priority_test.cpp +++ b/src/plugins/auto/tests/unit/key_network_priority_test.cpp @@ -44,7 +44,7 @@ class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit void SetUp() override { std::tie(netPrecision, enableDevicePriority, PriorityConfigs) = GetParam(); sizeOfConfigs = static_cast(PriorityConfigs.size()); - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; + std::vector gpuCability = {"FP32", "FP16", "BIN"}; ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); diff --git a/src/plugins/auto/tests/unit/select_device_test.cpp b/src/plugins/auto/tests/unit/select_device_test.cpp index baef090b32459c..bfcd5e92fc7d38 100644 --- a/src/plugins/auto/tests/unit/select_device_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_test.cpp @@ -21,15 +21,13 @@ const std::vector fp32DeviceVector = {DGPU_INFO, IGPU_INFO, O const std::vector fp16DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector int8DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; std::map> devicesMap = {{"FP32", fp32DeviceVector}, {"FP16", fp16DeviceVector}, {"INT8", int8DeviceVector}, - {"BIN", binDeviceVector}, - {"BATCHED_BLOB", batchedblobDeviceVector}}; + {"BIN", binDeviceVector}}; const std::vector totalDevices = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector reverseTotalDevices = {CPU_INFO, OTHERS_INFO, IGPU_INFO, DGPU_INFO}; -const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN", "BATCHED_BLOB"}; +const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN"}; std::vector testConfigs; class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp index 01d71d360dbca6..f3270b8c83b922 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp @@ -78,7 +78,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d, // RDFT can support last axis INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d_last_axis, RDFTLayerTest, - testing::Combine(testing::Values(InferenceEngine::SizeVector{10, 4, 8, 2, 5}), + testing::Combine(testing::Values(std::vector{10, 4, 8, 2, 5}), testing::ValuesIn(inputPrecisions), testing::ValuesIn(std::vector>{{{0, 1, 2, 3, 4}}}), testing::ValuesIn(std::vector>{{}, {3, 10, 8, 6, 2}}), @@ -89,7 +89,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d_last_axis, // IRDFT can support 6d INSTANTIATE_TEST_SUITE_P(smoke_RDFT_6d, RDFTLayerTest, - 
testing::Combine(testing::Values(InferenceEngine::SizeVector{10, 4, 8, 2, 5, 2}), + testing::Combine(testing::Values(std::vector{10, 4, 8, 2, 5, 2}), testing::ValuesIn(inputPrecisions), testing::ValuesIn(std::vector>{{{0, 1, 2, 3, 4}}}), testing::ValuesIn(std::vector>{{}, {3, 10, 8, 6, 2}}), diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp index 473935bd799840..6b255c9981c08a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp @@ -118,7 +118,7 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface outShapeNode; if (!outShapeData.empty()) { if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { - IE_ASSERT(inputDynamicShapes.size() == 2); + OPENVINO_ASSERT(inputDynamicShapes.size() == 2); auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; @@ -133,7 +133,7 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface deconv; if (!outShapeData.empty()) { - IE_ASSERT(outShapeNode != nullptr); + OPENVINO_ASSERT(outShapeNode != nullptr); deconv = ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, model_type, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels); } else { diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp index 9df68f3af86b72..8ddfabd9bcdc50 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp @@ -127,7 +127,6 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterfaceinputs(); for (auto i = 0ul; i < funcInputs.size(); ++i) { const auto &funcInput = funcInputs[i]; - InferenceEngine::Blob::Ptr blob; int32_t resolution = 1; uint32_t range = 1; if (i == 2) { diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 8a93e4b89b12d2..499a15ec766ab7 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -121,7 +121,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface, ov::Tensor> ¶ms) { return params.first->get_friendly_name() == "param_1"; }); - IE_ASSERT(pos != inputs.end()); + OPENVINO_ASSERT(pos != inputs.end()); inputs.erase(pos); } auto expectedOutputs = calculate_refs(); @@ -164,7 +164,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface outShapeNode; if (!outShapeData.empty()) { if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { - IE_ASSERT(inputDynamicShapes.size() == 2); + OPENVINO_ASSERT(inputDynamicShapes.size() == 2); auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; @@ -179,7 +179,7 @@ class 
GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface deconv; if (!outShapeData.empty()) { - IE_ASSERT(outShapeNode != nullptr); + OPENVINO_ASSERT(outShapeNode != nullptr); deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, groupNum); } else { diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp index 2b3d2dccf2cc77..ce54580e1f77d5 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp @@ -79,7 +79,7 @@ class MatMulLayerGPUTest : public testing::WithParamInterface void transpose(T& shape) { - IE_ASSERT(shape.size() > 1); + OPENVINO_ASSERT(shape.size() > 1); std::swap(*(shape.end() - 1), *(shape.end() - 2)); } diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp index 2e798d7639542b..ffda0040d446e4 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp @@ -72,7 +72,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface, std::ostringstream result; if (!bounds.empty()) { - IE_ASSERT(bounds.size() == 3); + OPENVINO_ASSERT(bounds.size() == 3); result << "BatchesBounds=" << bounds[BATCHES] << "_BoxesBounds=" << bounds[BOXES] << "_ClassesBounds=" << bounds[CLASSES] << "_"; } for (const auto &ts : targetShapes) { diff --git a/src/plugins/proxy/src/remote_tensor.cpp b/src/plugins/proxy/src/remote_tensor.cpp index 49dc25fcebd92d..b8fe5237ebe107 100644 --- a/src/plugins/proxy/src/remote_tensor.cpp +++ b/src/plugins/proxy/src/remote_tensor.cpp @@ -10,7 +10,6 @@ #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/so_ptr.hpp" -#include "remote_utils.hpp" namespace { std::shared_ptr cast_tensor(const ov::SoPtr& tensor) { @@ -68,15 +67,6 @@ ov::SoPtr ov::proxy::RemoteTensor::get_hardware_tensor(const ov::So if (auto remote_tensor = std::dynamic_pointer_cast(tensor._ptr)) hw_tensor = remote_tensor->m_tensor; - if (unwrap) { - if (auto wrapper = std::dynamic_pointer_cast(hw_tensor._ptr)) { - auto blob = ov::get_hardware_blob(wrapper->blob.get()); - if (auto tensor_holder = dynamic_cast(blob)) { - hw_tensor = tensor_holder->get_tensor(); - } - } - } - return hw_tensor; } diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp deleted file mode 100644 index e691495452d01c..00000000000000 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "base_reference_cnn_test.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "shared_test_classes/single_layer/convert_color_i420.hpp" -#include "shared_test_classes/single_layer/convert_color_nv12.hpp" - -using namespace ov; -using namespace ov::preprocess; -using namespace reference_tests; 
-namespace { - -class ReferencePreprocessLegacyTest : public testing::Test, public ReferenceCNNTest { -public: - void SetUp() override { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - } -}; - -} // namespace - -static std::shared_ptr create_simple_function(element::Type type, const PartialShape& shape) { - auto data1 = std::make_shared(type, shape); - data1->set_friendly_name("input1"); - data1->get_output_tensor(0).set_names({"tensor_input1", "input1"}); - auto c = op::v0::Constant::create(type, {1}, {0}); - auto op = std::make_shared(data1, c); - op->set_friendly_name("Add0"); - auto res = std::make_shared(op); - res->set_friendly_name("Result1"); - res->get_output_tensor(0).set_names({"tensor_output1", "Result1", "Add0"}); - return std::make_shared(ResultVector{res}, ParameterVector{data1}); -} - -TEST_F(ReferencePreprocessLegacyTest, mean) { - function = create_simple_function(element::f32, Shape{1, 3, 2, 2}); - auto p = PrePostProcessor(function); - p.input().preprocess().mean(1.f); - p.build(); - - auto f2 = create_simple_function(element::f32, Shape{1, 3, 2, 2}); - legacy_network = InferenceEngine::CNNNetwork(f2); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.init(3); - preProcess[0]->meanValue = 1; - preProcess[1]->meanValue = 1; - preProcess[2]->meanValue = 1; - preProcess[0]->stdScale = 1; - preProcess[1]->stdScale = 1; - preProcess[2]->stdScale = 1; - preProcess.setVariant(InferenceEngine::MEAN_VALUE); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, mean_scale) { - function = create_simple_function(element::f32, Shape{1, 3, 20, 20}); - auto p = PrePostProcessor(function); - p.input().preprocess().scale(2.f); - p.build(); - - auto f2 = create_simple_function(element::f32, Shape{1, 3, 20, 20}); - legacy_network = InferenceEngine::CNNNetwork(f2); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.init(3); - preProcess[0]->meanValue = 0; - preProcess[1]->meanValue = 0; - preProcess[2]->meanValue = 0; - preProcess[0]->stdScale = 2; - preProcess[1]->stdScale = 2; - preProcess[2]->stdScale = 2; - preProcess.setVariant(InferenceEngine::MEAN_VALUE); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, resize) { - function = create_simple_function(element::f32, Shape{1, 3, 5, 5}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, 5, 5}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - p.input().tensor().set_layout("NCHW").set_spatial_static_shape(42, 30); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR); - p.input().model().set_layout("NCHW"); - p.build(); - - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, bgrx_to_bgr) { - const int h = 160; - const int w = 160; - auto rgbx_input = std::vector(h * w * 4, 0); - for (auto i = 0; i < h * w * 4; i++) { - rgbx_input[i] = i % 256; - } - function = create_simple_function(element::f32, Shape{1, 3, h, w}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, h, w}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - auto& input = p.input(); - input.tensor().set_color_format(ColorFormat::BGRX).set_element_type(element::u8); - input.preprocess().convert_color(ColorFormat::BGR); - input.model().set_layout("NCHW"); - function = p.build(); - 
inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data()); - - InferenceEngine::TensorDesc rgbx_plane_desc(InferenceEngine::Precision::U8, - {1, 4, h, w}, - InferenceEngine::Layout::NHWC); - legacy_network.getInputsInfo().begin()->second->setLayout(InferenceEngine::NHWC); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setColorFormat(InferenceEngine::ColorFormat::BGRX); - legacy_input_blobs["input1"] = InferenceEngine::make_shared_blob(rgbx_plane_desc, rgbx_input.data()); - - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, rgbx_to_bgr) { - const int h = 160; - const int w = 160; - auto rgbx_input = std::vector(h * w * 4, 0); - for (auto i = 0; i < h * w * 4; i++) { - rgbx_input[i] = i % 256; - } - function = create_simple_function(element::f32, Shape{1, 3, h, w}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, h, w}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - auto& input = p.input(); - input.tensor().set_color_format(ColorFormat::RGBX).set_element_type(element::u8); - input.preprocess().convert_color(ColorFormat::BGR); - input.model().set_layout("NCHW"); - function = p.build(); - inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data()); - - InferenceEngine::TensorDesc rgbx_plane_desc(InferenceEngine::Precision::U8, - {1, 4, h, w}, - InferenceEngine::Layout::NHWC); - legacy_network.getInputsInfo().begin()->second->setLayout(InferenceEngine::NHWC); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setColorFormat(InferenceEngine::ColorFormat::RGBX); - legacy_input_blobs["input1"] = InferenceEngine::make_shared_blob(rgbx_plane_desc, rgbx_input.data()); - - Exec(); -} diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp deleted file mode 100644 index 92282dc7ecff7d..00000000000000 --- a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "base/behavior_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" - -namespace BehaviorTestsDefinitions { - -using InferRequestSetBlobByTypeParams = std::tuple< - FuncTestUtils::BlobType, // Blob type - std::string, // Device name - std::map // Device config ->; - -class InferRequestSetBlobByType : public testing::WithParamInterface, - public BehaviorTestsUtils::IEInferRequestTestBase { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - using namespace ov::test::utils; - - FuncTestUtils::BlobType BlobType; - std::string targetDevice; - std::map configuration; - std::tie(BlobType, targetDevice, configuration) = obj.param; - std::replace(targetDevice.begin(), targetDevice.end(), ':', '.'); - - std::ostringstream result; - result << "BlobType=" << BlobType << "_"; - result << "Device="<< targetDevice << "_"; - result << "Config=" << configuration; - return result.str(); - } - - void SetUp() override { - std::map config; - std::tie(blobType, target_device, config) = this->GetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - APIBaseTest::SetUp(); - std::shared_ptr 
function = ov::test::utils::make_conv_pool_relu({4, 3, 6, 8}, ov::element::u8); - InferenceEngine::CNNNetwork cnnNetwork(function); - executableNetwork = ie->LoadNetwork(cnnNetwork, target_device, config); - } - -protected: - bool blobTypeIsSupportedByDevice() { - switch (blobType) { - case FuncTestUtils::BlobType::Memory: - return true; - case FuncTestUtils::BlobType::Compound: - case FuncTestUtils::BlobType::Remote: - return false; - case FuncTestUtils::BlobType::Batched: { - auto supported_metrics = ie->GetMetric(target_device, METRIC_KEY(SUPPORTED_METRICS)).as>(); - if (std::find(supported_metrics.begin(), supported_metrics.end(), - METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) { - return false; - } - - auto optimization_caps = - ie->GetMetric(target_device, METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as>(); - return std::find(optimization_caps.begin(), optimization_caps.end(), - METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end(); - } - default: - IE_THROW() << "Test does not support the blob kind"; - } - } - - FuncTestUtils::BlobType blobType; - InferenceEngine::ExecutableNetwork executableNetwork; - std::shared_ptr ie = PluginCache::get().ie(); -}; - -TEST_P(InferRequestSetBlobByType, setInputBlobsByType) { - // Create InferRequest - auto req = executableNetwork.CreateInferRequest(); - for (const auto &input : executableNetwork.GetInputsInfo()) { - const auto &info = input.second; - auto blob = FuncTestUtils::createBlobByType(info->getTensorDesc(), blobType); - if (blobTypeIsSupportedByDevice()) { - EXPECT_NO_THROW(req.SetBlob(info->name(), blob)); - } else { - EXPECT_THROW(req.SetBlob(info->name(), blob), InferenceEngine::Exception); - } - } -} -} // namespace BehaviorTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index 2be633a2441166..4271c923ad5419 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -369,13 +369,6 @@ TEST(IEClassBasicTest, smoke_ImportNetworkMultiThrows) { ASSERT_THROW(ie.ImportNetwork("model", ov::test::utils::DEVICE_MULTI), InferenceEngine::NetworkNotRead); } -TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::RemoteContext::Ptr context = nullptr; - std::istringstream stream("None"); - ASSERT_THROW(ie.ImportNetwork(stream, context, {}), InferenceEngine::Exception); -} - // // QueryNetwork // diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp index 463266084624a8..08a50c9f356e40 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp @@ -10,7 +10,6 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ie_preprocess.hpp" #include "base/behavior_test_utils.hpp" namespace BehaviorTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp deleted file mode 100644 index 3b2f7e40aacab9..00000000000000 --- 
a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "openvino/runtime/intel_gpu/ocl/ocl.hpp" -#include "openvino/runtime/core.hpp" -#include "openvino/runtime/properties.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include -#include "common_test_utils/ov_tensor_utils.hpp" - -TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity) { - auto ie = ov::Core(); - using namespace ov::preprocess; - auto p = PrePostProcessor(fn_ptr); - p.input().tensor().set_element_type(ov::element::i8); - p.input().preprocess().convert_element_type(ov::element::f32); - - auto function = p.build(); - ov::CompiledModel exec_net; - try { - exec_net = ie.compile_model(function, device_names, ov::hint::allow_auto_batching(false)); - } catch (...) { - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test - return; - } - std::vector inf_req_shared = {}; - auto input = function->get_parameters().at(0); - auto output = function->get_results().at(0); - auto fakeImageData = ov::test::utils::create_and_fill_tensor(input->get_element_type(), input->get_shape()); - auto inf_req_regular = exec_net.create_infer_request(); - inf_req_regular.set_tensor(input, fakeImageData); - // infer using system memory - inf_req_regular.infer(); - auto output_tensor_regular = inf_req_regular.get_tensor(output); - auto imSize = ov::shape_size(input->get_shape()); - std::vector contexts = {}; - std::vector cldnn_tensor = {}; - for (auto& iter : device_lists) { - try { - auto cldnn_context = ie.get_default_context(iter).as(); - contexts.push_back(cldnn_context); - cl_context ctx = cldnn_context; - auto ocl_instance = std::make_shared(ctx); - cl_int err; - cl::Buffer shared_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, imSize, NULL, &err); - { - void* buffer = fakeImageData.data(); - ocl_instance->_queue.enqueueWriteBuffer(shared_buffer, true, 0, imSize, buffer); - } - cldnn_tensor.emplace_back(cldnn_context.create_tensor(input->get_element_type(), input->get_shape(), shared_buffer)); - } catch(...) 
{ - // device does not support remote context - continue; - } - } - for (size_t i = 0; i < cldnn_tensor.size(); i++) { - auto temprequest = exec_net.create_infer_request(); - temprequest.set_input_tensor(cldnn_tensor.at(i)); - inf_req_shared.emplace_back(temprequest); - } - for (size_t i = 0; i < inf_req_shared.size(); i++) - inf_req_shared.at(i).start_async(); - for (size_t i = 0; i < inf_req_shared.size(); i++) - inf_req_shared.at(i).wait(); - - // compare results - for (size_t i = 0; i < inf_req_shared.size(); i++) { - auto output_tensor_shared = inf_req_shared.at(i).get_tensor(output); - - { - ASSERT_EQ(output->get_element_type(), ov::element::f32); - ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); - ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr); - } - } -} diff --git a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp deleted file mode 100644 index 16676330103e0c..00000000000000 --- a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "ie_core.hpp" -#include "base/multi/multi_helpers.hpp" -#include "functional_test_utils/plugin_cache.hpp" - -TEST_P(MultiDevice_SupportTest, canCreateContextThenRequestThenBlobsAndInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - InferenceEngine::CNNNetwork net(fn_ptr); - net.getInputsInfo().begin()->second->setLayout(InferenceEngine::Layout::NCHW); - net.getInputsInfo().begin()->second->setPrecision(InferenceEngine::Precision::U8); - - auto ie = PluginCache::get().ie(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - auto exec_net = ie->LoadNetwork(net, device_names, configs); - if (expected_status) { - std::shared_ptr ctx; - ASSERT_NE(ctx = exec_net.GetContext(), nullptr); - InferenceEngine::InferRequest req = exec_net.CreateInferRequest(); - ASSERT_TRUE(req); - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - for (auto i : inputInfo) { - auto rblob = InferenceEngine::make_shared_blob(i.second->getTensorDesc(), ctx); - rblob->allocate(); - req.SetBlob(i.first, rblob); - } - ASSERT_NO_THROW(req.StartAsync()); - ASSERT_EQ(req.Wait(InferenceEngine::InferRequest::RESULT_READY), InferenceEngine::StatusCode::OK); - - } else { - ASSERT_THROW(exec_net.GetContext(), InferenceEngine::NotImplemented); - } -} diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp index aba0d057a4320e..9052182aae4f89 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp @@ -23,7 +23,7 @@ namespace ExecutionGraphTests { std::shared_ptr makeEltwiseFunction(const std::vector& inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 2); + OPENVINO_ASSERT(inputPrecisions.size() == 2); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4}), @@ -38,7 +38,7 @@ std::shared_ptr 
makeEltwiseFunction(const std::vector makeFakeQuantizeReluFunction(const std::vector& inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 1); + OPENVINO_ASSERT(inputPrecisions.size() == 1); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4})}; auto inputLowNode = ov::test::utils::deprecated::make_constant(ov::element::f32, {1, 1, 1, 1}, {0}); @@ -56,7 +56,7 @@ std::shared_ptr makeFakeQuantizeReluFunction(const std::vector makeFakeQuantizeBinaryConvolutionFunction(const std::vector &inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 1); + OPENVINO_ASSERT(inputPrecisions.size() == 1); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4})}; auto inputLowNode = ov::test::utils::deprecated::make_constant(ov::element::f32, {1, 1, 1, 1}, {1}); diff --git a/src/tests/functional/plugin/shared/src/snippets/add.cpp b/src/tests/functional/plugin/shared/src/snippets/add.cpp index b0a695b0097106..5c4542f516bf5a 100644 --- a/src/tests/functional/plugin/shared/src/snippets/add.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/add.cpp @@ -106,8 +106,7 @@ std::string AddPair::getTestCaseName(testing::TestParamInfo actual; for (auto&& res : queryNetworkResult.supportedLayersMap) { - std::shared_ptr ctx = nullptr; - try { - // Try to take fully specified name from the context to match it with query network result for devices that support remote contexts - ctx = core->GetDefaultContext(targetDevice); - ASSERT_EQ(res.second, ctx->getDeviceName()); - } catch (...) { - // otherwise, compare with originally used device name - ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice); - } + // compare with originally used device name + ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice); + actual.insert(res.first); } ASSERT_EQ(expected, actual); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp index cec0846756b65b..393787329724d6 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp @@ -7,7 +7,6 @@ #include #include -#include #include #include "ngraph/pass/low_latency.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index f5c141432f9d9c..f354f14d874dd6 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -7,6 +7,7 @@ #include "ov_models/builders.hpp" #include "common_test_utils/node_builders/constant.hpp" #include "ov_models/utils/ov_helpers.hpp" +#include "ie_common.h" #include "common_test_utils/node_builders/fake_quantize.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp index 48d03880089670..684efb24ae38d3 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp @@ -171,7 +171,7 @@ void QuantizedConvolutionBatchNorm::TearDown() { auto get_layer_type = [] (const 
std::shared_ptr& node) -> const std::string& { const auto& rt_info = node->get_rt_info(); auto it = rt_info.find(ov::exec_model_info::LAYER_TYPE); - IE_ASSERT(it != rt_info.end()); + OPENVINO_ASSERT(it != rt_info.end()); return it->second.as(); }; diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp index f2b26523c5cf58..7917c22fb6fcf9 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp @@ -8,11 +8,6 @@ #include "gmock/gmock-matchers.h" #include "gtest/gtest.h" -#include "ie_blob.h" -#include "ie_data.h" -#include "ie_input_info.hpp" -#include "ie_preprocess.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/util/pp.hpp" inline bool strContains(const std::string& str, const std::string& substr) { @@ -29,20 +24,6 @@ inline bool strDoesnotContain(const std::string& str, const std::string& substr) #define EXPECT_STR_CONTAINS(str, substr) EXPECT_PRED2(&strContains, str, substr) -#define ASSERT_BLOB_EQ(lhs, rhs) compare_blob(lhs, rhs) - -#define ASSERT_DIMS_EQ(lhs, rhs) compare_dims(lhs, rhs) - -#define ASSERT_DATA_EQ(lhs, rhs) compare_data(lhs, rhs) - -#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) compare_preprocess(lhs, rhs) - -#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs) compare_preprocess_info(lhs, rhs) - -#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) compare_outputs_info(lhs, rhs) - -#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) compare_inputs_info(lhs, rhs) - #define ASSERT_STRINGEQ(lhs, rhs) compare_cpp_strings(lhs, rhs) #define OV_ASSERT_NO_THROW(statement) OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_) @@ -86,67 +67,6 @@ inline bool strDoesnotContain(const std::string& str, const std::string& substr) FAIL() << "Unknown exception"; \ } -OPENVINO_SUPPRESS_DEPRECATED_START -inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) { - ASSERT_EQ(lhs.get(), rhs.get()); - // TODO: add blob specific comparison for general case -} - -inline void compare_dims(const InferenceEngine::SizeVector& lhs, const InferenceEngine::SizeVector& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - for (size_t i = 0; i < lhs.size(); i++) { - ASSERT_EQ(lhs[i], rhs[i]); - } -} - -inline void compare_data(const InferenceEngine::Data& lhs, const InferenceEngine::Data& rhs) { - ASSERT_DIMS_EQ(lhs.getDims(), rhs.getDims()); - ASSERT_STREQ(lhs.getName().c_str(), rhs.getName().c_str()); - ASSERT_EQ(lhs.getPrecision(), rhs.getPrecision()); -} - -inline void compare_preprocess(const InferenceEngine::PreProcessChannel& lhs, - const InferenceEngine::PreProcessChannel& rhs) { - ASSERT_FLOAT_EQ(lhs.meanValue, rhs.meanValue); - ASSERT_FLOAT_EQ(lhs.stdScale, rhs.stdScale); - ASSERT_BLOB_EQ(lhs.meanData, rhs.meanData); -} - -inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo& lhs, - const InferenceEngine::PreProcessInfo& rhs) { - ASSERT_EQ(lhs.getMeanVariant(), rhs.getMeanVariant()); - ASSERT_EQ(lhs.getNumberOfChannels(), rhs.getNumberOfChannels()); - for (size_t i = 0; i < lhs.getNumberOfChannels(); i++) { - ASSERT_PREPROCESS_CHANNEL_EQ(*lhs[i].get(), *rhs[i].get()); - } -} - -inline void compare_outputs_info(const InferenceEngine::OutputsDataMap& lhs, - const InferenceEngine::OutputsDataMap& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - auto i = lhs.begin(); - auto j = rhs.begin(); - - for (size_t k = 0; 
k != lhs.size(); k++, i++, j++) { - ASSERT_STREQ(i->first.c_str(), j->first.c_str()); - ASSERT_DATA_EQ(*i->second.get(), *j->second.get()); - } -} - -inline void compare_inputs_info(const InferenceEngine::InputsDataMap& lhs, const InferenceEngine::InputsDataMap& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - auto i = lhs.begin(); - auto j = rhs.begin(); - - for (size_t k = 0; k != lhs.size(); k++, i++, j++) { - ASSERT_STREQ(i->first.c_str(), j->first.c_str()); - ASSERT_DIMS_EQ(i->second->getTensorDesc().getDims(), j->second->getTensorDesc().getDims()); - ASSERT_PREPROCESS_INFO_EQ(i->second->getPreProcess(), j->second->getPreProcess()); - ASSERT_DATA_EQ(*i->second->getInputData().get(), *j->second->getInputData().get()); - } -} -OPENVINO_SUPPRESS_DEPRECATED_END - inline void compare_cpp_strings(const std::string& lhs, const std::string& rhs) { ASSERT_STREQ(lhs.c_str(), rhs.c_str()); } diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp index ceb4ee4aacd072..ea62ed52aaab99 100644 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp +++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp @@ -16,7 +16,6 @@ #include "blob_transform.hpp" #include "common_test_utils/data_utils.hpp" #include "common_test_utils/test_constants.hpp" -#include "ie_compound_blob.h" #include "ie_ngraph_utils.hpp" #include "openvino/runtime/common.hpp" #include "precision_utils.h" @@ -675,7 +674,6 @@ inline short reducePrecisionBitwiseS(const float in) { enum class BlobType { Memory, - Batched, Compound, Remote, }; @@ -684,10 +682,6 @@ inline std::ostream& operator<<(std::ostream& os, BlobType type) { switch (type) { case BlobType::Memory: return os << "Memory"; - case BlobType::Batched: - return os << "Batched"; - case BlobType::Compound: - return os << "Compound"; case BlobType::Remote: return os << "Remote"; default: @@ -695,32 +689,6 @@ inline std::ostream& operator<<(std::ostream& os, BlobType type) { } } -inline InferenceEngine::Blob::Ptr createBlobByType(const InferenceEngine::TensorDesc& td, BlobType blobType) { - switch (blobType) { - case BlobType::Memory: - return createAndFillBlob(td); - case BlobType::Batched: - case BlobType::Compound: { - auto dims = td.getDims(); - const size_t subBlobsNum = dims.front(); - dims[0] = 1; - std::vector subBlobs; - InferenceEngine::TensorDesc subBlobDesc(td.getPrecision(), dims, td.getLayout()); - for (size_t i = 0; i < subBlobsNum; i++) { - subBlobs.push_back(createAndFillBlob(subBlobDesc)); - } - return blobType == BlobType::Batched - ? 
InferenceEngine::make_shared_blob(subBlobs) - : InferenceEngine::make_shared_blob(subBlobs); - } - // TODO: ocl + remote - // case BlobType::Remote: - // return InferenceEngine::as(createAndFillBlob(td)); - default: - IE_THROW() << "Test does not support the blob kind"; - } -} - inline bool checkLayout(InferenceEngine::Layout layout, const std::vector& inputShapes) { bool check = false; switch (layout) { diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp index 08283dc81fb774..e8a6c9a7a4c04e 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp @@ -28,7 +28,6 @@ class MockIExecutableNetworkInternal : public IExecutableNetworkInternal { MOCK_METHOD1(SetConfig, void(const std::map& config)); MOCK_CONST_METHOD1(GetConfig, Parameter(const std::string& name)); MOCK_CONST_METHOD1(GetMetric, Parameter(const std::string& name)); - MOCK_CONST_METHOD0(GetContext, std::shared_ptr(void)); void WrapOstreamExport(std::ostream& networkModel) { IExecutableNetworkInternal::Export(networkModel); } diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp index 11da8a6ea42fd9..03307ae842818d 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp @@ -21,7 +21,6 @@ class MockIInferRequestInternal : public InferenceEngine::IInferRequestInternal MOCK_CONST_METHOD0(GetPerformanceCounts, std::map()); MOCK_METHOD2(SetBlob, void(const std::string&, const InferenceEngine::Blob::Ptr&)); MOCK_METHOD1(GetBlob, InferenceEngine::Blob::Ptr(const std::string&)); - MOCK_CONST_METHOD1(GetPreProcess, const InferenceEngine::PreProcessInfo&(const std::string&)); MOCK_METHOD1(SetCallback, void(std::function)); MOCK_METHOD0(QueryState, std::vector()); MOCK_METHOD0(Cancel, void()); diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp index 958ff721568778..c5df4baefb87dd 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp @@ -39,22 +39,10 @@ class MockIInferencePlugin : public InferenceEngine::IInferencePlugin { MOCK_CONST_METHOD2(GetMetric, InferenceEngine::Parameter(const std::string&, const std::map&)); - MOCK_METHOD1(CreateContext, std::shared_ptr(const InferenceEngine::ParamMap&)); - MOCK_METHOD1(GetDefaultContext, std::shared_ptr(const InferenceEngine::ParamMap&)); - MOCK_METHOD3(LoadNetwork, - std::shared_ptr( - const InferenceEngine::CNNNetwork&, - const std::map&, - const std::shared_ptr&)); MOCK_METHOD2( ImportNetwork, std::shared_ptr(std::istream&, const std::map&)); - MOCK_METHOD3(ImportNetwork, - std::shared_ptr( - std::istream&, - const std::shared_ptr&, - const std::map&)); MOCK_CONST_METHOD2(QueryNetwork, InferenceEngine::QueryNetworkResult(const 
InferenceEngine::CNNNetwork&, const std::map&)); diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp b/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp index 7b0e59356958f9..c3a2b428acbfe2 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp @@ -40,7 +40,6 @@ class MockIExecutableNetwork : public IExecutableNetwork { GetMetric, (const std::string& name, Parameter& result, ResponseDesc* resp), (const, noexcept)); - MOCK_METHOD(StatusCode, GetContext, (RemoteContext::Ptr & pContext, ResponseDesc* resp), (const, noexcept)); }; IE_SUPPRESS_DEPRECATED_END diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp b/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp index 48e42d3f9c4e4f..2677416523e799 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp @@ -33,7 +33,6 @@ class MockIInferRequest : public IInferRequest { ((std::map&), ResponseDesc*), (const, noexcept)); MOCK_METHOD(StatusCode, GetBlob, (const char*, Blob::Ptr&, ResponseDesc*), (noexcept)); - MOCK_METHOD(StatusCode, GetPreProcess, (const char*, const PreProcessInfo**, ResponseDesc*), (const, noexcept)); MOCK_METHOD(StatusCode, SetBlob, (const char*, const Blob::Ptr&, ResponseDesc*), (noexcept)); MOCK_METHOD(StatusCode, Cancel, (ResponseDesc*), (noexcept)); }; From 997dda54759ed29680faf43541790fcc60423b30 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jan 2024 22:20:27 +0100 Subject: [PATCH 030/122] Bump attrs from 23.1.0 to 23.2.0 in /tests (#22093) Bumps [attrs](https://github.com/sponsors/hynek) from 23.1.0 to 23.2.0. - [Commits](https://github.com/sponsors/hynek/commits) --- updated-dependencies: - dependency-name: attrs dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index 4e3455dd1b8b27..7a32f3987fa5bb 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -1,5 +1,5 @@ numpy>=1.16.6,<1.27 -attrs==23.1.0 +attrs==23.2.0 distro==1.8.0 h5py>=3.1.0 Jinja2>=2.11.2 From e510de30222c750a5d41205170cbd94e1bf77143 Mon Sep 17 00:00:00 2001 From: Taylor Yeonbok Lee Date: Tue, 16 Jan 2024 13:44:26 -0800 Subject: [PATCH 031/122] [GPU] Optimization for kv cache op (#22089)

* Fixed wrong max_output_layout_size of skipped gather
* kv cache opt should be done after gather skip
* Remove debug prints
* Fix can_be_optimized set
* fixed max_output_layout_size
* trial WIP disabling copy
* Fixed kv cache memory usage:
  - Removed the duplication of memory
  - More kv cache can_be_optimized = True
* Working: prevent mem alloc for optimizable nodes
* Record kv cache's output layout always
* Refactoring & cleanup
* Refactor 2
* Revert not relevant change
* Fixed bugs:
  - Error on max_output_layout
  - Error on concat_axis_size w.r.t the legacy axis
  - Init variables for empty input
* Fix for empty past
* Fix more bugs in 1) reset variable 2) unsupported optimization for non-outermost axis concat
* Support memory sharing for non-outermost dim too
* Fix to update output layout only if needed
* Add more debug comment
* Applied review comment
* Fix func test failure (KVCacheTests.smoke_multipleIterations_stateful_gather_with_initializer_batch_3)

* Applied review comment

--- .../intel_gpu/plugin/variable_state.hpp | 4 + .../intel_gpu/runtime/shape_predictor.hpp | 8 +- .../src/graph/impls/ocl/kv_cache.cpp | 2 + .../src/graph/include/kv_cache_inst.h | 44 ++++ .../src/graph/include/primitive_inst.h | 2 + .../intel_gpu/src/graph/primitive_inst.cpp | 230 +++++++++++------- .../intel_gpu/src/graph/read_value.cpp | 4 + .../intel_gpu/src/plugin/variable_state.cpp | 19 +- .../intel_gpu/src/runtime/shape_predictor.cpp | 9 +- 9 files changed, 231 insertions(+), 91 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp index 57e460296b2296..0bacf2ec9a00a8 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp @@ -40,6 +40,10 @@ class VariableState : public ov::IVariableState { bool is_set() const; void set(); void set_layout(const cldnn::layout& new_layout); + void set_memory(const cldnn::memory::ptr& new_mem, const cldnn::layout& actual_layout); + size_t get_actual_mem_size() const { + return actual_size; + } private: cldnn::layout m_layout; diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp index 51f09989502a13..aea07971ca6020 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp @@ -51,9 +51,15 @@ struct ShapePredictor { std::pair<bool, ov::Shape> predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, size_t dt_bitwidth, - bool can_reuse_buffer); + bool can_reuse_buffer, + int32_t next_iters_prealloc_count = -1); + bool can_preallocate(size_t desired_buffer_size); + void reset() { + _shapes_info.clear(); + } + private: void add_shape(const std::string& id, const ov::Shape& shape);
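The extended predictor interface above can be exercised roughly as follows; this is a minimal sketch, and the tensor id, shapes, bit width and prealloc count are illustrative values rather than ones taken from the plugin:

    // 'sp' is assumed to be the network's cldnn::ShapePredictor instance.
    ov::Shape current_shape{1, 32, 16, 128};  // e.g. [batch, heads, seq_len, head_size]
    size_t dt_bitwidth = 16;                  // f16
    auto prealloc = sp.predict_preallocation_shape("kv_cache:0", current_shape, dt_bitwidth,
                                                   /* can_reuse_buffer = */ false,
                                                   /* next_iters_prealloc_count = */ 10);
    if (prealloc.first && sp.can_preallocate(ov::shape_size(prealloc.second) * dt_bitwidth)) {
        // Allocate prealloc.second instead of current_shape, so subsequent iterations
        // can grow into the same buffer without reallocation.
    }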
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp index f92f121e3e23b8..9bbb6753feb8a9 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp @@ -79,10 +79,12 @@ struct kv_cache_impl : typed_primitive_impl_ocl<kv_cache> { variable.set(); if (can_be_optimized) { + GPU_DEBUG_TRACE_DETAIL << desc->id << " : Output is same as variable memory! Skip copying " << std::endl; // When primitive is optimized, concat kernel writes directly to variable memory return res_event; } else { // Otherwise, we need to copy result from out buffer to state memory + GPU_DEBUG_TRACE_DETAIL << desc->id << " : Copying output to variable memory" << std::endl; auto& stream = instance.get_network().get_stream(); stream.enqueue_barrier(); diff --git a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h index bd6d6e3bc0a9f8..34c4ccf555008b 100644 --- a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h @@ -36,6 +36,50 @@ class typed_primitive_inst<kv_cache> : public typed_primitive_inst_base<kv_cache>, public memory_state::variable { static int64_t get_sequence_axis_legacy(int64_t sequence_axis, size_t past_layout_rank) { auto sequence_axis_legacy = sequence_axis; if (sequence_axis_legacy < 0) sequence_axis_legacy = past_layout_rank + sequence_axis_legacy; if (sequence_axis_legacy >= 2) { auto spatial_axis = sequence_axis_legacy - 2; // Default and minimum number of dimensions is 4 auto spatial_size = std::max<size_t>(past_layout_rank, 4) - 2; sequence_axis_legacy = spatial_size - spatial_axis - 1 + 2; } return sequence_axis_legacy; }
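    // For intuition, a worked example of the remapping above (the values are illustrative):
    //   get_sequence_axis_legacy(2, 4): spatial_axis = 2 - 2 = 0,
    //                                   spatial_size = max(4, 4) - 2 = 2,
    //                                   result = 2 - 0 - 1 + 2 = 3.
    // Spatial dims are reversed in the legacy bfyx-style ordering, so sequence axis 2 of a
    // rank-4 layout maps to legacy axis 3, while axes 0 and 1 (batch/feature) are unchanged.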
static int64_t get_max_pad(const layout& target_layout, size_t buffer_size, int64_t legacy_sequence_axis, std::string target_name = "") { if (buffer_size == 0) return 0; const size_t total_elements = target_layout.count(); const int64_t concat_axis_size = target_layout.get_tensor().sizes()[legacy_sequence_axis]; const int64_t sequence_element_size = total_elements / concat_axis_size; const int64_t max_sequence_elements = buffer_size / sequence_element_size; auto max_pad = std::max<int64_t>(max_sequence_elements - concat_axis_size, 0); auto target_layout_name = (target_name != "") ? target_name : "target_layout"; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] " << target_name << " : " << target_layout.to_string() << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] buffer size " << buffer_size << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] total_elements " << total_elements << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] concat_axis_size = " << concat_axis_size << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] sequence_element_size = " << sequence_element_size << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] max_sequence_elements = " << max_sequence_elements << std::endl; GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] max_pad (max_sequence_elements - concat_axis_size) = " << max_pad << std::endl; return max_pad; } typed_primitive_inst(network& network, const kv_cache_node& desc); typed_primitive_inst(network& network) : parent(network), memory_state::variable("") {} }; diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index 762a9dd1ea90f2..cd56778fa7bbda 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ -229,6 +229,8 @@ class primitive_inst { void set_shape_change() { _shape_changed = true; } void build_deps(); + + void update_paddings(); void do_runtime_skip_reorder(); void do_runtime_skip_gather(); void do_runtime_skip_permute(); diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 28db3a8d013a07..996c90ccb99cec 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -463,29 +463,44 @@ event::ptr primitive_inst::realloc_if_needed() { if (_node->is_type()) return ev; + auto& sp = *get_network().get_shape_predictor(); + auto dt_size = ov::element::Type(actual_layout.data_type).bitwidth(); // read_value/assign nodes are supposed to always use variable memory if (auto stateful_prim = dynamic_cast<memory_state::variable*>(this)) { std::string variable_id = stateful_prim->variable_id(); auto& variable = get_network().get_variable(variable_id); - GPU_DEBUG_TRACE_DETAIL << "realloc_if_needed: variable " << id() << " set layout" << _impl_params->get_output_layout().to_string() << std::endl; if (_node->is_type<kv_cache>()) { // Reuse state memory as output for kv cache if possible // otherwise clear _outputs for the cases when mem was reused previously if (_impl_params->can_be_optimized()) { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: Set kvcache output memory as variable memory " << variable.get_memory()->buffer_ptr() + << " (ptr: " << variable.get_memory()->buffer_ptr() + << ", actual_size: " << variable.get_actual_mem_size()/8 << " bytes" + << ", variable layout " << variable.get_layout().to_short_string() << ")" << std::endl; + _outputs[0] = variable.get_memory(); + // Call the shape predictor only to record the current shape in its history + auto prealloc_info = sp.predict_preallocation_shape(id(), _impl_params->output_layouts[0].get_shape(), dt_size, true); return ev; } else if (_outputs[0] && variable.get_memory() && get_network().get_engine().is_the_same_buffer(*_outputs[0], *variable.get_memory())) { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: Reset output mem" << std::endl; _outputs[0] = nullptr; _max_output_layout_count = 0; + } else { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: can_be_optimized = false and memories are not being shared" << std::endl; } + } else { +
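    // Note on this branch (read_value/assign, i.e. the non-kv_cache stateful primitives):
    // the variable keeps its own buffer and only has its layout refreshed here. When such
    // a node is optimized out, the variable buffer itself serves as the output, which is
    // why _max_output_layout_count below is derived from the variable allocation; e.g. a
    // 1024-byte state holding f16 data yields 1024 / (16 / 8) = 512 usable elements.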
variable.set_layout(_impl_params->output_layouts[0]); + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable (ptr: " << variable.get_memory()->buffer_ptr() + << ", actual_size:" << variable.get_actual_mem_size() << " bytes" + << ", variable layout:" << variable.get_layout().to_short_string() << ")" << std::endl; } - variable.set_layout(actual_layout); - GPU_DEBUG_TRACE_DETAIL << id() << ": use variable memory " << variable.get_memory()->buffer_ptr() - << " (size=" << variable.get_memory()->size() << ")" << std::endl; // For nodes that can be optimized, variable memory is used as output memory // so there is no need for output memory reallocation - if (can_be_optimized()) + if (can_be_optimized()) { + _max_output_layout_count = variable.get_actual_mem_size() / (dt_size / 8); return ev; + } } // Update output layout with respect to FC's fake alignment @@ -507,14 +522,14 @@ } } - if (_node->is_type() || _node->is_type()) { - // For the nodes which can be optimized at runtime, input memory is used as output memory - // So there is no need to reallocate output memory - if (can_be_optimized()) + // Clear out memory if it was previously reused, but now primitive can't be optimized + if (_node->is_type() || _node->is_type() || _node->is_type() || _node->is_type()) { + if (can_be_optimized()) { + _max_output_layout_count = _deps[0].first->_max_output_layout_count; return ev; - // Clear out memory if if was previously reused, but now primitive can't be optimized - if (!can_be_optimized() && _outputs[0] && dep_memory_ptr(0) - && _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) { + } else if (_outputs[0] && dep_memory_ptr(0) && + _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) { + // Clear out memory if it was previously reused, but now primitive can't be optimized _outputs[0] = nullptr; _max_output_layout_count = 0; } // update layout to ensure that it respects paddings for correct allocation size if (_node_output_layout.data_padding.get_dynamic_pad_dims() != tensor(0)) { - const auto current_buf_size = updated_layout.get_buffer_size().sizes(); - updated_layout = layout(ov::Shape(current_buf_size.begin(), current_buf_size.end()), updated_layout.data_type, updated_layout.format); + size_t rank = updated_layout.get_shape().size(); + auto current_buf_shape = updated_layout.get_buffer_size().get_partial_shape(rank, std::min(static_cast<size_t>(4), rank)); + updated_layout = layout(current_buf_shape, updated_layout.data_type, updated_layout.format); } - bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= _max_output_layout_count; // If we allocated too large memory, reclaim the memory. - if (updated_layout.count() * 10 < _max_output_layout_count) - can_reuse_buffer = false; + if (updated_layout.count() * 10 < _max_output_layout_count) { + GPU_DEBUG_TRACE_DETAIL << id() << ": Updated output size " << updated_layout.count() + << " is much smaller than current memory size! " << _max_output_layout_count + << " Reset memory" << std::endl; + _max_output_layout_count = 0; + } + bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= _max_output_layout_count; // Handle runtime dynamic concat optimization if (_node->is_type<concatenation>() && can_be_optimized() && allocation_done_by_other) { allocation_done_by_other = false; @@ -539,16 +559,21 @@ event::ptr primitive_inst::realloc_if_needed() { } auto current_shape = updated_layout.get_shape(); - auto& sp = *get_network().get_shape_predictor(); - auto dt_size = ov::element::Type(updated_layout.data_type).bitwidth(); - auto prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer); + std::pair<bool, ov::Shape> prealloc_info; + int32_t tmp_prealloc_count = _node->is_type<kv_cache>() ? kv_cache_inst::get_prealloc_iter_num() : -1; + GPU_DEBUG_IF(debug_config->mem_preallocation_params.is_initialized) { + // If debug config is set, respect the config most + tmp_prealloc_count = -1; + } + prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer, tmp_prealloc_count); + if (prealloc_info.first && sp.can_preallocate(ov::shape_size(prealloc_info.second) * dt_size)) { auto new_layout = updated_layout; new_layout.set_partial_shape(prealloc_info.second); updated_params.output_layouts[0] = new_layout; } - if (updated_params.output_layouts[0].count() < updated_layout.count()) + if (updated_params.output_layouts[0].get_buffer_size().count() < updated_layout.get_buffer_size().count()) updated_params.output_layouts[0] = updated_layout; if (can_reuse_buffer) { @@ -567,8 +592,61 @@ event::ptr primitive_inst::realloc_if_needed() { << " Requested buffer_size=" << updated_layout.count() << std::endl; _outputs = allocate_outputs(&updated_params, need_reset_output_memory(), true); // TODO : need to handle multiple outputs - _max_output_layout_count = updated_params.output_layouts[0].count(); + _max_output_layout_count = updated_params.output_layouts[0].get_buffer_size().count(); + } + // Set variable memory same as output memory + if (_node->is_type<kv_cache>()) { + auto desc = _node->as<kv_cache>().get_primitive(); + auto& variable = get_network().get_variable(desc->variable_info.variable_id); + auto present_layout = _impl_params->output_layouts[0]; + const auto& sequence_axis = desc->concat_axis; + auto sequence_axis_legacy = + kv_cache_inst::get_sequence_axis_legacy(sequence_axis, present_layout.get_partial_shape().size()); + GPU_DEBUG_TRACE_DETAIL << id() << " is kv_cache => set the variable with newly allocated output memory" + << std::endl; + bool axis_is_outer_most = true; + for (int64_t dim = 0; dim < sequence_axis; ++dim) { + if (present_layout.get_shape()[dim] > 1) { + axis_is_outer_most = false; + break; + } + } + if (present_layout.data_padding.get_dynamic_pad_dims().sizes()[sequence_axis_legacy] == 1) { + // Apply padding of variable to make it be optimized in the next iteration + auto max_pad = kv_cache_inst::get_max_pad(present_layout, + updated_params.output_layouts[0].get_buffer_size().count(), + sequence_axis_legacy, + "present_layout"); + if (max_pad > 0) { + kv_cache_inst::update_pad(present_layout, max_pad, sequence_axis_legacy); + if (!axis_is_outer_most) { + GPU_DEBUG_TRACE_DETAIL << id() << ": Update impl with new output padding" << std::endl; + set_shape_change(); + _impl_params->output_layouts[0] = present_layout; + update_impl(); + } + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable " << variable.get_name() + << "'s memory with allocated kv cache output: " <<
+ _mem_allocated = true; // intermediate memory allocation is required for primitives consisting of multiple kernels in dynamic case { @@ -780,6 +858,37 @@ bool primitive_inst::update_impl() { return true; } +void primitive_inst::update_paddings() { +    auto reset_pad = [](kernel_impl_params& params, const program_node* node) { +        params.output_layouts[0].data_padding = node->get_output_layout(0).data_padding; +    }; +    if (_node->is_type()) { +        auto& variable = get_network().get_variable(_node->as().get_primitive()->variable_id); +        // Reset paddings for read_value and users with dynamic pad when variable is reset +        // to avoid a wrong pad being used for some nodes due to pad propagation logic (which uses previous iter pad values) +        if (!variable.is_set()) { +            primitive_inst* inst = this; +            while (inst) { +                reset_pad(*inst->_impl_params, inst->_node); +                auto& users = inst->_node->get_users(); +                if (users.size() == 1 && users.front()->get_output_layout(0).data_padding.get_dynamic_pad_dims() != tensor(0)) { +                    inst = inst->get_user_insts().front(); +                } else { +                    inst = nullptr; +                } +            } +        } +        return; +    } +    if (_node->is_type() && _impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) { +        if (can_be_optimized()) +            _impl_params->output_layouts[0] = _impl_params->input_layouts[0]; +        else +            reset_pad(*_impl_params, _node); +        return; +    } +} + void primitive_inst::do_runtime_skip_reorder() { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_skip_reorder: " + id())); GPU_DEBUG_GET_INSTANCE(debug_config); @@ -840,91 +949,41 @@ void primitive_inst::do_runtime_skip_reorder() { void primitive_inst::do_runtime_in_place_kv_cache() { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_in_place_kv_cache: " + id())); -    auto reset_pad = [](kernel_impl_params& params, const program_node* node) { -        params.output_layouts[0].data_padding = node->get_output_layout(0).data_padding; -    }; -    if (_node->is_type()) { -        auto& variable = get_network().get_variable(_node->as().get_primitive()->variable_id); -        // Reset paddings for read_value and users with dynamic pad when variable is reset -        // to avoid wrong pad used for some nodes due to pad propagation logic (which uses previous iter pad values) -        if (!variable.is_set()) { -            primitive_inst* inst = this; -            while (inst) { -                reset_pad(*inst->_impl_params, inst->_node); -                auto& users = inst->_node->get_users(); -                if (users.size() == 1 && users.front()->get_output_layout(0).data_padding.get_dynamic_pad_dims() != tensor(0)) { -                    inst = inst->get_user_insts().front(); -                } else { -                    inst = nullptr; -                } -            } -        } -        
return; -    } - -    if (_node->is_type() && _impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) { -        if (can_be_optimized()) -            _impl_params->output_layouts[0] = _impl_params->input_layouts[0]; -        else -            reset_pad(*_impl_params, _node); -        return; -    } - if (!_node->is_type()) return; _impl_params->_can_be_optimized = false; + if (_impl_params->get_input_layout(0).count() == 0) { return; } - auto desc = _node->as().get_primitive(); auto& past_layout = _impl_params->input_layouts[0]; auto& present_layout = _impl_params->output_layouts[0]; const auto& sequence_axis = desc->concat_axis; -    auto sequence_axis_legacy = sequence_axis; -    if (sequence_axis_legacy >= 2) { -        auto spatial_axis = sequence_axis_legacy - 2; -        // Default and minimum number of dimensions is 4 -        auto spatial_size = std::max(past_layout.get_partial_shape().size(), 4) - 2; -        sequence_axis_legacy = spatial_size - spatial_axis - 1 + 2; -    } - +    auto sequence_axis_legacy = kv_cache_inst::get_sequence_axis_legacy(sequence_axis, past_layout.get_partial_shape().size()); if (present_layout.data_padding.get_dynamic_pad_dims().sizes()[sequence_axis_legacy] != 1) return; -    const size_t total_elements = past_layout.count(); -    const int64_t concat_axis_size = past_layout.get_partial_shape()[sequence_axis].get_length(); -    const int64_t sequence_element_size = total_elements / concat_axis_size; - -    const int64_t max_sequence_elements = _deps[0].first->_max_output_layout_count / sequence_element_size; -    const int64_t max_pad = std::max(max_sequence_elements - concat_axis_size, 0); +    GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " initial present_layout : " << present_layout.to_string() << std::endl; +    GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " initial past_layout : " << past_layout.to_string() << std::endl; +    auto max_pad = kv_cache_inst::get_max_pad(past_layout, _deps[0].first->_max_output_layout_count, sequence_axis_legacy, "past_layout"); if (max_pad > 0) { -        auto update_pad = [&](layout& l, int64_t pad) { -            const auto& dyn_pad_dims = l.data_padding.get_dynamic_pad_dims(); -            const auto& lower_padd = l.data_padding.lower_size().sizes(); -            auto upper_padd = l.data_padding.upper_size().sizes(); -            upper_padd[sequence_axis_legacy] = pad; -            l.data_padding = padding(lower_padd, upper_padd, 0.f, dyn_pad_dims); -        }; - -        update_pad(present_layout, max_pad - 1); +        kv_cache_inst::update_pad(present_layout, max_pad - 1, sequence_axis_legacy); +        GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated present_layout's pad : " << present_layout.to_string() << std::endl; auto& variable = get_network().get_variable(desc->variable_info.variable_id); -        GPU_DEBUG_TRACE_DETAIL << "do_runtime_in_place_kv_cache set_layout: " << present_layout.to_string() << " is_set = " << variable.is_set() << std::endl; variable.set_layout(present_layout); +        GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated variable with present_layout " +                               << variable.get_layout().to_string() << " is_set = " << variable.is_set() << std::endl; if (past_layout.data_padding.upper_size().sizes()[sequence_axis_legacy] > 0 && variable.is_set()) { -            update_pad(past_layout, max_pad); +            kv_cache_inst::update_pad(past_layout, max_pad, sequence_axis_legacy); _impl_params->_can_be_optimized = true; +            GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated past layout's pad : " << past_layout.to_string() << std::endl; } -    GPU_DEBUG_TRACE_DETAIL << "[do runtime 
kv_cache opt] concat_axis_size = " << concat_axis_size << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] sequence_element_size = " << sequence_element_size << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] max_sequence_elements = " << max_sequence_elements << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] max_pad = " << max_pad << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] can be optimized: " << _impl_params->_can_be_optimized << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << present_layout.to_string() << std::endl; } + GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " can be optimized: " << _impl_params->_can_be_optimized << std::endl; } void primitive_inst::do_runtime_skip_gather() { @@ -1117,7 +1176,6 @@ event::ptr primitive_inst::execute(const std::vector& events) { OPENVINO_ASSERT(_node != nullptr, "[GPU] Invalid primitive_inst object for dynamic shapes case: program_node can't be null"); update_shape(); - do_runtime_in_place_kv_cache(); bool can_skip_execution = false; if (_impl_params->output_layouts[0].count() == 0) { @@ -1150,6 +1208,8 @@ event::ptr primitive_inst::execute(const std::vector& events) { // if the user is can_be_optimized and output node then current nodes' output should be allocated to host. do_runtime_skip_reorder(); do_runtime_skip_gather(); + update_paddings(); + do_runtime_in_place_kv_cache(); do_runtime_skip_permute(); if (!is_valid_fusion()) { diff --git a/src/plugins/intel_gpu/src/graph/read_value.cpp b/src/plugins/intel_gpu/src/graph/read_value.cpp index ec80ea5ef707cc..bf6e730e8a808b 100644 --- a/src/plugins/intel_gpu/src/graph/read_value.cpp +++ b/src/plugins/intel_gpu/src/graph/read_value.cpp @@ -40,6 +40,10 @@ void read_value_inst::update_output_memory() { return; const auto& variable = get_network().get_variable(variable_id()); + GPU_DEBUG_TRACE_DETAIL << id() << " Update output memory with variable " << variable_id() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - ptr : " << variable.get_memory()->buffer_ptr() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - layout " << variable.get_layout().to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - actual_size " << variable.get_actual_mem_size() << " bytes" << std::endl; set_output_memory(variable.get_memory(), false, 0); } } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp index 1d89991826b94a..e2728eb1b0dea0 100644 --- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp +++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp @@ -47,9 +47,20 @@ void VariableState::set() { m_is_set = true; } +void VariableState::set_memory(const cldnn::memory::ptr& new_mem, const cldnn::layout& actual_layout) { + GPU_DEBUG_TRACE_DETAIL << m_name << " : Update memory (Ptr : " << new_mem->buffer_ptr() + << ", layout : " << actual_layout.to_short_string() << ")" << std::endl; + m_memory = new_mem; + m_layout = actual_layout; + actual_size = m_memory->size(); + update_device_buffer(); +} + void VariableState::set_layout(const cldnn::layout& new_layout) { + if (m_layout == new_layout) + return; m_layout = new_layout; - GPU_DEBUG_TRACE_DETAIL << "Update state layout to " << new_layout.to_short_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << m_name << " : " << "Update state layout to " << new_layout.to_short_string() << std::endl; update_device_buffer(); } @@ -61,8 +72,12 @@ void VariableState::set_state(const 
ov::SoPtr& state) { } void VariableState::update_device_buffer() { -    if (m_layout.is_dynamic() || m_layout.bytes_count() == 0) +    if (m_layout.is_dynamic() || m_layout.bytes_count() == 0) { +        m_shape_predictor->reset(); +        m_memory.reset(); +        actual_size = 0; return; +    } if (actual_size < m_layout.bytes_count()) { const auto alloc_type = m_context->get_engine().use_unified_shared_memory() ? cldnn::allocation_type::usm_device : cldnn::allocation_type::cl_mem; diff --git a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp index 1ff00c905bd073..599878ca5566b1 100644 --- a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp +++ b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp @@ -59,7 +59,11 @@ bool ShapePredictor::can_preallocate(size_t desired_buffer_size) { std::pair ShapePredictor::predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, size_t dt_bitwidth, -                                                                 bool can_reuse_buffer) { +                                                                 bool can_reuse_buffer, +                                                                 int32_t custom_next_iters_prealloc_count) { +    size_t next_iters_prealloc_count = custom_next_iters_prealloc_count > 0 +                                           ? static_cast(custom_next_iters_prealloc_count) +                                           : _next_iters_preallocation_count; add_shape(id, current_shape); // Save shape information and exit without pre-allocation suggestion if current @@ -83,7 +87,6 @@ std::pair ShapePredictor::predict_preallocation_shape(const std break; diffs.push_back(result); } - bool can_use_iterations_preallocation = diffs.size() == min_shapes_num - 1; for (size_t i = 1; i < diffs.size(); ++i) { if (diffs[0] != diffs[i]) { @@ -116,7 +119,7 @@ std::pair ShapePredictor::predict_preallocation_shape(const std if (can_use_iterations_preallocation) { // Apply preallocation for the next N iterations -        ov::Shape mul_shape(diffs[0].size(), _next_iters_preallocation_count); +        ov::Shape mul_shape(diffs[0].size(), next_iters_prealloc_count); auto preallocation_shape = diffs[0] * mul_shape; auto new_shape = current_shape + preallocation_shape; return {true, new_shape}; From d699522843994734c2aeb0dfc0911ac4451eaafd Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 16 Jan 2024 23:50:10 +0100 Subject: [PATCH 032/122] [GHA] Update torchbench tests due to removal of phi_1_5 (#22191) --- tests/model_hub_tests/torch_tests/test_torchbench.py | 5 +---- tests/model_hub_tests/torch_tests/torch_utils.py | 2 +- tests/model_hub_tests/torch_tests/torchbench_models | 1 - 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/model_hub_tests/torch_tests/test_torchbench.py b/tests/model_hub_tests/torch_tests/test_torchbench.py index 425862d14cd6f3..04c2c96de0ae63 100644 --- a/tests/model_hub_tests/torch_tests/test_torchbench.py +++ b/tests/model_hub_tests/torch_tests/test_torchbench.py @@ -29,12 +29,9 @@ def setup_class(self): f"git clone https://github.com/pytorch/benchmark.git {self.repo_dir.name}") subprocess.check_call( ["git", "checkout", "850364ac2678b2363f086b7549254b6cb7df2e4d"], cwd=self.repo_dir.name) -        m_list = get_models_list(self._model_list_path) -        m_processed_list = [m for m, _, mark, _ in m_list if mark != "skip"] -        subprocess.check_call( -            [sys.executable, "install.py"]+m_processed_list, cwd=self.repo_dir.name) def load_model(self, model_name, model_link): +        subprocess.check_call([sys.executable, "install.py"] + [model_name], cwd=self.repo_dir.name) sys.path.append(self.repo_dir.name) from torchbenchmark import load_model_by_name try: diff --git a/tests/model_hub_tests/torch_tests/torch_utils.py 
b/tests/model_hub_tests/torch_tests/torch_utils.py index d92462efaf6521..87c9aeb043f596 100644 --- a/tests/model_hub_tests/torch_tests/torch_utils.py +++ b/tests/model_hub_tests/torch_tests/torch_utils.py @@ -59,7 +59,7 @@ def prepare_inputs(self, inputs_info): if isinstance(inputs, dict): return dict((k, v.numpy()) for k, v in inputs.items()) else: -            return [i.numpy() for i in inputs] +            return flattenize_structure(inputs) def convert_model(self, model_obj): try: diff --git a/tests/model_hub_tests/torch_tests/torchbench_models b/tests/model_hub_tests/torch_tests/torchbench_models index 8634635574b400..6e75f637b71934 100644 --- a/tests/model_hub_tests/torch_tests/torchbench_models +++ b/tests/model_hub_tests/torch_tests/torchbench_models @@ -60,7 +60,6 @@ mobilenet_v2_quantized_qat,None #nanogpt,None,skip,No install.py is found nvidia_deeprecommender,None opacus_cifar10,None,skip,Modules that have backward hooks assigned can't be compiled -phi_1_5,None phlippe_densenet,None phlippe_resnet,None pyhpc_equation_of_state,None,xfail,Accuracy validation failed From 10d7c6b538063c28214acb32d7ff8d6e500fa232 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Wed, 17 Jan 2024 13:38:45 +0900 Subject: [PATCH 033/122] [GPU] Avoid runtime concat opt out if static padding exists in non in-axis and non dyn_pad mask (#21914) * avoid runtime concat opt out if static padding exists in non in-axis and non dyn_pad mask * remove dyn_pad check and move the check to an earlier location * limit max index of padding check * add unit test for skip runtime in place concat when static padding in non-concat axis * apply condition for build time also * change condition to check dynamic shape instead of runtime --- .../graph_optimizer/prepare_buffer_fusing.cpp | 17 ++++- .../passes/prepare_buffer_fusing_test.cpp | 73 +++++++++++++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 6a51ed0504b0ff..90565ef9ceaa26 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -80,6 +80,21 @@ bool concat_in_place_optimization::match(const program_node& concat_node, GPU_DEBUG_IF(debug_config->disable_runtime_buffer_fusing) { do_runtime_buffer_fusing = false; } + +    auto concat_axis = concat_params.typed_desc()->axis; +    size_t concat_axis_index = concat_axis < 0 ? concat_axis + concat_params.get_output_layout().get_rank() : concat_axis; +    auto def_fmt = format::get_default_format(concat_params.get_output_layout().get_rank()); +    // If static padding exists in a non dyn_pad axis, return false to avoid being optimized out. +    if (concat_node.is_dynamic()) { +        for (size_t j = 0; j < concat_params.get_output_layout().get_rank(); j++) { +            if (j != concat_axis_index) { +                if ((concat_params.get_output_layout().data_padding.lower_size().sizes(def_fmt)[j] != 0) +                    || (concat_params.get_output_layout().data_padding.upper_size().sizes(def_fmt)[j] != 0)) +                    return false; +            } +        } +    } + auto pred_nodes = concat_node.get_dependencies(); for (auto p : pred_nodes) { // TODO : In dynamic shape only one user is allowed for optimized concat @@ -105,9 +120,7 @@ bool concat_in_place_optimization::match(const program_node& concat_node, // Otherwise, use explicit concat instead. 
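The check added above opts out of the in-place concat optimization as soon as any non-concat axis carries static padding. A standalone restatement of that predicate, with hypothetical names (the real code reads the pads from the output layout's data_padding):

#include <cstddef>
#include <vector>

// True only when every axis other than concat_axis is free of static
// padding; such padding breaks the back-to-back buffer layout that
// in-place concatenation relies on.
bool non_concat_axes_unpadded(const std::vector<int>& lower_pad,
                              const std::vector<int>& upper_pad,
                              size_t concat_axis) {
    for (size_t j = 0; j < lower_pad.size(); ++j) {
        if (j == concat_axis)
            continue;
        if (lower_pad[j] != 0 || upper_pad[j] != 0)
            return false;
    }
    return true;
}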
auto output_format = concat_params.get_output_layout().format; auto output_datatype = concat_params.get_output_layout().data_type; - auto concat_axis = concat_params.typed_desc()->axis; - auto def_fmt = format::get_default_format(concat_params.get_output_layout().get_rank()); auto lower_padd_in_axis = concat_params.get_output_layout().data_padding.lower_size().sizes(def_fmt)[concat_axis]; lower_padd_in_axis = std::max(lower_padd_in_axis, pred_params[0].get_output_layout().data_padding.lower_size().sizes(def_fmt)[concat_axis]); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 46d830d3e2cda2..d6a672e1ce2f56 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -886,6 +886,79 @@ TEST(prepare_buffer_fusing, test_checking_padding_supported) { ASSERT_EQ(concat.can_be_optimized(), false); } +TEST(prepare_buffer_fusing, skip_in_place_concat_padding_in_non_concat_axis_of_dynamic) { + tests::random_generator rg(GET_SUITE_NAME); + auto& engine = get_test_engine(); + auto in_layout = layout{ ov::PartialShape{ov::Dimension::dynamic(), 3, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f16, format::bfyx}; + + auto begin = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + auto end = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + auto strides = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + set_values(begin, {0, 0, 0, 0}); + set_values(end, {0, 0, 0, 9223372036854775807 }); + set_values(strides, {1, 1, 1, 2}); + + auto concat_padding = padding({0,0,1,1}, {0,0,1,1}); + + + auto in_static_layout = layout{ ov::PartialShape{1, 3, 320, 640}, data_types::f16, format::bfyx}; + auto input1_mem = engine.allocate_memory(in_static_layout); + auto input2_mem = engine.allocate_memory(in_static_layout); + auto input3_mem = engine.allocate_memory(in_static_layout); + auto input4_mem = engine.allocate_memory(in_static_layout); + + auto in1 = rg.generate_random_1d(input1_mem->count(), 0, 1); + auto in2 = rg.generate_random_1d(input2_mem->count(), 0, 1); + auto in3 = rg.generate_random_1d(input3_mem->count(), 0, 1); + auto in4 = rg.generate_random_1d(input4_mem->count(), 0, 1); + + set_values(input1_mem, in1); + set_values(input2_mem, in2); + set_values(input3_mem, in3); + set_values(input4_mem, in4); + + topology topology( + input_layout("input1", in_layout), + input_layout("input2", in_layout), + input_layout("input3", in_layout), + input_layout("input4", in_layout), + data("begin", begin), + data("end", end), + data("strides", strides), + strided_slice("strided_slice1", input_info("input1"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice2", input_info("input2"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice3", input_info("input3"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice4", input_info("input4"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + concatenation("concat", {input_info("strided_slice1"), input_info("strided_slice2"), 
input_info("strided_slice3"), input_info("strided_slice4")}, 1, concat_padding), reorder("reorder", input_info("concat"), format::fs_b_yx_fsv32, data_types::f16)); + +    ExecutionConfig config = get_test_default_config(engine); +    config.set_property(ov::intel_gpu::optimize_data(true)); +    config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + +    auto program = program::build_program(engine, topology, config, false, true); +    program_wrapper::apply_opt_pass(*program); +    ASSERT_NE(program, nullptr); + +    auto& concat = program->get_node("concat"); +    ASSERT_EQ(concat.can_be_optimized(), false); + +    network network(engine, topology, config); +    network.set_input_data("input1", input1_mem); +    network.set_input_data("input2", input2_mem); +    network.set_input_data("input3", input3_mem); +    network.set_input_data("input4", input4_mem); +    auto outputs = network.execute(); + +    const auto& concat_inst = network.get_primitive("concat"); +    ASSERT_EQ(concat_inst->can_be_optimized(), false); +} + #ifdef ENABLE_ONEDNN_FOR_GPU TEST(prepare_buffer_fusing, in_place_onednn_concat_static) { auto& engine = get_test_engine(); From a086e9a3d8d2cce4b3ed871054e009e1a3012520 Mon Sep 17 00:00:00 2001 From: Tingqian Li Date: Wed, 17 Jan 2024 13:07:31 +0800 Subject: [PATCH 034/122] [CPU] Optimize ops in shape infer subgraph (#21932) --- src/plugins/intel_cpu/src/nodes/concat.cpp | 38 +++++++++++++ src/plugins/intel_cpu/src/nodes/concat.h | 2 + src/plugins/intel_cpu/src/nodes/gather.cpp | 44 +++++++++++++++ src/plugins/intel_cpu/src/nodes/gather.h | 3 ++ .../intel_cpu/src/nodes/scatter_update.cpp | 23 ++++++++ .../src/utils/debug_capabilities.cpp | 38 +++++++++++++ .../intel_cpu/src/utils/debug_capabilities.h | 3 +- .../functional/single_layer_tests/gather.cpp | 31 +++++++++++ .../single_layer_tests/scatter_update.cpp | 6 +++ .../src/shape_infer_subgraph.cpp | 54 +++++++++++++++++++ 10 files changed, 241 insertions(+), 1 deletion(-) create mode 100644 src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp diff --git a/src/plugins/intel_cpu/src/nodes/concat.cpp b/src/plugins/intel_cpu/src/nodes/concat.cpp index 1d4d75a40bd5d9..b50cfb13949bcd 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.cpp +++ b/src/plugins/intel_cpu/src/nodes/concat.cpp @@ -342,6 +342,26 @@ void Concat::prepareParams() { hasOuterLoop = true; } } + +    canOptimize1DCase = false; +    if (outputShape.size() == 1 && outputStrides[0] == 1 && outputShape[0] <= 64 && elemSize == 4) { +        // output is a small 1d vector (which is typical in shape inference subgraphs); +        // in this case, inputs are also small 1d vectors and a single-threaded naive impl is faster +        canOptimize1DCase = true; +        for (size_t i = 0; i < getParentEdges().size(); i++) { +            const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr(); +            const auto srcMemDesc = srcMemPtr->getDescPtr()->as(); +            const auto& inputShape = srcMemDesc->getBlockDims(); +            const auto& strides = srcMemDesc->getStrides(); +            if (inputShape.size() != 1 || strides.size() != 1) { +                canOptimize1DCase = false; +                break; +            } +        } +        if (canOptimize1DCase) +            return; +    } + std::vector srcs_d; for (size_t i = 0; i < getParentEdges().size(); i++) { const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr(); @@ -451,6 +471,11 @@ void Concat::execute(dnnl::stream strm) { return; } +    if (canOptimize1DCase) { +        exec1DCase(); +        return; +    } + if (canOptimizeNspc) { execNspcSpecCase(); return; @@ -479,6 +504,19 @@ ov::element::Type Concat::getRuntimePrecision() const { return getMaxPrecision(getInputPrecisions()); }
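The hunk continues below with the Concat::exec1DCase() body. Conceptually the fast path is just an ordered, single-threaded append of each short 1D i32 input; a self-contained sketch using std::vector in place of the node's memory objects:

#include <cstdint>
#include <vector>

// Concatenating short 1D i32 tensors degenerates into appending each
// source to the destination in input order.
std::vector<int32_t> concat_1d(const std::vector<std::vector<int32_t>>& srcs) {
    std::vector<int32_t> dst;
    for (const auto& src : srcs)
        dst.insert(dst.end(), src.begin(), src.end());
    return dst;
}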
+void Concat::exec1DCase() { +    DEBUG_LOG(getName(), " exec1DCase"); +    auto* dst = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->getData()); +    for (size_t i = 0; i < getParentEdges().size(); i++) { +        const auto& srcMemPtr = getParentEdgeAt(i)->getMemoryPtr(); +        const auto& srcShape = srcMemPtr->getStaticDims(); +        const auto* src = reinterpret_cast(srcMemPtr->getData()); +        for (size_t j = 0; j < srcShape[0]; j++) { +            *dst++ = src[j]; +        } +    } +} + void Concat::execNspcSpecCase() { const auto& dst_memory = getChildEdgeAt(0)->getMemory(); const size_t num_src = getParentEdges().size(); diff --git a/src/plugins/intel_cpu/src/nodes/concat.h b/src/plugins/intel_cpu/src/nodes/concat.h index d751e3649b04dd..9236c8420e8d00 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.h +++ b/src/plugins/intel_cpu/src/nodes/concat.h @@ -36,9 +36,11 @@ class Concat : public Node { size_t reorderedAxis = 0; bool canBeInPlace = false; bool canOptimizeNspc = false; +    bool canOptimize1DCase = false; void execRef(); size_t inverseOrder(const VectorDims& order, size_t axis); void execNspcSpecCase(); +    void exec1DCase(); std::vector inputStrides; std::vector nelemToCopy; // byte moved in each iter std::vector dstOffset; // dst offset for each input diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index 6a9949365ced87..102dbf509d00ea 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -268,6 +268,18 @@ void Gather::prepareParams() { if (getSelectedPrimitiveDescriptor() == nullptr) THROW_ERROR(" has unidentified preferable primitive descriptor."); +    // short 1D vector fast execution impl (typical in shape infer subgraph) +    canOptimize1DCase = false; +    if (dataSrcRank <= 1 && dataMemPtr->getDesc().getPrecision() == ov::element::i32) { +        const auto& dataDims = dataMemPtr->getStaticDims(); +        const auto& idxDims = idxMemPtr->getStaticDims(); +        if ((dataDims.size() == 0 || (dataDims.size() == 1 && dataDims[0] <= 64)) && +            (idxDims.size() == 0 || (idxDims.size() == 1 && idxDims[0] <= 64))) { +            canOptimize1DCase = true; +            return; +        } +    } + if (!isAxisInputConst) { axis = (reinterpret_cast(getParentEdgeAt(GATHER_AXIS)->getMemoryPtr()->getData()))[0]; if (axis < 0) @@ -317,6 +329,11 @@ void Gather::execute(dnnl::stream strm) { if (isInPlace()) { return; } + +    if (canOptimize1DCase) { +        exec1DCase(); +        return; +    } #if defined(OPENVINO_ARCH_X86_64) if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) { const void* srcIndices = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr()->getData(); @@ -376,6 +393,10 @@ void Gather::executeDynamicImpl(dnnl::stream strm) { if (isInPlace()) { return; } +    if (canOptimize1DCase) { +        exec1DCase(); +        return; +    } #if defined(OPENVINO_ARCH_X86_64) if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) { const void* srcIndices = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr()->getData(); @@ -536,6 +557,29 @@ void Gather::execReference() { }); } +void Gather::exec1DCase() { +    DEBUG_LOG(getName(), " exec1DCase"); +    auto* pdst = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->getData()); +    auto srcMemPtr = getParentEdgeAt(GATHER_DATA)->getMemoryPtr(); +    auto idxMemPtr = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr(); +    const auto* psrc = reinterpret_cast(srcMemPtr->getData()); +    const auto* pidx = reinterpret_cast(idxMemPtr->getData()); + +    const auto& idxDims = idxMemPtr->getStaticDims(); +    const auto idxCnt = 
(idxDims.size() == 0) ? 1 : idxDims[0]; + auto axisDim = srcMemPtr->getStaticDims()[0]; + for (size_t i = 0; i < idxCnt; i++) { + auto ii = pidx[i]; + if (ii < 0) { + if (reverseIndexing) + ii += axisDim; + else + ii = axisDim; + } + pdst[i] = psrc[ii]; + } +} + bool Gather::created() const { return getType() == Type::Gather; } diff --git a/src/plugins/intel_cpu/src/nodes/gather.h b/src/plugins/intel_cpu/src/nodes/gather.h index bc91b106cf573a..87f4f3a09ce5be 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.h +++ b/src/plugins/intel_cpu/src/nodes/gather.h @@ -56,6 +56,9 @@ class Gather : public Node { void initShortParams(threadExecParams& p, uint64_t start); void execReference(); + bool canOptimize1DCase = false; + void exec1DCase(); + bool isDataShapeStat = false; bool isIdxShapeStat = false; bool isAxisInputConst = false; diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index 092ff83119a4e1..cd3e3c41aeb503 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -277,6 +277,29 @@ void ScatterUpdate::execute(dnnl::stream strm) { const auto& srcDataDim = getParentEdgeAt(DATA_ID)->getMemory().getStaticDims(); const auto& indicesDim = getParentEdgeAt(INDICES_ID)->getMemory().getStaticDims(); size_t srcRank = srcDataDim.size(); + + // 1d short vector scatter update optimized for shape inference subgraph + if (scatterUpdateMode == ScatterUpdateMode::ScatterUpdate && srcDataDim.size() == 1 && indicesDim.size() <= 1 && + indicesPrec == ov::element::i32 && dataPrec == ov::element::i32 && srcDataDim[0] <= 64) { + auto updateDims = updateMemPtr->getStaticDims(); + if (updateDims.size() <= 1) { + DEBUG_LOG(getName(), " exec1DCase"); + auto updateCnt = (updateDims.size() == 0) ? 1 : updateDims[0]; + auto srcLength = srcMemPtr->getStaticDims()[0]; + auto* psrc = reinterpret_cast(srcPtr); + auto* pdst = reinterpret_cast(dstPtr); + for (size_t i = 0; i < srcLength; i++) { + pdst[i] = psrc[i]; + } + auto* pindices = reinterpret_cast(indicesPtr); + auto* pupdate = reinterpret_cast(updatePtr); + for (size_t i = 0; i < updateCnt; i++) { + pdst[pindices[i]] = pupdate[i]; + } + return; + } + } + int axis = 0; if (axisRelaxed) { auto axisMemPtr = getParentEdgeAt(AXIS_ID)->getMemoryPtr(); diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 50c6cdb8fb6cac..2018f57daa6615 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -10,6 +10,7 @@ #include #ifdef CPU_DEBUG_CAPS +#include "cpu_memory.h" #include "debug_capabilities.h" #include "node.h" #include "edge.h" @@ -608,6 +609,43 @@ std::ostream & operator<<(std::ostream & os, const dnnl::memory::format_tag form return os; } +template +std::string to_string(const T* values, size_t N, size_t maxsize) { + std::stringstream ss; + for (size_t i = 0; i < N; i++) { + if (i > 0) + ss << ","; + if (ss.tellp() > static_cast(maxsize)) { + ss << "..." 
<< N << " in total"; break; } if (std::is_same::value || std::is_same::value) ss << static_cast(values[i]); else ss << values[i]; } return ss.str(); } + +std::ostream& operator<<(std::ostream& os, const IMemory& mem) { +    const auto& desc = mem.getDesc(); +    os << desc; +    if (mem.isAllocated()) { +        os << " ["; +        if (desc.getPrecision() == ov::element::i32) { +            os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(int32_t), 256); +        } else if (desc.getPrecision() == ov::element::f32) { +            os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(float), 256); +        } else if (desc.getPrecision() == ov::element::i64) { +            os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(int64_t), 256); +        } else { +            os << " ? "; +        } +        os << "]"; +    } +    return os; +} + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.h b/src/plugins/intel_cpu/src/utils/debug_capabilities.h index c8494c6e06d303..f593bfa36dc3d7 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.h +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.h @@ -44,7 +44,7 @@ class NodeDesc; class MemoryDesc; class Node; class Edge; - +class IMemory; class PrintableModel { public: PrintableModel(const ov::Model& model, std::string tag = "", std::string prefix = "") : model(model), tag(tag), prefix(prefix) {} @@ -92,6 +92,7 @@ class PrintableTimer { std::ostream & operator<<(std::ostream & os, const NodeDesc& desc); std::ostream & operator<<(std::ostream & os, const Node& node); std::ostream & operator<<(std::ostream & os, const MemoryDesc& desc); +std::ostream & operator<<(std::ostream & os, const IMemory& mem); std::ostream & operator<<(std::ostream & os, const Edge& edge); std::ostream & operator<<(std::ostream & os, const PrintableModel& model); std::ostream & operator<<(std::ostream & os, const PrintableDelta& us); diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp index 6d9ac97310f82a..3362bda02fdb78 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp @@ -274,6 +274,20 @@ INSTANTIATE_TEST_SUITE_P(smoke_static_1D, ::testing::Values(additionalConfig[0])), GatherLayerTestCPU::getTestCaseName); +const std::vector> staticInputShapes1DI32 = {{{{}, {{1}}}, {{}, {{1}}}}, +                                                                            {{{}, {{15}}}, {{}, {{15}}}}, +                                                                            {{{}, {{64}}}, {{}, {{64}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_static_1D_I32, +                         GatherLayerTestCPU, +                         ::testing::Combine(::testing::ValuesIn(staticInputShapes1DI32), +                                            ::testing::Values(std::tuple{0, 0}), +                                            ::testing::Values(ElementType::i32), +                                            ::testing::Values(true), +                                            ::testing::Values(CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}), +                                            ::testing::Values(additionalConfig[0])), +                         GatherLayerTestCPU::getTestCaseName); + const std::vector> dynamicInputShapes1D = { {{{ov::Dimension{1, 70}}, // Dynamic shape 0 {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13}, @@ -293,6 +307,23 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D, ::testing::Values(additionalConfig[0])), GatherLayerTestCPU::getTestCaseName); +const std::vector> dynamicInputShapes1DI32 = { +    {{{ov::Dimension{1, 70}}, // Dynamic shape 0 +      {{1}, {15}, {64}}}, // Target shapes +     {{-1}, // Dynamic shape 1 +      {{1}, {15}, {64}}}} // Target shapes +}; + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D_I32, +                         GatherLayerTestCPU, 
::testing::Combine(::testing::ValuesIn(dynamicInputShapes1DI32), + ::testing::Values(std::tuple{0, 0}), + ::testing::Values(ElementType::i32), + ::testing::Values(true, false), + ::testing::Values(CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + ///// 4D JIT ///// std::vector> get4DShapesJitStat(int maxBatchDims) { std::vector> result = {}; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp index 3c43939e654bf2..fdf408ded1e676 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp @@ -112,6 +112,12 @@ const std::vector scatterParams = { {{4, 2, {3, 9}, {4, 11}, {2, 3}, {2, 4}}, {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}}, IndicesDescription{{4, 2}, {0, 2, 4, 6, 1, 3, 5, 7}}, Axis{0}}, + ScatterUpdateLayerParams{ScatterUpdateShapes{ + {{-1}, {{9}, {32}, {63}, {64}}}, + {{-1}, {{2}, {2}, {2}, {2}}}, + }, + IndicesDescription{{2}, {1, 8}}, + Axis{0}}, }; const std::vector inputPrecisions = { diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp new file mode 100644 index 00000000000000..71dd0a11e99e93 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +class ShapeInferSubgraphTest : virtual public SubgraphBaseTest { +public: + void run() override { + ov::element::Type netPrecision = inType = outType = ov::element::f32; + targetDevice = ov::test::utils::DEVICE_CPU; + + ov::ParameterVector params{ + std::make_shared(netPrecision, ov::PartialShape({-1, -1, -1}))}; + + auto const_op = [](const std::vector& values) { + return op::v0::Constant::create(ElementType::i64, {values.size()}, values); + }; + + auto shapeOf = std::make_shared(params[0]); + auto gather1 = std::make_shared(shapeOf, const_op({0}), const_op({0})); + auto gather2 = std::make_shared(shapeOf, const_op({1, 2}), const_op({0})); + auto concat = + std::make_shared(ov::NodeVector{gather1, const_op({32}), gather2, const_op({128})}, 0); + + auto gather3 = std::make_shared(shapeOf, const_op({1}), const_op({0})); + auto add = std::make_shared(gather1, gather3); + auto scatter_update = + std::make_shared(const_op({0, 0}), const_op({1}), add, const_op({0})); + + ov::ResultVector results{std::make_shared(concat), + std::make_shared(scatter_update)}; + function = std::make_shared(results, params, "shape_infer"); + + std::vector input_shapes = {{4, 2, 3}}; + init_input_shapes(ov::test::static_shapes_to_test_representation(input_shapes)); + ov::test::SubgraphBaseTest::run(); + } +}; + +namespace { +TEST_F(ShapeInferSubgraphTest, smoke_ShapeInferSubgraphTest_CPU) { + run(); +} +} // namespace +} // namespace test +} // namespace ov From a6c691528fa18a2a8dc65ddf4190088fe8ed209a Mon Sep 17 00:00:00 2001 From: Yuan Hu Date: Wed, 17 Jan 2024 13:46:26 +0800 Subject: [PATCH 035/122] [CPU] Remove two skips in function tests --- 
.../functional/shared_tests_instances/skip_tests_config.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 7636d1e66b3465..53f3374a31ca1f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -138,11 +138,6 @@ std::vector disabledTestPatterns() { R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)", // Issue: 105838 R"(smoke_NmsLayerTest.*)", -        // Issue: 95590 -        R"(.*CachingSupportCase.*CompileModelCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(u|i).*)", -        // Issue: 95607 -        R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(i|u).*)", -        R"(.*CachingSupportCase.*ReadConcatSplitAssign.*)", // 94982. FP32->I32 conversion issue in the reference implementation. There can be some garbage in the rest of // float values like 0.333333745. // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects From 563efc6a8ae2534ea9107dfd6439d204b7cbacbc Mon Sep 17 00:00:00 2001 From: Jade Cho Date: Wed, 17 Jan 2024 17:17:19 +0900 Subject: [PATCH 036/122] [GPU] Change default capacity of impls cache (#22200) + Change 10000 to 300 to reduce memory usage. --- src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp index 75398c280aebc2..f69cf71482bb78 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp @@ -306,7 +306,7 @@ struct program { // if subgraph can be optimized if it consists of only inputs and corresponding outputs bool _can_be_optimized; std::unique_ptr _impls_cache; -    const size_t _impls_cache_capacity = 10000; +    const size_t _impls_cache_capacity = 300; std::shared_ptr _compilation_context; bool _loaded_from_cache = false; From b3469344f3e9666f5c2d7d0f4ed575c1848732fc Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Wed, 17 Jan 2024 09:55:22 +0000 Subject: [PATCH 037/122] [api conformance] Enable version, core_threading, caching_tests to api conformance (#21991) --- .../src/ov_plugin/caching_tests.cpp | 10 ++++++ .../src/ov_plugin/core_threading_tests.cpp | 31 +++++++++++++++++++ .../src/ov_plugin/properties.cpp | 2 +- .../src/ov_plugin/version.cpp | 18 +++++++++++ 4 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp create mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp index 426e8a262573ed..5401768ed9cc2a 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp @@ -65,4 +65,14 @@ 
INSTANTIATE_TEST_SUITE_P(ov_plugin_floating_point, CompileModelCacheTestBase, ::testing::Values(pluginConfig)), CompileModelCacheTestBase::getTestCaseName); +const std::vector default_properties = { + {ov::enable_profiling(false)} +}; + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CompileModelCacheRuntimePropertiesTestBase, + ::testing::Combine( + ::testing::Values(targetDevice), + ::testing::ValuesIn(ov::test::conformance::generate_ov_configs(default_properties))), + CompileModelCacheRuntimePropertiesTestBase::getTestCaseName); + } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp new file mode 100644 index 00000000000000..2d565d04df0dee --- /dev/null +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "behavior/ov_plugin/core_threading.hpp" +#include "ov_api_conformance_helpers.hpp" + +using namespace ov::test::behavior; +using namespace ov::test::conformance; + +namespace { + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CoreThreadingTest, + testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + CoreThreadingTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(ov_plugin, + CoreThreadingTestsWithIter, + testing::Combine(testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + testing::Values(4), + testing::Values(50)), + CoreThreadingTestsWithIter::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CoreThreadingTestsWithCacheEnabled, + testing::Combine(testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + testing::Values(20), + testing::Values(10)), + CoreThreadingTestsWithCacheEnabled::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp index 1302cbcd11c020..1d654443bee0bb 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp @@ -112,6 +112,6 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(targetDevice)); INSTANTIATE_TEST_SUITE_P( - ov_plugin_remove_mandatory, OVBasicPropertiesTestsP, + ov_plugin_mandatory, OVBasicPropertiesTestsP, ::testing::ValuesIn(generate_ov_pairs_plugin_name_by_device())); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp new file mode 100644 index 00000000000000..2ace60f5425dfb --- /dev/null +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_api_conformance_helpers.hpp" +#include "behavior/ov_plugin/version.hpp" + +using namespace ov::test::behavior; +using namespace ov::test::conformance; + +namespace { + +INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, + VersionTests, + 
::testing::Values(targetDevice), + VersionTests::getTestCaseName); + +} \ No newline at end of file From 222184ca28ee1814ed0e9445c42ded65d3e11e73 Mon Sep 17 00:00:00 2001 From: Alina Kladieva Date: Wed, 17 Jan 2024 10:57:15 +0100 Subject: [PATCH 038/122] [GHA] Run full scope on merge queue events (#22172) --- .github/actions/smart-ci/action.yml | 4 +++ .github/actions/smart-ci/smart_ci.py | 36 +++++++++++++++---- .github/workflows/android_arm64.yml | 1 + .github/workflows/fedora.yml | 1 + .github/workflows/linux.yml | 1 + .github/workflows/linux_arm64.yml | 1 + .../linux_conditional_compilation.yml | 1 + .github/workflows/linux_riscv.yml | 1 + .github/workflows/mac.yml | 1 + .github/workflows/mac_arm64.yml | 1 + .github/workflows/webassembly.yml | 1 + .github/workflows/windows.yml | 1 + .../windows_conditional_compilation.yml | 1 + 13 files changed, 45 insertions(+), 6 deletions(-) diff --git a/.github/actions/smart-ci/action.yml b/.github/actions/smart-ci/action.yml index 23717c9dde50ce..ebe420d4ef301a 100644 --- a/.github/actions/smart-ci/action.yml +++ b/.github/actions/smart-ci/action.yml @@ -13,6 +13,9 @@ inputs: commit_sha: description: "GitHub commit hash. Used if no PR number is set" required: false + ref_name: + description: "GitHub ref name" + required: false component_pattern: description: "Pattern to extract component name from PR label. If not set, any label is considered a component name" required: false @@ -88,6 +91,7 @@ runs: python ${{ github.action_path }}/smart_ci.py \ $([[ -n "${{ inputs.pr }}" ]] && echo '--pr ${{ inputs.pr }}' || echo '-s ${{ inputs.commit_sha }}') \ -r ${{ inputs.repository }} \ + -f "${{ inputs.ref_name }}" \ -p "${{ inputs.component_pattern }}" \ -c "${{ inputs.components_config }}" \ -m "${{ inputs.components_config_schema }}" \ diff --git a/.github/actions/smart-ci/smart_ci.py b/.github/actions/smart-ci/smart_ci.py index fc88294247c221..ae6786d9882bad 100644 --- a/.github/actions/smart-ci/smart_ci.py +++ b/.github/actions/smart-ci/smart_ci.py @@ -109,11 +109,26 @@ def get_changed_component_names(pr, all_possible_components: set, component_patt return components +def get_changeset(gh_api, pr, target_branch, commit_sha): + """Returns changeset either from PR or commit""" + if pr: + return gh_api.pulls.list_files(pr) + if target_branch: + target_branch_head_commit = gh_api.repos.get_branch(target_branch).commit.sha + # In merge-queue branch all commits between head of target branch and head of current branch (commit_sha) + # contain changes added to queue earlier to be validated together. Getting all of them + changes from + # commit_sha below + changed_files = gh_api.repos.compare_commits(f'{target_branch_head_commit}...{commit_sha}').get('files', []) + return changed_files + raise ValueError(f'Either "pr" or "target_branch" parameter must be non-empty') + + def parse_args(): parser = argparse.ArgumentParser(description='Returns product components changed in a given PR or commit') parser.add_argument('--pr', type=int, required=False, help='PR number. If not set, --commit is used') parser.add_argument('-s', '--commit-sha', required=False, help='Commit SHA. If not set, --pr is used') parser.add_argument('-r', '--repo', help='GitHub repository') + parser.add_argument('-f', '--ref_name', required=False, help='GitHub ref name') parser.add_argument('-p', '--pattern', default=None, help='Pattern to extract component name from PR label. 
' 'If not set, any label is considered a component name') parser.add_argument('-c', '--components-config', default='.github/components.yml', @@ -172,18 +187,27 @@ def main(): component_name = component_name_from_label(label, args.pattern) all_possible_components.add(component_name if component_name else label) - no_match_files_changed = False + run_full_scope = False # For now, we don't want to apply smart ci rules for post-commits is_postcommit = not pr - if is_postcommit: + + merge_queue_prefix = 'gh-readonly-queue/' + is_merge_queue = args.ref_name.startswith(merge_queue_prefix) + merge_queue_target_branch = re.findall(f'^{merge_queue_prefix}(.*)/', args.ref_name)[0] if is_merge_queue else None + + if is_merge_queue: + logger.info(f"The run is a merge-queue run, executing full validation scope for all components, if " + f"not all queued changes match patterns in 'skip-when-only-listed-files-changed'") + run_full_scope = True + elif is_postcommit: logger.info(f"The run is a post-commit run, executing full validation scope for all components") + run_full_scope = True else: no_match_files_changed = 'no-match-files' in [label.name for label in pr.labels] if no_match_files_changed: logger.info(f"There are changed files that don't match any pattern in labeler config, " f"executing full validation scope for all components") - - run_full_scope = is_postcommit or no_match_files_changed + run_full_scope = True # In post-commits - validate all components regardless of changeset # In pre-commits - validate only changed components with their dependencies @@ -197,7 +221,7 @@ def main(): affected_components = cfg.get_affected_components(changed_component_names) skip_workflow = False - if args.pr and not run_full_scope: + if is_merge_queue or (args.pr and not run_full_scope): if args.skip_when_only_listed_labels_set: excepted_labels = set(args.skip_when_only_listed_labels_set.split(',')) excepted_labels_only = changed_component_names - excepted_labels == set() @@ -205,7 +229,7 @@ def main(): if not skip_workflow and args.skip_when_only_listed_files_changed: # To avoid spending extra API requests running step below only if necessary - changed_files = gh_api.pulls.list_files(args.pr) + changed_files = get_changeset(gh_api, args.pr, merge_queue_target_branch, args.commit_sha) patterns = set(args.skip_when_only_listed_files_changed.split(',')) matched_files_only = all(any(fnmatch(f.filename, pattern) for pattern in patterns) for f in changed_files) diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml index a8deb0e3d476e1..d313929a1b016e 100644 --- a/.github/workflows/android_arm64.yml +++ b/.github/workflows/android_arm64.yml @@ -31,6 +31,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 84434981be989d..7863d04f47dac4 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -31,6 +31,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 319abaa44d564a..4e37bfe6b6c0da 100644 --- 
a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -38,6 +38,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index fd8403e0de6c53..9894bccaa48615 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -35,6 +35,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index 79ac560f84b88a..d8f6edbc867803 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -35,6 +35,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 088fddccf1b210..ff8fa44c0c7ab0 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -34,6 +34,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 0165980d1b2f57..c645781522039e 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -52,6 +52,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 64873a9b104138..4e7ed1c2a24d49 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -51,6 +51,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml index c5d94f267e4298..135e6a76c49fbb 100644 --- a/.github/workflows/webassembly.yml +++ b/.github/workflows/webassembly.yml @@ -31,6 +31,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e25ff48ca31128..ff7ae0310aaaec 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -34,6 +34,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ 
github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 7780e50eedc894..af156906db340a 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -37,6 +37,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' From 14183ae7d1acae2da7e8acc92d86379e8bd2a70b Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Wed, 17 Jan 2024 14:19:22 +0400 Subject: [PATCH 039/122] [TF Hub] Mark up models failing due to unsupported operations (#22207) * [TF Hub] Mark up models failing due to unsupported operations Signed-off-by: Kazantsev, Roman * Update tests/model_hub_tests/tf_hub_tests/nightly_models --------- Signed-off-by: Kazantsev, Roman --- .../tf_hub_tests/nightly_models | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/model_hub_tests/tf_hub_tests/nightly_models b/tests/model_hub_tests/tf_hub_tests/nightly_models index 758ef889f64c0d..5767c438f23408 100644 --- a/tests/model_hub_tests/tf_hub_tests/nightly_models +++ b/tests/model_hub_tests/tf_hub_tests/nightly_models @@ -1,8 +1,8 @@ -universal-sentence-encoder,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/universal-sentence-encoder/versions/2 +universal-sentence-encoder,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/universal-sentence-encoder/versions/2,xfail,128994 127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows StaticRegexReplace StringSplit StringToHashBucketFast imagenet/mobilenet_v1_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-224-classification/versions/2 imagenet/mobilenet_v2_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-classification/versions/2 universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2 -universal-sentence-encoder-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/large/versions/2 +universal-sentence-encoder-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/large/versions/2,xfail,128994 127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows StaticRegexReplace StringSplit StringToHashBucketFast imagenet/mobilenet_v2_075_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/075-224-classification/versions/2 movenet/singlepose/lightning,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-lightning/versions/4 imagenet/mobilenet_v1_025_224/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/025-224-classification/versions/2 @@ -13,7 +13,7 @@ 
nnlm-en-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/ bert_en_uncased_L-12_H-768_A-12,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/en-uncased-l-12-h-768-a-12/versions/4 bert_uncased_L-12_H-768_A-12,https://www.kaggle.com/models/google/bert/frameworks/tensorFlow1/variations/uncased-l-12-h-768-a-12/versions/1,xfail,129153 TimeoutError or killed with a signal 11 elmo,https://www.kaggle.com/models/google/elmo/frameworks/tensorFlow1/variations/elmo/versions/3 -universal-sentence-encoder-multilingual-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-large/versions/2 +universal-sentence-encoder-multilingual-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-large/versions/2,xfail,129830 unsupported operations RaggedTensorToSparse SegmentMean SentencepieceOp SentencepieceTokenizeOp small_bert/bert_en_uncased_L-4_H-256_A-4,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-4-h-256-a-4/versions/2 imagenet/resnet_v2_50/feature_vector,https://www.kaggle.com/models/google/resnet-v2/frameworks/tensorFlow2/variations/50-feature-vector/versions/2 spice,https://www.kaggle.com/models/google/spice/frameworks/tensorFlow1/variations/spice/versions/2,xfail,128817 Model references undeclared parameters @@ -28,7 +28,7 @@ efficientnet/lite0/feature-vector,https://www.kaggle.com/models/tensorflow/effic i3d-kinetics-400,https://www.kaggle.com/models/deepmind/i3d-kinetics/frameworks/tensorFlow1/variations/400/versions/1 imagenet/mobilenet_v2_035_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/035-224-classification/versions/2 tf2-preview/gnews-swivel-20dim,https://www.kaggle.com/models/google/gnews-swivel/frameworks/tensorFlow2/variations/tf2-preview-20dim/versions/1,skip,128989 AttributeError NoneType object has no attribute shape or dtype -faster_rcnn/openimages_v4/inception_resnet_v2,https://www.kaggle.com/models/google/faster-rcnn-inception-resnet-v2/frameworks/tensorFlow1/variations/faster-rcnn-openimages-v4-inception-resnet-v2/versions/1 +faster_rcnn/openimages_v4/inception_resnet_v2,https://www.kaggle.com/models/google/faster-rcnn-inception-resnet-v2/frameworks/tensorFlow1/variations/faster-rcnn-openimages-v4-inception-resnet-v2/versions/1,xfail,127962 unsupported operation LookupTableFindV2 imagenet/mobilenet_v2_140_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/140-224-feature-vector/versions/2 imagenet/mobilenet_v2_100_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-feature-vector/versions/2 bert_en_cased_L-12_H-768_A-12,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/en-cased-l-12-h-768-a-12/versions/4 @@ -55,12 +55,12 @@ efficientdet/lite0/detection,https://www.kaggle.com/models/tensorflow/efficientd small_bert/bert_en_uncased_L-2_H-128_A-2,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-2-h-128-a-2/versions/2 albert_base,https://www.kaggle.com/models/google/albert/frameworks/tensorFlow1/variations/base/versions/3,skip,129153 TimeoutError or killed with a signal 11 nnlm-ja-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/ja-dim128/versions/1,skip,120721 AssertionError No signatures 
for a model -universal-sentence-encoder-multilingual-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-qa/versions/2 +universal-sentence-encoder-multilingual-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-qa/versions/2,xfail,129830 unsupported operations RaggedTensorToSparse SegmentMean SentencepieceOp SentencepieceTokenizeOp nnlm-ja-dim128-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/ja-dim128-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model LaBSE,https://www.kaggle.com/models/google/labse/frameworks/tensorFlow2/variations/labse/versions/2 nnlm-en-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/en-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model resnet_50/feature_vector,https://www.kaggle.com/models/tensorflow/resnet-50/frameworks/tensorFlow2/variations/feature-vector/versions/1 -universal-sentence-encoder-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/qa/versions/2 +universal-sentence-encoder-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/qa/versions/2,xfail,127962 128994 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 StaticRegexReplace StringSplit StringToHashBucketFast biggan-deep-256,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/deep-256/versions/1 efficientdet/lite2/detection,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite2-detection/versions/1 imagenet/mobilenet_v2_050_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/050-224-classification/versions/2 @@ -119,7 +119,7 @@ imagenet/resnet_v2_50/classification,https://www.kaggle.com/models/google/resnet aiy/vision/classifier/birds_V1,https://www.kaggle.com/models/google/aiy/frameworks/tensorFlow1/variations/vision-classifier-birds-v1/versions/1 MuRIL,https://www.kaggle.com/models/google/muril/frameworks/tensorFlow2/variations/muril/versions/1 efficientdet/lite1/feature-vector,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite1-feature-vector/versions/1 -random-nnlm-en-dim128,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim128/versions/1 +random-nnlm-en-dim128,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim128/versions/1,xfail,127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows SparseSegmentSqrtN StringSplit StringToHashBucketFast imagenet/inception_resnet_v2/classification,https://www.kaggle.com/models/google/inception-resnet-v2/frameworks/tensorFlow2/variations/classification/versions/2 ssd_mobilenet_v2/fpnlite_320x320,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v2/frameworks/tensorFlow2/variations/fpnlite-320x320/versions/1 centernet/resnet50v1_fpn_512x512,https://www.kaggle.com/models/tensorflow/centernet-resnet/frameworks/tensorFlow2/variations/50v1-fpn-512x512/versions/1 @@ -194,7 +194,7 @@ efficientnet/b7/classification,https://www.kaggle.com/models/tensorflow/efficien 
small_bert/bert_en_uncased_L-12_H-256_A-4,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-12-h-256-a-4/versions/2 imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-xl-feature-vector/versions/1 ssd_mobilenet_v1/fpn_640x640,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v1/frameworks/tensorFlow2/variations/fpn-640x640/versions/1 -mil-nce/s3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/s3d/versions/1 +mil-nce/s3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/s3d/versions/1,xfail,127962 129654 128996 unsupported operations LookupTableFindV2 StringLower StringSplitV2 imagenet/nasnet_mobile/feature_vector,https://www.kaggle.com/models/google/nasnet/frameworks/tensorFlow2/variations/mobile-feature-vector/versions/2 efficientnet/b4/feature-vector,https://www.kaggle.com/models/google/efficientnet/frameworks/tensorFlow1/variations/b4-feature-vector/versions/1 imagenet/efficientnet_v2_imagenet21k_m/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-m-feature-vector/versions/2 @@ -225,7 +225,7 @@ imagenet/resnet_v2_101/feature_vector,https://www.kaggle.com/models/google/resne imagenet/efficientnet_v2_imagenet21k_l/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-l-feature-vector/versions/2 imagenet/nasnet_large/classification,https://www.kaggle.com/models/google/nasnet/frameworks/tensorFlow2/variations/large-classification/versions/2 faster_rcnn/resnet152_v1_1024x1024,https://www.kaggle.com/models/tensorflow/faster-rcnn-resnet-v1/frameworks/tensorFlow2/variations/faster-rcnn-resnet152-v1-1024x1024/versions/1,skip,128695 Inference results mismatch -vit_s16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-fe/versions/1 +vit_s16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-fe/versions/1,xfail,118324 unsupported operation XlaGather zh_segmentation,https://www.kaggle.com/models/google/zh-segmentation/frameworks/tensorFlow1/variations/zh-segmentation/versions/1 wiki40b-lm-es,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/es/versions/1 centernet/resnet50v1_fpn_512x512_kpts,https://www.kaggle.com/models/tensorflow/centernet-resnet/frameworks/tensorFlow2/variations/50v1-fpn-512x512-kpts/versions/1 @@ -265,7 +265,7 @@ imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector,https://www.kaggle.c humpback_whale,https://www.kaggle.com/models/google/humpback-whale/frameworks/tensorFlow2/variations/humpback-whale/versions/1,skip,120720 InvalidArgumentError Graph execution error nnlm-id-dim50,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/id-dim50/versions/1,skip,120721 AssertionError No signatures for a model nonsemantic-speech-benchmark/frill,https://www.kaggle.com/models/google/nonsemantic-speech-benchmark/frameworks/tensorFlow2/variations/frill/versions/1 -vit_s16_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-classification/versions/1 
+vit_s16_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-classification/versions/1,xfail,118324 unsupported operation XlaGather faster_rcnn/resnet152_v1_800x1333,https://www.kaggle.com/models/tensorflow/faster-rcnn-resnet-v1/frameworks/tensorFlow2/variations/faster-rcnn-resnet152-v1-800x1333/versions/1 bit/s-r152x4,https://www.kaggle.com/models/google/bit/frameworks/tensorFlow2/variations/s-r152x4/versions/1 imagenet/resnet_v1_101/classification,https://www.kaggle.com/models/google/resnet-v1/frameworks/tensorFlow2/variations/101-classification/versions/2 @@ -300,7 +300,7 @@ efficientnet/b5/feature-vector,https://www.kaggle.com/models/google/efficientnet distilbert_multi_cased_preprocess,https://www.kaggle.com/models/jeongukjae/distilbert/frameworks/tensorFlow2/variations/multi-cased-preprocess/versions/2,skip,128989 AttributeError NoneType object has no attribute shape or dtype nnlm-de-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/de-dim128/versions/1,skip,120721 AssertionError No signatures for a model bertseq2seq/roberta24_gigaword,https://www.kaggle.com/models/google/bertseq2seq/frameworks/tensorFlow1/variations/roberta24-gigaword/versions/1,skip,128817 Model references undeclared parameters -vit_b8_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-fe/versions/1 +vit_b8_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-fe/versions/1,xfail,118324 unsupported operation XlaGather aiy/vision/classifier/insects_V1,https://www.kaggle.com/models/google/aiy/frameworks/tensorFlow1/variations/vision-classifier-insects-v1/versions/1 bertseq2seq/roberta24_cnndm,https://www.kaggle.com/models/google/bertseq2seq/frameworks/tensorFlow1/variations/roberta24-cnndm/versions/1,skip,128817 Model references undeclared parameters movinet/a1/base/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a1-base-kinetics-600-classification/versions/3 @@ -399,12 +399,12 @@ imagenet/mobilenet_v1_050_128/classification,https://www.kaggle.com/models/googl tf2-preview/nnlm-de-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-de-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model imagenet/mobilenet_v1_100_192/feature_vector,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-192-feature-vector/versions/2 efficientnet/b6/classification,https://www.kaggle.com/models/tensorflow/efficientnet/frameworks/tensorFlow2/variations/b6-classification/versions/1 -vit_b8_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-classification/versions/1 +vit_b8_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-classification/versions/1,xfail,118324 unsupported operation XlaGather universal-sentence-encoder-xling/en-es,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow1/variations/xling-en-es/versions/1 -mil-nce/i3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/i3d/versions/1 -vit_l16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-l16-fe/versions/1 
+mil-nce/i3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/i3d/versions/1,xfail,127962 129654 128996 unsupported operations LookupTableFindV2 StringLower StringSplitV2 +vit_l16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-l16-fe/versions/1,xfail,118324 unsupported operation XlaGather nonsemantic-speech-benchmark/frill-nofrontend,https://www.kaggle.com/models/google/nonsemantic-speech-benchmark/frameworks/tensorFlow2/variations/frill-nofrontend/versions/1 -vit_r50_l32_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r50-l32-fe/versions/1 +vit_r50_l32_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r50-l32-fe/versions/1,xfail,118324 118325 unsupported operation XlaGather XlaReduceWindow imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-b2-feature-vector/versions/1 remote_sensing/so2sat-resnet50,https://www.kaggle.com/models/google/resnet50/frameworks/tensorFlow1/variations/remote-sensing-so2sat-resnet50/versions/1 imagenet/efficientnet_v2_imagenet21k_ft1k_b3/classification,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-b3-classification/versions/1 @@ -572,7 +572,7 @@ small_bert/bert_uncased_L-2_H-256_A-4,https://www.kaggle.com/models/google/bert/ edgetpu/vision/autoseg-edgetpu/fused_argmax/s,https://www.kaggle.com/models/google/autoseg-edgetpu/frameworks/tensorFlow2/variations/fused-argmax-s/versions/1 image_augmentation/flipx_crop_rotate_color,https://www.kaggle.com/models/google/image-augmentation/frameworks/tensorFlow1/variations/flipx-crop-rotate-color/versions/1,skip,128817 Model references undeclared parameters circularnet_3,https://www.kaggle.com/models/google/circularnet/frameworks/tensorFlow2/variations/3/versions/1 -random-nnlm-en-dim50,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim50/versions/1 +random-nnlm-en-dim50,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim50/versions/1,xfail,127962 128996 128995 unsupported operation LookupTableFindV2 LookupTableSizeV2 StringSplit StringToHashBucketFast mixer_b16_i21k_classification,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/tensorFlow2/variations/mixer-b16-i21k-classification/versions/1 imagenet/mobilenet_v1_025_160/quantops/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/025-160-quantops-classification/versions/2,skip,128695 Inference results mismatch imagenet/mobilenet_v1_075_224/quantops/feature_vector,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/075-224-quantops-feature-vector/versions/2,skip,128695 Inference results mismatch From 388d2bb6f25f468f9b6f6e29eab71a62ca25f25e Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Wed, 17 Jan 2024 02:28:39 -0800 Subject: [PATCH 040/122] [ONNX] Frontend refactoring (#22178) * Cleanup ngraph headers * Updates of editor.hpp/cpp * Replaced Function by ov::Model in graph.cpp/hpp * Fixed code style * Refactored operations which are needed to build on master --- .../include/onnx_import/core/node.hpp | 8 ++-- .../include/onnx_import/core/operator_set.hpp | 1 - .../include/onnx_import/onnx_utils.hpp | 2 +- 
.../onnx/frontend/src/core/attribute.hpp | 2 +- .../onnx/frontend/src/core/graph.cpp | 37 ++++++++------- .../onnx/frontend/src/core/graph.hpp | 8 ++-- .../onnx/frontend/src/core/sparse_tensor.hpp | 6 +-- .../onnx/frontend/src/core/transform.hpp | 2 +- .../onnx/frontend/src/core/value_info.hpp | 17 ++++--- .../onnx/frontend/src/edge_mapper.cpp | 2 +- .../onnx/frontend/src/edge_mapper.hpp | 8 ++-- src/frontends/onnx/frontend/src/editor.cpp | 10 ++-- src/frontends/onnx/frontend/src/editor.hpp | 26 +++++------ .../onnx/frontend/src/input_model.cpp | 6 +-- .../onnx/frontend/src/op/conv_integer.cpp | 43 +++++++++-------- .../onnx/frontend/src/op/conv_integer.hpp | 3 +- .../onnx/frontend/src/op/eye_like.cpp | 19 ++++---- .../onnx/frontend/src/op/eye_like.hpp | 1 - src/frontends/onnx/frontend/src/op/loop.cpp | 46 +++++++++---------- src/frontends/onnx/frontend/src/op/loop.hpp | 5 +- .../onnx/frontend/src/op/matmul_integer.cpp | 39 ++++++++-------- .../onnx/frontend/src/op/matmul_integer.hpp | 3 +- .../onnx/frontend/src/ops_bridge.hpp | 1 - .../onnx/frontend/src/utils/common.cpp | 1 - .../onnx/frontend/src/utils/conv_factory.cpp | 1 - .../onnx/frontend/src/utils/convpool.hpp | 1 - .../onnx/frontend/src/utils/reshape.hpp | 9 ++-- .../src/utils/tensor_external_data.cpp | 2 - .../onnx/frontend/src/utils/variadic.hpp | 15 ++---- 29 files changed, 152 insertions(+), 172 deletions(-) diff --git a/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp b/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp index e46c37321820af..ed114552cbdb13 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp @@ -17,11 +17,11 @@ #include #include -#include "ngraph/deprecated.hpp" -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "onnx_import/onnx_importer_visibility.hpp" +#include "openvino/core/deprecated.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/constant.hpp" namespace ONNX_NAMESPACE { // forward declaration diff --git a/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp b/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp index e55ff9cfbf634f..69bb95a62834cb 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp @@ -8,7 +8,6 @@ #include #include -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp b/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp index 631c689cf0dd66..8e4de8a6605635 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp @@ -17,9 +17,9 @@ #include #include -#include "ngraph/deprecated.hpp" #include "onnx_import/core/operator_set.hpp" #include "onnx_importer_visibility.hpp" +#include "openvino/core/deprecated.hpp" namespace ngraph { namespace onnx_import { diff --git a/src/frontends/onnx/frontend/src/core/attribute.hpp b/src/frontends/onnx/frontend/src/core/attribute.hpp index 4bd9756bccbbda..ea8bedb5a28d0f 100644 --- a/src/frontends/onnx/frontend/src/core/attribute.hpp +++ b/src/frontends/onnx/frontend/src/core/attribute.hpp @@ -8,7 +8,7 @@ #include "core/sparse_tensor.hpp" #include "core/tensor.hpp" -#include "ngraph/except.hpp" +#include 
"openvino/core/except.hpp" namespace ngraph { namespace onnx_import { diff --git a/src/frontends/onnx/frontend/src/core/graph.cpp b/src/frontends/onnx/frontend/src/core/graph.cpp index ac94cc4af26746..df61ef1a7371db 100644 --- a/src/frontends/onnx/frontend/src/core/graph.cpp +++ b/src/frontends/onnx/frontend/src/core/graph.cpp @@ -11,7 +11,6 @@ #include "core/transform.hpp" #include "core/value_info.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" #include "onnx_framework_node.hpp" #include "onnx_import/core/node.hpp" @@ -148,7 +147,7 @@ Graph::Graph(const std::string& model_dir, for (const auto& initializer_tensor : m_model->get_graph().initializer()) { if (initializer_tensor.has_name()) { Tensor tensor = Tensor{initializer_tensor, m_model_dir, m_mmap_cache}; - std::shared_ptr ov_constant; + std::shared_ptr ov_constant; // For each initializer create a Constant node and store it in cache try { ov_constant = tensor.get_ov_constant(); @@ -165,7 +164,7 @@ Graph::Graph(const std::string& model_dir, } } - // Process all ONNX graph inputs, convert them to nGraph nodes and store in cache + // Process all ONNX graph inputs, convert them to OV nodes and store in cache for (const auto& input : m_model->get_graph().input()) { // Check if a Constant node was already created from an initializer if (m_cache->contains(input.name())) { @@ -183,7 +182,7 @@ void Graph::convert_to_ov_nodes() { const float total = static_cast(m_model->get_graph().node().size()); unsigned int completed = 0u; std::map op_statistics; - // Process ONNX graph nodes, convert to nGraph nodes + // Process ONNX graph nodes, convert to OV nodes for (const auto& node_proto : m_model->get_graph().node()) { if (m_extensions.telemetry) { op_statistics[node_proto.op_type()]++; @@ -249,10 +248,10 @@ void Graph::set_metadata(std::shared_ptr& model) const { } } -std::shared_ptr Graph::convert() { +std::shared_ptr Graph::convert() { convert_to_ov_nodes(); remove_dangling_parameters(); - auto function = create_function(); + auto function = create_model(); set_metadata(function); return function; } @@ -263,10 +262,10 @@ OutputVector Graph::make_framework_nodes(const Node& onnx_node) { if (onnx_node.has_subgraphs()) { const auto& subgraphs = onnx_node.get_subgraphs(); auto inputs = onnx_node.get_ng_inputs(); - std::vector> functions; + std::vector> models; for (const auto& kv : subgraphs) { auto& subgraph = kv.second; - functions.push_back(subgraph->decode()); + models.push_back(subgraph->decode()); for (const auto& input : subgraph->get_inputs_from_parent()) { const auto& name = input.get_node()->get_friendly_name(); if (std::find_if(inputs.begin(), inputs.end(), [&name](const Output& n) -> bool { @@ -276,7 +275,7 @@ OutputVector Graph::make_framework_nodes(const Node& onnx_node) { } } } - framework_node = std::make_shared(onnx_node, functions, inputs); + framework_node = std::make_shared(onnx_node, models, inputs); } else { framework_node = std::make_shared(onnx_node); } @@ -287,7 +286,7 @@ void Graph::decode_to_framework_nodes() { const float total = static_cast(m_model->get_graph().node().size()); unsigned int completed = 0u; std::map op_statistics; - // Process ONNX graph nodes, convert to nGraph nodes + // Process ONNX graph nodes, convert to OV nodes for (const auto& node_proto : m_model->get_graph().node()) { if (m_extensions.telemetry) { op_statistics[node_proto.op_type()]++; @@ -312,22 +311,22 @@ void Graph::decode_to_framework_nodes() { } OPENVINO_SUPPRESS_DEPRECATED_END -std::shared_ptr Graph::create_function() { - auto 
function = std::make_shared(get_ov_outputs(), m_parameters, get_name()); +std::shared_ptr Graph::create_model() { + auto model = std::make_shared(get_ov_outputs(), m_parameters, get_name()); const auto& onnx_outputs = m_model->get_graph().output(); - for (std::size_t i{0}; i < function->get_output_size(); ++i) { - const auto& result_node = function->get_output_op(i); + for (std::size_t i{0}; i < model->get_output_size(); ++i) { + const auto& result_node = model->get_output_op(i); const std::string onnx_output_name = onnx_outputs.Get(static_cast(i)).name(); result_node->set_friendly_name(onnx_output_name + "/sink_port_0"); const auto& previous_operation = result_node->get_input_node_shared_ptr(0); previous_operation->set_friendly_name(onnx_output_name); } - return function; + return model; } -std::shared_ptr Graph::decode() { +std::shared_ptr Graph::decode() { decode_to_framework_nodes(); - auto function = create_function(); + auto function = create_model(); auto& rt_info = function->get_rt_info(); rt_info[ONNX_GRAPH_RT_ATTRIBUTE] = shared_from_this(); return function; @@ -486,9 +485,9 @@ Output Subgraph::get_ov_node_from_cache(const std::string& name) { return new_param; } -std::shared_ptr Subgraph::convert() { +std::shared_ptr Subgraph::convert() { convert_to_ov_nodes(); - return create_function(); + return create_model(); } const std::vector> Subgraph::get_inputs_from_parent() const { diff --git a/src/frontends/onnx/frontend/src/core/graph.hpp b/src/frontends/onnx/frontend/src/core/graph.hpp index bccde080ad1339..f11f0936f5dadb 100644 --- a/src/frontends/onnx/frontend/src/core/graph.hpp +++ b/src/frontends/onnx/frontend/src/core/graph.hpp @@ -34,8 +34,8 @@ class Graph : public std::enable_shared_from_this { Graph& operator=(const Graph&) = delete; Graph& operator=(Graph&&) = default; - std::shared_ptr decode(); - virtual std::shared_ptr convert(); + std::shared_ptr decode(); + virtual std::shared_ptr convert(); OutputVector get_ov_outputs(); const std::string& get_name() const { return m_model->get_graph().name(); @@ -80,7 +80,7 @@ class Graph : public std::enable_shared_from_this { void convert_to_ov_nodes(); void remove_dangling_parameters(); void set_metadata(std::shared_ptr& model) const; - std::shared_ptr create_function(); + std::shared_ptr create_model(); ParameterVector m_parameters; std::unique_ptr m_model; @@ -111,7 +111,7 @@ class Subgraph : public Graph { /// \return Vector of edge nodes from parent scope. const std::vector> get_inputs_from_parent() const; - std::shared_ptr convert() override; + std::shared_ptr convert() override; Subgraph() = delete; diff --git a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp index c8818e98114580..eb70a82ea08e16 100644 --- a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp @@ -8,8 +8,8 @@ #include -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" #include "tensor.hpp" namespace ngraph { @@ -26,7 +26,7 @@ class SparseTensor { if (m_shape == Shape{0}) { // It's possible to construct a sparse tensor in ONNX with "dims: 0" property // Such tensor contains a scalar. This results in a Shape{0} stored in m_shape. - // In nGraph a scalar is represented with Shape{} and thus this replacement. + // In OpenVINO a scalar is represented with Shape{} and thus this replacement. 
m_shape = Shape{}; } } diff --git a/src/frontends/onnx/frontend/src/core/transform.hpp b/src/frontends/onnx/frontend/src/core/transform.hpp index c061b7ab88de80..9d8fd4b8c081f1 100644 --- a/src/frontends/onnx/frontend/src/core/transform.hpp +++ b/src/frontends/onnx/frontend/src/core/transform.hpp @@ -48,7 +48,7 @@ static const std::vector legacy_ops_to_fixup = {"DeformableConv2D", /// Some legacy models use custom operators (listed in legacy_ops_to_fixup vector) which /// were registered in the default ONNX domain. This function updates nodes with these /// operations to use OPENVINO_ONNX_DOMAIN in order to process them correctly -/// in the nGraph ONNX Importer. +/// in the OpenVINO ONNX Frontend. /// /// \param model_proto Protobuf message with ONNX model to transform. void fixup_legacy_operators(ONNX_NAMESPACE::ModelProto& model_proto); diff --git a/src/frontends/onnx/frontend/src/core/value_info.hpp b/src/frontends/onnx/frontend/src/core/value_info.hpp index 99ac5b3bca3dba..5004064c425fd3 100644 --- a/src/frontends/onnx/frontend/src/core/value_info.hpp +++ b/src/frontends/onnx/frontend/src/core/value_info.hpp @@ -7,17 +7,16 @@ #include #include "core/tensor.hpp" -#include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/type/element_type.hpp" #include "onnx_common/utils.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" #include "utils/common.hpp" using namespace ov::frontend::onnx::common; +using namespace ov::op; + namespace ngraph { namespace onnx_import { class ValueInfo { @@ -49,7 +48,7 @@ class ValueInfo { if (m_value_info_proto->type().tensor_type().has_elem_type()) { return common::get_ov_element_type(m_value_info_proto->type().tensor_type().elem_type()); } - return ngraph::element::dynamic; + return ov::element::dynamic; } std::shared_ptr get_ov_node(ParameterVector& parameters, @@ -63,14 +62,14 @@ class ValueInfo { } protected: - std::shared_ptr get_ov_parameter() const { - auto parameter = std::make_shared(get_element_type(), get_shape()); + std::shared_ptr get_ov_parameter() const { + auto parameter = std::make_shared(get_element_type(), get_shape()); parameter->set_friendly_name(get_name()); parameter->get_output_tensor(0).set_names({get_name()}); return parameter; } - std::shared_ptr get_ov_constant(const Tensor& tensor) const { + std::shared_ptr get_ov_constant(const Tensor& tensor) const { return tensor.get_ov_constant(); } diff --git a/src/frontends/onnx/frontend/src/edge_mapper.cpp b/src/frontends/onnx/frontend/src/edge_mapper.cpp index 8d81ba4361f562..22e1fb4b8bc009 100644 --- a/src/frontends/onnx/frontend/src/edge_mapper.cpp +++ b/src/frontends/onnx/frontend/src/edge_mapper.cpp @@ -8,7 +8,7 @@ #include -#include "ngraph/except.hpp" +#include "openvino/core/except.hpp" #include "openvino/frontend/exception.hpp" using namespace ov; diff --git a/src/frontends/onnx/frontend/src/edge_mapper.hpp b/src/frontends/onnx/frontend/src/edge_mapper.hpp index 11e1f76deae1a5..b4b4d846337bbb 100644 --- a/src/frontends/onnx/frontend/src/edge_mapper.hpp +++ b/src/frontends/onnx/frontend/src/edge_mapper.hpp @@ -39,8 +39,8 @@ class EdgeMapper { /// In such a case the algorithm tries to match the given node name /// with the input name (providing an input index is not enough). /// If a unique edge is found, it will be returned.
- /// If InputEdge cannot be determined based on parameter values an ngraph_error - /// exception will be thrown. + /// If InputEdge cannot be determined based on parameter values an + /// ov::Exception will be thrown. /// /// \param node An EditorNode helper structure created based on a node name /// or a node output name. @@ -56,8 +56,8 @@ class EdgeMapper { /// In such a case the algorithm will try to match the given node name /// with the output name (providing an output index is not enough). /// If after such operation a found edge is unique, it is returned. - /// If OutputEdge cannot be determined based on given params the ngraph_error - /// exception is thrown. + /// If OutputEdge cannot be determined based on given params an + /// ov::Exception is thrown. /// /// \param node An EditorNode helper structure created based on a node name /// or a node output name. diff --git a/src/frontends/onnx/frontend/src/editor.cpp b/src/frontends/onnx/frontend/src/editor.cpp index 0ea9b1455f0ded..906675a0e4998a 100644 --- a/src/frontends/onnx/frontend/src/editor.cpp +++ b/src/frontends/onnx/frontend/src/editor.cpp @@ -22,7 +22,7 @@ using namespace ov; using namespace ov::onnx_editor; using namespace ov::frontend::onnx::common; -NGRAPH_SUPPRESS_DEPRECATED_START +OPENVINO_SUPPRESS_DEPRECATED_START namespace { using namespace ONNX_NAMESPACE; @@ -97,7 +97,7 @@ void add_dim_to_onnx_shape(const Dimension& dim, ONNX_NAMESPACE::TensorShapeProt if (dim.is_static()) { new_dim->set_dim_value(dim.get_length()); } else { - // nGraph Dimension is also considered dynamic if it represents a constrained range + // Dimension is also considered dynamic if it represents a constrained range // of allowed values as well as if it's unconstrained at all. ONNX cannot represent // ranged dimensions so this might not be 100% accurate. The modified ONNX model will // always have a fully dynamic dimension in this case.
@@ -140,7 +140,7 @@ std::string extract_name(const T& input_or_initializer) { void modify_initializer(TensorProto& initializer, const std::string& name, - const std::shared_ptr values, + const std::shared_ptr values, ValueInfoProto* input) { const auto elem_type = values->get_element_type(); OPENVINO_ASSERT(is_supported_ov_type(elem_type), @@ -392,7 +392,7 @@ element::Type_t onnx_editor::ONNXModelEditor::get_input_type(const std::string& return ngraph::onnx_import::common::get_ov_element_type(type); } -void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map& input_shapes) { +void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map& input_shapes) { auto* onnx_graph = m_pimpl->m_model_proto->mutable_graph(); for (const auto& input_desc : input_shapes) { @@ -540,7 +540,7 @@ std::shared_ptr onnx_editor::ONNXModelEditor::get_function() const { } void onnx_editor::ONNXModelEditor::set_input_values( - const std::map>& input_values) { + const std::map>& input_values) { auto onnx_graph = m_pimpl->m_model_proto->mutable_graph(); for (const auto& input : input_values) { diff --git a/src/frontends/onnx/frontend/src/editor.hpp b/src/frontends/onnx/frontend/src/editor.hpp index 3edb098e77291c..1a6a5faf60c6a6 100644 --- a/src/frontends/onnx/frontend/src/editor.hpp +++ b/src/frontends/onnx/frontend/src/editor.hpp @@ -9,15 +9,13 @@ #include #include "editor_types.hpp" -#include "ngraph/deprecated.hpp" -#include "ngraph/function.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/type/element_type.hpp" #include "onnx_import/onnx_importer_visibility.hpp" +#include "openvino/core/deprecated.hpp" +#include "openvino/core/model.hpp" #include "openvino/frontend/extension/holder.hpp" #include "openvino/frontend/extension/progress_reporter.hpp" #include "openvino/frontend/extension/telemetry.hpp" +#include "openvino/op/constant.hpp" #include "utils/tensor_external_data.hpp" namespace ov { @@ -25,7 +23,7 @@ namespace onnx_editor { /// \brief A class representing a set of utilities allowing modification of an ONNX model /// /// \note This class can be used to modify an ONNX model before it gets translated to -/// an ngraph::Function by the import_onnx_model function. It lets you modify the +/// an ov::Model by the frontend->convert method. It lets you modify the /// model's input types and shapes, extract a subgraph and more. class ONNX_IMPORTER_API ONNXModelEditor final { public: @@ -73,7 +71,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final { /// be used to modified the ONNX model loaded from a file. This /// method throws an exception if the model doesn't contain any of /// the inputs specified in its parameter. - void set_input_shapes(const std::map& input_shapes); + void set_input_shapes(const std::map& input_shapes); /// \brief Get shape of ONNX tensor indicated by the tensor_name. /// @@ -109,7 +107,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final { /// \param input_values A collection of pairs {input_name: new_input_values} used to /// update the ONNX model. Initializers already existing are /// overwritten. - void set_input_values(const std::map>& input_values); + void set_input_values(const std::map>& input_values); /// \brief Changes the name of given tensor. /// @@ -154,7 +152,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final { /// \brief Returns a serialized ONNX model, possibly modified by the editor. std::string model_string() const; - /// \brief Converts an edited ONNX model to an nGraph Function representation. 
+ /// \brief Converts an edited ONNX model to an OpenVINO Model representation. std::shared_ptr get_function() const; /// \brief Returns a list of all inputs of the in-memory model. @@ -204,8 +202,8 @@ class ONNX_IMPORTER_API ONNXModelEditor final { /// In such a case the algorithm tries to match the given node name /// with the input name (providing an input index is not enough). /// If a unique edge is found, it will be returned. - /// If InputEdge cannot be determined based on parameter values an ngraph_error - /// exception will be thrown. + /// If InputEdge cannot be determined based on parameter values an ov::Exception + /// will be thrown. /// /// \param node A node helper structure created based on a node name /// or a node output name. @@ -221,8 +219,8 @@ /// In such a case the algorithm will try to match the given node name /// with the output name (providing an output index is not enough). /// If after such operation a found edge is unique, it is returned. - /// If OutputEdge cannot be determined based on given params the ngraph_error - /// exception is thrown. + /// If OutputEdge cannot be determined based on given params an ov::Exception + /// will be thrown. /// /// \param node A node helper structure created based on a node name /// or a node output name. @@ -287,7 +285,7 @@ /// std::vector get_output_ports(const EditorNode& node) const; - /// \brief Returns a nGraph function based on edited model + /// \brief Returns an OpenVINO Model based on edited model /// decoded to framework nodes /// std::shared_ptr decode(); diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index b4cb7c168da1e6..e43b1b0bb301a4 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -404,15 +404,15 @@ void InputModel::cut_and_add_new_input(const ov::frontend::Place::Ptr& place, co } void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) { - std::map> map; + std::map> map; if (const auto var_place = std::dynamic_pointer_cast(place)) { auto name = place->get_names().at(0); auto p_shape = m_editor->get_tensor_shape(name); auto el_type = m_editor->get_input_type(name); - std::shared_ptr constant = - ngraph::op::Constant::create(el_type, p_shape.to_shape(), value); + std::shared_ptr constant = + ov::op::v0::Constant::create(el_type, p_shape.to_shape(), value); constant->set_friendly_name(name); map.emplace(name, constant); diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.cpp b/src/frontends/onnx/frontend/src/op/conv_integer.cpp index ad01ea55f39aeb..4f6c2b057c3d43 100644 --- a/src/frontends/onnx/frontend/src/op/conv_integer.cpp +++ b/src/frontends/onnx/frontend/src/op/conv_integer.cpp @@ -4,35 +4,41 @@ #include "op/conv_integer.hpp" -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/unsqueeze.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace { -std::shared_ptr get_filter_zero_point(const OutputVector& inputs) { +std::shared_ptr get_filter_zero_point(const OutputVector& inputs) { const auto&
original_zero_point = - (inputs.size() > 3) ? inputs.at(3) : ngraph::op::Constant::create(ngraph::element::i32, {}, {0}); + (inputs.size() > 3) ? inputs.at(3) : v0::Constant::create(ov::element::i32, {}, {0}); const auto filter_zero_point_rank = original_zero_point.get_partial_shape().rank(); if (filter_zero_point_rank.is_static() && filter_zero_point_rank.get_length() == 0) { - return std::make_shared(original_zero_point, element::i32); + return std::make_shared(original_zero_point, element::i32); } else { // in case of 1D zero point filter, it has to be unsqueezed to match the data input's rank - const auto& converted_filter_zero_point = - std::make_shared(original_zero_point, element::i32); - const auto& input_shape = std::make_shared(inputs.at(0), element::i32); - const auto& input_rank = std::make_shared(input_shape, element::i32); + const auto& converted_filter_zero_point = std::make_shared(original_zero_point, element::i32); + const auto& input_shape = std::make_shared(inputs.at(0), element::i32); + const auto& input_rank = std::make_shared(input_shape, element::i32); const auto& input_rank_scalar = reshape::interpret_as_scalar(input_rank); - const auto& one_node = ngraph::op::Constant::create(ngraph::element::i32, {}, {1}); + const auto& one_node = v0::Constant::create(ov::element::i32, {}, {1}); const auto& missing_dimensions = - std::make_shared(one_node, input_rank_scalar, one_node, element::i32); + std::make_shared(one_node, input_rank_scalar, one_node, element::i32); - return std::make_shared(converted_filter_zero_point, missing_dimensions); + return std::make_shared(converted_filter_zero_point, missing_dimensions); } } } // namespace @@ -44,23 +50,22 @@ OutputVector conv_integer(const Node& node) { const auto& input = inputs.at(0); const auto& filter = inputs.at(1); - const auto& input_zero_point = - (inputs.size() > 2) ? inputs.at(2) : ngraph::op::Constant::create(ngraph::element::i32, {}, {0}); + const auto& input_zero_point = (inputs.size() > 2) ? 
inputs.at(2) : v0::Constant::create(ov::element::i32, {}, {0}); - const auto& converted_input = std::make_shared(input, element::i32); - const auto& converted_filter = std::make_shared(filter, element::i32); + const auto& converted_input = std::make_shared(input, element::i32); + const auto& converted_filter = std::make_shared(filter, element::i32); - const auto& converted_input_zero_point = std::make_shared(input_zero_point, element::i32); + const auto& converted_input_zero_point = std::make_shared(input_zero_point, element::i32); const auto& filter_zero_point = get_filter_zero_point(inputs); - const auto& shifted_input = std::make_shared(converted_input, converted_input_zero_point); - const auto& shifted_filter = std::make_shared(converted_filter, filter_zero_point); + const auto& shifted_input = std::make_shared(converted_input, converted_input_zero_point); + const auto& shifted_filter = std::make_shared(converted_filter, filter_zero_point); const auto& groups = node.get_attribute_value("group", 1); const auto& strides = convpool::get_strides(node); const auto& dilations = convpool::get_dilations(node); const auto& paddings = convpool::get_pads(node); - const ngraph::op::PadType& auto_pad_type = convpool::get_auto_pad(node); + const ov::op::PadType& auto_pad_type = convpool::get_auto_pad(node); const auto& padding_below = paddings.first; const auto& padding_above = paddings.second; diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.hpp b/src/frontends/onnx/frontend/src/op/conv_integer.hpp index 4a933a224c3e5e..01cd9b505fec6a 100644 --- a/src/frontends/onnx/frontend/src/op/conv_integer.hpp +++ b/src/frontends/onnx/frontend/src/op/conv_integer.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { @@ -18,7 +17,7 @@ namespace set_1 { /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of quantized ONNX +/// \return The vector containing OV nodes producing output of quantized ONNX /// convolution operation. OutputVector conv_integer(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/eye_like.cpp b/src/frontends/onnx/frontend/src/op/eye_like.cpp index 30bb9369d288ba..b617bda7f1d47a 100644 --- a/src/frontends/onnx/frontend/src/op/eye_like.cpp +++ b/src/frontends/onnx/frontend/src/op/eye_like.cpp @@ -4,16 +4,15 @@ #include "op/eye_like.hpp" -#include - #include "exceptions.hpp" -#include "ngraph/output_vector.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/eye.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/shape_of.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -22,12 +21,10 @@ namespace detail { namespace { /// \brief Split a shape returned by a ShapeOf operation into two outputs: width and height. 
-OutputVector get_shape_width_and_height(const Output& shape) { - const auto axis = ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}); - const auto height = - std::make_shared(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}), axis); - const auto width = - std::make_shared(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {1}), axis); +OutputVector get_shape_width_and_height(const Output& shape) { + const auto axis = v0::Constant::create(ov::element::i64, {1}, {0}); + const auto height = std::make_shared(shape, v0::Constant::create(ov::element::i64, {1}, {0}), axis); + const auto width = std::make_shared(shape, v0::Constant::create(ov::element::i64, {1}, {1}), axis); return {width, height}; } @@ -59,9 +56,9 @@ OutputVector eye_like(const Node& node) { const auto width = dims.at(0); const auto height = dims.at(1); const auto k = - ov::op::v0::Constant::create(ngraph::element::i64, {1}, {node.get_attribute_value("k", 0)}); + ov::op::v0::Constant::create(ov::element::i64, {1}, {node.get_attribute_value("k", 0)}); - const auto output = std::make_shared(height, width, k, target_type); + const auto output = std::make_shared(height, width, k, target_type); return {output}; } diff --git a/src/frontends/onnx/frontend/src/op/eye_like.hpp b/src/frontends/onnx/frontend/src/op/eye_like.hpp index 011495f1bac732..de3c5fa1d6b24d 100644 --- a/src/frontends/onnx/frontend/src/op/eye_like.hpp +++ b/src/frontends/onnx/frontend/src/op/eye_like.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/loop.cpp b/src/frontends/onnx/frontend/src/op/loop.cpp index 40dcb2ef103c9f..7cc7c16a3ee9a4 100644 --- a/src/frontends/onnx/frontend/src/op/loop.cpp +++ b/src/frontends/onnx/frontend/src/op/loop.cpp @@ -4,17 +4,18 @@ #include "op/loop.hpp" -#include -#include - #include "core/graph.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/function.hpp" -#include "ngraph/op/util/op_types.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/loop.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "openvino/op/util/op_types.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -25,7 +26,7 @@ namespace { /// iterations. /// It allows to replace termination condition body output with /// Constant. -/// As a result ngraph Loop shape inference is able to handle more +/// As a result OV Loop shape inference is able to handle more /// cases. /// /// \param[in] cond_in boolean input to the loop body depicting loop termination condition @@ -33,7 +34,7 @@ namespace { /// \param[in] cond_out loop termination condition computed after each iteration /// /// \return true if termination condition is not modified during loop iterations, false otherwise. 
-bool is_termination_condition_always_true(const ngraph::Node* cond_in, const ngraph::Node* cond_out) { +bool is_termination_condition_always_true(const ov::Node* cond_in, const ov::Node* cond_out) { return cond_in == cond_out; } } // namespace @@ -55,25 +56,24 @@ OutputVector loop(const Node& node) { } // optional inputs - Output trip_count; + Output trip_count; // trip count skipped or has value max(int64_t) means infinite loop if (ov::op::util::is_null(ng_inputs.at(0)) || - (ngraph::op::is_constant(ng_inputs.at(0).get_node_shared_ptr()) && - ov::as_type_ptr(ng_inputs.at(0).get_node_shared_ptr())->cast_vector()[0] == + (ov::op::util::is_constant(ng_inputs.at(0).get_node_shared_ptr()) && + ov::as_type_ptr(ng_inputs.at(0).get_node_shared_ptr())->cast_vector()[0] == std::numeric_limits::max())) { // -1 means infinite Loop - trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1}); + trip_count = v0::Constant::create(ov::element::i64, {1}, {-1}); } else { trip_count = ng_inputs.at(0); } - Output termination_cond; // true means that first interation should be run + Output termination_cond; // true means that first iteration should be run if (ov::op::util::is_null(ng_inputs.at(1).get_node_shared_ptr())) // termination condition skipped { - termination_cond = ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); - } else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) && - ov::as_type_ptr(ng_inputs.at(1).get_node_shared_ptr()) - ->cast_vector()[0] == false) { + termination_cond = v0::Constant::create(ov::element::boolean, {1}, {true}); + } else if (ov::op::util::is_constant(ng_inputs.at(1).get_node_shared_ptr()) && + ov::as_type_ptr(ng_inputs.at(1).get_node_shared_ptr())->cast_vector()[0] == false) { // no iteration is performed so initial values are returned OutputVector node_outputs; // final values @@ -90,17 +90,17 @@ } const int64_t concat_axis = 0; - const auto concat_axis_const = ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis}); + const auto concat_axis_const = v0::Constant::create(ov::element::i64, {1}, {concat_axis}); // add dimension along which scan outputs will be concatenated for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size(); ++i) { - body_outputs[i] = std::make_shared(body_outputs[i], concat_axis_const); + body_outputs[i] = std::make_shared(body_outputs[i], concat_axis_const); } const auto& cond_in = body_inputs[1]; const auto& cond_out = body_outputs[0]; // optimization allows improving nG Loop shape inference if (is_termination_condition_always_true(cond_in.get(), cond_out.get_node())) { - body_outputs[0] = ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); + body_outputs[0] = v0::Constant::create(ov::element::boolean, {1}, {true}); } CHECK_VALID_NODE(node, @@ -123,9 +123,9 @@ ParameterVector body_params(body_inputs.begin() + 2, body_inputs.end()); body_params.emplace(body_params.begin(), body_inputs[0]); // current iteration body input - const auto body = std::make_shared(body_outputs, body_params); - auto loop = std::make_shared(trip_count, termination_cond); - default_opset::Loop::SpecialBodyPorts spec_ports{0, 0}; + const auto body = std::make_shared(body_outputs, body_params); + auto loop = std::make_shared(trip_count, termination_cond); + v5::Loop::SpecialBodyPorts spec_ports{0, 0}; loop->set_special_body_ports(spec_ports); loop->set_function(body); diff --git
a/src/frontends/onnx/frontend/src/op/loop.hpp b/src/frontends/onnx/frontend/src/op/loop.hpp index 9e7af71c3daab4..d5a67533367b69 100644 --- a/src/frontends/onnx/frontend/src/op/loop.hpp +++ b/src/frontends/onnx/frontend/src/op/loop.hpp @@ -7,21 +7,20 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Creates nGraph node representing ONNX loop operator. +/// \brief Creates OV node representing ONNX loop operator. /// /// \note Details available here: /// https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop /// /// \param[in] node The input ONNX node representing this operation. /// -/// \return Vector of nodes containting resulting nGraph nodes. +/// \return Vector of nodes containing resulting OV nodes. /// OutputVector loop(const Node& node); } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp index 26f0100c92e171..a467b4cc82b452 100644 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp +++ b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp @@ -4,11 +4,13 @@ #include "op/matmul_integer.hpp" -#include -#include -#include +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/unsqueeze.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -20,33 +22,30 @@ OutputVector matmul_integer(const Node& node) { const auto& A = inputs.at(0); const auto& B = inputs.at(1); - const auto& A_zero_point = - (inputs.size() > 2) ? inputs.at(2) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0}); - const auto& B_zero_point = - (inputs.size() > 3) ? inputs.at(3) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0}); + const auto& A_zero_point = (inputs.size() > 2) ? inputs.at(2) : v0::Constant::create(ov::element::i32, {1}, {0}); + const auto& B_zero_point = (inputs.size() > 3) ?
inputs.at(3) : v0::Constant::create(ov::element::i32, {1}, {0}); - const auto& converted_A = std::make_shared(A, element::i32); - const auto& converted_B = std::make_shared(B, element::i32); + const auto& converted_A = std::make_shared(A, element::i32); + const auto& converted_B = std::make_shared(B, element::i32); - const auto& converted_A_zero_point = std::make_shared(A_zero_point, element::i32); - const auto& converted_B_zero_point = std::make_shared(B_zero_point, element::i32); + const auto& converted_A_zero_point = std::make_shared(A_zero_point, element::i32); + const auto& converted_B_zero_point = std::make_shared(B_zero_point, element::i32); const auto& A_zero_point_rank = A_zero_point.get_partial_shape().rank(); - Output shifted_A; + Output shifted_A; if (A_zero_point_rank.is_static() && A_zero_point_rank.get_length() == 1) { - const auto& one_node = ngraph::op::Constant::create(ngraph::element::i32, {1}, {1}); - const auto& reshaped_A_zero_point = - std::make_shared(converted_A_zero_point, one_node); + const auto& one_node = v0::Constant::create(ov::element::i32, {1}, {1}); + const auto& reshaped_A_zero_point = std::make_shared(converted_A_zero_point, one_node); - shifted_A = std::make_shared(converted_A, reshaped_A_zero_point); + shifted_A = std::make_shared(converted_A, reshaped_A_zero_point); } else { - shifted_A = std::make_shared(converted_A, converted_A_zero_point); + shifted_A = std::make_shared(converted_A, converted_A_zero_point); } - const auto& shifted_B = std::make_shared(converted_B, converted_B_zero_point); + const auto& shifted_B = std::make_shared(converted_B, converted_B_zero_point); - const auto& result = std::make_shared(shifted_A, shifted_B); + const auto& result = std::make_shared(shifted_A, shifted_B); return {result}; } diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp index 3da61339848ca6..3bb07670c5778c 100644 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp +++ b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { @@ -18,7 +17,7 @@ namespace set_1 { /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of ONNX quantizied +/// \return The vector containing OV nodes producing output of ONNX quantized /// matrix multiplication integer operation.
OutputVector matmul_integer(const Node& node); } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/ops_bridge.hpp b/src/frontends/onnx/frontend/src/ops_bridge.hpp index 7cbc25f302b196..2618260bf9129e 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.hpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.hpp @@ -13,7 +13,6 @@ #include "onnx_import/core/operator_set.hpp" #include "openvino/core/deprecated.hpp" -#include "openvino/core/except.hpp" #include "version_range.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/utils/common.cpp b/src/frontends/onnx/frontend/src/utils/common.cpp index aeda69b6063f58..206f0b0325127f 100644 --- a/src/frontends/onnx/frontend/src/utils/common.cpp +++ b/src/frontends/onnx/frontend/src/utils/common.cpp @@ -6,7 +6,6 @@ #include // onnx types -#include "ngraph/graph_util.hpp" #include "onnx_framework_node.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/frontend/exception.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp index 6cd180b6ecc64f..da42025c29bbd4 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp @@ -4,7 +4,6 @@ #include "utils/conv_factory.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/op/group_conv.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/convpool.hpp b/src/frontends/onnx/frontend/src/utils/convpool.hpp index 36d880abcbffe9..e275271918ff2a 100644 --- a/src/frontends/onnx/frontend/src/utils/convpool.hpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.hpp @@ -4,7 +4,6 @@ #pragma once -#include "ngraph/coordinate_diff.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/reshape.hpp b/src/frontends/onnx/frontend/src/utils/reshape.hpp index e40c119a7ce7e3..57d76d08823f29 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.hpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.hpp @@ -9,8 +9,7 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" +#include "openvino/core/node.hpp" namespace ngraph { namespace onnx_import { @@ -44,7 +43,7 @@ std::vector infer_dimensions(const std::string& node_name, /// /// \return Original node or a node representing a reshape of the original. /// -Output interpret_as_scalar(const Output& node); +Output interpret_as_scalar(const Output& node); /// \brief Reshape node from shape {C} to {1, C, 1, 1,...} /// @@ -58,8 +57,8 @@ Output interpret_as_scalar(const Output& node); /// /// \return Original node or a node representing a reshape of the original. 
 ///
-Output<ngraph::Node> reshape_channel_shaped_node_to_nchw(const Output<ngraph::Node>& node,
-                                                         const Output<ngraph::Node>& expected_rank);
+Output<ov::Node> reshape_channel_shaped_node_to_nchw(const Output<ov::Node>& node,
+                                                     const Output<ov::Node>& expected_rank);

 }  // namespace reshape
 }  // namespace onnx_import
diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
index 96dd0a4e0380f5..d96f354c65e1c5 100644
--- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
+++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
@@ -58,9 +58,7 @@ Buffer<ov::MappedMemory> TensorExternalData::load_external_mmap_data(const std::
 Buffer<ov::AlignedBuffer> TensorExternalData::load_external_data(const std::string& model_dir) const {
     auto full_path = ov::util::path_join({model_dir, m_data_location});
 #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
-    NGRAPH_SUPPRESS_DEPRECATED_START
     ov::util::convert_path_win_style(full_path);
-    NGRAPH_SUPPRESS_DEPRECATED_END
     std::ifstream external_data_stream(ov::util::string_to_wstring(full_path).c_str(),
                                        std::ios::binary | std::ios::in | std::ios::ate);
 #else
diff --git a/src/frontends/onnx/frontend/src/utils/variadic.hpp b/src/frontends/onnx/frontend/src/utils/variadic.hpp
index fe48ea92b34938..50e8a5ebadcff5 100644
--- a/src/frontends/onnx/frontend/src/utils/variadic.hpp
+++ b/src/frontends/onnx/frontend/src/utils/variadic.hpp
@@ -6,10 +6,6 @@

 #include <numeric>

-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/shape.hpp"
 #include "onnx_import/core/node.hpp"
 #include "openvino/core/deprecated.hpp"
 #include "utils/common.hpp"
@@ -17,24 +13,23 @@
 namespace ngraph {
 namespace onnx_import {
 namespace variadic {
-/// \brief Create an nGraph version of an ONNX variadic operation.
+/// \brief Create an OpenVINO version of an ONNX variadic operation.
 ///        This creates a subgraph with a series of binary operations.
 ///
 /// \param node  Incoming ONNX operation.
 ///
-/// \tparam T   Class of an nGraph binary operation (e.g. Add, Minimum, Maximum)
+/// \tparam T   Class of an OpenVINO binary operation (e.g. Add, Minimum, Maximum)
 ///
-/// \return nGraph node equivalent of the ONNX operation
+/// \return OpenVINO node equivalent of the ONNX operation
 OPENVINO_SUPPRESS_DEPRECATED_START
 template <class T>
 inline OutputVector make_ng_variadic_op(
     const Node& node,
-    const ngraph::op::AutoBroadcastSpec& auto_broadcast = ngraph::op::AutoBroadcastType::NUMPY) {
+    const ov::op::AutoBroadcastSpec& auto_broadcast = ov::op::AutoBroadcastType::NUMPY) {
     const OutputVector ng_inputs{node.get_ng_inputs()};

     // Templated binary operation - Creates Add, Minimum, Maximum, etc.
-    const auto binary_operation = [&auto_broadcast](const Output<ngraph::Node>& arg0,
-                                                    const Output<ngraph::Node>& arg1) {
+    const auto binary_operation = [&auto_broadcast](const Output<ov::Node>& arg0, const Output<ov::Node>& arg1) {
         return std::make_shared<T>(arg0, arg1, auto_broadcast);
     };

From 92f32081b4d5fc90aabff502dd054ec2ea23fa0d Mon Sep 17 00:00:00 2001
From: Bo Liu
Date: Wed, 17 Jan 2024 18:46:29 +0800
Subject: [PATCH 041/122] [CPU] Add support for nan inputs of Sign Op (#22040)

@liubo-intel @mitruska Please don't forget to notify other plugins to update
Sign Op impl with proper NaNs processing.
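For context, the element-wise behavior this patch introduces can be sketched in a few lines of plain C++. This is a minimal standalone illustration with a hypothetical `sign_with_nan` helper, not OpenVINO's actual reference code (which, as the diff below shows, also covers f16/bf16 through templates):

```cpp
// Sketch of the NaN-propagating Sign semantics added by this patch.
// Assumes plain float inputs; OpenVINO's reference also handles f16/bf16.
#include <cassert>
#include <cmath>
#include <limits>

// Returns -1 for negative, 0 for zero, 1 for positive, and propagates NaN.
float sign_with_nan(float v) {
    if (std::isnan(v))
        return v;  // NaN in, NaN out -- the behavioral change of this PR
    // Comparisons with NaN are always false, so the classic expression
    // below would otherwise silently return 0 for NaN inputs.
    return static_cast<float>((0.0f < v) - (v < 0.0f));
}

int main() {
    assert(sign_with_nan(4.8f) == 1.0f);
    assert(sign_with_nan(-4.8f) == -1.0f);
    assert(sign_with_nan(-0.0f) == 0.0f);
    assert(std::isnan(sign_with_nan(std::numeric_limits<float>::quiet_NaN())));
    return 0;
}
```

The explicit NaN check is needed precisely because both `0 < NaN` and `NaN < 0` evaluate to false, which is why the pre-patch reference mapped NaN to 0 instead of propagating it.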
---
 .../arithmetic/Sign_1.rst                     |  2 +-
 .../include/openvino/reference/sign.hpp       | 16 +++-
 src/core/tests/eval.cpp                       | 18 +++++
 .../intel_cpu/src/nodes/mathematics.cpp       |  2 +
 .../single_layer_tests/classes/activation.cpp |  4 +
 .../tests/functional/op_reference/sign.cpp    | 78 +++++++++++--------
 .../common_test_utils/src/ov_tensor_utils.cpp |  6 +-
 7 files changed, 88 insertions(+), 38 deletions(-)

diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst
index f0a306b642617b..08fa0ef7beb814 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst
@@ -28,7 +28,7 @@ Sign

 **Outputs**

-* **1**: The result of element-wise *Sign* operation. A tensor of type *T* with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive).
+* **1**: The result of element-wise *Sign* operation. A tensor of type *T* with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive); NaN is returned for NaN inputs.

 **Types**

diff --git a/src/core/reference/include/openvino/reference/sign.hpp b/src/core/reference/include/openvino/reference/sign.hpp
index 6363a725164dee..fe65c357046091 100644
--- a/src/core/reference/include/openvino/reference/sign.hpp
+++ b/src/core/reference/include/openvino/reference/sign.hpp
@@ -17,9 +17,21 @@ constexpr T sign(const T v) {
     return static_cast<T>(static_cast<bool>(v));
 }

-template <class T,
-          typename std::enable_if<ov::is_floating_point<T>() || std::is_signed<T>::value>::type* = nullptr>
+template <class T,
+          typename std::enable_if<std::is_floating_point<typename std::decay<T>::type>::value ||
+                                  std::is_signed<T>::value>::type* = nullptr>
 constexpr T sign(const T v) {
-    return static_cast<T>((T{0} < v) - (v < T{0}));
+    return static_cast<T>(std::isnan(static_cast<float>(v)) ? v : ((T{0} < v) - (v < T{0})));
+}
+
+template <class T,
+          typename std::enable_if<std::is_same<ov::float16, typename std::decay<T>::type>::value ||
+                                  std::is_same<ov::bfloat16, typename std::decay<T>::type>::value>::type* = nullptr>
+T sign(const T v) {
+    if (std::isnan(static_cast<float>(v)))
+        return v;
+    else
+        return static_cast<T>((T{0} < v) - (v < T{0}));
 }
 }  // namespace func
diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp
index bc77f0c2d653fa..b92adc29b1ac99 100644
--- a/src/core/tests/eval.cpp
+++ b/src/core/tests/eval.cpp
@@ -1028,6 +1028,24 @@ TEST(eval, evaluate_sign) {
     ASSERT_EQ(result_val, expec);
 }

+TEST(eval, evaluate_sign_nan) {
+    auto p = make_shared<ov::op::v0::Parameter>(element::f16, Shape{2, 3});
+    auto sign = make_shared<op::v0::Sign>(p);
+    auto model = make_shared<Model>(OutputVector{sign}, ParameterVector{p});
+    auto result = ov::Tensor();
+    auto out_vector = ov::TensorVector{result};
+    auto in_vector = ov::TensorVector{
+        make_tensor<element::Type_t::f16>(Shape{2, 3},
+                                          {std::numeric_limits<float>::quiet_NaN(), -2, 0, -4.8f, 4.8f, -0.0f})};
+    ASSERT_TRUE(model->evaluate(out_vector, in_vector));
+    result = out_vector.at(0);
+
+    EXPECT_EQ(result.get_element_type(), element::f16);
+    EXPECT_THAT(read_vector<ov::float16>(result),
+                Pointwise(NanSensitiveFloatEq(),
+                          std::vector<ov::float16>{std::numeric_limits<float>::quiet_NaN(), -1, 0, -1, 1, 0}));
+}
+
 TEST(eval, evaluate_sin) {
     auto p = make_shared<op::v0::Parameter>(element::f32, Shape{11});
     auto sin = make_shared<op::v0::Sin>(p);
diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.cpp b/src/plugins/intel_cpu/src/nodes/mathematics.cpp
index 734869737154d7..d22ed520ca78b9 100644
--- a/src/plugins/intel_cpu/src/nodes/mathematics.cpp
+++ b/src/plugins/intel_cpu/src/nodes/mathematics.cpp
@@ -159,6 +159,8 @@ void Math::execute(dnnl::stream strm) {
                 dst_data[i] = 1.0f;
             else if (src_data[i] < 0.0f)
                 dst_data[i] = -1.0f;
+            else if (std::isnan(src_data[i]))
+                dst_data[i] = src_data[i];
             else
                 dst_data[i] = 0.0f;
         });
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp
index 5eafb558b057e4..132450682059e5 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp
@@ -83,6 +83,10 @@ void ActivationLayerCPUTest::generate_inputs(const std::vector<ov::Shape>& targe
             in_data.range = range;
             in_data.resolution = resolution;
             tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data);
+            // cover Sign NAN test case
+            if ((activationType == utils::ActivationTypes::Sign) && funcInput.get_element_type() == ov::element::f32) {
+                static_cast<float*>(tensor.data())[0] = std::numeric_limits<float>::quiet_NaN();
+            }
         } else {
             tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
         }
diff --git a/src/plugins/template/tests/functional/op_reference/sign.cpp b/src/plugins/template/tests/functional/op_reference/sign.cpp
index ae08360aa38eac..67ba7a81f19e66 100644
--- a/src/plugins/template/tests/functional/op_reference/sign.cpp
+++ b/src/plugins/template/tests/functional/op_reference/sign.cpp
@@ -59,36 +59,48 @@ TEST_P(ReferenceSignLayerTest, CompareWithHardcodedRefs) {
     Exec();
 }

-INSTANTIATE_TEST_SUITE_P(smoke_Sign_With_Hardcoded_Refs,
-                         ReferenceSignLayerTest,
-                         ::testing::Values(SignParams(PartialShape{6},
-                                                      element::f32,
-                                                      element::f32,
-                                                      std::vector<float>{1, -2, 0, -4.8f, 4.8f, -0.0f},
-                                                      std::vector<float>{1, -1, 0, -1, 1, 0}),
-                                           SignParams(PartialShape{6},
-                                                      element::f16,
-                                                      element::f16,
-                                                      std::vector<ov::float16>{1, -2, 0, -4.8f, 4.8f, -0.0f},
-                                                      std::vector<ov::float16>{1, -1, 0, -1, 1, 0}),
-                                           SignParams(PartialShape{6},
-                                                      element::u64,
-                                                      element::u64,
-                                                      std::vector<uint64_t>{1, 2, 0, 4, 4, 0},
-                                                      std::vector<uint64_t>{1, 1, 0, 1, 1, 0}),
-                                           SignParams(PartialShape{6},
-                                                      element::u32,
-                                                      element::u32,
-                                                      std::vector<uint32_t>{1, 2, 0, 4, 4, 0},
-                                                      std::vector<uint32_t>{1, 1, 0, 1, 1, 0}),
-                                           SignParams(PartialShape{6},
-                                                      element::i32,
-                                                      element::i32,
-                                                      std::vector<int32_t>{1, -2, 0, -4, 4, -0},
-                                                      std::vector<int32_t>{1, -1, 0, -1, 1, 0}),
-                                           SignParams(PartialShape{6},
-                                                      element::i64,
-                                                      element::i64,
-                                                      std::vector<int64_t>{1, -2, 0, -4, 4, -0},
-                                                      std::vector<int64_t>{1, -1, 0, -1, 1, 0})),
-                         ReferenceSignLayerTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(
+    smoke_Sign_With_Hardcoded_Refs,
+    ReferenceSignLayerTest,
+    ::testing::Values(
+        SignParams(PartialShape{6},
+                   element::f32,
+                   element::f32,
+                   std::vector<float>{1, -2, 0, -4.8f, 4.8f, -0.0f},
+                   std::vector<float>{1, -1, 0, -1, 1, 0}),
+        SignParams(PartialShape{7},
+                   element::f32,
+                   element::f32,
+                   std::vector<float>{1, -2, 0, std::numeric_limits<float>::quiet_NaN(), -4.8f, 4.8f, -0.0f},
+                   std::vector<float>{1, -1, 0, std::numeric_limits<float>::quiet_NaN(), -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::f16,
+                   element::f16,
+                   std::vector<ov::float16>{1, -2, 0, -4.8f, 4.8f, -0.0f},
+                   std::vector<ov::float16>{1, -1, 0, -1, 1, 0}),
+        SignParams(PartialShape{7},
+                   element::f16,
+                   element::f16,
+                   std::vector<ov::float16>{1, -2, 0, std::numeric_limits<ov::float16>::quiet_NaN(), -4.8f, 4.8f, -0.0f},
+                   std::vector<ov::float16>{1, -1, 0, std::numeric_limits<ov::float16>::quiet_NaN(), -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::u64,
+                   element::u64,
+                   std::vector<uint64_t>{1, 2, 0, 4, 4, 0},
+                   std::vector<uint64_t>{1, 1, 0, 1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::u32,
+                   element::u32,
+                   std::vector<uint32_t>{1, 2, 0, 4, 4, 0},
+                   std::vector<uint32_t>{1, 1, 0, 1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::i32,
+                   element::i32,
+                   std::vector<int32_t>{1, -2, 0, -4, 4, -0},
+                   std::vector<int32_t>{1, -1, 0, -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::i64,
+                   element::i64,
+                   std::vector<int64_t>{1, -2, 0, -4, 4, -0},
+                   std::vector<int64_t>{1, -1, 0, -1, 1, 0})),
+    ReferenceSignLayerTest::getTestCaseName);
diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
index 2377ad37aaa2f4..3dcc151290759c 100644
--- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
+++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
@@ -352,14 +352,16 @@ void compare(const ov::Tensor& expected,
     for (size_t i = 0; i < shape_size_cnt; ++i) {
         double expected_value = expected_data[i];
         double actual_value = actual_data[i];
+        if (std::isnan(expected_value) && std::isnan(actual_value))
+            continue;
         if (std::isnan(expected_value)) {
             std::ostringstream out_stream;
-            out_stream << "Expected value is NAN on coordinate: " << i;
+            out_stream << "Expected value is NAN but Actual value is not on coordinate: " << i;
             throw std::runtime_error(out_stream.str());
         }
         if (std::isnan(actual_value)) {
             std::ostringstream out_stream;
-            out_stream << "Actual value is NAN on coordinate: " << i;
+            out_stream << "Actual value is NAN but Expected value is not on coordinate: " << i;
             throw std::runtime_error(out_stream.str());
         }
         double abs = std::fabs(expected_value - actual_value);

From 9f6aa812d745d820b3a5d7f317bf7e49ca46b127 Mon Sep 17 00:00:00 2001
From: Andrei Kashchikhin
Date: Wed, 17 Jan 2024 11:34:59 +0000
Subject: [PATCH 042/122] [GHA] [DOCS] New tests document (#22163)

* slightly adapt wording in overview, start with runners doc

* populate table; add section about runners choosing

* wording

* use runners

* add doc about Docker images

* use better formatting for pros and cons

* fix typo

* add info about
custom actions

* start with caches doc

* complete sccache

* rm non-lfs

* add image

* populate shared drive section

* cleared wording

* correct typo; add link

* start with add new tests doc

* add dedicated workflow section

* correct typo

* add section about time and machine usage
---
 docs/dev/ci/github_actions/adding_tests.md | 144 ++++++++++++++++++++-
 docs/dev/ci/github_actions/overview.md     |   4 +-
 docs/dev/ci/github_actions/runners.md      |   2 +-
 3 files changed, 145 insertions(+), 5 deletions(-)

diff --git a/docs/dev/ci/github_actions/adding_tests.md b/docs/dev/ci/github_actions/adding_tests.md
index ba31a0b6a0d6c1..18e19eb5126f1c 100644
--- a/docs/dev/ci/github_actions/adding_tests.md
+++ b/docs/dev/ci/github_actions/adding_tests.md
@@ -1,9 +1,147 @@
-# How to add New Tests to the OpenVINO GitHub Actions CI
+# How to add Tests to the OpenVINO GitHub Actions CI

-## Add to the Already Existing Workflow
+The OpenVINO repository has [many workflows](./../../../../.github/workflows); their general and structural overview is available [here](./overview.md).

-### Add to the Already Existing Job
+The workflows have many jobs dedicated to building and testing OpenVINO. This document describes how to add
+tests to these workflows or how to add an entirely new workflow.
+
+## Add Tests to the Already Existing Workflow
+
+### Add Tests to the Existing Test Suite
+
+If the new tests can be executed as part of an already existing test suite, e.g., new OVC Python API tests,
+there is no need to change the workflows: the added tests will be executed automatically in the corresponding step.
+
+Review the [workflows](./../../../../.github/workflows) and their jobs to know which tests are already enabled.
+Additionally, review the component's tests and how they are executed.
+
+### Create a Step in a Job
+
+If there is no step in the jobs that runs the needed test suite, a new step can be added to a job.
+Steps are the commands that are executed one by one and united under one job.
+Refer to the [official GitHub Actions documentation](https://docs.github.com/en/actions/using-workflows/about-workflows) for more information.
+
+An example step from [`job_python_unit_tests.yml`](./../../../../.github/workflows/job_python_unit_tests.yml):
+```yaml
+...
+steps:
+...
+  - name: OVC unit tests
+    if: fromJSON(inputs.affected-components).MO.test
+    run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml
+...
+```
+It has:
+* a `name`: `OVC unit tests`
+* an `if` condition: `fromJSON(inputs.affected-components).MO.test`
+  * This step is executed only if the condition evaluates to `true`
+  * This is a part of the Smart CI system implemented for the OpenVINO workflows. Read [here](./smart_ci.md) about the system and how to use it
+* the `run` section with commands to be executed
+
+To add a new step with new tests, navigate to the needed job and use the above template (or any other step in the job) for the new step.
+Refer to [this document](./reusable_workflows.md) to learn more about the workflow and job organisation.

 ### Create a New Job

+If the new tests do not fit in any of the jobs in the existing workflows, it is possible to create a dedicated job for them.
+An example dedicated job for a single set of tests from [`linux.yml`](./../../../../.github/workflows/linux.yml):
+```yaml
+  NVIDIA_Plugin:
+    name: NVIDIA plugin
+    needs: [ Build, Smart_CI ]
+    timeout-minutes: 15
+    defaults:
+      run:
+        shell: bash
+    runs-on: aks-linux-16-cores-32gb
+    container:
+      image: openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04
+      volumes:
+        - /mount:/mount
+      options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
+    env:
+      CMAKE_BUILD_TYPE: 'Release'
+      CMAKE_GENERATOR: 'Ninja Multi-Config'
+      CMAKE_CUDA_COMPILER_LAUNCHER: sccache
+      CMAKE_CXX_COMPILER_LAUNCHER: sccache
+      CMAKE_C_COMPILER_LAUNCHER: sccache
+      INSTALL_DIR: /__w/openvino/openvino/install
+      OPENVINO_DEVELOPER_PACKAGE: /__w/openvino/openvino/install/developer_package
+      OPENVINO_REPO: /__w/openvino/openvino/openvino
+      OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib
+      NVIDIA_BUILD_DIR: /__w/openvino/openvino/nvidia_plugin_build
+      DEBIAN_FRONTEND: 'noninteractive'
+      SCCACHE_AZURE_KEY_PREFIX: ubuntu20_x86_64_Release
+    if: fromJSON(needs.smart_ci.outputs.affected_components).NVIDIA
+
+    steps:
+    ...
+```
+
+Refer to the [official GitHub Actions documentation](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#about-yaml-syntax-for-workflows) for a complete syntax reference.
+
+A job:
+* needs a name, provided by the `name` key
+* needs a runner to execute `steps` on, provided by the `runs-on` key
+  * Refer to [this document](./runners.md) to learn more about the available runners and how to choose one
+* might use Docker to execute `steps` in. The Docker configuration is provided by the `container` key
+  * Refer to [this document](./docker_images.md) to learn more about the available Docker images and how to choose one
+* might use caches to speed up build and/or tests
+  * Different types of caches are available. Refer to [this document](./caches.md) to learn more about the available caches and how to use one
+* might use the Smart CI system to get executed conditionally, with the `if` key
+  * Refer to [this document](./smart_ci.md) for the Smart CI overview and usage
+* needs a series of commands to execute, provided by the `steps` key
+  * Refer to [this section](#create-a-step-in-a-job) to learn more about `steps`
+* might use the build artefacts from the `Build` job
+  * They can be downloaded using the `actions/download-artifact` action; read more about the workflows' structure [here](./overview.md#structure-of-the-workflows)
+
+If a job could be used in several workflows, it can be transformed into a reusable workflow.
+Read more about the reusable workflows [here](./reusable_workflows.md).
+
 ## Create a Dedicated Workflow
+
+To introduce a new workflow, add a new `.yml` file to the [`.github/workflows`](./../../../../.github) folder.
+Refer to the [official GitHub Actions documentation](https://docs.github.com/en/actions/using-workflows/about-workflows#create-an-example-workflow) for a complete syntax reference and browse the existing workflows in [`.github/workflows`](./../../../../.github).
+
+Refer to the [structural overview of the existing workflows](./overview.md#structure-of-the-workflows); their structure can be used as a template for a new one.
+
+An example of a dedicated workflow is [`fedora.yml`](./../../../../.github/workflows/fedora.yml).
It has:
+* `Smart_CI`, `Build`, `RPM_Packages`, `Overall_Status` jobs
+  * `Smart_CI` - the [Smart CI system](./smart_ci.md)
+  * `Build` - pre-requisites installation, building of OpenVINO with a certain CMake configuration, packaging and uploading of the artefacts
+  * `RPM_Packages` - pre-requisites installation, downloading of the artefacts and tests
+  * `Overall_Status` - the job for collecting the other jobs' statuses
+* the uploading and downloading of the build artefacts between jobs using `actions/upload-artifact` and `actions/download-artifact`
+* the usage of the [Smart CI system](./smart_ci.md)
+* the usage of the [self-hosted runners](./runners.md) and [Docker images](./docker_images.md)
+* the usage of [caches](./caches.md)
+
+## Test Times and Usage
+
+Be mindful of time and runner usage when adding new steps, jobs and workflows.
+
+### Adding a Step
+
+When adding a step to a job, consider checking the times of the other steps in the job;
+it is best if the new step does not lengthen the job's execution time too much and stays in line with the execution times of the other steps.
+
+If the step takes a lot of time, it might be better to [extract it into a separate job](#adding-a-job) so that it runs in parallel with other jobs.
+Additionally, when creating a job for this step, it would be possible to [pick a more powerful runner](./runners.md) to shorten the execution time.
+
+### Adding a Job
+
+When adding a job, consider checking the times of the other jobs in the workflow; it is best if the new job's execution time
+does not exceed the time of the longest job in the workflow.
+
+If the job takes a lot of time, it might be possible to run it not on a pre-commit basis but on a post-commit/nightly/weekly basis.
+Refer to [this document](./overview.md#workflows-triggers-and-schedule) to learn more about triggers and schedules.
+Additionally, it might be possible to [pick a more powerful runner](./runners.md) to shorten the execution time.
+
+### Adding a Workflow
+
+When adding a workflow, consider checking the times of the other workflows; it is best if the new workflow's execution time
+does not exceed the time of the longest workflow.
+
+If the workflow takes a lot of time, it might be possible to run it not on a pre-commit basis but on a post-commit/nightly/weekly basis.
+Refer to [this document](./overview.md#workflows-triggers-and-schedule) to learn more about triggers and schedules.
+Additionally, make sure [the right runners](./runners.md) are picked for each job so that the execution times are optimal.
diff --git a/docs/dev/ci/github_actions/overview.md b/docs/dev/ci/github_actions/overview.md
index 4a48299fda8bbc..270c2eddb0ce9c 100644
--- a/docs/dev/ci/github_actions/overview.md
+++ b/docs/dev/ci/github_actions/overview.md
@@ -8,6 +8,7 @@ Welcome to the OpenVINO Developer guide on the GitHub Actions infrastructure. Th
   * [Triggers and schedules](#workflows-triggers-and-schedule)
   * [Required workflows](#required-workflows)
   * [Workflow structure](#structure-of-the-workflows)
+  * [Workflow and job organisation](#workflows-and-jobs-organisation)
   * [Finding results, artefacts and logs](#finding-results-artefacts-and-logs)
   * [Custom actions overview](#custom-actions)
   * [Machines overview](#machines)
@@ -44,7 +45,8 @@ Additionally, several supporting workflows build and test OpenVINO for other ope

 The OpenVINO workflows make use of the rich official and community actions such as
-Additionally, common jobs, i.e., those featured in several workflows, are extracted into _reusable workflows_. Read more about the used reusable workflows and how to write one [here](./reusable_workflows.md). +Additionally, common jobs, i.e., those featured in several workflows, are extracted into _reusable workflows_. +Read more about the used reusable workflows and how to write one [here](./reusable_workflows.md). You can find more information about reusing actions and workflows [here](https://github.com/marketplace?type=actions) and [here](https://docs.github.com/en/actions/using-workflows/reusing-workflows). diff --git a/docs/dev/ci/github_actions/runners.md b/docs/dev/ci/github_actions/runners.md index cdab5a54958166..8f793168918bcc 100644 --- a/docs/dev/ci/github_actions/runners.md +++ b/docs/dev/ci/github_actions/runners.md @@ -86,7 +86,7 @@ CXX_Unit_Tests: As the C++ tests could not utilize the large number of cores for parallel execution as the build tools in the `Build` job could, it would be pointless to use the `aks-linux-16-cores-32gb` group for them. -The advice is to use runners with more cores/RAM size for the tasks that could load them. +The advice is to use runners with more cores/RAM size for the tasks that **could load them**. It is possible to experiment with different configurations before deciding, i.e., run a job on runners from different groups and observe the gains; if they are significant, e.g., 60 minutes on a 4-core runner vs. 15 minutes on a 16-core runner, From 83f50ba7bdb8ddf39732c0a4e0cdebe4730434cd Mon Sep 17 00:00:00 2001 From: River Li Date: Wed, 17 Jan 2024 20:04:03 +0800 Subject: [PATCH 043/122] [CPU][Test] migrate legacy plugin/compiled_model behavior test cases to be 2.0 (#21960) * [CPU][Test] migrate cpu plugin behavior test cases to be 2.0 - OV_Plugin/CompiledModel * Remove tests from ov_plugin/core_integration.cpp * Replace ov::internal::cpu_bind_thread with ov::hint::enable_cpu_pinning * Resolve reviewer's comments * Fix build error of nvidia plugin * Use OVCompiledGraphImportExportTest to replace OVExecGraphImportExportTest * Remove unused ov::internal::cpu_bind_thread * Update for code reviewing * Solve failure in MacOS * Update * change instantiation name * Update for code reviewing --- src/plugins/intel_cpu/src/plugin.cpp | 2 + .../core_integration.cpp | 2 - .../behavior/compiled_model/import_export.cpp | 70 +++++++++ .../properties.cpp | 72 +++++++-- .../executable_network/exec_graph_info.cpp | 58 -------- .../executable_network/exec_network_base.cpp | 48 ------ .../executable_network/get_metric.cpp | 72 --------- .../exec_network_base.cpp | 53 ------- .../ov_exec_net_import_export.cpp | 49 ------ .../behavior/ov_plugin/caching_tests.cpp | 49 +++--- .../behavior/ov_plugin/core_integration.cpp | 25 +--- .../{plugin => ov_plugin}/plugin_name.cpp | 0 .../behavior/ov_plugin/properties_tests.cpp | 48 +++++- .../behavior/plugin/caching_tests.cpp | 110 -------------- .../behavior/plugin/configuration_tests.cpp | 140 ------------------ .../behavior/plugin/core_integration.cpp | 109 -------------- .../behavior/plugin/core_threading_tests.cpp | 36 ----- .../behavior/plugin/life_time.cpp | 30 ---- .../skip_tests_config.cpp | 8 +- .../import_export.cpp} | 35 +++-- .../behavior/compiled_model/import_export.hpp | 71 ++++++++- .../ov_executable_network/exec_graph_info.hpp | 18 +-- 22 files changed, 307 insertions(+), 798 deletions(-) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/{ov_executable_network => 
compiled_model}/core_integration.cpp (96%) create mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/{ov_executable_network => compiled_model}/properties.cpp (75%) delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/{plugin => ov_plugin}/plugin_name.cpp (100%) delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp rename src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/{ov_executable_network/exec_graph_info.cpp => compiled_model/import_export.cpp} (58%) diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 2724ef84f27498..7fa87cae09f948 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -711,6 +711,8 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) } } return res; + } else if (name == ov::internal::exclusive_async_requests.name()) { + return engConfig.exclusiveAsyncRequests; } return get_ro_property(name, options); } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp similarity index 96% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp index 0f71d3e80c30ad..f5b80651b56c3e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp @@ -8,8 +8,6 @@ using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; - namespace { // // Executable Network GetMetric diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp new file mode 100644 index 00000000000000..f1284f706cca13 
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp
@@ -0,0 +1,70 @@
+// Copyright (C) 2018-2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/test_constants.hpp"
+#include "behavior/compiled_model/import_export.hpp"
+
+namespace {
+
+using namespace ov::test::behavior;
+
+const std::vector<ov::element::Type> netPrecisions = {
+    ov::element::i8,
+    ov::element::i16,
+    ov::element::i32,
+    ov::element::i64,
+    ov::element::u8,
+    ov::element::u16,
+    ov::element::u32,
+    ov::element::u64,
+    ov::element::f16,
+    ov::element::f32,
+};
+const ov::AnyMap empty_property = {};
+
+INSTANTIATE_TEST_SUITE_P(smoke_serialization,
+                         OVCompiledGraphImportExportTest,
+                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                            ::testing::Values(empty_property)),
+                         OVCompiledGraphImportExportTest::getTestCaseName);
+
+TEST_P(OVCompiledModelGraphUniqueNodeNamesTest, CheckUniqueNodeNames) {
+    std::shared_ptr<ov::Core> core = ov::test::utils::PluginCache::get().core();
+    auto compiled_model = core->compile_model(model, target_device);
+    auto exec_graph = compiled_model.get_runtime_model();
+
+    int numReorders = 0;
+    int expectedReorders = 2;
+    std::unordered_set<std::string> names;
+    ASSERT_NE(exec_graph, nullptr);
+
+    for (const auto& op : exec_graph->get_ops()) {
+        const auto& rtInfo = op->get_rt_info();
+        auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE);
+        ASSERT_NE(rtInfo.end(), it);
+        auto opType = it->second.as<std::string>();
+
+        if (opType == "Reorder") {
+            numReorders++;
+        }
+    }
+
+    ASSERT_EQ(numReorders, expectedReorders)
+        << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders;
+};
+
+const std::vector<ov::element::Type> netPrc = {
+    ov::element::f32,
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_NoReshape,
+                         OVCompiledModelGraphUniqueNodeNamesTest,
+                         ::testing::Combine(::testing::ValuesIn(netPrc),
+                                            ::testing::Values(ov::Shape{1, 2, 5, 5}),
+                                            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                         OVCompiledModelGraphUniqueNodeNamesTest::getTestCaseName);
+
+}  // namespace
+
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp
similarity index 75%
rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp
rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp
index c2a2028ff02b92..669d7efa4c64d5 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp
@@ -4,6 +4,7 @@

 #include "behavior/compiled_model/properties.hpp"

+#include "behavior/ov_executable_network/get_metric.hpp"
 #include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/system_conf.hpp"

@@ -73,23 +74,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
 const std::vector<ov::AnyMap> properties = {{ov::num_streams(ov::streams::NUMA)},
                                             {ov::num_streams(ov::streams::AUTO)},
                                             {ov::num_streams(0), ov::inference_num_threads(1)},
-                                            {ov::num_streams(1), ov::inference_num_threads(1)},
-                                            {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
-                                              InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}};
+                                            {ov::num_streams(1), ov::inference_num_threads(1)}};

 const std::vector<ov::AnyMap> hetero_properties = {
     {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::num_streams(ov::streams::AUTO)},
-    {ov::device::priorities(ov::test::utils::DEVICE_CPU),
-     {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
-      InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
 };

 const std::vector<ov::AnyMap> auto_batch_properties = {
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}},
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"},
-     {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "1"}},
-    {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"},
-     {ov::auto_batch_timeout(10)}},
+    {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)")},
+    {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(1)},
+    {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(10)},
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
@@ -163,4 +157,60 @@ INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectPropertiesTe
                          OVClassCompileModelWithCorrectPropertiesTest,
                          ::testing::Combine(::testing::Values("HETERO"),
                                             ::testing::ValuesIn(heteroConfigsWithSecondaryProperties)));
+
+//
+// OV CompiledModel Get RO Property
+//
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+    ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+    ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+    ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+    ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
+    ::testing::Values("CPU", "HETERO:CPU"));
+
+//
+// OV CompiledModel GetProperty / SetProperty
+//
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkGetConfigTest, OVClassExecutableNetworkGetConfigTest,
+    ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassExecutableNetworkSetConfigTest, OVClassExecutableNetworkSetConfigTest,
+    ::testing::Values("CPU"));
+
+//
+// Hetero OV CompiledModel Get RO Property
+//
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+    ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+    ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
+    ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
+    ::testing::Values("CPU"));
 }  // namespace
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp deleted file mode 100644 index 165c26385e959a..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "behavior/executable_network/exec_graph_info.hpp" - -namespace { - -using namespace ExecutionGraphTests; - -INSTANTIATE_TEST_SUITE_P(smoke_serialization, ExecGraphSerializationTest, - ::testing::Values(ov::test::utils::DEVICE_CPU), - ExecGraphSerializationTest::getTestCaseName); - -TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) { -InferenceEngine::CNNNetwork cnnNet(fnPtr); - -auto ie = PluginCache::get().ie(); -auto execNet = ie->LoadNetwork(cnnNet, target_device); - -InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo(); - -int numReorders = 0; -int expectedReorders = 2; -std::unordered_set names; - -auto function = execGraphInfo.getFunction(); -ASSERT_NE(function, nullptr); - -for (const auto & op : function->get_ops()) { -const auto & rtInfo = op->get_rt_info(); -auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE); -ASSERT_NE(rtInfo.end(), it); -auto opType = it->second.as(); - -if (opType == "Reorder") { -numReorders++; -} -} - -ASSERT_EQ(numReorders, expectedReorders) << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders; -}; - -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ExecGraphUniqueNodeNames, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({1, 2, 5, 5})), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ExecGraphUniqueNodeNames::getTestCaseName); - -} // namespace - diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp deleted file mode 100644 index 52a4bee4fbc720..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/exec_network_base.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - - const std::vector> configs = { - {}, - }; - - const std::vector> heteroConfigs = { - {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - ExecutableNetworkBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - - const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16 - }; - - const std::vector> configSetPrc = { - {}, - 
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configSetPrc)), - ExecNetSetPrecision::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp deleted file mode 100644 index 1e5badc668ffb0..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/get_metric.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -namespace { -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassImportExportTestP, IEClassImportExportTestP, - ::testing::Values("HETERO:CPU")); - -// -// Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("CPU", "HETERO:CPU")); - -// -// Executable Network GetConfig / SetConfig -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest, - ::testing::Values("CPU")); - -// -// Hetero Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, - ::testing::Values("CPU")); - -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp deleted file mode 100644 index 
255a87b07229c9..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/compiled_model/compiled_model_base.hpp" -#include "ie_plugin_config.hpp" - -using namespace ov::test::behavior; -namespace { - - const std::vector configs = { - {}, - }; - - const std::vector heteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16 - }; - - const std::vector configSetPrc = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp deleted file mode 100644 index 16f4c82c74be24..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "behavior/compiled_model/import_export.hpp" - -#include "ie_plugin_config.hpp" -#include - -using namespace ov::test::behavior; -namespace { -const std::vector netPrecisions = { - ov::element::i8, - ov::element::i16, - ov::element::i32, - ov::element::i64, - ov::element::u8, - ov::element::u16, - ov::element::u32, - ov::element::u64, - ov::element::f16, - ov::element::f32, -}; -const std::vector configs = { - {}, -}; - -const std::vector heteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledGraphImportExportTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - 
OVCompiledGraphImportExportTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(
-    smoke_OVClassImportExportTestP, OVClassCompiledModelImportExportTestP,
-    ::testing::Values("HETERO:CPU"));
-
-}  // namespace
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
index 644fe58c71e669..da54ed76e2dada 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
@@ -3,43 +3,44 @@
 //

 #include "behavior/ov_plugin/caching_tests.hpp"
-#include <ov_ops/multiclass_nms_ie_internal.hpp>
-#include <ov_ops/nms_ie_internal.hpp>
-#include <ov_ops/nms_static_shape_ie.hpp>
+#include "ov_ops/multiclass_nms_ie_internal.hpp"
+#include "ov_ops/nms_ie_internal.hpp"
+#include "ov_ops/nms_static_shape_ie.hpp"
+
+using namespace ov;

 using namespace ov::test::behavior;
-using namespace ngraph;

 namespace {
-    static const std::vector<ngraph::element::Type> precisionsCPU = {
-        ngraph::element::f32,
-        ngraph::element::f16,
-        ngraph::element::i32,
-        ngraph::element::i64,
-        ngraph::element::i8,
-        ngraph::element::u8,
-        ngraph::element::i16,
-        ngraph::element::u16,
+    static const std::vector<ov::element::Type> precisionsCPU = {
+        ov::element::f32,
+        ov::element::f16,
+        ov::element::i32,
+        ov::element::i64,
+        ov::element::i8,
+        ov::element::u8,
+        ov::element::i16,
+        ov::element::u16,
     };

-    static const std::vector<ngraph::element::Type> floatPrecisionsCPU = {
-        ngraph::element::f32,
-        ngraph::element::f16,
+    static const std::vector<ov::element::Type> floatPrecisionsCPU = {
+        ov::element::f32,
+        ov::element::f16,
     };

     static const std::vector<size_t> batchSizesCPU = {
         1, 2
     };

-    static const std::vector<ngraph::element::Type> precisionsCPUInternal = {
-        ngraph::element::f32
+    static const std::vector<ov::element::Type> precisionsCPUInternal = {
+        ov::element::f32
     };

     static const std::vector<size_t> batchSizesCPUInternal = {
         1
     };

-    static std::shared_ptr<ngraph::Function> simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) {
+    static std::shared_ptr<ov::Model> simple_function_non_max_suppression_internal(element::Type, size_t) {
         auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4});
         auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000});
         auto max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10});
@@ -48,11 +49,11 @@ namespace {
         auto nms = std::make_shared<ov::op::internal::NonMaxSuppressionIEInternal>(boxes, scores, max_output_boxes_per_class,
                                                                                    iou_threshold, score_threshold, 0, true, element::i32);
         auto res = std::make_shared<ov::op::v0::Result>(nms);
-        auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores});
+        auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores});
         return func;
     }

-    static std::shared_ptr<ngraph::Function> simple_function_matrix_nms_internal(ngraph::element::Type, size_t) {
+    static std::shared_ptr<ov::Model> simple_function_matrix_nms_internal(element::Type, size_t) {
         auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4});
         auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000});
         ov::op::v8::MatrixNms::Attributes attr;
@@ -60,18 +61,18 @@ namespace {
         attr.output_type = element::i32;
         auto nms = std::make_shared<ov::op::internal::NmsStaticShapeIE<ov::op::v8::MatrixNms>>(boxes, scores, attr);
         auto res = std::make_shared<ov::op::v0::Result>(nms);
-        auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores});
+        auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores});
         return func;
     }

-    static std::shared_ptr<ngraph::Function> simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) {
+    static std::shared_ptr<ov::Model> simple_function_multiclass_nms_internal(element::Type, size_t) {
         auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4});
         auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000});
         ov::op::util::MulticlassNmsBase::Attributes attr;
         attr.output_type = element::i32;
         auto nms = std::make_shared<ov::op::internal::MulticlassNmsIEInternal>(boxes, scores, attr);
         auto res = std::make_shared<ov::op::v0::Result>(nms);
-        auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores});
+        auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores});
         return func;
     }
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp
index 85295589ceeb99..934b044a8f752f 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp
@@ -2,36 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "behavior/ov_plugin/core_integration.hpp"
-
-#include
-
-#include "behavior/ov_plugin/core_integration_sw.hpp"
 #include "behavior/ov_plugin/query_model.hpp"
-#include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/core.hpp"

 using namespace ov::test::behavior;
-using namespace InferenceEngine::PluginConfigParams;
-
-// defined in plugin_name.cpp
-extern const char * cpu_plugin_file_name;

 namespace {
-//
-// IE Class Common tests with
-//
-
-INSTANTIATE_TEST_SUITE_P(
-    smoke_OVClassImportExportTestP, OVClassImportExportTestP,
-    ::testing::Values("HETERO:CPU"));

 // IE Class Query model
-INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest, ::testing::Values("CPU"));
-
-// OV Class Load network
-INSTANTIATE_TEST_SUITE_P(
-    smoke_OVClassLoadNetworkTest, OVClassLoadNetworkTestWithThrow,
-    ::testing::Values(""));
+INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest,
+::testing::Values("CPU"));
 }  // namespace
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/plugin_name.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/plugin_name.cpp
similarity index 100%
rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/plugin_name.cpp
rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/plugin_name.cpp
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
index b736a5ce7b6be2..bc7aae3c0efe6b 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
@@ -3,11 +3,9 @@
 //

 #include "behavior/ov_plugin/properties_tests.hpp"
-
-#include <openvino/runtime/auto/properties.hpp>
+#include "openvino/runtime/auto/properties.hpp"

 using namespace ov::test::behavior;
-using namespace InferenceEngine::PluginConfigParams;

 namespace {

@@ -15,17 +13,53 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassCommon,
                          OVBasicPropertiesTestsP,
                          ::testing::Values(std::make_pair("openvino_intel_cpu_plugin", "CPU")));

-const std::vector<ov::AnyMap> cpu_properties = {
-    {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)},
-    {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)},
+auto cpu_properties = []() -> std::vector<ov::AnyMap> {
+    std::vector<ov::AnyMap> properties = {
+        {},
+        {ov::hint::enable_cpu_pinning(true)},
+        {ov::hint::enable_cpu_pinning(false)},
+        {ov::enable_profiling(true)},
+        {ov::enable_profiling(false)},
+        {ov::internal::exclusive_async_requests(true)},
+        {ov::internal::exclusive_async_requests(false)},
+        {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)},
+        {{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, {ov::hint::num_requests(1)}},
+        {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)},
+        {ov::num_streams(ov::streams::AUTO)},
+        {ov::num_streams(8)},
+        // check that hints doesn't override customer value (now for streams and later for other config opts)
+        {{ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, {ov::hint::num_requests(3)}},
+        {{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, {ov::hint::num_requests(3)}},
+    };
+
+    auto numa_nodes = ov::get_available_numa_nodes();
+    if (numa_nodes.size() > 1) {
+        properties.push_back({ov::num_streams(ov::streams::NUMA)});
+    }
+    return properties;
 };

 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
                          OVPropertiesTests,
                          ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU),
-                                            ::testing::ValuesIn(cpu_properties)),
+                                            ::testing::ValuesIn(cpu_properties())),
                          OVPropertiesTests::getTestCaseName);

+const std::vector<ov::AnyMap> cpu_inproperties = {
+    {{ov::hint::performance_mode.name(), "DOESN'T EXIST"}},
+    {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), {ov::hint::num_requests(-1)}},
+    {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
+     {ov::hint::num_requests.name(), "should be int"}},
+    {{ov::num_streams.name(), "OFF"}},
+    {{ov::hint::enable_cpu_pinning.name(), "OFF"}},
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
+                         OVPropertiesIncorrectTests,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU),
+                                            ::testing::ValuesIn(cpu_inproperties)),
+                         OVPropertiesIncorrectTests::getTestCaseName);
+
 const std::vector<ov::AnyMap> cpu_setcore_properties = {
     {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
      ov::hint::num_requests(2),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp
deleted file mode 100644
index 7bc6f7b10d3512..00000000000000
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior/plugin/caching_tests.hpp"
-#include <ov_ops/multiclass_nms_ie_internal.hpp>
-#include <ov_ops/nms_ie_internal.hpp>
-#include <ov_ops/nms_static_shape_ie.hpp>
-
-using namespace LayerTestsDefinitions;
-using namespace ngraph;
-
-namespace {
-    static const std::vector<ngraph::element::Type> precisionsCPU = {
-        ngraph::element::f32,
-        ngraph::element::f16,
-        ngraph::element::i32,
-        ngraph::element::i64,
-        ngraph::element::i8,
-        ngraph::element::u8,
-        ngraph::element::i16,
-        ngraph::element::u16,
-    };
-
-    static const std::vector<ngraph::element::Type> floatPrecisionsCPU = {
-        ngraph::element::f32,
-        ngraph::element::f16
-    };
-
-    static const std::vector<size_t> batchSizesCPU = {
-        1, 2
-    };
-
-    static const std::vector<ngraph::element::Type> precisionsCPUInternal = {
-        ngraph::element::f32
-    };
-
-    static const std::vector<size_t> batchSizesCPUInternal = {
-        1
-    };
-
-    static std::shared_ptr<ngraph::Function> simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) {
-        auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4});
-        auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000});
-        auto
max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10}); - auto iou_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.75}); - auto score_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.7}); - auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, - iou_threshold, score_threshold, 0, true, element::i32); - auto res = std::make_shared(nms); - auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::shared_ptr simple_function_matrix_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - ov::op::v8::MatrixNms::Attributes attr; - // convert_precision does not support internal op 'NmsStaticShapeIE' - attr.output_type = element::i32; - auto nms = std::make_shared>(boxes, scores, attr); - auto res = std::make_shared(nms); - auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::shared_ptr simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - ov::op::util::MulticlassNmsBase::Attributes attr; - attr.output_type = element::i32; - auto nms = std::make_shared(boxes, scores, attr); - auto res = std::make_shared(nms); - auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::vector internal_functions_cpu() { - std::vector funcs = { - nGraphFunctionWithName { simple_function_non_max_suppression_internal, "NonMaxSuppressionIEInternal"}, - nGraphFunctionWithName { simple_function_matrix_nms_internal, "NmsStaticShapeIE_MatrixNms"}, - nGraphFunctionWithName { simple_function_multiclass_nms_internal, "MulticlassNmsIEInternal"}, - }; - return funcs; - } - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU_Float, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU_Internal, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(internal_functions_cpu()), - ::testing::ValuesIn(precisionsCPUInternal), - ::testing::ValuesIn(batchSizesCPUInternal), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp deleted file mode 100644 index 8f3974817871f7..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) 2018-2023 Intel 
Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/configuration_tests.hpp" - -#include "ie_plugin_config.hpp" -#include "openvino/runtime/system_conf.hpp" - -using namespace BehaviorTestsDefinitions; - -namespace { - #if (defined(__APPLE__) || defined(_WIN32)) - auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = ov::get_available_numa_nodes(); - auto coreTypes = ov::get_available_cores_types(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else if (numaNodes.size() > 1) { - return std::string{CONFIG_VALUE(NUMA)}; - } else { - return std::string{CONFIG_VALUE(NO)}; - } - }()}; - #else - auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = ov::get_available_cores_types(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else { - return std::string{CONFIG_VALUE(YES)}; - } - }()}; - #endif - - INSTANTIATE_TEST_SUITE_P( - smoke_Basic, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::Values("CPU"), - ::testing::Values(DefaultParameter{CONFIG_KEY(CPU_BIND_THREAD), defaultBindThreadParameter})), - DefaultConfigurationTest::getTestCaseName); - - const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 - }; - - const std::vector> conf = { - {} - }; - - const std::vector> Configs = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, - // check that hints doesn't override customer value (now for streams and later for other config opts) - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(Configs)), - CorrectConfigTests::getTestCaseName); - - const std::vector> inconfigs = { - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - 
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(inconfigs)), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(inconfigs)), - IncorrectConfigAPITests::getTestCaseName); - - const std::vector> ConfigsCheck = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigCheck, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(ConfigsCheck)), - CorrectConfigCheck::getTestCaseName); - - const std::vector> cpu_prop_config = {{ - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "2"}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}, - }}; - - const std::vector> cpu_loadNetWork_config = {{ - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::NO}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "10"}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, - }}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(cpu_prop_config), - ::testing::ValuesIn(cpu_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp deleted file mode 
100644 index 6934ffaa19f78c..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/core_integration.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -// defined in plugin_name.cpp -extern const char * cpu_plugin_file_name; - -namespace { -// -// IE Class Common tests with -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassCommon, IEClassBasicTestP, - ::testing::Values(std::make_pair(cpu_plugin_file_name, "CPU"))); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassNetworkTestP, IEClassNetworkTestP, - ::testing::Values("CPU")); - -// -// IE Class GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_STREAMS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, - ::testing::Values("CPU")); - -// -// IE Class GetConfig -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetConfigTest, IEClassGetConfigTest, - ::testing::Values("CPU")); - -////////////////////////////////////////////////////////////////////////////////////////// - -TEST(IEClassBasicTest, smoke_SetConfigAfterCreatedThrow) { - InferenceEngine::Core ie; - std::string value = {}; - - ASSERT_NO_THROW(ie.SetConfig({{KEY_CPU_THREADS_NUM, "1"}}, "CPU")); - ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as()); - ASSERT_EQ("1", value); - - ASSERT_NO_THROW(ie.SetConfig({{KEY_CPU_THREADS_NUM, "4"}}, "CPU")); - ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as()); - ASSERT_EQ("4", value); -} - -// IE Class Query network - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, - ::testing::Values("CPU")); - -// IE Class Load network - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTestWithThrow, - ::testing::Values("")); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp deleted file mode 100644 index 
5f1ada306d367a..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#ifdef __GLIBC__ -#include -#endif - -namespace { - -const Params params[] = { - std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}}, - std::tuple{ ov::test::utils::DEVICE_HETERO, {{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}}, -}; - -const Params paramsStreams[] = { - std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(CPU_THROUGHPUT_STREAMS), CONFIG_VALUE(CPU_THROUGHPUT_AUTO) }}}, -}; -} // namespace - -INSTANTIATE_TEST_SUITE_P(CPU, CoreThreadingTests, testing::ValuesIn(params), CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(CPU, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(params), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(CPU_Streams, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(paramsStreams), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp deleted file mode 100644 index 43b40e4688cdad..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/life_time.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - const std::vector> orders = { - // 0 - plugin - // 1 - executable_network - // 2 - infer_request - // 3 - variable state - {3, 0, 1, 2}, - {3, 0, 2, 1}, - {3, 1, 0, 2}, - {3, 1, 2, 0}, - {3, 2, 0, 1}, - {3, 2, 1, 0}, - {0, 3, 1, 2}, - {0, 1, 3, 2} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, HoldersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(orders)), - HoldersTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 53f3374a31ca1f..8c85713adfee26 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -69,15 +69,10 @@ std::vector disabledTestPatterns() { R"(.*ReduceOpsLayerTest.*type=Mean_.*netPRC=(I64|I32).*)", R"(.*ReduceOpsLayerTest.*type=Mean_.*netPRC=U64.*)", // Not implemented yet: - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", - R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*ExecGraphInfo.*)", 
R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*ExecGraphInfo.*)", - R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*CanCreateTwoExeNetworksAndCheckFunction.*)", R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*canCreateTwoCompiledModelAndCheckTheir.*)", // CPU does not support dynamic rank // Issue: 66778 @@ -270,9 +265,8 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(smoke_dynamicShapes4D.*INFERENCE_PRECISION_HINT=f16.*)"); // Issue: 124309 retVector.emplace_back(R"(.*InferRequestPreprocessConversionTest.*oLT=NHWC.*)"); - retVector.emplace_back(R"(.*smoke_NoReshape/ExecGraphUniqueNodeNames.CheckUniqueNodeNames.*)"); + retVector.emplace_back(R"(.*smoke_NoReshape/OVCompiledModelGraphUniqueNodeNamesTest.CheckUniqueNodeNames.*)"); retVector.emplace_back(R"(.*smoke_BehaviorTests/InferRequestPerfCountersTest.CheckOperationInPerfMap.*)"); - retVector.emplace_back(R"(smoke_BehaviorTests/ExecutableNetworkBaseTest.CheckExecGraphInfo.*)"); retVector.emplace_back(R"(smoke_BehaviorTests/OVCompiledModelBaseTestOptional.CheckExecGraphInfo.*)"); retVector.emplace_back( R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=FakeQuantizeBinaryConvolution.*)"); diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp similarity index 58% rename from src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp rename to src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp index 3fc385eb81701c..2d89f454c82db5 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp @@ -1,17 +1,15 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/compiled_model/import_export.hpp" -#include "ov_api_conformance_helpers.hpp" -#include "ie_plugin_config.hpp" -#include +#include "behavior/compiled_model/import_export.hpp" +#include "common_test_utils/test_constants.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; namespace { - const std::vector ovExecGraphInfoElemTypes = { ov::element::i8, ov::element::i16, @@ -27,7 +25,6 @@ const std::vector ovExecGraphInfoElemTypes = { ov::element::bf16, ov::element::boolean, }; - INSTANTIATE_TEST_SUITE_P(ov_compiled_model, OVCompiledGraphImportExportTest, ::testing::Combine( @@ -35,9 +32,29 @@ INSTANTIATE_TEST_SUITE_P(ov_compiled_model, ::testing::Values(targetDevice), ::testing::Values(pluginConfig)), OVCompiledGraphImportExportTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P( ov_compiled_model, OVClassCompiledModelImportExportTestP, ::testing::Values(targetDevice)); -} // namespace +const std::vector nPrc = { + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, + ov::element::f16, + ov::element::f32, + ov::element::f64, + ov::element::bf16, +}; + +INSTANTIATE_TEST_SUITE_P(ov_compiled_model, + OVCompiledModelGraphUniqueNodeNamesTest, + 
::testing::Combine(::testing::ValuesIn(nPrc), + ::testing::Values(ov::Shape{1, 2, 5, 5}), + ::testing::Values(targetDevice)), + OVCompiledModelGraphUniqueNodeNamesTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp index 6a611b6cb2a60b..40eaa9c54c26d0 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp @@ -49,9 +49,9 @@ class OVCompiledGraphImportExportTest : public testing::WithParamInterfaceGetParam(); // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED(); + std::tie(elementType, target_device, configuration) = this->GetParam(); APIBaseTest::SetUp(); } @@ -317,6 +317,75 @@ TEST_P(OVClassCompiledModelImportExportTestP, smoke_ImportNetworkNoThrowWithDevi OV_ASSERT_NO_THROW(executableNetwork.create_infer_request()); } +// +// GetRuntimeModel +// +typedef std::tuple + OVCompiledModelGraphUniqueNodeNamesTestParams; + +class OVCompiledModelGraphUniqueNodeNamesTest + : public testing::WithParamInterface, + public OVCompiledNetworkTestBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + ov::element::Type netPrecision; + ov::Shape inputShapes; + std::string targetDevice; + std::tie(netPrecision, inputShapes, targetDevice) = obj.param; + std::replace(targetDevice.begin(), targetDevice.end(), ':', '_'); + + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; + result << "netPRC=" << netPrecision.to_string() << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); + } + + void SetUp() override { + ov::Shape inputShape; + ov::element::Type netPrecision; + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + std::tie(netPrecision, inputShape, target_device) = this->GetParam(); + + APIBaseTest::SetUp(); + + ov::ParameterVector params{std::make_shared(netPrecision, ov::Shape(inputShape))}; + auto split_axis_op = + std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto concat = std::make_shared(split->outputs(), 1); + + ov::ResultVector results{std::make_shared(concat)}; + model = std::make_shared(results, params, "SplitConvConcat"); + } + +protected: + std::shared_ptr model; +}; + +TEST_P(OVCompiledModelGraphUniqueNodeNamesTest, CheckUniqueNodeNames) { + std::shared_ptr core = ov::test::utils::PluginCache::get().core(); + auto compiled_model = core->compile_model(model, target_device); + auto exec_graph = compiled_model.get_runtime_model(); + + std::unordered_set names; + ASSERT_NE(exec_graph, nullptr); + + for (const auto& op : exec_graph->get_ops()) { + ASSERT_TRUE(names.find(op->get_friendly_name()) == names.end()) + << "Node with name " << op->get_friendly_name() << "already exists"; + names.insert(op->get_friendly_name()); + + const auto& rtInfo = op->get_rt_info(); + auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE); + ASSERT_NE(rtInfo.end(), it); + } +}; + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index 
a0e360c596922c..8479dd5ad1e800 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -14,25 +14,25 @@ namespace ov { namespace test { namespace behavior { -typedef std::tuple< - ov::element::Type_t, // Element type - std::string, // Device name - ov::AnyMap // Config -> OVExecGraphImportExportTestParams; +typedef std::tuple + OVExecGraphImportExportTestParams; class OVExecGraphImportExportTest : public testing::WithParamInterface, public OVCompiledNetworkTestBase { - public: +public: static std::string getTestCaseName(testing::TestParamInfo obj); void SetUp() override; void TearDown() override; - protected: +protected: std::shared_ptr core = utils::PluginCache::get().core(); ov::AnyMap configuration; - ov::element::Type_t elementType; + ov::element::Type elementType; std::shared_ptr function; }; @@ -78,4 +78,4 @@ class OVExecGraphSerializationTest : public testing::WithParamInterface Date: Wed, 17 Jan 2024 04:23:28 -0800 Subject: [PATCH 044/122] Refactoring operations A-B (#22206) --- src/frontends/onnx/frontend/src/op/abs.hpp | 7 +- src/frontends/onnx/frontend/src/op/acos.hpp | 7 +- src/frontends/onnx/frontend/src/op/acosh.hpp | 5 +- .../src/op/adaptive_avg_pooling2d.cpp | 6 +- src/frontends/onnx/frontend/src/op/add.cpp | 11 +-- src/frontends/onnx/frontend/src/op/add.hpp | 3 - src/frontends/onnx/frontend/src/op/affine.cpp | 9 ++- src/frontends/onnx/frontend/src/op/affine.hpp | 1 - src/frontends/onnx/frontend/src/op/and.hpp | 11 +-- src/frontends/onnx/frontend/src/op/argmax.cpp | 1 - src/frontends/onnx/frontend/src/op/argmax.hpp | 9 +-- src/frontends/onnx/frontend/src/op/argmin.cpp | 1 - src/frontends/onnx/frontend/src/op/argmin.hpp | 9 +-- src/frontends/onnx/frontend/src/op/asin.hpp | 8 +- src/frontends/onnx/frontend/src/op/asinh.hpp | 5 +- src/frontends/onnx/frontend/src/op/atan.hpp | 7 +- src/frontends/onnx/frontend/src/op/atanh.hpp | 5 +- src/frontends/onnx/frontend/src/op/aten.cpp | 53 +++++++------ src/frontends/onnx/frontend/src/op/aten.hpp | 2 - .../onnx/frontend/src/op/average_pool.cpp | 1 - .../onnx/frontend/src/op/average_pool.hpp | 5 +- .../onnx/frontend/src/op/batch_norm.cpp | 12 +-- .../onnx/frontend/src/op/batch_norm.hpp | 1 - .../onnx/frontend/src/op/bitshift.cpp | 20 ++--- .../onnx/frontend/src/op/bitshift.hpp | 3 - .../onnx/frontend/src/op/bitwise_and.cpp | 2 +- .../onnx/frontend/src/op/bitwise_and.hpp | 1 - .../onnx/frontend/src/op/bitwise_or.cpp | 2 +- .../onnx/frontend/src/op/bitwise_or.hpp | 1 - .../onnx/frontend/src/op/bitwise_xor.cpp | 2 +- .../onnx/frontend/src/op/bitwise_xor.hpp | 1 - .../onnx/frontend/src/op/blackmanwindow.cpp | 76 +++++++++---------- .../onnx/frontend/src/op/blackmanwindow.hpp | 1 - 33 files changed, 129 insertions(+), 159 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/abs.hpp b/src/frontends/onnx/frontend/src/op/abs.hpp index 34b21c91920062..135dc4b3116200 100644 --- a/src/frontends/onnx/frontend/src/op/abs.hpp +++ b/src/frontends/onnx/frontend/src/op/abs.hpp @@ -7,12 +7,9 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/abs.hpp" namespace ngraph { namespace onnx_import { @@ -22,7 +19,7 @@ inline OutputVector abs(const Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), 
"consumed_inputs legacy attribute of Abs op is not supported"); - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/acos.hpp b/src/frontends/onnx/frontend/src/op/acos.hpp index ab9c64421a80ce..65181f0601efb6 100644 --- a/src/frontends/onnx/frontend/src/op/acos.hpp +++ b/src/frontends/onnx/frontend/src/op/acos.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/acos.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_7 { inline OutputVector acos(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/acosh.hpp b/src/frontends/onnx/frontend/src/op/acosh.hpp index 745a8fa8c5c2cd..eefa36762db808 100644 --- a/src/frontends/onnx/frontend/src/op/acosh.hpp +++ b/src/frontends/onnx/frontend/src/op/acosh.hpp @@ -7,16 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/acosh.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_9 { inline OutputVector acosh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_9 diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp index 0aec90df094022..29540fa3175df2 100644 --- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp +++ b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp @@ -4,8 +4,10 @@ #include "op/adaptive_avg_pooling2d.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" +#include "openvino/op/adaptive_avg_pool.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -19,7 +21,7 @@ OutputVector adaptive_avg_pooling2d(const Node& node) { CHECK_VALID_NODE(node, num_inputs == 2, "adaptive_avg_pooling2d expects 2 input tensors. 
Got: ", num_inputs); - return {std::make_shared(inputs[0], inputs[1])}; + return {std::make_shared(inputs[0], inputs[1])}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/add.cpp b/src/frontends/onnx/frontend/src/op/add.cpp index 61ac900e731775..3cadf83099eb02 100644 --- a/src/frontends/onnx/frontend/src/op/add.cpp +++ b/src/frontends/onnx/frontend/src/op/add.cpp @@ -4,11 +4,12 @@ #include "op/add.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/shape.hpp" +#include "openvino/op/add.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -18,19 +19,19 @@ OutputVector add(const Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), "consumed_inputs legacy attribute of Add op is not supported"); - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_6 { OutputVector add(const Node& node) { - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_6 namespace set_7 { OutputVector add(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/add.hpp b/src/frontends/onnx/frontend/src/op/add.hpp index 12f9dd548e8008..ed8d9e8aa1a95f 100644 --- a/src/frontends/onnx/frontend/src/op/add.hpp +++ b/src/frontends/onnx/frontend/src/op/add.hpp @@ -7,9 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/affine.cpp b/src/frontends/onnx/frontend/src/op/affine.cpp index 443a353ff853b1..419a3c3f0da14f 100644 --- a/src/frontends/onnx/frontend/src/op/affine.cpp +++ b/src/frontends/onnx/frontend/src/op/affine.cpp @@ -4,9 +4,11 @@ #include "op/affine.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/shape.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/multiply.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -28,8 +30,7 @@ OutputVector affine(const Node& node) { const auto alpha_const = node.get_attribute_as_constant("alpha", data.get_element_type()); const auto beta_const = node.get_attribute_as_constant("beta", data.get_element_type()); - return { - std::make_shared(std::make_shared(data, alpha_const), beta_const)}; + return {std::make_shared(std::make_shared(data, alpha_const), beta_const)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/affine.hpp b/src/frontends/onnx/frontend/src/op/affine.hpp index ed3e216357e1f3..d69188ccf635a3 100644 --- a/src/frontends/onnx/frontend/src/op/affine.hpp +++ b/src/frontends/onnx/frontend/src/op/affine.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/and.hpp b/src/frontends/onnx/frontend/src/op/and.hpp index f40216e8e14981..c10dd2740896ae 100644 --- a/src/frontends/onnx/frontend/src/op/and.hpp +++ b/src/frontends/onnx/frontend/src/op/and.hpp @@ -7,26 +7,21 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START 
-#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/and.hpp" #include "onnx_import/core/node.hpp" -#include "utils/common.hpp" +#include "openvino/op/logical_and.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector logical_and(const Node& node) { - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { inline OutputVector logical_and(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/argmax.cpp b/src/frontends/onnx/frontend/src/op/argmax.cpp index 5e9fae310c1cfd..0147316231c13e 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.cpp +++ b/src/frontends/onnx/frontend/src/op/argmax.cpp @@ -5,7 +5,6 @@ #include "op/argmax.hpp" #include "exceptions.hpp" -#include "onnx_import/core/node.hpp" #include "utils/arg_min_max_factory.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/argmax.hpp b/src/frontends/onnx/frontend/src/op/argmax.hpp index c88d63dc3fc8de..8e2aec04362cf1 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.hpp +++ b/src/frontends/onnx/frontend/src/op/argmax.hpp @@ -7,29 +7,28 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX ArgMax operation to an nGraph node. +/// \brief Convert ONNX ArgMax operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. OutputVector argmax(const Node& node); } // namespace set_1 namespace set_12 { -/// \brief Convert ONNX ArgMax operation to an nGraph node. +/// \brief Convert ONNX ArgMax operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. OutputVector argmax(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/argmin.cpp b/src/frontends/onnx/frontend/src/op/argmin.cpp index 8c69cb77539742..62d8d272b7c5fb 100644 --- a/src/frontends/onnx/frontend/src/op/argmin.cpp +++ b/src/frontends/onnx/frontend/src/op/argmin.cpp @@ -5,7 +5,6 @@ #include "op/argmin.hpp" #include "exceptions.hpp" -#include "onnx_import/core/node.hpp" #include "utils/arg_min_max_factory.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/argmin.hpp b/src/frontends/onnx/frontend/src/op/argmin.hpp index 7bacbc7b42caf5..1a3ed89a45cc87 100644 --- a/src/frontends/onnx/frontend/src/op/argmin.hpp +++ b/src/frontends/onnx/frontend/src/op/argmin.hpp @@ -7,29 +7,28 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX ArgMin operation to an nGraph node. +/// \brief Convert ONNX ArgMin operation to an OV node. 
/// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMin operation. OutputVector argmin(const Node& node); } // namespace set_1 namespace set_12 { -/// \brief Convert ONNX ArgMin operation to an nGraph node. +/// \brief Convert ONNX ArgMin operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. OutputVector argmin(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/asin.hpp b/src/frontends/onnx/frontend/src/op/asin.hpp index b4a9590ad17999..b8845871268d85 100644 --- a/src/frontends/onnx/frontend/src/op/asin.hpp +++ b/src/frontends/onnx/frontend/src/op/asin.hpp @@ -7,19 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/asin.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/asin.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector asin(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/asinh.hpp b/src/frontends/onnx/frontend/src/op/asinh.hpp index 317c4c4e183fe2..e402d6e29d81f0 100644 --- a/src/frontends/onnx/frontend/src/op/asinh.hpp +++ b/src/frontends/onnx/frontend/src/op/asinh.hpp @@ -7,16 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/asinh.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector asinh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/atan.hpp b/src/frontends/onnx/frontend/src/op/atan.hpp index 414256dc420c90..aa251a8210ed02 100644 --- a/src/frontends/onnx/frontend/src/op/atan.hpp +++ b/src/frontends/onnx/frontend/src/op/atan.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/atan.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector atan(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/atanh.hpp b/src/frontends/onnx/frontend/src/op/atanh.hpp index ef3bd26c48ab7f..c7879925a0f7ee 100644 --- a/src/frontends/onnx/frontend/src/op/atanh.hpp +++ b/src/frontends/onnx/frontend/src/op/atanh.hpp @@ -7,16 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/atanh.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector 
atanh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/aten.cpp b/src/frontends/onnx/frontend/src/op/aten.cpp index d8103000b5d72a..380718b2745674 100644 --- a/src/frontends/onnx/frontend/src/op/aten.cpp +++ b/src/frontends/onnx/frontend/src/op/aten.cpp @@ -4,12 +4,21 @@ #include "op/aten.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "onnx_import/core/node.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/embeddingbag_offsets_sum.hpp" +#include "openvino/op/embeddingbag_packedsum.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" #include "openvino/opsets/opset8.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -41,11 +50,11 @@ OutputVector aten(const Node& node) { Output embedding_bag; if (is_packed_two_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1]); + embedding_bag = std::make_shared(inputs[0], inputs[1]); } else if (is_packed_three_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[3]); + embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[3]); } else if (is_offsets_three_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[2]); + embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[2]); } else if (inputs.size() >= 4) { // Need to expand embedding table with zeros (default values for empty bags) const auto& emb_tbl_in = inputs[0]; @@ -56,30 +65,28 @@ OutputVector aten(const Node& node) { const auto data_type = emb_tbl_in.get_element_type(); const auto ind_type = indices_in.get_element_type(); - const auto zero_const = std::make_shared(ind_type, Shape{}, 0); + const auto zero_const = std::make_shared(ind_type, Shape{}, 0); // Shape aligned node, filled with zeros - const auto zero_of_data_type_const = std::make_shared(data_type, Shape{1}, 0); - const auto weights_shape_node = std::make_shared(emb_tbl_in, ind_type); - const auto weights_last_dim_idx = std::make_shared(element::i32, Shape{1}, -1); + const auto zero_of_data_type_const = std::make_shared(data_type, Shape{1}, 0); + const auto weights_shape_node = std::make_shared(emb_tbl_in, ind_type); + const auto weights_last_dim_idx = std::make_shared(element::i32, Shape{1}, -1); const auto weights_last_dim = - std::make_shared(weights_shape_node, weights_last_dim_idx, zero_const); - const auto zero_col_node = - std::make_shared(zero_of_data_type_const, weights_last_dim); - const auto default_embeddings_node = std::make_shared(zero_col_node, zero_const); + std::make_shared(weights_shape_node, weights_last_dim_idx, zero_const); + const auto zero_col_node = std::make_shared(zero_of_data_type_const, weights_last_dim); + const auto default_embeddings_node = std::make_shared(zero_col_node, zero_const); // Expanded embedding table weights - const auto weights_concat = - std::make_shared(OutputVector{emb_tbl_in, default_embeddings_node}, 0); + const auto weights_concat = std::make_shared(OutputVector{emb_tbl_in, default_embeddings_node}, 0); // Index in embedding table to fill empty bags - const auto weights_first_dim = std::make_shared( - std::make_shared(weights_shape_node, 
zero_const, zero_const)); - - embedding_bag = std::make_shared(weights_concat, - indices_in, - offsets_in, - weights_first_dim, // default index - per_sample_weights_in); + const auto weights_first_dim = + std::make_shared(std::make_shared(weights_shape_node, zero_const, zero_const)); + + embedding_bag = std::make_shared(weights_concat, + indices_in, + offsets_in, + weights_first_dim, // default index + per_sample_weights_in); } else { OPENVINO_THROW("Unsupported inputs configuration for ATen `embedding_bag` operation."); diff --git a/src/frontends/onnx/frontend/src/op/aten.hpp b/src/frontends/onnx/frontend/src/op/aten.hpp index 9420bcfd16b6b0..7c6d2198651710 100644 --- a/src/frontends/onnx/frontend/src/op/aten.hpp +++ b/src/frontends/onnx/frontend/src/op/aten.hpp @@ -7,8 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/average_pool.cpp b/src/frontends/onnx/frontend/src/op/average_pool.cpp index ff9bcb9bfbf791..d27df456c9993c 100644 --- a/src/frontends/onnx/frontend/src/op/average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/average_pool.cpp @@ -4,7 +4,6 @@ #include "op/average_pool.hpp" -#include "ngraph/node.hpp" #include "utils/pooling_factory.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/average_pool.hpp b/src/frontends/onnx/frontend/src/op/average_pool.hpp index b3b0d086b6c3d4..03d2bfd36dc9b0 100644 --- a/src/frontends/onnx/frontend/src/op/average_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/average_pool.hpp @@ -7,18 +7,17 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX AveragePool operation to an nGraph node. +/// \brief Convert ONNX AveragePool operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of ONNX AveragePool +/// \return The vector containing OV nodes producing output of ONNX AveragePool /// operation. 
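///
/// (Editorial note, not part of the upstream patch: the translation itself
/// is delegated to pooling::PoolingFactory from utils/pooling_factory.hpp,
/// which reads the ONNX kernel_shape/strides/pads attributes and, assuming
/// the factory's current behaviour, emits the matching ov::op AvgPool node.)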
OutputVector average_pool(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.cpp b/src/frontends/onnx/frontend/src/op/batch_norm.cpp index db49ff8d74e7a4..e187128e8bc1f8 100644 --- a/src/frontends/onnx/frontend/src/op/batch_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/batch_norm.cpp @@ -7,9 +7,11 @@ #include #include -#include "default_opset.hpp" #include "exceptions.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/batch_norm.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -22,8 +24,8 @@ OutputVector batch_norm(const Node& node) { auto x = inputs.at(0); auto scale = inputs.at(1); auto bias = inputs.at(2); - Output mean; - Output var; + Output mean; + Output var; double epsilon{node.get_attribute_value("epsilon", 1e-5)}; @@ -40,7 +42,7 @@ OutputVector batch_norm(const Node& node) { if (inputs.size() >= 5) { mean = inputs.at(3); var = inputs.at(4); - return {std::make_shared(x, scale, bias, mean, var, epsilon), + return {std::make_shared(x, scale, bias, mean, var, epsilon), after_bn_mean, after_bn_var, saved_mean, @@ -67,7 +69,7 @@ OutputVector batch_norm(const Node& node) { CHECK_VALID_NODE(node, node.get_outputs_size() == 1, "Training mode of BatchNormalization is not supported."); - return {std::make_shared(x, scale, bias, mean, var, epsilon)}; + return {std::make_shared(x, scale, bias, mean, var, epsilon)}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.hpp b/src/frontends/onnx/frontend/src/op/batch_norm.hpp index bcbf7b4cef1fdc..e8bbbebe828481 100644 --- a/src/frontends/onnx/frontend/src/op/batch_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/batch_norm.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/bitshift.cpp b/src/frontends/onnx/frontend/src/op/bitshift.cpp index 87354dbe88539d..317ad25ee57c33 100644 --- a/src/frontends/onnx/frontend/src/op/bitshift.cpp +++ b/src/frontends/onnx/frontend/src/op/bitshift.cpp @@ -4,9 +4,13 @@ #include "op/bitshift.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/shape.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/power.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,8 +18,8 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector bitshift(const Node& node) { - const Output input_x = node.get_ng_inputs().at(0); - const Output input_y = node.get_ng_inputs().at(1); + const Output input_x = node.get_ng_inputs().at(0); + const Output input_y = node.get_ng_inputs().at(1); std::string direction = node.get_attribute_value("direction", ""); @@ -27,14 +31,12 @@ OutputVector bitshift(const Node& node) { "attribute. 
Given: ", direction); - auto shift = std::make_shared( - default_opset::Constant::create(input_y.get_element_type(), Shape{1}, {2}), - input_y); + auto shift = std::make_shared(v0::Constant::create(input_y.get_element_type(), Shape{1}, {2}), input_y); if (direction == "RIGHT") { - return {std::make_shared(input_x, shift)}; + return {std::make_shared(input_x, shift)}; } else { - return {std::make_shared(input_x, shift)}; + return {std::make_shared(input_x, shift)}; } } diff --git a/src/frontends/onnx/frontend/src/op/bitshift.hpp b/src/frontends/onnx/frontend/src/op/bitshift.hpp index b6ac40023d35f1..24caba1911b770 100644 --- a/src/frontends/onnx/frontend/src/op/bitshift.hpp +++ b/src/frontends/onnx/frontend/src/op/bitshift.hpp @@ -7,9 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp index 54961812505cb5..c82d7dbac27b42 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp @@ -5,7 +5,7 @@ #include "op/bitwise_and.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" +#include "openvino/op/bitwise_and.hpp" using namespace ov::op; diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp index d0f66569c95228..e1d81434342855 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp index 38d9f04a48b0fa..adb642d2195391 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp @@ -5,7 +5,7 @@ #include "op/bitwise_or.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" +#include "openvino/op/bitwise_or.hpp" using namespace ov::op; diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp index 8bb00623c48a0c..22c17690ee1cc2 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp index 0fc5e36cd629a8..315538f5c21cac 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp @@ -5,7 +5,7 @@ #include "op/bitwise_xor.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" +#include "openvino/op/bitwise_xor.hpp" using namespace ov::op; diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp index fc0ae510742994..8f0d8a364cd5e1 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include 
"ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index d4bb144725dbfe..b50af889a5377b 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -7,11 +7,18 @@ #include -#include - -#include "default_opset.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/cos.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -27,57 +34,46 @@ OutputVector blackmanwindow(const Node& node) { // Weights as described in ONNX BlackmanWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow - const auto float_size = std::make_shared(size, ov::element::f32); - const auto a_0 = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.42f}); - const auto a_1 = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{-0.50f}); - const auto a_2 = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.08f}); + const auto float_size = std::make_shared(size, ov::element::f32); + const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.42f}); + const auto a_1 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{-0.50f}); + const auto a_2 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.08f}); - const auto start = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto one_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto two_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); - const auto four_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{4.0f}); - const auto range = std::make_shared(start, size, one_const, ov::element::f32); - const auto pi = - default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto two_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto four_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{4.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); std::shared_ptr factor_1, factor_2; if (periodic) { - factor_1 = std::make_shared( + factor_1 = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - float_size)); - factor_2 = std::make_shared( + std::make_shared(std::make_shared(pi, two_const), float_size)); + factor_2 = std::make_shared( range, - std::make_shared(std::make_shared(pi, four_const), - float_size)); + std::make_shared(std::make_shared(pi, four_const), float_size)); } else { - factor_1 = std::make_shared( + factor_1 = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - std::make_shared(float_size, one_const))); - factor_2 = std::make_shared( + std::make_shared(std::make_shared(pi, two_const), 
From 6f5329250b666767a042d954a51a578af29b7d1b Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Wed, 17 Jan 2024 15:07:34 +0100
Subject: [PATCH 045/122] [PT FE] Fix mask case of aten::index_put_ (#22190)

---
 .../transforms/aten_index_put_replacer.cpp    | 15 ++--------
 .../pytorch_tests/test_index_put_.py          | 30 ++++++++++++++++---
 2 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
index 141243a5a6e3af..8d2b3975769917 100644
--- a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
+++ b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
@@ -134,21 +134,10 @@ AtenIndexPutReplacer::AtenIndexPutReplacer() {
         auto input_shape = rg.make<v3::ShapeOf>(input, element::i32);
         auto input_rank = rg.make<v3::ShapeOf>(input_shape, element::i32);
         auto one_const = v0::Constant::create(element::i32, Shape{1}, {1});
-        auto expand_shape = rg.make<v3::Broadcast>(one_const, input_rank, BroadcastType::BIDIRECTIONAL);
-        auto expanded_mask = rg.make<v3::Broadcast>(index, expand_shape, BroadcastType::BIDIRECTIONAL);
-        auto nonzero = rg.make<v3::NonZero>(expanded_mask, element::i32);
+        auto nonzero = rg.make<v3::NonZero>(index, element::i32);
         auto input_order = v0::Constant::create(element::i32, Shape{2}, {1, 0});
         index = rg.make<v1::Transpose>(nonzero, input_order);
-        // source can be arbitary shape, select only relevant data
-        auto const_minus_1 = v0::Constant::create(element::i32, Shape{1}, {-1});
-        auto flatten_values = rg.make<v1::Reshape>(values, const_minus_1, false);
-        auto const_0 = v0::Constant::create(element::i32, Shape{1}, {0});
-
-        auto index_shape = rg.make<v3::ShapeOf>(index, element::i32);
-        auto index_dim_zero = rg.make<v8::Gather>(index_shape, const_0, const_0);
-        auto slice_steps = v0::Constant::create(element::i32, Shape{1}, {1});
-        auto sliced_source = rg.make<v8::Slice>(flatten_values, const_0, index_dim_zero, slice_steps, const_0);
-        auto result = rg.make<v3::ScatterNDUpdate>(input, index, sliced_source);
+        auto result = rg.make<v3::ScatterNDUpdate>(input, index, values);
         copy_runtime_info_and_name(index_op, rg.get(), rt_copy_from);
         replace_node(index_op, result);
         return true;
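The simplification above lowers the boolean-mask case of aten::index_put_ to
NonZero -> Transpose -> ScatterNDUpdate, taking the update values as-is: PyTorch
already guarantees exactly one update row per True mask element, so the old
broadcast-and-slice logic was unnecessary. A hedged PyTorch reproducer of the
mask pattern being fixed, mirroring the Kosmos-style test added below (assumes
torch is installed; not part of the patch itself):

    import torch

    class MaskIndexPut(torch.nn.Module):
        def forward(self, x, mask, updates):
            # aten::index_put_ with a boolean mask: one row of `updates`
            # is written for every True element of `mask`.
            x[mask.to(torch.bool)] = updates
            return x

    x = torch.randn(1, 30, 50)
    mask = torch.randint(0, 2, (1, 30), dtype=torch.bool)
    updates = torch.randn(int(mask.sum()), 50)
    print(MaskIndexPut()(x.clone(), mask, updates).shape)  # torch.Size([1, 30, 50])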
diff --git a/tests/layer_tests/pytorch_tests/test_index_put_.py b/tests/layer_tests/pytorch_tests/test_index_put_.py
index e367d2a6d6805d..dc185596a6812a 100644
--- a/tests/layer_tests/pytorch_tests/test_index_put_.py
+++ b/tests/layer_tests/pytorch_tests/test_index_put_.py
@@ -178,7 +178,6 @@ def test_nonzero_index_put_(self, ie_device, precision, ir_version, input_data,
         self.indices_0 = indices[0]
         self.indices_1 = indices[1]
         self._test(*self.create_model(accumulate), ie_device, precision, ir_version, trace_model=True, use_convert_model=True)
-
     @pytest.mark.nightly
     @pytest.mark.precommit
@@ -190,10 +189,9 @@ def test_nonzero_index_put_different_ranks(self, ie_device, precision, ir_versio
         self._test(*self.create_model(False), ie_device, precision,
                    ir_version, trace_model=True, use_convert_model=True)
-
-
 class TestMask_IndexPut(PytorchLayerTest):
     def _prepare_input(self):
-        return (np.random.randn(100, 5).astype(np.float32),np.random.randn(100, 5).astype(np.float32))
+        return (np.random.randn(100, 5).astype(np.float32), np.random.randn(100, 5).astype(np.float32))
 
     def create_model(self):
         class aten_index_put_mask(torch.nn.Module):
@@ -208,4 +206,28 @@ def forward(self, x, y):
     @pytest.mark.nightly
     @pytest.mark.precommit
     def test_nonzero_index_put_(self, ie_device, precision, ir_version):
-        self._test(*self.create_model(), ie_device, precision, ir_version, trace_model=True, use_convert_model=True)
+        self._test(*self.create_model(), ie_device, precision,
+                   ir_version, trace_model=True, use_convert_model=True)
+
+
+class TestMaskKosmos_IndexPut(PytorchLayerTest):
+    def _prepare_input(self):
+        mask = np.random.randint(0, 2, [1, 30]).astype(np.bool_)
+        num = mask.sum()
+        return (np.random.randn(1, 30, 50).astype(np.float32), mask.astype(np.int32), np.random.randn(num, 50).astype(np.float32))
+
+    def create_model(self):
+        class aten_index_put_mask(torch.nn.Module):
+            def forward(self, x, y, z):
+                x[y.to(dtype=torch.bool)] = z
+                return x
+
+        ref_net = None
+
+        return aten_index_put_mask(), ref_net, "aten::index_put_"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_nonzero_kosmos_index_put_(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision,
+                   ir_version, trace_model=True, use_convert_model=True)

From 6bd90554401ae2d3343974763a5eb79cf0032c63 Mon Sep 17 00:00:00 2001
From: Jan Iwaszkiewicz
Date: Wed, 17 Jan 2024 15:34:06 +0100
Subject: [PATCH 046/122] [PyOV] Align add_extension methods across Python
 APIs (#22166)

---
 .../openvino/runtime/utils/node_factory.py    | 42 ++++++++++++++++---
 .../src/pyopenvino/frontend/frontend.cpp      | 18 +++++++-
 .../src/pyopenvino/graph/node_factory.cpp     | 38 ++++++++++++++++-
 .../pyngraph_mock_frontend_api.cpp            |  3 ++
 .../tests/test_graph/test_node_factory.py     | 15 +++++++
 .../tests_python/test_frontend_extension.py   | 25 +++++++++++
 6 files changed, 133 insertions(+), 8 deletions(-)
diff --git a/src/bindings/python/src/openvino/runtime/utils/node_factory.py b/src/bindings/python/src/openvino/runtime/utils/node_factory.py
index b029325268d467..25daf739223dba 100644
--- a/src/bindings/python/src/openvino/runtime/utils/node_factory.py
+++ b/src/bindings/python/src/openvino/runtime/utils/node_factory.py
@@ -4,13 +4,13 @@
 
 import logging as log
 
-from functools import partial
+from functools import partial, singledispatchmethod
 from typing import Any, Dict, List, Optional, Union
 from pathlib import Path
 
 from openvino._pyopenvino import NodeFactory as _NodeFactory
 
-from openvino.runtime import Node, Output
+from openvino.runtime import Node, Output, Extension
 
 from openvino.runtime.exceptions import UserInputError
 
@@ -60,8 +60,14 @@ def create(
 
         return node
 
-    def add_extension(self, lib_path: Union[Path, str]) -> None:
-        """Add custom operations from extension library.
+    @singledispatchmethod
+    def add_extension(self, extension: Union[Path, str, Extension, List[Extension]]) -> None:
+        raise TypeError(f"Unknown argument type: {type(extension)}")
+
+    @add_extension.register(Path)
+    @add_extension.register(str)
+    def _(self, lib_path: Union[Path, str]) -> None:
+        """Add custom operations from an extension.
 
         Extends operation types available for creation by operations
         loaded from prebuilt C++ library. Enables instantiation of custom
@@ -69,7 +75,7 @@ def add_extension(self, lib_path: Union[Path, str]) -> None:
         operation classes. Other types of extensions, e.g. conversion
         extensions, if they are exposed in the library, are ignored.
 
-        In case if an extension operation type from a library match
+        If an extension operation type from the extension matches
         one of existing operations registered before (from the standard
         OpenVINO opset or from another extension loaded earlier), a new
         operation overrides an old operation.
@@ -84,6 +90,32 @@ def add_extension(self, lib_path: Union[Path, str]) -> None:
         """
         self.factory.add_extension(lib_path)
 
+    @add_extension.register(Extension)
+    @add_extension.register(list)
+    def _(self, extension: Union[Extension, List[Extension]]) -> None:
+        """Add custom operations from extension objects.
+
+        Extends operation types available for creation by operations
+        loaded from prebuilt C++ library. Enables instantiation of custom
+        operations exposed in that library without direct use of
+        operation classes. Other types of extensions, e.g. conversion
+        extensions, if they are exposed in the library, are ignored.
+
+        If an extension operation type from a library matches
+        one of existing operations registered before (from the standard
+        OpenVINO opset or from another extension loaded earlier), a new
+        operation overrides an old operation.
+
+        Version of an operation is ignored: an operation with a given type and
+        a given version/opset will override operation with the same type but
+        different version/opset in the same NodeFactory instance.
+        Use separate libraries and NodeFactory instances to differentiate
+        versions/opsets.
+
+        :param extension: A single Extension or list of Extensions.
+        """
+        self.factory.add_extension(extension)
+
     @staticmethod
     def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]:
         outputs = []
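With the singledispatch overloads above, NodeFactory now accepts either a library
path or in-memory Extension objects. A usage sketch (illustrative only; EmptyExtension
is a stand-in for a real operation extension and "libcustom_ops.so" is a hypothetical
library name, not something shipped with OpenVINO):

    from openvino.runtime import Extension
    from openvino.runtime.utils.node_factory import NodeFactory

    class EmptyExtension(Extension):
        def __init__(self) -> None:
            super().__init__()

    factory = NodeFactory()
    factory.add_extension(EmptyExtension())                      # single Extension
    factory.add_extension([EmptyExtension(), EmptyExtension()])  # list of Extensions
    # factory.add_extension("libcustom_ops.so")                  # str/Path overload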
diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp
index 1600e57413dc62..c643ae7a4d16ad 100644
--- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp
+++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp
@@ -165,13 +165,27 @@ void regclass_frontend_FrontEnd(py::module m) {
         )");
 
     fem.def("add_extension",
-            static_cast<void (FrontEnd::*)(const std::string& extension_path)>(&FrontEnd::add_extension),
+            static_cast<void (FrontEnd::*)(const std::vector<std::shared_ptr<ov::Extension>>& extension)>(
+                &FrontEnd::add_extension),
             R"(
+            Add extensions defined by objects inheriting from Extension
+            used in order to extend capabilities of Frontend.
+
+            :param extension: Provided extension objects.
+            :type extension: List[Extension]
+        )");
+
+    fem.def(
+        "add_extension",
+        [](FrontEnd& self, const py::object& extension_path) {
+            return self.add_extension(Common::utils::convert_path_to_string(extension_path));
+        },
+        R"(
             Add extension defined in external library indicated by a extension_path
             used in order to extend capabilities of Frontend.
 
            :param extension_path: A path to extension.
-           :type extension_path: str
+           :type extension_path: str, Path
        )");
 
    fem.def("__repr__", [](const FrontEnd& self) -> std::string {
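The same alignment on the FrontEnd side means a frontend loaded from Python can take
a single Extension, a list of extensions, or a library path. A hedged sketch of what
this enables, mirroring the tests added later in this patch (the op names and converter
body are made up for illustration):

    from openvino.frontend import FrontEndManager, NodeContext
    from openvino.frontend.tensorflow import ConversionExtension

    def my_converter(node: NodeContext):
        # a real converter would build and return OpenVINO outputs here
        return []

    fe = FrontEndManager().load_by_framework("tf")
    fe.add_extension(ConversionExtension("MyCustomOp", my_converter))   # single extension
    fe.add_extension([ConversionExtension("OtherOp", my_converter)])    # list of extensions
    # fe.add_extension("libcustom_ops.so")                              # path overload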
diff --git a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp
index 6aec4cf32f7c85..67d05982555437 100644
--- a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp
+++ b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp
@@ -94,10 +94,37 @@ class NodeFactory {
         return op_node;
     }
 
+    void add_extension(const std::shared_ptr<ov::Extension>& extension) {
+        auto so_extension = std::dynamic_pointer_cast<ov::detail::SOExtension>(extension);
+        ov::Extension::Ptr extension_extracted = so_extension ? so_extension->extension() : extension;
+        if (auto op_extension = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension_extracted)) {
+            auto op_type = op_extension->get_type_info().name;
+            // keep so extension instead of extension_extracted to hold loaded library
+            m_opset_so_extensions[op_type] = so_extension;
+        }
+    }
+
+    void add_extension(const std::vector<std::shared_ptr<ov::Extension>>& extensions) {
+        // Load extension library, search for operation extensions (derived from ov::BaseOpExtension) and keep
+        // them in m_opset_so_extensions for future use in create methods.
+        // NodeFactory provides a simplified API for node creation without involving version of operation.
+        // It means all operations share the same name space and real operation versions (opsets) from extension
+        // library are ignored.
+        for (auto extension : extensions) {
+            auto so_extension = std::dynamic_pointer_cast<ov::detail::SOExtension>(extension);
+            ov::Extension::Ptr extension_extracted = so_extension ? so_extension->extension() : extension;
+            if (auto op_extension = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension_extracted)) {
+                auto op_type = op_extension->get_type_info().name;
+                // keep so extension instead of extension_extracted to hold loaded library
+                m_opset_so_extensions[op_type] = so_extension;
+            }
+        }
+    }
+
     void add_extension(const std::string& lib_path) {
         // Load extension library, search for operation extensions (derived from ov::BaseOpExtension) and keep
         // them in m_opset_so_extensions for future use in create methods.
-        // NodeFactory provides a simplified API for node creation withotu involving version of operation.
+        // NodeFactory provides a simplified API for node creation without involving version of operation.
         // It means all operations share the same name space and real operation versions (opsets) from extension
         // library are ignored.
         auto extensions = ov::detail::load_extensions(lib_path);
@@ -148,6 +175,15 @@ void regclass_graph_NodeFactory(py::module m) {
         return self.create(name, arguments, attributes);
     });
 
+    node_factory.def("add_extension", [](NodeFactory& self, const std::shared_ptr<ov::Extension>& extension) {
+        return self.add_extension(extension);
+    });
+
+    node_factory.def("add_extension",
+                     [](NodeFactory& self, const std::vector<std::shared_ptr<ov::Extension>>& extension) {
+                         return self.add_extension(extension);
+                     });
+
     node_factory.def("add_extension", [](NodeFactory& self, const py::object& lib_path) {
         return self.add_extension(Common::utils::convert_path_to_string(lib_path));
     });
diff --git a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp
index 41b60a7e9c62f1..c76fcacbf1d891 100644
--- a/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp
+++ b/src/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp
@@ -119,6 +119,9 @@ static void register_frontend_wrappers(py::module m) {
     fe_tensorflow.def(
         "add_extension",
         static_cast<void (FrontEnd::*)(const std::shared_ptr<ov::Extension>& extension)>(&FrontEnd::add_extension));
+    fe_tensorflow.def("add_extension",
+                      static_cast<void (FrontEnd::*)(const std::vector<std::shared_ptr<ov::Extension>>& extension)>(
+                          &FrontEnd::add_extension));
     fe_tensorflow.def("check_conversion_extension_registered",
                       [](FrontEndWrapperTensorflow& self, const std::string& name) {
                           return self.check_conversion_extension_registered(name);
diff --git a/src/bindings/python/tests/test_graph/test_node_factory.py b/src/bindings/python/tests/test_graph/test_node_factory.py
index 8c3dd41e123ab3..72ed67db763cf2 100644
--- a/src/bindings/python/tests/test_graph/test_node_factory.py
+++ b/src/bindings/python/tests/test_graph/test_node_factory.py
@@ -6,6 +6,7 @@
 import pytest
 from sys import platform
 from openvino import compile_model, Model
+from openvino.runtime import Extension
 import openvino.runtime.opset8 as ov
 from openvino.runtime.exceptions import UserInputError
 from openvino.runtime.utils.node_factory import NodeFactory
@@ -122,3 +123,17 @@ def test_extension_added_from_library():
     del identity
 
     assert np.array_equal(tensor, result[0])
+
+
+def test_add_extension():
+    class EmptyExtension(Extension):
+        def __init__(self) -> None:
+            super().__init__()
+
+    factory = NodeFactory()
+    factory.add_extension(EmptyExtension())
+    factory.add_extension([EmptyExtension(), EmptyExtension()])
+
+    data = ov.parameter([1, 2], dtype=np.float32)
+    param = factory.create("Parameter", data.outputs())
+    assert param is not None
diff --git a/src/frontends/onnx/tests/tests_python/test_frontend_extension.py b/src/frontends/onnx/tests/tests_python/test_frontend_extension.py
index 6cdc890465c374..c747919c8f6600 100644
--- a/src/frontends/onnx/tests/tests_python/test_frontend_extension.py
+++ b/src/frontends/onnx/tests/tests_python/test_frontend_extension.py
@@ -64,6 +64,31 @@ def custom_converter(node: NodeContext):
     assert fe.check_conversion_extension_registered("CustomConverter")
 
 
+@skip_if_frontend_is_disabled(TENSORFLOW_FRONTEND_NAME)
+@skip_if_tensorflow_not_install_by_wheel_pkg()
+def test_tensorflow_multiple_extensions_fe_wrapper():
+    from openvino.frontend.tensorflow import ConversionExtension
+    from openvino.frontend import NodeContext
+
+    fe = FrontEndWrapperTensorflow()
+
+    def custom_converter_one(node: NodeContext):
+        node.get_input(0)
+        node.get_attribute("alpha")
+
+    def custom_converter_two(node: NodeContext):
+        node.get_input(0)
+        node.get_attribute("beta")
+
+    extensions = [
ConversionExtension("CustomConverterOne", custom_converter_one), + ConversionExtension("CustomConverterTwo", custom_converter_two), + ] + fe.add_extension(extensions) + assert fe.check_conversion_extension_registered("CustomConverterOne") + assert fe.check_conversion_extension_registered("CustomConverterTwo") + + @skip_if_frontend_is_disabled(PADDLE_FRONTEND_NAME) def test_paddle_conversion_extension_fe_wrapper(): from openvino.frontend.paddle import ConversionExtension From 28b6fc93945efac68f1fc472cf36a2821e7890bf Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Wed, 17 Jan 2024 16:52:19 +0100 Subject: [PATCH 047/122] uncomment persimmon-8b-base, tvp-base, pvt-tiny-224 (#22212) --- tests/model_hub_tests/torch_tests/hf_transformers_models | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models index 2cec1a1b744901..fddf1afdcfc172 100644 --- a/tests/model_hub_tests/torch_tests/hf_transformers_models +++ b/tests/model_hub_tests/torch_tests/hf_transformers_models @@ -18,7 +18,7 @@ anugunj/omnivore-swinL-in21k,omnivore,skip,Load problem apple/mobilevitv2-1.0-imagenet1k-256,mobilevitv2,xfail,Unsupported op aten::col2im ArthurZ/jukebox_prior_0,jukebox_prior,skip,Load problem ArthurZ/jukebox-vqvae,jukebox_vqvae,skip,Load problem -ArthurZ/persimmon-8b-base,persimmon,skip,Load problem +ArthurZ/persimmon-8b-base,persimmon ashishpatel26/span-marker-bert-base-fewnerd-coarse-super,span-marker,skip,Load problem asi/albert-act-tiny,albert_act,skip,Load problem BAAI/AltCLIP,altclip @@ -140,7 +140,7 @@ hf-internal-testing/tiny-random-mbart,mbart,xfail,Compile error: CPU plug-in doe hf-internal-testing/tiny-random-MobileNetV2Model,mobilenet_v2 hf-internal-testing/tiny-random-mobilevit,mobilevit hf-internal-testing/tiny-random-MPNetModel,mpnet -hf-internal-testing/tiny-random-MptForCausalLM,mpt,skip,Load problem +hf-internal-testing/tiny-random-MptForCausalLM,mpt hf-internal-testing/tiny-random-NllbMoeForConditionalGeneration,nllb_moe,skip,Load problem hf-internal-testing/tiny-random-NystromformerModel,nystromformer hf-internal-testing/tiny-random-PegasusModel,pegasus,skip,Load problem @@ -178,7 +178,7 @@ ibm/MoLM-350M-4B,moduleformer,skip,Load problem IDEA-CCNL/Randeng-Deltalm-362M-En-Zh,Deltalm,skip,Load problem Inderpreet01/seaformer-semantic-segmentation-large,seaformer,skip,Load problem Intel/dpt-hybrid-midas,dpt -Intel/tvp-base,tvp,skip,Load problem +# Intel/tvp-base,tvp,skip,Load problem # takes too long isemmanuelolowe/code-embedder,instruct-codebert,skip,Load problem isemmanuelolowe/instruct-codet5-5,instruct-codet5,skip,Load problem jaketae/fastspeech2-ljspeech,fastspeech2,skip,Load problem @@ -412,6 +412,6 @@ youzanai/clip-product-title-chinese,clip_chinese_model,skip,Load problem Yova/SmallCapOPT7M,smallcap,skip,Load problem yusufani/trclip-vitl14-e10,trclip,skip,Load problem yysung53/dpr,text_similarity,skip,Load problem -Zetatech/pvt-tiny-224,pvt,skip,Load problem +Zetatech/pvt-tiny-224,pvt ZinengTang/tvlt-base,tvlt,xfail,Conversion is failed for aten::cat: Argument element types are inconsistent zuppif/resnetd-18,resnetd,skip,Load problem From c9763240fa9f8b37b7612e6e44b17110e15d79d8 Mon Sep 17 00:00:00 2001 From: Rupesh Sreeraman Date: Wed, 17 Jan 2024 21:24:10 +0530 Subject: [PATCH 048/122] [ONNX] Extend ONNX Frontend with BitwiseNot-18 operator (#21872) * [ONNX] Extend ONNX Frontend with BitwiseNot-18 operator * Updated bitwise_not implementation * Enabled 
bitwise_not_2d and bitwise_not_4d tests

---------

Co-authored-by: Katarzyna Mitrus
---
 .../onnx/frontend/src/op/bitwise_not.cpp      | 23 +++++++++++
 .../onnx/frontend/src/op/bitwise_not.hpp      | 26 ++++++++++++
 .../onnx/frontend/src/ops_bridge.cpp          |  2 +
 .../onnx/tests/models/bitwise_not.prototxt    | 41 +++++++++++++++++++
 src/frontends/onnx/tests/onnx_import.in.cpp   | 10 +++++
 .../onnx/tests/tests_python/test_backend.py   |  2 -
 6 files changed, 102 insertions(+), 2 deletions(-)
 create mode 100644 src/frontends/onnx/tests/models/bitwise_not.prototxt

diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp
new file mode 100644
index 00000000000000..403a65c86ab287
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op/bitwise_not.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "default_opset.hpp"
+
+using namespace ov::op;
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector bitwise_not(const Node& node) {
+    return {std::make_shared<v13::BitwiseNot>(node.get_ng_inputs().at(0))};
+}
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
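The expected outputs in the test added later in this patch follow two's-complement
semantics: for signed integers, bitwise NOT is ~x == -x - 1. A small NumPy check
(illustrative only; numpy is assumed to be available):

    import numpy as np

    x = np.array([5, 10, 200, 35, 1], dtype=np.int64)
    print(np.bitwise_not(x))  # [  -6  -11 -201  -36   -2]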
"BitwiseNot" + } + name: "BitwiseNotGraph" + input { + name: "x" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 5 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 5 + } + } + } + } + } +} +opset_import { + domain: "" + version: 16 +} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 5269cf3b0ce355..6d014e79ad7f3e 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6219,3 +6219,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_bitwise_xor_broadcast_condition) { test_case.run(); } + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_bitwise_not) { + auto model = convert_model("bitwise_not.onnx"); + + auto test_case = ov::test::TestCase(model, s_device); + test_case.add_input(Shape{5}, {5, 10, 200, 35, 1}); + test_case.add_expected_output(Shape{5}, {-6, -11, -201, -36, -2}); + + test_case.run(); +} diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index de362b10af46f7..f131ce48ed58a9 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -418,9 +418,7 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ( xfail_issue_99949, - "OnnxBackendNodeModelTest.test_bitwise_not_2d_cpu", "OnnxBackendNodeModelTest.test_bitwise_not_3d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_not_4d_cpu", ), ( xfail_issue_99950, From 3ccc99452c95e224b633d8ef1c81c3250ede8f96 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Wed, 17 Jan 2024 17:56:46 +0100 Subject: [PATCH 049/122] [PT FE] Add dict key name to tensor name when decomposing (#22215) --- src/frontends/pytorch/src/transforms/dict_resolver.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.cpp b/src/frontends/pytorch/src/transforms/dict_resolver.cpp index d51eb793813bf7..a4c237210ae4d9 100644 --- a/src/frontends/pytorch/src/transforms/dict_resolver.cpp +++ b/src/frontends/pytorch/src/transforms/dict_resolver.cpp @@ -48,6 +48,7 @@ bool DictParameterResolver::run_on_model(const std::shared_ptr& model) { getitem_node->get_output_partial_shape(0)); new_param->set_friendly_name(name); getitem_node->output(0).replace(new_param); + new_param->output(0).set_names({name}); new_params.push_back(new_param); changed = true; } else { From 2c365239b521edffbcc6beb77629845dcac1432c Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Wed, 17 Jan 2024 21:19:46 +0100 Subject: [PATCH 050/122] [DOCS] Removal of docs for Deployment Manager (#22214) * deployment manager docs removal * Update releasenotes_for_openvino.rst --- .../releasenotes_for_openvino.rst | 139 +++++----- .../openvino_legacy_features.rst | 9 - .../deployment-manager-tool.rst | 237 ------------------ 3 files changed, 69 insertions(+), 316 deletions(-) delete mode 100644 docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.rst diff --git a/docs/articles_en/about_openvino/releasenotes_for_openvino.rst b/docs/articles_en/about_openvino/releasenotes_for_openvino.rst index 49e4608a0e07e0..5af02c68d44f88 100644 --- a/docs/articles_en/about_openvino/releasenotes_for_openvino.rst +++ b/docs/articles_en/about_openvino/releasenotes_for_openvino.rst @@ -12,19 +12,19 @@ anywhere. 
 We are proud to announce the release of OpenVINO 2023.2 introducing a number of new
 features, improvements, and deprecations aimed at enhancing the developer experience.
 
-New and changed in 2023.2 
+New and changed in 2023.2
 ###########################
 
 Summary of major features and improvements
 ++++++++++++++++++++++++++++++++++++++++++++
 
-* More Generative AI coverage and framework integrations to minimize code changes. 
+* More Generative AI coverage and framework integrations to minimize code changes.
 
   * **Expanded model support for direct PyTorch model conversion** - automatically convert
     additional models directly from PyTorch or execute via ``torch.compile`` with OpenVINO
     as the backend.
   * **New and noteworthy models supported** - we have enabled models used for chatbots,
     instruction following, code generation, and many more, including prominent models
     like Llava, chatGLM, Bark (text to audio) and LCM (Latent Consistency Models, an
    optimized version of Stable Diffusion).
   * **Easier optimization and conversion of Hugging Face models** - compress LLM models
     to Int8 with the Hugging Face Optimum command line interface and export models to
     the OpenVINO IR format.
   * **OpenVINO is now available on Conan** - a package manager which allows more seamless
     package management for large scale projects for C and C++ developers.
 
-* Broader Large Language Model (LLM) support and more model compression techniques. 
+* Broader Large Language Model (LLM) support and more model compression techniques.
 
   * Accelerate inference for LLM models on Intel® Core™ CPU and iGPU with the
     use of Int8 model weight compression.
   * Expanded model support for dynamic shapes for improved performance on GPU.
   * Preview support for Int4 model format is now included. Int4 optimized model weights
     are now available to try on Intel® Core™ CPU and iGPU, to accelerate models like
     Llama 2 and chatGLM2.
   * The following Int4 model compression formats are supported for inference in runtime:
 
     * Generative Pre-training Transformer Quantization (GPTQ); GPTQ-compressed models
       can be accessed through the Hugging Face repositories.
     * Native Int4 compression through Neural Network Compression Framework (NNCF).
 
 * More portability and performance to run AI at the edge, in the cloud, or locally.
 
   * **In 2023.1 we announced full support for ARM** architecture, now we have improved
     performance by enabling FP16 model formats for LLMs and integrating additional
     acceleration libraries to improve latency.
 
 
 Support Change and Deprecation Notices
 ++++++++++++++++++++++++++++++++++++++++++
 
 * The OpenVINO™ Development Tools package (pip install openvino-dev) is deprecated
   and will be removed from installation options and distribution channels with
   2025.0. To learn more, refer to the
   :doc:`OpenVINO Legacy Features and Components page `.
   To ensure optimal performance, install the OpenVINO package (pip install openvino),
   which includes essential components such as OpenVINO Runtime, OpenVINO Converter,
   and NNCF.
 
 * Tools:
 
-  * :doc:`Deployment Manager `
-    is deprecated and will be removed in the 2024.0 release.
+  * Deployment Manager is deprecated and will be removed in the 2024.0 release.
   * Accuracy Checker is deprecated and will be discontinued with 2024.0.
-  * Post-Training Optimization Tool (POT) is deprecated and will be 
-    discontinued with 2024.0. 
+  * Post-Training Optimization Tool (POT) is deprecated and will be
+    discontinued with 2024.0.
   * Model Optimizer is deprecated and will be fully supported up until the 2025.0 release.
     Model conversion to the OpenVINO format should be performed through
     OpenVINO Model Converter, which is part of the PyPI package. Follow the
     :doc:`Model Optimizer to OpenVINO Model Converter transition `
     guide for a smoother transition. Known limitations are TensorFlow models with
     TF1 Control flow and object detection models. These limitations relate to
     the gap in TensorFlow direct conversion capabilities which will be addressed in
     upcoming releases.
   * PyTorch 1.13 support is deprecated in Neural Network Compression Framework (NNCF).
 
 * Runtime:
 
   * Intel® Gaussian & Neural Accelerator (Intel® GNA) will be deprecated in a future release.
     We encourage developers to use the Neural Processing Unit (NPU) for low powered
     systems like Intel® Core™ Ultra or 14th generation and beyond.
   * OpenVINO C++/C/Python 1.0 APIs will be discontinued with 2024.0.
   * Python 3.7 support has been discontinued.
 
 OpenVINO™ Development Tools
 ++++++++++++++++++++++++++++++++++++++++++
 
 List of components and their changes:
 
 * :doc:`OpenVINO Model Converter tool ` now supports the original framework shape format.
 * `Neural Network Compression Framework (NNCF) `__
 
   * Added data-free Int4 weight compression support for LLMs in OpenVINO IR with
     ``nncf.compress_weights()``.
   * Improved quantization time of LLMs with NNCF PTQ API for ``nncf.quantize()`` and
     ``nncf.quantize_with_accuracy_control()``.
   * Added support for SmoothQuant and ChannelAlignment algorithms in NNCF HyperParameter
-    Tuner for automatic optimization of their hyperparameters during quantization. 
+    Tuner for automatic optimization of their hyperparameters during quantization.
   * Added quantization support for the ``IF`` operation of models in OpenVINO format
     to speed up such models.
   * NNCF Post-training Quantization for PyTorch backend is now supported with
-    ``nncf.quantize()`` and the common implementation of quantization algorithms. 
+    ``nncf.quantize()`` and the common implementation of quantization algorithms.
   * Added support for PyTorch 2.1. PyTorch 1.13 support has been deprecated.
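The data-free Int4 weight compression mentioned above is driven through
``nncf.compress_weights()``. A hedged sketch of how it can be applied to an LLM in
OpenVINO IR (API as in NNCF releases of this period; parameter values and the model
path are illustrative, and exact defaults may differ):

    import nncf
    import openvino as ov

    core = ov.Core()
    model = core.read_model("llm.xml")  # placeholder IR of an LLM
    compressed = nncf.compress_weights(
        model,
        mode=nncf.CompressWeightsMode.INT4_SYM,  # data-free Int4 compression
        ratio=0.8,        # share of layers compressed to Int4 (rest kept Int8)
        group_size=128,   # group size for group-wise quantization
    )
    ov.save_model(compressed, "llm_int4.xml")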
* Accuracy Checker is deprecated and will be discontinued with 2024.0. - * Post-Training Optimization Tool (POT) is deprecated and will be - discontinued with 2024.0. + * Post-Training Optimization Tool (POT) is deprecated and will be + discontinued with 2024.0. * Model Optimizer is deprecated and will be fully supported up until the 2025.0 release. Model conversion to the OpenVINO format should be performed through - OpenVINO Model Converter, which is part of the PyPI package. Follow the + OpenVINO Model Converter, which is part of the PyPI package. Follow the :doc:`Model Optimizer to OpenVINO Model Converter transition ` - guide for smoother transition. Known limitations are TensorFlow model with - TF1 Control flow and object detection models. These limitations relate to + guide for smoother transition. Known limitations are TensorFlow model with + TF1 Control flow and object detection models. These limitations relate to the gap in TensorFlow direct conversion capabilities which will be addressed in upcoming releases. * PyTorch 1.13 support is deprecated in Neural Network Compression Framework (NNCF) @@ -87,8 +86,8 @@ Support Change and Deprecation Notices * Intel® Gaussian & Neural Accelerator (Intel® GNA) will be deprecated in a future release. We encourage developers to use the Neural Processing Unit (NPU) for low powered systems like Intel® Core™ Ultra or 14th generation and beyond. - * OpenVINO C++/C/Python 1.0 APIs will be discontinued with 2024.0. - * Python 3.7 support has been discontinued. + * OpenVINO C++/C/Python 1.0 APIs will be discontinued with 2024.0. + * Python 3.7 support has been discontinued. OpenVINO™ Development Tools ++++++++++++++++++++++++++++++++++++++++++ @@ -99,63 +98,63 @@ List of components and their changes: * :doc:`OpenVINO Model Converter tool ` now supports the original framework shape format. * `Neural Network Compression Framework (NNCF) `__ - + * Added data-free Int4 weight compression support for LLMs in OpenVINO IR with ``nncf.compress_weights()``. * Improved quantization time of LLMs with NNCF PTQ API for ``nncf.quantize()`` and ``nncf.quantize_with_accuracy_control()``. * Added support for SmoothQuant and ChannelAlighnment algorithms in NNCF HyperParameter - Tuner for automatic optimization of their hyperparameters during quantization. + Tuner for automatic optimization of their hyperparameters during quantization. * Added quantization support for the ``IF`` operation of models in OpenVINO format to speed up such models. * NNCF Post-training Quantization for PyTorch backend is now supported with - ``nncf.quantize()`` and the common implementation of quantization algorithms. - * Added support for PyTorch 2.1. PyTorch 1.13 support has been deprecated. + ``nncf.quantize()`` and the common implementation of quantization algorithms. + * Added support for PyTorch 2.1. PyTorch 1.13 support has been deprecated. -OpenVINO™ Runtime (previously known as Inference Engine) +OpenVINO™ Runtime (previously known as Inference Engine) --------------------------------------------------------- -* OpenVINO Common +* OpenVINO Common * Operations for reference implementations updated from legacy API to API 2.0. - * Symbolic transformation introduced the ability to remove Reshape operations + * Symbolic transformation introduced the ability to remove Reshape operations surrounding MatMul operations. 
-* OpenVINO Python API +* OpenVINO Python API * Better support for the ``openvino.properties`` submodule, which now allows the use - of properties directly, without additional parenthesis. Example use-case: + of properties directly, without additional parenthesis. Example use-case: ``{openvino.properties.cache_dir: “./some_path/”}``. * Added missing properties: ``execution_devices`` and ``loaded_from_cache``. * Improved error propagation on imports from OpenVINO package. -* AUTO device plug-in (AUTO) +* AUTO device plug-in (AUTO) * o Provided additional option to improve performance of cumulative throughput - (or MULTI), where part of CPU resources can be reserved for GPU inference + (or MULTI), where part of CPU resources can be reserved for GPU inference when GPU and CPU are both used for inference (using ``ov::hint::enable_cpu_pinning(true)``). This avoids the performance issue of CPU resource contention where there - is not enough CPU resources to schedule tasks for GPU + is not enough CPU resources to schedule tasks for GPU (`PR #19214 `__). * CPU * Introduced support of GPTQ quantized Int4 models, with improved performance - compared to Int8 weight-compressed or FP16 models. In the CPU plugin, + compared to Int8 weight-compressed or FP16 models. In the CPU plugin, the gain in performance is achieved by FullyConnected acceleration with 4bit weight decompression (`PR #20607 `__). * Improved performance of Int8 weight-compressed large language models on some platforms, such as 13th Gen Intel Core - (`PR #20607 `__). + (`PR #20607 `__). * Further reduced memory consumption of select large language models on - CPU platforms with AMX and AVX512 ISA, by eliminating extra memory copy - with a unified weight layout - (`PR #19575 `__). + CPU platforms with AMX and AVX512 ISA, by eliminating extra memory copy + with a unified weight layout + (`PR #19575 `__). * Fixed performance issue observed in 2023.1 release on select Xeon CPU - platform with improved thread workload partitioning matching L2 cache - utilization + platform with improved thread workload partitioning matching L2 cache + utilization (`PR #20436 `__). * Extended support of configuration (enable_cpu_pinning) on Windows platforms to allow fine-grain control on CPU resource used for inference @@ -168,34 +167,34 @@ OpenVINO™ Runtime (previously known as Inference Engine) * GPU * Enhanced inference performance for Large Language Models. - * Introduced int8 weight compression to boost LLM performance. + * Introduced int8 weight compression to boost LLM performance. (`PR #19548 `__). * Implemented Int4 GPTQ weight compression for improved LLM performance. * Optimized constant weights for LLMs, resulting in better memory usage and faster model loading. * Optimized gemm (general matrix multiply) and fc (fully connected) for - enhanced performance on iGPU. + enhanced performance on iGPU. (`PR #19780 `__). * Completed GPU plugin migration to API 2.0. * Added support for oneDNN 3.3 version. * Model Import Updates - * TensorFlow Framework Support + * TensorFlow Framework Support * Supported conversion of models from memory in keras.Model and tf.function formats. `PR #19903 `__ * Supported TF 2.14. `PR #20385 `__ - * PyTorch Framework Support + * PyTorch Framework Support * Supported Int4 GPTQ models. - * New operations supported. + * New operations supported. 
- * ONNX Framework Support + * ONNX Framework Support - * Added support for ONNX version 1.14.1 + * Added support for ONNX version 1.14.1 (`PR #18359 `__) @@ -209,17 +208,17 @@ Introduced an extension of the KServe gRPC API, enabling streaming input and output for servables with Mediapipe graphs. This extension ensures the persistence of Mediapipe graphs within a user session, improving processing performance. This enhancement supports stateful graphs, such as tracking algorithms, and -enables the use of source calculators. +enables the use of source calculators. (`see additional documentation `__) * Mediapipe framework has been updated to the version 0.10.3. * model_api used in the openvino inference Mediapipe calculator has been updated - and included with all its features. -* Added a demo showcasing gRPC streaming with Mediapipe graph. + and included with all its features. +* Added a demo showcasing gRPC streaming with Mediapipe graph. (`see here `__) * Added parameters for gRPC quota configuration and changed default gRPC channel arguments to add rate limits. It will minimize the risks of impact of the service - from uncontrolled flow of requests. + from uncontrolled flow of requests. * Updated python clients requirements to match wide range of python versions from 3.6 to 3.11 Learn more about the changes in https://github.com/openvinotoolkit/model_server/releases @@ -233,7 +232,7 @@ Jupyter Notebook Tutorials Cross-lingual Books Alignment With Transformers * `LLM chatbot `__ Create LLM-powered Chatbot - + * Updated to include Int4 weight compression and Zephyr 7B model * `Bark Text-to-Speech `__ @@ -295,18 +294,18 @@ Known issues | **ID - 121959** | *Component* - CPU plugin | *Description:* -| During inference using latency hint on selected hybrid CPU platforms - (such as 12th or 13th Gen Intel CORE), there is a sporadic occurrence of - increased latency caused by the operating system scheduling of P-cores or +| During inference using latency hint on selected hybrid CPU platforms + (such as 12th or 13th Gen Intel CORE), there is a sporadic occurrence of + increased latency caused by the operating system scheduling of P-cores or E-cores during OpenVINO initialization. | *Workaround:* -| This will be fixed in the next OpenVINO release. +| This will be fixed in the next OpenVINO release. | **ID - 123101** -| *Component* - GPU plugin +| *Component* - GPU plugin | *Description:* | Hung up of GPU plugin on A770 Graphics (dGPU) in case of - large batch size (1750). + large batch size (1750). | *Workaround:* | Decrease the batch size, wait for fixed driver released. @@ -320,19 +319,19 @@ three types of operating systems: Windows, Linux, and macOS. 
|| Component || License | Location | +================================+===================================+=================+=================+=======================+=================================================+ || OpenVINO (Inference Engine) C++ Runtime || Dual licensing: || /runtime/* | -|| Unified API to integrate the inference with application logic || Intel® OpenVINO™ Distribution License (Version May 2021) || /runtime/include/* | -|| OpenVINO (Inference Engine) Headers || Apache 2.0 || | +|| Unified API to integrate the inference with application logic || Intel® OpenVINO™ Distribution License (Version May 2021) || /runtime/include/* | +|| OpenVINO (Inference Engine) Headers || Apache 2.0 || | +--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+ || OpenVINO (Inference Engine) Pythion API || Apache 2.0 || /python/* | +--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+ || OpenVINO (Inference Engine) Samples || Apache 2.0 || /samples/* | || Samples that illustrate OpenVINO C++/ Python API usage || || | +--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+ -|| [Deprecated] Deployment manager || Apache 2.0 || /tools/deployment_manager/* | -|| The Deployment Manager is a Python* command-line tool that || || | -|| creates a deployment package by assembling the model, IR files, || || | -|| your application, and associated dependencies into a runtime || || | -|| package for your target device. || || | +|| [Deprecated] Deployment manager || Apache 2.0 || /tools/deployment_manager/* | +|| The Deployment Manager is a Python* command-line tool that || || | +|| creates a deployment package by assembling the model, IR files, || || | +|| your application, and associated dependencies into a runtime || || | +|| package for your target device. || || | +--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+ @@ -360,7 +359,7 @@ enabled hardware, software or service activation. Learn more at `http://www.intel.com/ `__ or from the OEM or retailer. -No computer system can be absolutely secure. +No computer system can be absolutely secure. Intel, Atom, Arria, Core, Movidius, Xeon, OpenVINO, and the Intel logo are trademarks of Intel Corporation in the U.S. and/or other countries. @@ -371,18 +370,18 @@ Other names and brands may be claimed as the property of others. Copyright © 2023, Intel Corporation. All rights reserved. -For more complete information about compiler optimizations, see our Optimization Notice. - -Performance varies by use, configuration and other factors. Learn more at +For more complete information about compiler optimizations, see our Optimization Notice. + +Performance varies by use, configuration and other factors. Learn more at `www.Intel.com/PerformanceIndex `__. Download +++++++++++++++++++++++++++++++++++++++++++++ `The OpenVINO product selector tool `__ -provides easy access to the right packages that match your desired OS, version, +provides easy access to the right packages that match your desired OS, version, and distribution options. 
- + diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst index d6953d26f59cf9..1b1e00db3e232b 100644 --- a/docs/articles_en/documentation/openvino_legacy_features.rst +++ b/docs/articles_en/documentation/openvino_legacy_features.rst @@ -10,7 +10,6 @@ Legacy Features and Components OpenVINO Development Tools package Model Optimizer / Conversion API - Deploy Application with Deployment Manager Open Model ZOO @@ -47,14 +46,6 @@ offering. | :doc:`See how to use OVC ` | :doc:`See how to transition from the legacy solution ` -| **OpenVINO Deployment Manager** -| *New solution:* the tool is no longer needed -| *Old solution:* discontinuation planned for OpenVINO 2024.0 -| -| It is recommended to explore alternative deployment solutions available in OpenVINO. -| :doc:`See how to deploy locally ` - - | **Open Model ZOO** | *New solution:* users are encouraged to use public model repositories diff --git a/docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.rst b/docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.rst deleted file mode 100644 index 7249ca45f00cc8..00000000000000 --- a/docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.rst +++ /dev/null @@ -1,237 +0,0 @@ -.. {#openvino_docs_install_guides_deployment_manager_tool} - -Deploying Your Application with Deployment Manager -================================================== - - -.. meta:: - :description: OpenVINO™ Deployment Manager assembles the model, OpenVINO IR - files, your application, dependencies and creates a deployment - package for a target device. - - -.. warning:: - - Note that OpenVINO Deployment Manager is deprecated and will be removed in OpenVINO 2024.0. - -The OpenVINO™ Deployment Manager is a Python command-line tool that creates a deployment package by assembling the model, OpenVINO IR files, your application, and associated dependencies into a runtime package for your target device. This tool is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux, Windows and macOS release packages. It is available in the ``/tools/deployment_manager`` directory after installation. - -This article provides instructions on how to create a package with Deployment Manager and then deploy the package to your target systems. - -Prerequisites -#################### - -To use the Deployment Manager tool, the following requirements need to be met: - -* Intel® Distribution of OpenVINO™ toolkit is installed. See the :doc:`Installation Guide ` for instructions on different operating systems. -* To run inference on a target device other than CPU, device drivers must be pre-installed: - - * **For GPU**, see :doc:`Configurations for Intel® Processor Graphics (GPU) `. - * **For GNA**, see :doc:`Intel® Gaussian & Neural Accelerator (GNA) ` - -.. important:: - - The operating system on the target system must be the same as the development system on which you are creating the package. For example, if the target system is Ubuntu 18.04, the deployment package must be created from the OpenVINO™ toolkit installed on Ubuntu 18.04. - -.. tip:: - - If your application requires additional dependencies, including the Microsoft Visual C++ Redistributable, use the `'--user_data' option <#running-deployment-manager-in-standard-cli-mode>`__ to add them to the deployment archive. Install these dependencies on the target host before running inference. 
- -Creating Deployment Package Using Deployment Manager -#################################################### - -To create a deployment package that includes inference-related components of OpenVINO™ toolkit, you can run the Deployment Manager tool in either interactive or standard CLI mode . - -Running Deployment Manager in Interactive Mode -++++++++++++++++++++++++++++++++++++++++++++++ - -.. dropdown:: Click to expand/collapse - - The interactive mode provides a user-friendly command-line interface that guides through the process with text prompts. - - To launch the Deployment Manager in interactive mode, open a new terminal window, go to the Deployment Manager tool directory, and run the tool script without parameters: - - .. tab-set:: - - .. tab-item:: Windows - :sync: windows - - .. code-block:: bat - - cd \tools\deployment_manager - .\deployment_manager.py - - .. tab-item:: Linux - :sync: linux - - .. code-block:: sh - - cd /tools/deployment_manager - ./deployment_manager.py - - .. tab-item:: macOS - :sync: macos - - .. code-block:: sh - - cd /tools/deployment_manager - ./deployment_manager.py - - - The target device selection dialog is displayed: - - .. image:: _static/images/selection_dialog.png - :alt: Deployment Manager selection dialog - - Use the options provided on the screen to complete the selection of the target devices, and press **Enter** to proceed to the package generation dialog. To interrupt the generation process and exit the program, type **q** and press **Enter**. - - Once the selection is accepted, the package generation dialog will appear: - - .. image:: _static/images/configuration_dialog.png - :alt: Deployment Manager configuration dialog - - The target devices selected in the previous step appear on the screen. To go back and change the selection, type **b** and press **Enter**. Use the default settings, or use the following options to configure the generation process: - - * ``o. Change output directory`` (optional): the path to the output directory. By default, it is set to your home directory. - - * ``u. Provide (or change) path to folder with user data`` (optional): the path to a directory with user data (OpenVINO IR, model, dataset, etc.) files and subdirectories required for inference, which will be added to the deployment archive. By default, it is set to ``None``, which means that copying the user data to the target system need to be done separately. - - * ``t. Change archive name`` (optional): the deployment archive name without extension. By default, it is set to ``openvino_deployment_package``. - - After all the parameters are set, type **g** and press **Enter** to generate the package for the selected target devices. To interrupt the generation process and exit the program, type **q** and press **Enter**. - - Once the script has successfully completed, the deployment package is generated in the specified output directory. - - -Running Deployment Manager in Standard CLI Mode -+++++++++++++++++++++++++++++++++++++++++++++++ - - -.. dropdown:: Click to expand/collapse - - You can also run the Deployment Manager tool in the standard CLI mode. In this mode, specify the target devices and other parameters as command-line arguments of the Deployment Manager Python script. This mode facilitates integrating the tool in an automation pipeline. - - To launch the Deployment Manager tool in the standard mode: open a new terminal window, go to the Deployment Manager tool directory, and run the tool command with the following syntax: - - .. tab-set:: - - .. 
tab-item:: Windows - :sync: windows - - .. code-block:: bat - - cd \tools\deployment_manager - .\deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data] - - .. tab-item:: Linux - :sync: linux - - .. code-block:: sh - - cd /tools/deployment_manager - ./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data] - - .. tab-item:: macOS - :sync: macos - - .. code-block:: sh - - cd /tools/deployment_manager - ./deployment_manager.py <--targets> [--output_dir] [--archive_name] [--user_data] - - - The following options are available: - - * ``<--targets>`` (required): the list of target devices to run inference. To specify more than one target, separate them with spaces, for example, ``--targets cpu gpu``. - To get a list of currently available targets, run the program with the ``-h`` option. - - * ``[--output_dir]`` (optional): the path to the output directory. By default, it is set to your home directory. - - * ``[--archive_name]`` (optional): a deployment archive name without extension. By default, it is set to ``openvino_deployment_package``. - - * ``[--user_data]`` (optional): the path to a directory with user data (OpenVINO IR, model, dataset, etc.) files and subdirectories required for inference, which will be added to the deployment archive. By default, it is set to ``None``, which means copying the user data to the target system need to be performed separately. - - Once the script has successfully completed, the deployment package is generated in the output directory specified. - - -Deploying Package on Target Systems -################################### - -Once the Deployment Manager has successfully completed, the ``.tar.gz`` (on Linux or macOS) or ``.zip`` (on Windows) package is generated in the specified output directory. - -To deploy the OpenVINO Runtime components from the development machine to the target system, perform the following steps: - -1. Copy the generated archive to the target system by using your preferred method. - -2. Extract the archive to the destination directory on the target system. If the name of your archive is different from the default one shown below, replace ``openvino_deployment_package`` with your specified name. - - .. tab-set:: - - .. tab-item:: Windows - :sync: windows - - .. code-block:: bat - - Use the archiver of your choice to unzip the file. - - .. tab-item:: Linux - :sync: linux - - .. code-block:: sh - - tar xf openvino_deployment_package.tar.gz -C - - .. tab-item:: macOS - :sync: macos - - .. code-block:: sh - - tar xf openvino_deployment_package.tar.gz -C - - - Now, the package is extracted to the destination directory. The following files and subdirectories are created: - - * ``setupvars.sh`` — a copy of ``setupvars.sh``. - * ``runtime`` — contains the OpenVINO runtime binary files. - * ``install_dependencies`` — a snapshot of the ``install_dependencies`` directory from the OpenVINO installation directory. - * ```` — the directory with the user data (OpenVINO IR, model, dataset, etc.) specified while configuring the package. - -3. On a target Linux system, to run inference install additional dependencies by running the ``install_openvino_dependencies.sh`` script: - - .. code-block:: sh - - cd /openvino/install_dependencies - sudo -E ./install_openvino_dependencies.sh - - -4. Set up the environment variables: - - .. tab-set:: - - .. tab-item:: Windows - :sync: windows - - .. code-block:: bat - - cd \openvino\ - .\setupvars.bat - - .. tab-item:: Linux - :sync: linux - - .. 
code-block:: sh - - cd /openvino/ - source ./setupvars.sh - - .. tab-item:: macOS - :sync: macos - - .. code-block:: sh - - cd /openvino/ - source ./setupvars.sh - - -Now, you have finished the deployment of the OpenVINO Runtime components to the target system. - From be6ede1d8ac65fea4fa8e3075b1df32c27fbed17 Mon Sep 17 00:00:00 2001 From: River Li Date: Thu, 18 Jan 2024 04:59:45 +0800 Subject: [PATCH 051/122] [Core] remove some legacy header files (#22217) --- .../include/openvino/util/common_util.hpp | 16 ++ .../ie_iexecutable_network_internal.hpp | 2 +- .../interface/ie_iinfer_request_internal.hpp | 2 +- .../interface/ie_iplugin_internal.hpp | 2 +- .../interface/ie_ivariable_state_internal.hpp | 2 +- src/inference/dev_api/debug.h | 222 ------------------ src/inference/dev_api/ie_algorithm.hpp | 125 ---------- .../dev_api/ie_performance_hints.hpp | 104 -------- src/inference/dev_api/so_ptr.hpp | 11 - .../interface/ie_iinfer_request_internal.cpp | 13 +- .../interface/ie_iplugin_internal.cpp | 3 +- src/inference/src/ie_network_reader.cpp | 2 +- .../tests/functional/debug_tests.cpp | 21 -- .../ie_infer_async_request_base_test.cpp | 2 +- .../tests/unit/ie_executable_network_test.cpp | 2 +- src/plugins/intel_cpu/src/infer_request.cpp | 1 - .../subgraph_tests/src/rotary_pos_emb.cpp | 2 - .../src/behavior/plugin/hetero_synthetic.cpp | 1 - .../src/subgraph/reshape_permute_reshape.cpp | 4 +- .../common_test_utils/src/data_utils.cpp | 1 - 20 files changed, 32 insertions(+), 506 deletions(-) delete mode 100644 src/inference/dev_api/debug.h delete mode 100644 src/inference/dev_api/ie_algorithm.hpp delete mode 100644 src/inference/dev_api/ie_performance_hints.hpp delete mode 100644 src/inference/dev_api/so_ptr.hpp delete mode 100644 src/inference/tests/functional/debug_tests.cpp diff --git a/src/common/util/include/openvino/util/common_util.hpp b/src/common/util/include/openvino/util/common_util.hpp index b1d731ed318caf..fc1cea05887f39 100644 --- a/src/common/util/include/openvino/util/common_util.hpp +++ b/src/common/util/include/openvino/util/common_util.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -119,6 +120,21 @@ bool contains(const std::vector& vec, const V& v) { }); } +/** + * @brief multiply vector's values + * @param vec - vector with values + * @return result of multiplication + */ +template +T product(std::vector const& vec) { + if (vec.empty()) + return 0; + T ret = vec[0]; + for (size_t i = 1; i < vec.size(); ++i) + ret *= vec[i]; + return ret; +} + /** * @brief Associative containers doesnt work with remove_if algorithm * @tparam ContainerT diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp index ba9420bc51bdf6..6b89010ac995fe 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp @@ -12,7 +12,7 @@ #include "cpp/ie_cnn_network.h" #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" #include "ie_parameter.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace ov { class Function; diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp index 1c8f2b5ec786ef..438b94acb65c2d 100644 --- 
a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp @@ -12,7 +12,7 @@ #include "ie_common.h" #include "ie_input_info.hpp" #include "openvino/core/node_output.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace InferenceEngine { diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index 9ae8659be3db42..42df35371800a3 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -21,8 +21,8 @@ #include "ie_parameter.hpp" #include "openvino/core/extension.hpp" #include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "openvino/util/pp.hpp" -#include "so_ptr.hpp" using namespace ov::threading; diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp index a2a0aabf997f70..f04a34bf841e63 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp @@ -8,7 +8,7 @@ #include #include "ie_blob.h" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace InferenceEngine { diff --git a/src/inference/dev_api/debug.h b/src/inference/dev_api/debug.h deleted file mode 100644 index e626d8670a63b0..00000000000000 --- a/src/inference/dev_api/debug.h +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Basic debugging tools - * @file debug.h - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ie_algorithm.hpp" - -namespace InferenceEngine { -namespace details { - -/** - * @brief Serializes a `std::vector` to a `std::ostream` - * @ingroup ie_dev_api_error_debug - * @param out An output stream - * @param vec A vector to serialize - * @return A reference to a `std::stream` - */ -template -inline std::ostream& operator<<(std::ostream& out, const std::vector& vec) { - if (vec.empty()) - return std::operator<<(out, "[]"); - out << "[" << vec[0]; - for (unsigned i = 1; i < vec.size(); i++) { - out << ", " << vec[i]; - } - return out << "]"; -} - -/** - * @brief trim from start (in place) - * @ingroup ie_dev_api_error_debug - * @param s - string to trim - */ -inline void ltrim(std::string& s) { - s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { - return !std::isspace(c); - })); -} - -/** - * @brief trim from end (in place) - * @ingroup ie_dev_api_error_debug - * @param s - string to trim - */ -inline void rtrim(std::string& s) { - s.erase(std::find_if(s.rbegin(), - s.rend(), - [](int c) { - return !std::isspace(c); - }) - .base(), - s.end()); -} - -/** - * @brief Trims std::string from both ends (in place) - * @ingroup ie_dev_api_error_debug - * @param s A reference to a std::tring to trim - * @return A reference to a trimmed std::string - */ -inline std::string& trim(std::string& s) { - ltrim(s); - rtrim(s); - return s; -} - -/** - * @brief split string into a vector of substrings - * @ingroup ie_dev_api_error_debug - * @param src - string to split - * @param delimiter - string used as a 
delimiter - * @return vector of substrings - */ -inline std::vector split(const std::string& src, const std::string& delimiter) { - std::vector tokens; - std::string tokenBuf; - size_t prev = 0, pos = 0, srcLength = src.length(), delimLength = delimiter.length(); - do { - pos = src.find(delimiter, prev); - if (pos == std::string::npos) { - pos = srcLength; - } - tokenBuf = src.substr(prev, pos - prev); - if (!tokenBuf.empty()) { - tokens.push_back(tokenBuf); - } - prev = pos + delimLength; - } while (pos < srcLength && prev < srcLength); - return tokens; -} - -/** - * @brief create a string representation for a vector of values, without any suffixes or prefixes - * @ingroup ie_dev_api_error_debug - * @param vec Vector of values - * @param glue A separator - * @return A string representation - */ -template -std::string joinVec(std::vector const& vec, std::string const& glue = std::string(",")) { - if (vec.empty()) - return ""; - std::stringstream oss; - oss << vec[0]; - for (size_t i = 1; i < vec.size(); i++) - oss << glue << vec[i]; - return oss.str(); -} - -/** - * @brief create a string representation for a vector of values, enclosing text in a square brackets - * @ingroup ie_dev_api_error_debug - * @param vec - vector of values - * @return string representation - */ -template -std::string dumpVec(std::vector const& vec) { - return "[" + joinVec(vec) + "]"; -} - -/** - * @brief multiply vector's values - * @ingroup ie_dev_api_error_debug - * @param vec - vector with values - * @return result of multiplication - */ -template -T product(std::vector const& vec) { - if (vec.empty()) - return 0; - T ret = vec[0]; - for (size_t i = 1; i < vec.size(); ++i) - ret *= vec[i]; - return ret; -} - -/** - * @brief check if vectors contain same values - * @ingroup ie_dev_api_error_debug - * @param v1 - first vector - * @param v2 - second vector - * @return true if vectors contain same values - */ -template -bool equal(const std::vector& v1, const std::vector& v2) { - if (v1.size() != v2.size()) - return false; - for (auto i1 = v1.cbegin(), i2 = v2.cbegin(); i1 != v1.cend(); ++i1, ++i2) { - if (*i1 != *i2) - return false; - } - return true; -} - -#ifdef _WIN32 -# define strncasecmp _strnicmp -#endif - -/** - * @brief Checks whether two `std::string`s are equal - * @ingroup ie_dev_api_error_debug - * @param lhs A first `std::string` to compare - * @param rhs A second `std::string` to compare - * @param ignoreCase Whether to ignore case-sensitivity, default is `true` - * @return `True` in case of `std::string`s are equal, `false` otherwise - */ -inline bool equal(const std::string& lhs, const std::string& rhs, bool ignoreCase = true) { - return (lhs.size() == rhs.size()) && (ignoreCase ? 
0 == strncasecmp(lhs.c_str(), rhs.c_str(), lhs.size()) - : 0 == strncmp(lhs.c_str(), rhs.c_str(), lhs.size())); -} - -/** - * @brief check string end with given substring - * @ingroup ie_dev_api_error_debug - * @param src - string to check - * @param with - given substring - * @return true if string end with given substring - */ -inline bool endsWith(const std::string& src, const char* with) { - int wl = static_cast(strlen(with)); - int so = static_cast(src.length()) - wl; - if (so < 0) - return false; - return 0 == strncmp(with, &src[so], wl); -} - -/** - * @brief Converts all upper-case letters in a std::string to lower case - * @ingroup ie_dev_api_error_debug - * @param s A std::tring to convert - * @return An output std::string in lower case - */ -inline std::string tolower(const std::string& s) { - std::string ret; - ret.resize(s.length()); - std::transform(s.begin(), s.end(), ret.begin(), [](char c) { - return static_cast(::tolower(static_cast(c))); - }); - return ret; -} -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_algorithm.hpp b/src/inference/dev_api/ie_algorithm.hpp deleted file mode 100644 index c87d2df7616e98..00000000000000 --- a/src/inference/dev_api/ie_algorithm.hpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file with simple helper functions for STL containters - * @file ie_algorithm.hpp - */ - -#pragma once -#include -#include -#include - -namespace InferenceEngine { - -/** - * @brief A namespace with non-public Inference Engine Plugin API - * @ingroup ie_dev_api - */ -namespace details { - -/** - * @brief Simple helper function to check element presence in container - * container must provede stl-compliant find member function - * - * @param container - Container to check - * @param element - element to check - * - * @return true if element present in container - */ -template -bool contains(const C& container, const T& element) { - return container.find(element) != container.end(); -} - -/** - * @brief Associative containers doesnt work with remove_if algorithm - * @tparam ContainerT - * @tparam PredicateT - * @param data An associative container - * @param predicate A predicate to remove values conditionally - */ -template -inline void erase_if(Container& data, const PredicateT& predicate) { - for (auto it = std::begin(data); it != std::end(data);) { - if (predicate(*it)) { - it = data.erase(it); - } else { - ++it; - } - } -} - -/** - * @brief Multiplies container - * - * @param[in] beg The `begin` iterator - * @param[in] en The `end` iterator - * - * @tparam TIterator An iterator type - * - * @return A result of multiplication. - */ -template -auto product(TIterator beg, TIterator en) -> typename std::remove_reference::type { - return std::accumulate(beg, - en, - static_cast::type>(1), - std::multiplies::type>()); -} - -/** - * @brief Clips element to be in range `[min, max]` - * - * @param idx The pointer to element. - * @param[in] min The minimum value - * @param[in] max The maximum value - */ -inline void clipping(int* idx, const int min, const int max) { - (*idx) = ((*idx) > min) ? (*idx) : min; - (*idx) = ((*idx) < max) ? 
(*idx) : (max - 1); -} - -/** - * @brief Set containers intersection - * @tparam Set - * @param lhs First set container - * @param rhs Second set container - * @return Set intersection - */ -template -static Set Intersection(const Set& lhs, const Set& rhs) { - Set result; - const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs; - const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs; - for (auto&& val : minSizeSet) { - if (InferenceEngine::details::contains(maxSizeSet, val)) { - result.insert(val); - } - } - return result; -} - -/** - * @brief Check whether two sets intersect - * @tparam Set - * @param lhs First set container - * @param rhs Second set container - * @return true if two sets interesect false otherwise - */ -template -static bool Intersects(const Set& lhs, const Set& rhs) { - const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs; - const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs; - for (auto&& val : minSizeSet) { - if (InferenceEngine::details::contains(maxSizeSet, val)) { - return true; - } - } - return false; -} - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_performance_hints.hpp b/src/inference/dev_api/ie_performance_hints.hpp deleted file mode 100644 index 495ebc17be66d9..00000000000000 --- a/src/inference/dev_api/ie_performance_hints.hpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for config that holds the performance hints - * @file ie_performance_hints.hpp - */ - -#pragma once -#include -#include - -namespace InferenceEngine { -struct PerfHintsConfig { - std::string ovPerfHint = "LATENCY"; - int ovPerfHintNumRequests = 0; - - /** - * @brief Parses configuration key/value pair - * @param key configuration key - * @param value configuration values - */ - void SetConfig(const std::string& key, const std::string& value) { - if (PluginConfigParams::KEY_PERFORMANCE_HINT == key) { - ovPerfHint = CheckPerformanceHintValue(value); - } else if (PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS == key) { - ovPerfHintNumRequests = CheckPerformanceHintRequestValue(value); - } - } - - /** - * @brief Return configuration value - * @param key configuration key - * @return configuration value wrapped into Parameter - */ - Parameter GetConfig(const std::string& key) { - if (PluginConfigParams::KEY_PERFORMANCE_HINT == key) { - return ovPerfHint; - } else if (PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS == key) { - return ovPerfHintNumRequests; - } else { - IE_THROW() << "Unsupported Performance Hint config: " << key << std::endl; - } - } - - /** - * @brief Supported Configuration keys - * @return vector of supported configuration keys - */ - static std::vector SupportedKeys() { - return {PluginConfigParams::KEY_PERFORMANCE_HINT, PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS}; - } - - /** - * @brief Checks configuration key and value, otherwise throws - * @param configuration key + value - * @return void - */ - static void CheckConfigAndValue(std::pair kvp) { - if (kvp.first == PluginConfigParams::KEY_PERFORMANCE_HINT) - CheckPerformanceHintValue(kvp.second); - else if (kvp.first == PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS) - CheckPerformanceHintRequestValue(kvp.second); - else - IE_THROW() << "Unsupported Performance Hint config: " << kvp.first << std::endl; - } - - /** - * @brief Returns configuration value if it is valid, otherwise throws - * @param 
configuration value - * @return configuration value - */ - static std::string CheckPerformanceHintValue(const std::string& val) { - if (val == PluginConfigParams::LATENCY || val == PluginConfigParams::THROUGHPUT || - val == PluginConfigParams::CUMULATIVE_THROUGHPUT || val == PluginConfigParams::UNDEFINED) - return val; - else - IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_PERFORMANCE_HINT - << ". Expected only " << PluginConfigParams::LATENCY << "/" << PluginConfigParams::THROUGHPUT - << "/" << PluginConfigParams::CUMULATIVE_THROUGHPUT << "/" << PluginConfigParams::UNDEFINED; - } - - /** - * @brief Returns configuration value if it is valid, otherwise throws - * @param configuration value as string - * @return configuration value as number - */ - static int CheckPerformanceHintRequestValue(const std::string& val) { - int val_i = -1; - try { - val_i = std::stoi(val); - if (val_i >= 0) - return val_i; - else - throw std::logic_error("wrong val"); - } catch (const std::exception&) { - IE_THROW() << "Wrong value of " << val << " for property key " - << PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS - << ". Expected only positive integer numbers"; - } - } -}; -} // namespace InferenceEngine diff --git a/src/inference/dev_api/so_ptr.hpp b/src/inference/dev_api/so_ptr.hpp deleted file mode 100644 index c9aa52642a6d46..00000000000000 --- a/src/inference/dev_api/so_ptr.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a wrapper class for handling plugin instantiation and releasing resources - * @file so_ptr.hpp - */ -#pragma once - -#include "openvino/runtime/so_ptr.hpp" diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp index bf1ffd10ac4acf..cf4dd6a59c28aa 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp @@ -11,12 +11,11 @@ #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "cpp_interfaces/plugin_itt.hpp" -#include "debug.h" -#include "ie_algorithm.hpp" #include "ie_blob.h" #include "ie_common.h" #include "ie_ngraph_utils.hpp" #include "openvino/core/partial_shape.hpp" +#include "openvino/util/common_util.hpp" #include "transformations/utils/utils.hpp" namespace InferenceEngine { @@ -139,7 +138,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us auto& devBlob = _deviceInputs[name]; size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? InferenceEngine::details::product(foundInput->getTensorDesc().getDims()) + ? ov::util::product(foundInput->getTensorDesc().getDims()) : 1; if (!isInputDynamic && dataSize != inputSize) { IE_THROW() << "Input tensor size is not equal network input size (" << dataSize << "!=" << inputSize @@ -149,7 +148,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us devBlob = userBlob; } else { size_t outputSize = foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? details::product(foundOutput->getTensorDesc().getDims()) + ? 
ov::util::product(foundOutput->getTensorDesc().getDims()) : 1; if (!isOutputDynamic && dataSize != outputSize) { IE_THROW() << "Output blob size is not equal network output size (" << dataSize << "!=" << outputSize @@ -279,7 +278,7 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, const auto input = findInputByNodeName(name); isDynamic = input && input->get_output_partial_shape(0).is_dynamic(); dims = foundInputPair->second->getTensorDesc().getDims(); - refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? details::product(dims) : 1; + refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? ov::util::product(dims) : 1; } else { auto foundOutputPair = std::find_if(std::begin(_networkOutputs), std::end(_networkOutputs), @@ -299,10 +298,10 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, // need to immediately throw here dims = foundOutputPair->second->getTensorDesc().getDims(); } - refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? details::product(dims) : 1; + refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? ov::util::product(dims) : 1; } } else { - refSize = details::product(refDims); + refSize = ov::util::product(refDims); } if (!isDynamic && refSize != blob->size()) { diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index dbe91e775cb855..97571141411f89 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -24,7 +24,6 @@ #include "cpp/ie_cnn_network.h" #include "dev/converter_utils.hpp" #include "exec_graph_info.hpp" -#include "ie_algorithm.hpp" #include "ie_api.h" #include "ie_icore.hpp" #include "ie_iextension.h" @@ -274,7 +273,7 @@ std::unordered_set GetRemovedNodes(const std::shared_ptrget_ops()) { - if (!InferenceEngine::details::contains(transformedNodeNames, originalNode->get_friendly_name())) + if (transformedNodeNames.find(originalNode->get_friendly_name()) == transformedNodeNames.end()) result.emplace(originalNode->get_friendly_name()); } diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index b9543193a58a78..80343432e4335c 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -29,8 +29,8 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/runtime/shared_buffer.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "openvino/util/shared_object.hpp" -#include "so_ptr.hpp" #include "transformations/rt_info/old_api_map_order_attribute.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/inference/tests/functional/debug_tests.cpp b/src/inference/tests/functional/debug_tests.cpp deleted file mode 100644 index 6f72c8d88d768a..00000000000000 --- a/src/inference/tests/functional/debug_tests.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "debug.h" - -using DebugTests = ::testing::Test; - -TEST_F(DebugTests, tolowerWorksWithEmptyString) { - std::string str = ""; - ASSERT_STREQ("", InferenceEngine::details::tolower(str).c_str()); -} - -TEST_F(DebugTests, shouldConvertToLowerCase) { - std::string str = "Hello, World!1"; - ASSERT_STREQ("hello, world!1", InferenceEngine::details::tolower(str).c_str()); -} 
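For context: the hunks above replace the removed ``InferenceEngine::details::product`` helper with ``ov::util::product``, which this patch adds to ``common_util.hpp``. A minimal usage sketch of the relocated helper, assuming only the header change from this series (values are illustrative, not part of the patch):

.. code-block:: cpp

   #include <cstddef>
   #include <iostream>
   #include <vector>

   #include "openvino/util/common_util.hpp"

   int main() {
       // Element count of a 1x3x224x224 tensor: ov::util::product multiplies
       // all values and, like the removed helper, returns 0 for an empty vector.
       const std::vector<std::size_t> dims{1, 3, 224, 224};
       const std::size_t element_count = ov::util::product(dims);
       std::cout << element_count << std::endl;  // prints 150528
       return 0;
   }
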
diff --git a/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp b/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp index 613124cf040004..6b13290c3e7250 100644 --- a/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp +++ b/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp @@ -10,7 +10,7 @@ #include #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" diff --git a/src/inference/tests/unit/ie_executable_network_test.cpp b/src/inference/tests/unit/ie_executable_network_test.cpp index a1bd8d9bb7bf5a..142214c3ef8e15 100644 --- a/src/inference/tests/unit/ie_executable_network_test.cpp +++ b/src/inference/tests/unit/ie_executable_network_test.cpp @@ -12,7 +12,7 @@ #include "cpp/ie_executable_network_base.hpp" #include "openvino/runtime/compiled_model.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp index bf54bab2e917ad..7537c0f1f915d7 100644 --- a/src/plugins/intel_cpu/src/infer_request.cpp +++ b/src/plugins/intel_cpu/src/infer_request.cpp @@ -6,7 +6,6 @@ #include "async_infer_request.h" #include "compiled_model.h" -#include "debug.h" #include "dnnl_extension_utils.h" #include "itt.h" #include "memory_desc/dnnl_blocked_memory_desc.h" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp index ae0f14f88dc54c..ffdeb1ee5caf98 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include #include #include diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp index 5e257bee4903db..e8a0640f563459 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp @@ -9,7 +9,6 @@ #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" #include -#include "ie_algorithm.hpp" #include "common_test_utils/subgraph_builders/split_conv_concat.hpp" #include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" #include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp index 36ea849035635c..cda8494ecd99df 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp +++ 
b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "shared_test_classes/subgraph/reshape_permute_reshape.hpp" +#include "openvino/util/common_util.hpp" namespace SubgraphTestsDefinitions { std::string ReshapePermuteReshape::getTestCaseName(const testing::TestParamInfo &obj) { @@ -23,7 +23,7 @@ namespace SubgraphTestsDefinitions { std::vector> inputs; InferenceEngine::Precision netPrecision; std::tie(inputs, netPrecision, targetDevice) = this->GetParam(); - const std::size_t input_dim = InferenceEngine::details::product(inputs[0]); + const std::size_t input_dim = ov::util::product(inputs[0]); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); std::vector shape_input{1, input_dim}; ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(shape_input))}; diff --git a/src/tests/test_utils/common_test_utils/src/data_utils.cpp b/src/tests/test_utils/common_test_utils/src/data_utils.cpp index 61860b6de3e8eb..1b662ed3df9fbc 100644 --- a/src/tests/test_utils/common_test_utils/src/data_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/data_utils.cpp @@ -5,7 +5,6 @@ #include "common_test_utils/data_utils.hpp" #include "blob_factory.hpp" -#include "debug.h" // to allow putting vector into exception string stream #include "ie_blob.h" #include "openvino/core/deprecated.hpp" #include "openvino/core/type/element_type_traits.hpp" From f326a6ede03285b83dfe64a464b65f27c3a850ac Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Wed, 17 Jan 2024 22:17:27 +0100 Subject: [PATCH 052/122] Refactor GPU shared beh tests 2 (#21997) * Update gpu beh tests to use api 2.0 shared tests * Apply comments * Fix * Fix * Apply comments * Apply comments --- .../behavior/ov_plugin/core_integration.cpp | 81 +++++++++++++++++++ .../behavior/ov_plugin/life_time.cpp | 5 ++ .../behavior/plugin/caching_tests.cpp | 51 ------------ .../behavior/plugin/life_time.cpp | 27 ------- .../skip_tests_config.cpp | 4 + .../tests/functional/skip_tests_config.cpp | 3 + .../ov_executable_network/get_metric.hpp | 1 - .../behavior/ov_plugin/core_integration.hpp | 8 +- .../behavior/ov_plugin/query_model.hpp | 7 ++ .../CPU/expected_failures_API.csv | 1 + 10 files changed, 105 insertions(+), 83 deletions(-) delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 5a5d028b788c28..8bff5ce426b757 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -10,6 +10,87 @@ using namespace ov::test::behavior; namespace { + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassBasicTestP, OVClassBasicTestP, + ::testing::Values(std::make_pair(std::string("openvino_intel_gpu_plugin"), std::string(ov::test::utils::DEVICE_GPU)))); + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassNetworkTestP, OVClassNetworkTestP, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU))); + +// +// OV Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, 
OVClassGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_SUPPORTED_METRICS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_AVAILABLE_DEVICES, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_FULL_DEVICE_NAME, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_DEVICE_GOPS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_DEVICE_TYPE, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_RANGE_FOR_STREAMS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_ThrowUnsupported, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetConfigTest, OVClassGetConfigTest_ThrowUnsupported, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetAvailableDevices, OVClassGetAvailableDevices, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + // IE Class Common tests with // INSTANTIATE_TEST_SUITE_P(nightly_OVClassModelTestP, OVClassModelTestP, ::testing::Values("GPU")); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index 5eeacde0093d87..2ef40e0017c98b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -14,4 +14,9 @@ namespace { ::testing::Values(//ov::test::utils::DEVICE_BATCH, "HETERO:GPU"), OVHoldersTest::getTestCaseName); + + INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVHoldersTestOnImportedNetwork, + ::testing::Values(ov::test::utils::DEVICE_GPU), + OVHoldersTestOnImportedNetwork::getTestCaseName); + } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp deleted file mode 
100644 index 5f3e4120047a82..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/caching_tests.hpp" - -using namespace LayerTestsDefinitions; - -namespace { - static const std::vector precisionsGPU = { - ngraph::element::f32, - ngraph::element::f16, - ngraph::element::i32, - ngraph::element::i64, - ngraph::element::i8, - ngraph::element::u8, - ngraph::element::i16, - ngraph::element::u16, - }; - - static const std::vector floatPrecisionsGPU = { - ngraph::element::f32, - ngraph::element::f16 - }; - - static const std::vector batchSizesGPU = { - 1, 2 - }; - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_GPU, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsGPU), - ::testing::ValuesIn(batchSizesGPU), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_GPU_Float, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsGPU), - ::testing::ValuesIn(batchSizesGPU), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_KernelCachingSupportCase_GPU, LoadNetworkCompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::make_pair(std::map(), "blob"))), - LoadNetworkCompiledKernelsCacheTest::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp deleted file mode 100644 index 6ab85ed4b453ed..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/life_time.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - const std::vector> orders = { - // 0 - plugin - // 1 - executable_network - // 2 - infer_request - {0, 1, 2}, - {0, 2, 1}, - {1, 0, 2}, - {1, 2, 0}, - {2, 0, 1}, - {2, 1, 0} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, HoldersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(orders)), - HoldersTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 70d0ee093e2b26..880983fb32d9ce 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -58,6 +58,10 @@ std::vector disabledTestPatterns() { R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*CompareWithRefImpl.*)", // Issue: 124060 R"(.*smoke_GridSample/GridSampleLayerTest.Inference/.*model_type=f16.*)", + // Issue: 128924 + R"(.*OVClassModelTestP/OVClassModelTestP.ImportModelWithNullContextThrows.*)", + // Issue: 129802 + 
R"(.*smoke_OVClassBasicTestP/OVClassBasicTestP.registerExistingPluginThrows.*)", #if defined(_WIN32) R"(.*KernelCachingSupportCase.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)", #endif diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 0743b5837f2dab..288877da4c7bfa 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -121,6 +121,9 @@ std::vector disabledTestPatterns() { R"(.*eltwiseOpType=Mod_secondaryInputType=PARAMETER_opType=VECTOR_NetType=(f16|f32).*)", // Interpreter backend doesn't implement evaluate method for OP Multiply (by GroupNormalizationDecomposition) R"(.*ReferenceGroupNormalization.*_f64*)", + // Issue: 128924 + R"(.*OVClassModelTestP/OVClassModelTestP.ImportModelWithNullContextThrows.*)", + }; #ifdef _WIN32 diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp index d9a64036e8c668..a306d741276241 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp @@ -59,7 +59,6 @@ using OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS = O using OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported = OVCompiledModelClassBaseTestP; using OVClassExecutableNetworkGetConfigTest = OVCompiledModelClassBaseTestP; using OVClassExecutableNetworkSetConfigTest = OVCompiledModelClassBaseTestP; -using OVClassExecutableNetworkGetConfigTest = OVCompiledModelClassBaseTestP; class OVClassExecutableNetworkGetMetricTestForSpecificConfig : public OVClassNetworkTest, diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index bc6df2494a9012..8ac84b3abf0dd2 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -521,12 +521,12 @@ TEST_P(OVClassBasicTestP, SetConfigAllNoThrow) { TEST_P(OVClassBasicTestP, SetGetConfigForTbbTerminateThrows) { ov::Core ie = createCoreWithTemplate(); bool value = false; - ASSERT_NO_THROW(ie.set_property({ov::force_tbb_terminate(true)})); - ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_NO_THROW(ie.set_property(target_device, {ov::force_tbb_terminate(true)})); + ASSERT_NO_THROW(value = ie.get_property(ov::force_tbb_terminate.name()).as()); ASSERT_TRUE(value); - ASSERT_NO_THROW(ie.set_property({{ov::force_tbb_terminate(false)}})); - ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_NO_THROW(ie.set_property(target_device, {ov::force_tbb_terminate(false)})); + ASSERT_NO_THROW(value = ie.get_property(ov::force_tbb_terminate.name()).as()); ASSERT_FALSE(value); } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp index ff0710e3edc9af..98473b6b9cad6c 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp @@ -39,6 +39,13 @@ 
TEST_P(OVClassModelTestP, QueryModelWithKSO) { } } +TEST_P(OVClassModelTestP, ImportModelWithNullContextThrows) { + ov::Core ie = createCoreWithTemplate(); + ov::RemoteContext context; + std::istringstream stream("None"); + ASSERT_THROW(ie.import_model(stream, context, {}), ov::Exception); +} + TEST_P(OVClassQueryModelTest, QueryModelWithMatMul) { ov::Core ie = createCoreWithTemplate(); diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv index 2a7b644c116539..b06fa77c49833b 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv @@ -3,6 +3,7 @@ ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperti ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:},1.0 ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:},1.0 ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_,1.0 +ov_plugin_mandatory/OVClassModelTestP.ImportModelWithNullContextThrows/0,1 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3,1.0 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2,1.0 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1,1.0 From 8f3bc9d44c5c7701025afc0e37f5cfd9b5ae54ac Mon Sep 17 00:00:00 2001 From: Taylor Yeonbok Lee Date: Wed, 17 Jan 2024 14:23:17 -0800 Subject: [PATCH 053/122] Excluded softmax from async compilation if it is using optimized shape agnostic kernel (#22204) --- src/plugins/intel_gpu/src/graph/primitive_inst.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 996c90ccb99cec..5a9110edc19412 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -683,10 +683,10 @@ bool primitive_inst::use_async_compilation() { GPU_DEBUG_IF(debug_config->disable_async_compilation) { return false; } - return (_node->is_type() || - _node->is_type() || - _node->is_type() || - _node->is_type()); + + return (_node->is_type() || _node->is_type() || _node->is_type() || + (_node->is_type() && _node->get_selected_impl() && + _node->get_selected_impl()->get_kernel_name().find("softmax_gpu_ref") != std::string::npos)); } bool primitive_inst::update_impl() { From 6ad6ac7deef702b7bda0d46d5eaa196e20573823 Mon Sep 17 00:00:00 2001 From: River Li Date: Thu, 18 Jan 2024 06:58:18 +0800 Subject: [PATCH 054/122] Use exec_model_info.hpp to replace exec_graph_info.hpp (#22179) Co-authored-by: Ilya Lavrenov --- .../tensorflow/tests/compilation.cpp | 11 +++--- src/inference/dev_api/exec_graph_info.hpp | 38 ------------------- .../interface/ie_iplugin_internal.cpp | 2 +- src/inference/src/ie_common.cpp | 1 - src/plugins/intel_cpu/src/graph_dumper.cpp | 34 ++++++++--------- .../classes/convolution.cpp | 2 +- .../single_layer_tests/group_convolution.cpp | 6 +--
.../src/matmul_decompress_convert.cpp | 2 +- .../src/matmul_quantized_subgraph.cpp | 2 +- .../functional/test_utils/cpu_test_utils.hpp | 3 -- .../compiled_model/compiled_model_base.hpp | 10 ++--- .../behavior/compiled_model/import_export.hpp | 9 ++--- .../executable_network/exec_network_base.hpp | 10 ++--- .../ov_executable_network/exec_graph_info.hpp | 4 +- .../exec_network_base.hpp | 20 +++++----- .../executable_network/exec_graph_info.cpp | 9 +++-- .../runtime_precision.cpp | 23 ++++++----- .../src/base/snippets_test_utils.cpp | 5 ++- 18 files changed, 74 insertions(+), 117 deletions(-) delete mode 100644 src/inference/dev_api/exec_graph_info.hpp diff --git a/src/frontends/tensorflow/tests/compilation.cpp b/src/frontends/tensorflow/tests/compilation.cpp index 09e2466f5d7471..c4dff3f40f925e 100644 --- a/src/frontends/tensorflow/tests/compilation.cpp +++ b/src/frontends/tensorflow/tests/compilation.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - #include "gtest/gtest.h" +#include "openvino/frontend/manager.hpp" +#include "openvino/openvino.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "tf_utils.hpp" #include "utils.hpp" @@ -54,7 +53,7 @@ TEST_F(CompileModelsTests, ModelWithSplitConvConcat) ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); const auto runtime_model = compiled_model.get_runtime_model(); auto get_layer_type = [](const std::shared_ptr& node) { - return node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); }; const auto ops = runtime_model->get_ops(); EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr& node) { @@ -76,7 +75,7 @@ TEST_F(CompileModelsTests, ModelWithShapeOf) { ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); const auto runtime_model = compiled_model.get_runtime_model(); auto get_layer_type = [](const std::shared_ptr& node) { - return node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); }; const auto ops = runtime_model->get_ops(); // one Input, one Eltwise and one Output diff --git a/src/inference/dev_api/exec_graph_info.hpp b/src/inference/dev_api/exec_graph_info.hpp deleted file mode 100644 index e0541fecfd392e..00000000000000 --- a/src/inference/dev_api/exec_graph_info.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A file defines names to be used by plugins to create execution graph. - * It's an API between plugin and WorkBench tool. - * @file exec_graph_info.hpp - */ - -#pragma once - -#include - -#include "openvino/op/op.hpp" -#include "openvino/runtime/exec_model_info.hpp" - -/** - * @brief A namespace with const values for Execution Graph parameters names. - * @ingroup ie_dev_exec_graph - * Executable Graph Info is represented in CNNNetwork format with general ExecutionNode nodes inside - * including connections between the nodes. Each node describes an executable hardware-specific - * primitive and stores its parameters within ExecutionNode::get_rt_info map. - * There is a list of general keys for the parameters map. 
- */ -namespace ExecGraphInfoSerialization { - -using ov::exec_model_info::EXECUTION_ORDER; -using ov::exec_model_info::ExecutionNode; -using ov::exec_model_info::IMPL_TYPE; -using ov::exec_model_info::LAYER_TYPE; -using ov::exec_model_info::ORIGINAL_NAMES; -using ov::exec_model_info::OUTPUT_LAYOUTS; -using ov::exec_model_info::OUTPUT_PRECISIONS; -using ov::exec_model_info::PERF_COUNTER; -using ov::exec_model_info::RUNTIME_PRECISION; - -} // namespace ExecGraphInfoSerialization diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index 97571141411f89..5d67f5035b82fa 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -23,7 +23,6 @@ #include "cnn_network_ngraph_impl.hpp" #include "cpp/ie_cnn_network.h" #include "dev/converter_utils.hpp" -#include "exec_graph_info.hpp" #include "ie_api.h" #include "ie_icore.hpp" #include "ie_iextension.h" @@ -37,6 +36,7 @@ #include "openvino/core/runtime_attribute.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "openvino/runtime/threading/executor_manager.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/inference/src/ie_common.cpp b/src/inference/src/ie_common.cpp index a111adaca6a2e9..5e8aa69596c87a 100644 --- a/src/inference/src/ie_common.cpp +++ b/src/inference/src/ie_common.cpp @@ -11,7 +11,6 @@ #include #include -#include "exec_graph_info.hpp" #include "ie_blob.h" #include "ie_extension.h" #include "ie_iextension.h" diff --git a/src/plugins/intel_cpu/src/graph_dumper.cpp b/src/plugins/intel_cpu/src/graph_dumper.cpp index 0afa7c2ada5ce4..9c16c9ab73271d 100644 --- a/src/plugins/intel_cpu/src/graph_dumper.cpp +++ b/src/plugins/intel_cpu/src/graph_dumper.cpp @@ -4,16 +4,16 @@ #include "graph_dumper.h" -#include "utils/debug_capabilities.h" -#include "exec_graph_info.hpp" +#include "dnnl_debug.h" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" -#include +#include "openvino/runtime/exec_model_info.hpp" +#include "utils/debug_capabilities.h" -#include -#include -#include #include +#include +#include +#include namespace ov { namespace intel_cpu { @@ -28,16 +28,16 @@ std::map extract_node_metadata(const NodePtr &node) { if (node->getType() == Type::Input && node->isConstant()) { // We need to separate Input and Const layers - serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = "Const"; + serialization_info[ov::exec_model_info::LAYER_TYPE] = "Const"; } else { - serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = NameFromType(node->getType()); + serialization_info[ov::exec_model_info::LAYER_TYPE] = NameFromType(node->getType()); } // Original layers - serialization_info[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers(); + serialization_info[ov::exec_model_info::ORIGINAL_NAMES] = node->getOriginalLayers(); // Implementation type name - serialization_info[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType(); + serialization_info[ov::exec_model_info::IMPL_TYPE] = node->getPrimitiveDescriptorType(); std::string outputPrecisionsStr; if (!node->getChildEdges().empty()) { @@ -62,7 +62,7 @@ std::map extract_node_metadata(const NodePtr &node) { outputPrecisionsStr = node->getParentEdgeAt(0)->getMemory().getDesc().getPrecision().get_type_name(); } } - 
serialization_info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr; + serialization_info[ov::exec_model_info::OUTPUT_PRECISIONS] = outputPrecisionsStr; std::string outputLayoutsStr; auto outDescs = node->getSelectedPrimitiveDescriptor()->getConfig().outConfs; @@ -87,18 +87,18 @@ std::map extract_node_metadata(const NodePtr &node) { } else { outputLayoutsStr = dnnl::utils::fmt2str(dnnl::memory::format_tag::undef); } - serialization_info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr; + serialization_info[ov::exec_model_info::OUTPUT_LAYOUTS] = outputLayoutsStr; // Performance if (node->PerfCounter().avg() != 0) { - serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg()); + serialization_info[ov::exec_model_info::PERF_COUNTER] = std::to_string(node->PerfCounter().avg()); } else { - serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet + serialization_info[ov::exec_model_info::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet } - serialization_info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex()); + serialization_info[ov::exec_model_info::EXECUTION_ORDER] = std::to_string(node->getExecIndex()); - serialization_info[ExecGraphInfoSerialization::RUNTIME_PRECISION] = node->getRuntimePrecision().get_type_name(); + serialization_info[ov::exec_model_info::RUNTIME_PRECISION] = node->getRuntimePrecision().get_type_name(); return serialization_info; } @@ -164,7 +164,7 @@ std::shared_ptr dump_graph_as_ie_ngraph_net(const Graph &graph) { results.emplace_back(std::make_shared(get_inputs(node).back())); return_node = results.back(); } else { - return_node = std::make_shared( + return_node = std::make_shared( get_inputs(node), node->getSelectedPrimitiveDescriptor()->getConfig().outConfs.size()); for (size_t port = 0; port < return_node->get_output_size(); ++port) { diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp index 60205e1a0591a7..3cb45936f4709f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp @@ -83,7 +83,7 @@ void ConvolutionLayerCPUTest::checkBiasFusing(ov::CompiledModel& execNet) const return it->second.as(); }; - if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") { + if (getExecValue(ov::exec_model_info::LAYER_TYPE) == "Convolution") { foundConv = true; ASSERT_EQ(3, node->inputs().size()); break; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp index fba83b23446dbc..18ae356d62ae05 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp @@ -101,7 +101,7 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterfacesecond.as(); }; - if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") { + if (getExecValue(ov::exec_model_info::LAYER_TYPE) == "Convolution") { foundConv = true; ASSERT_EQ(3, node->inputs().size()); break; @@ -225,8 +225,8 @@ TEST_P(ExpectFallbackGroupConvolutionLayerCPUTest, CompareWithRefs) { OPENVINO_ASSERT(rtInfo.end() != 
it); return it->second.as(); }; - if ("Convolution" == getExecValue(ExecGraphInfoSerialization::LAYER_TYPE)) { - auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE); + if ("Convolution" == getExecValue(ov::exec_model_info::LAYER_TYPE)) { + auto primType = getExecValue(ov::exec_model_info::IMPL_TYPE); ASSERT_TRUE(selectedType != primType) << "primType is unexpected: " << primType; } } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp index 520a9613651288..22e89df313694d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp @@ -147,7 +147,7 @@ class MatMulDecompressConvertTest : public testing::WithParamInterfaceget_ops()) { - if (getExecValue(fcNode->get_rt_info(), ExecGraphInfoSerialization::LAYER_TYPE) == "FullyConnected") { + if (getExecValue(fcNode->get_rt_info(), ov::exec_model_info::LAYER_TYPE) == "FullyConnected") { const auto& constNode = fcNode->get_input_node_shared_ptr(1); ov::element::Type expectedType( getExecValue(constNode->get_rt_info(), ov::exec_model_info::OUTPUT_PRECISIONS)); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp index c99e21c34cc9d9..590502e858e112 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp @@ -118,7 +118,7 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterfacesecond.as(); }; if (node->get_friendly_name() == nodeName) { - auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE); + auto primType = getExecValue(ov::exec_model_info::IMPL_TYPE); ASSERT_TRUE(primTypeCheck(primType)) << "primType is unexpected: " << primType << " Expected: " << selectedType; ASSERT_EQ(node->get_output_element_type(0), outType); ASSERT_EQ(node->get_input_element_type(0), inType); diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp index 9bd423ced2f973..6de63875d7c8a2 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp @@ -10,9 +10,6 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" -// To be removed -#include "exec_graph_info.hpp" - namespace CPUTestUtils { typedef enum { undef, diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index bfbd7437668efb..413d3cecebeb86 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -2,7 +2,6 @@ // SPDX-License-Identifcorer: Apache-2.0 // -#include #include #include #include @@ -13,6 +12,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/concat.hpp" +#include "openvino/runtime/exec_model_info.hpp" 
#include "openvino/runtime/tensor.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" @@ -367,10 +367,10 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - EXPECT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + EXPECT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -420,7 +420,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; EXPECT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -429,7 +429,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector origFromExecLayerSep = ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp index 40eaa9c54c26d0..bfee36762478ef 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp @@ -4,15 +4,14 @@ #include -#include -#include #include "base/ov_behavior_test_utils.hpp" -#include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/common_utils.hpp" #include "common_test_utils/file_utils.hpp" - -#include "functional_test_utils/plugin_cache.hpp" +#include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/exec_model_info.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp index db7b2ba376a3db..13904d2fa055ce 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "base/behavior_test_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/file_utils.hpp" +#include 
"openvino/runtime/exec_model_info.hpp" #include "openvino/core/model.hpp" #include "openvino/op/relu.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" @@ -183,10 +183,10 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + ASSERT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -236,7 +236,7 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; ASSERT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -244,7 +244,7 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector origFromExecLayerSep = ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index 8479dd5ad1e800..b8f6f691cf5737 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -5,10 +5,10 @@ #include -#include "exec_graph_info.hpp" #include "base/ov_behavior_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "pugixml.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp index 372a62a504540c..d035e65a811a89 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp @@ -2,22 +2,22 @@ // SPDX-License-Identifcorer: Apache-2.0 // -#include #include -#include #include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/file_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/runtime/tensor.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include 
"common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" #include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" -#include "common_test_utils/subgraph_builders/concat_with_params.hpp" #include "common_test_utils/subgraph_builders/single_split.hpp" #include "common_test_utils/subgraph_builders/split_concat.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace test { @@ -291,10 +291,10 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - EXPECT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + EXPECT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -343,7 +343,7 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; EXPECT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -352,7 +352,7 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector origFromExecLayerSep = ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp index 38072636f3d370..cebdc2ded6775d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "common_test_utils/test_common.hpp" -#include #include "behavior/executable_network/exec_graph_info.hpp" +#include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/exec_model_info.hpp" + namespace ExecutionGraphTests { const char serialize_test_model[] = R"V0G0N( @@ -490,7 +491,7 @@ TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) { names.insert(op->get_friendly_name()); const auto & rtInfo = op->get_rt_info(); - auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE); + auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE); ASSERT_NE(rtInfo.end(), it); } }; diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp 
b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp index 9052182aae4f89..ee78994247f238 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp @@ -2,23 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include - -#include "exec_graph_info.hpp" +#include "execution_graph_tests/runtime_precision.hpp" #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/ov_plugin_cache.hpp" -#include "functional_test_utils/skip_tests_config.hpp" #include "common_test_utils/node_builders/binary_convolution.hpp" -#include "common_test_utils/node_builders/eltwise.hpp" #include "common_test_utils/node_builders/constant.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" +#include "functional_test_utils/ov_plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/runtime/exec_model_info.hpp" -#include "execution_graph_tests/runtime_precision.hpp" +#include +#include +#include +#include +#include +#include namespace ExecutionGraphTests { diff --git a/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp b/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp index d6bf5c5b487c42..433da771cb2b6d 100644 --- a/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp +++ b/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp @@ -3,8 +3,9 @@ // #include "shared_test_classes/base/snippets_test_utils.hpp" + #include "functional_test_utils/skip_tests_config.hpp" -#include "exec_graph_info.hpp" +#include "openvino/runtime/exec_model_info.hpp" namespace ov { namespace test { @@ -17,7 +18,7 @@ void SnippetsTestsCommon::validateNumSubgraphs() { size_t num_subgraphs = 0; size_t num_nodes = 0; for (const auto &op : compiled_model->get_ops()) { - auto layer_type = op->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + auto layer_type = op->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); // todo: Ignore reorders only after (Const or Inputs) or before outputs. // Alternatively, force plain layouts for convolutions, matmuls, FCs, etc., so reorders won't be inserted. 
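        // For orientation, a hedged sketch of what this loop computes (a
        // paraphrase, not an addition to the test): snippets fusion shows up in
        // the runtime model as nodes whose LAYER_TYPE equals "Subgraph", so the
        // body effectively amounts to
        //     if (layer_type == "Subgraph") num_subgraphs++;
        //     num_nodes++;
        // and both counters are compared against expected values afterwards.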
if (layer_type == "Const" || From 87806378e0eb54f095fc9454ba4480bc6f424ff2 Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Thu, 18 Jan 2024 14:25:38 +0800 Subject: [PATCH 055/122] [AUTO Plugin][Func Test] Clean 1.0 related code in test (#22222) Signed-off-by: Zhai, Xuejun --- .../functional/behavior/auto_func_test.cpp | 25 ------------------- .../core_integration.cpp | 2 -- .../exec_network_base.cpp | 1 - .../ov_exec_net_import_export.cpp | 4 +-- .../ov_executable_network/properties.cpp | 3 --- .../behavior/ov_plugin/caching_tests.cpp | 6 ++--- .../behavior/ov_plugin/core_integration.cpp | 4 +-- .../behavior/ov_plugin/properties_tests.cpp | 3 +-- 8 files changed, 6 insertions(+), 42 deletions(-) diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp index f0941660d12d30..b51f2b7a7ae6b5 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -9,7 +9,6 @@ #include #include "common_test_utils/file_utils.hpp" -#include "ie_plugin_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/opsets/opset11.hpp" @@ -571,18 +570,6 @@ class MockPluginSupportBatchAndContext : public MockPluginBase { return decltype(ov::streams::num)::value_type{2}; } else if (name == ov::compilation_num_threads.name()) { return decltype(ov::compilation_num_threads)::value_type{4}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::internal::supported_properties) { return decltype(ov::internal::supported_properties)::value_type( {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); @@ -690,18 +677,6 @@ class MockPlugin : public MockPluginBase { return decltype(ov::enable_profiling)::value_type{false}; } else if (name == ov::streams::num.name()) { return decltype(ov::streams::num)::value_type{2}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::internal::supported_properties) { return decltype(ov::internal::supported_properties)::value_type( {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp index 20cb407b3ee694..208c136c9681d2 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -7,8 +7,6 @@ using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; - namespace { // // 
Executable Network GetMetric diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp index 60d1c7b6a90e3b..b22383c2193543 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -3,7 +3,6 @@ // #include "behavior/compiled_model/compiled_model_base.hpp" -#include "ie_plugin_config.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp index f264a55c667a9f..290f7ede951130 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -1,10 +1,8 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include - #include "behavior/compiled_model/import_export.hpp" -#include "ie_plugin_config.hpp" +#include "common_test_utils/test_constants.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index e5f8fa28768bf2..18f0a61a64a003 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -46,9 +46,6 @@ auto default_affinity = [] { const std::vector multi_properties = { {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::num_streams(ov::streams::AUTO)}, - {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, }; INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index 9ab07c0fce5e2b..32ab0eb2de956c 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -4,9 +4,9 @@ #include "behavior/ov_plugin/caching_tests.hpp" -#include -#include -#include +#include "ov_ops/multiclass_nms_ie_internal.hpp" +#include "ov_ops/nms_ie_internal.hpp" +#include "ov_ops/nms_static_shape_ie.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index c5afda521a5ca1..0fe3abe27364f3 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -4,15 +4,13 @@ #include "behavior/ov_plugin/core_integration.hpp" -#include - #include "behavior/ov_plugin/core_integration_sw.hpp" #include "behavior/ov_plugin/query_model.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/core.hpp" +#include "openvino/runtime/properties.hpp" using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; // defined in plugin_name.cpp extern const char* cpu_plugin_file_name; diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index bcbf29760effe1..1a6f122e0b590b 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -4,10 +4,9 @@ #include "behavior/ov_plugin/properties_tests.hpp" -#include +#include "openvino/runtime/auto/properties.hpp" using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; namespace { const std::vector multi_Auto_properties = { From 1c70677b36d4a627e694be60db0d0deee13d5220 Mon Sep 17 00:00:00 2001 From: Andrew Kwangwoong Park Date: Thu, 18 Jan 2024 16:57:34 +0900 Subject: [PATCH 056/122] [GPU] Fuse type conversion only reorders to the prev FC nodes (#22103) * Add transformation which fuse convert with FC or FC compressed * Fix to correct output data types * Add transformation test --- .../intel_gpu/src/graph/fully_connected.cpp | 8 +-- .../transformations/fc_convert_fusion.cpp | 62 +++++++++++++++++ .../transformations/fc_convert_fusion.hpp | 19 ++++++ .../src/plugin/transformations_pipeline.cpp | 2 + .../tests/unit/shape_infer/matmul_si_test.cpp | 2 +- .../test_cases/fully_connected_gpu_test.cpp | 2 +- .../fc_convert_fusion_test.cpp | 66 +++++++++++++++++++ 7 files changed, 155 insertions(+), 6 deletions(-) create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp create mode 100644 src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp index 7dd86bd52b3e6a..d566339103bf48 100644 --- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp @@ -97,8 +97,8 @@ layout fully_connected_inst::calc_output_layout(fully_connected_node const& node auto input_pshape = input_layout.get_partial_shape(); auto weights_layout = *impl_param.weights_layout; auto weights_pshape = weights_layout.get_partial_shape(); - auto output_type = input_layout.data_type; - if ((output_type == data_types::u8 || output_type == data_types::i8) && desc->output_data_types[0]) + auto output_type = desc->output_data_types[0].value_or(input_layout.data_type); + if (data_type_traits::is_i8_u8(input_layout.data_type) && desc->output_data_types[0]) output_type = *desc->output_data_types[0]; if (impl_param.has_fused_primitives()) { @@ -139,8 +139,8 @@ std::vector fully_connected_inst::calc_output_layouts(fully_connected_no auto input_layout = impl_param.get_input_layout(); auto weights_layout = *impl_param.weights_layout; - auto output_type = input_layout.data_type; - 
if (data_type_traits::is_i8_u8(output_type) && desc->output_data_types[0]) + auto output_type = desc->output_data_types[0].value_or(input_layout.data_type); + if (data_type_traits::is_i8_u8(input_layout.data_type) && desc->output_data_types[0]) output_type = *desc->output_data_types[0]; if (impl_param.has_fused_primitives()) { diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp new file mode 100644 index 00000000000000..a5d798e4c2721c --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fc_convert_fusion.hpp" + +#include "intel_gpu/op/fully_connected.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/utils.hpp" + +namespace ov { +namespace intel_gpu { + +FullyConnectedConvertFusion::FullyConnectedConvertFusion() { + using namespace ov::pass::pattern; + + auto data = any_input(); + auto weights = any_input(); + auto fully_connected = wrap_type({data, weights}, consumers_count(1)); + auto fully_connected_compressed = wrap_type({data, weights, any_input(), any_input()}, consumers_count(1)); + auto fc = std::make_shared(OutputVector{fully_connected, fully_connected_compressed}); + auto convert = wrap_type({fc}, type_matches(element::f32)); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + + const auto& m_data = pattern_map.at(data).get_node_shared_ptr(); + const auto& m_weights = pattern_map.at(weights).get_node_shared_ptr(); + const auto& m_convert = pattern_map.at(convert).get_node_shared_ptr(); + auto output_type = m_convert->get_output_element_type(0); + + std::shared_ptr m_fc = nullptr; + std::shared_ptr new_fc = nullptr; + auto it = pattern_map.find(fully_connected); + if (it != pattern_map.end()) { + m_fc = it->second.get_node_shared_ptr(); + new_fc = std::make_shared(m_data, m_weights, output_type); + } else { + m_fc = pattern_map.at(fully_connected_compressed).get_node_shared_ptr(); + new_fc = std::make_shared(m_data, + m_weights, + m_fc->input_value(2), + m_fc->input_value(3), + output_type); + } + new_fc->set_friendly_name(m_convert->get_friendly_name()); + copy_runtime_info(m.get_matched_nodes(), new_fc); + replace_node(m_convert, new_fc); + + return true; + }; + + auto m = std::make_shared(convert, "FullyConnectedConvertFusion"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp new file mode 100644 index 00000000000000..44db1882f8e87f --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" + +namespace ov { +namespace intel_gpu { + +class FullyConnectedConvertFusion: public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("FullyConnectedConvertFusion", "0"); + FullyConnectedConvertFusion(); +}; + +} // namespace intel_gpu +} // namespace ov diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index cea3bbfa391cf0..8bd282d655d564 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -122,6 +122,7 @@ #include "plugin/transformations/binary_conv_to_conv.hpp" #include "plugin/transformations/move_convert_after_gather.hpp" #include "plugin/transformations/kv_cache_fusion.hpp" +#include "plugin/transformations/fc_convert_fusion.hpp" #include "transformations/low_precision/mark_dequantization_subgraph.hpp" #include "low_precision/pull_reshape_through_dequantization.hpp" @@ -698,6 +699,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { manager.register_pass(); manager.register_pass(); manager.register_pass(); + manager.register_pass(); // This is supposed to be the last pass to ensure that we don't have name collisions until // GPU plugin stops using friendly names for program creation diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp index 26162165ee0b2a..a641305b4329af 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp @@ -110,7 +110,7 @@ INSTANTIATE_TEST_SUITE_P(smoke, fully_connected_test, { layout{ov::PartialShape{10, 1024}, data_types::f32, format::bfyx}, layout{ov::PartialShape{1000, 1024}, data_types::f32, format::bfyx}, - data_types::i32, false, false, + data_types::f32, false, false, layout{ov::PartialShape{10, 1000}, data_types::f32, format::bfyx} }, { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index 5ac5dc06f1ed6f..945d52a2f57ec0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -1212,7 +1212,7 @@ class fully_connected_gpu_tests: public ::testing::Test { input_layout("input", input_mem->get_layout()), data("weights", weights_mem), data("scale", scale_mem), - fully_connected("fc_prim", input_info("input"), "weights", "", "scale", "", data_types::f32, padding(), 2, 2) + fully_connected("fc_prim", input_info("input"), "weights", "", "scale", "", data_types::f16, padding(), 2, 2) ); auto config = get_test_default_config(engine); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp new file mode 100644 index 00000000000000..0440918e9f8caf --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp @@ -0,0 +1,66 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include +#include +#include "common_test_utils/ov_test_utils.hpp" +#include + +#include + +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/parameter.hpp" +#include "intel_gpu/op/fully_connected.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +using namespace testing; +using namespace ov::intel_gpu; + +TEST_F(TransformationTestsF, FullyConnectedConvertFusionTest1) { + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{ -1, 16 }); + auto weights_const = 
ov::op::v0::Constant::create(ov::element::u8, ov::Shape{ 32, 16 }, { 1 }); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 }); + auto zp_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 }); + auto fc_compressed = std::make_shared(input, weights_const, scale_const, zp_const); + auto convert = std::make_shared(fc_compressed, ov::element::f32); + + model = std::make_shared(ov::NodeVector{convert}, ov::ParameterVector{input}); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{ -1, 16 }); + auto weights_const = ov::op::v0::Constant::create(ov::element::u8, ov::Shape{ 32, 16 }, { 1 }); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 }); + auto zp_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 }); + auto fc_compressed = std::make_shared(input, weights_const, scale_const, zp_const, ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{ fc_compressed }, ov::ParameterVector{ input }); + } +} + +TEST_F(TransformationTestsF, FullyConnectedConvertFusionTest2) { + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{3, 2, 2}); + auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{2, 2}, {1}); + auto matmul = std::make_shared(input1, input2); + auto convert = std::make_shared(matmul, ov::element::f32); + + model = std::make_shared(ov::NodeVector{convert}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{3, 2, 2}); + auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{2, 2}, {1}); + auto matmul = std::make_shared(input1, input2, ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{ matmul }, ov::ParameterVector{ input1 }); + } +} From 438a3db011e6cfc4921bf9cb2f308831b9d74eac Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Thu, 18 Jan 2024 12:09:18 +0400 Subject: [PATCH 057/122] [OP CONFORMANCE] Extend expected_failures_CPU by sporadic failure test (#22226) --- .../github/skip_configs/CPU/expected_failures_OP.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv index 1cd7e681a499f4..e67b2c0fa73f01 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv @@ -209,3 +209,4 @@ conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dyn conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=(),0.000352708 conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=(),6.62629e-06 conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=(),2.89071e-06 
+conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=dynamic_IR=e255ef2321233444ce6e4fdeb513a9b271987457aa9bd456948b64f589de1e2b_Device=CPU_Config=(),9.4639279043362649e-05

From fb52c880da411c247ff3973c5bc9de0d40a9ed34 Mon Sep 17 00:00:00 2001
From: Xuejun Zhai
Date: Thu, 18 Jan 2024 16:18:26 +0800
Subject: [PATCH 058/122] Xuejun/clean hetero plugin (#22209)

* [HETERO Plugin] clean src code & test with 1.0

Signed-off-by: Zhai, Xuejun

* [HETERO Plugin] remove variable dump_graph

Signed-off-by: Zhai, Xuejun

* [HETERO Plugin] remove TARGET_FALLBACK

Signed-off-by: Zhai, Xuejun

* Fix errors

Signed-off-by: Zhai, Xuejun

* Remove 1.0 tests related to TARGET_FALLBACK, which are all covered by 2.0 tests

---------

Signed-off-by: Zhai, Xuejun
---
 src/inference/src/dev/core_impl.cpp           |   7 +-
 .../tests/functional/caching_test.cpp         |  10 +-
 src/plugins/hetero/src/compiled_model.cpp     |  17 --
 src/plugins/hetero/src/config.cpp             |  32 +--
 src/plugins/hetero/src/config.hpp             |   3 -
 src/plugins/hetero/src/graph_debug_dump.hpp   |   2 +-
 src/plugins/hetero/src/plugin.cpp             |  27 +--
 .../hetero/tests/functional/hetero_tests.cpp  |  31 ---
 .../tests/functional/properties_tests.cpp     |  33 ---
 .../ov_plugin/core_threading_tests.cpp        |   2 +-
 .../gpu_dyn_batch_shape_tests.cpp             |   2 +-
 .../behavior/plugin/core_integration.cpp      |  48 ----
 .../executable_network/get_metric.hpp         |  16 --
 .../behavior/plugin/core_integration.hpp      | 207 ------------------
 14 files changed, 17 insertions(+), 420 deletions(-)

diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp
index 514dd798149249..4d9a5a6b8f6545 100644
--- a/src/inference/src/dev/core_impl.cpp
+++ b/src/inference/src/dev/core_impl.cpp
@@ -1526,11 +1526,8 @@ ov::SoPtr ov::CoreImpl::load_model_from_cache(
 ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& user_config) const {
     ov::AnyMap property_config;
 
-    // 0. Move ov::device::priorities / TARGET_FALLBACK key to property_config
-    auto device_priorities_it = user_config.find("TARGET_FALLBACK");
-    if (device_priorities_it == user_config.end()) {
-        device_priorities_it = user_config.find(ov::device::priorities.name());
-    }
+    // 0. 
Move ov::device::priorities key to property_config + auto device_priorities_it = user_config.find(ov::device::priorities.name()); if (device_priorities_it != user_config.end()) { property_config[device_priorities_it->first] = device_priorities_it->second.as(); } diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index a8f10175218da5..8956d5dfa2eddf 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -1883,7 +1883,7 @@ TEST_P(CachingTest, LoadHetero_TargetFallbackFromCore) { }); testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock"}}); m_testFunction(core); }); // Ensure that only 1 blob (for Hetero) is created @@ -1900,7 +1900,7 @@ TEST_P(CachingTest, LoadHetero_TargetFallbackFromCore) { } testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock"}}); m_testFunction(core); comp_models.clear(); }); @@ -2020,7 +2020,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { }); testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.1"}}); m_testFunction(core); }); } @@ -2034,7 +2034,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { EXPECT_CALL(*net, export_model(_)).Times(0); } testLoad([&](ov::Core& core) { - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.1"}}); core.set_property(ov::cache_dir(m_cacheDir)); m_testFunction(core); }); @@ -2048,7 +2048,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { EXPECT_CALL(net, export_model(_)).Times(1); }); testLoad([&](ov::Core& core) { - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.51"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.51"}}); core.set_property(ov::cache_dir(m_cacheDir)); m_testFunction(core); comp_models.clear(); diff --git a/src/plugins/hetero/src/compiled_model.cpp b/src/plugins/hetero/src/compiled_model.cpp index ac54825dfd0647..35e587e85f4d75 100644 --- a/src/plugins/hetero/src/compiled_model.cpp +++ b/src/plugins/hetero/src/compiled_model.cpp @@ -8,7 +8,6 @@ #include "async_infer_request.hpp" #include "graph_debug_dump.hpp" -#include "ie_plugin_config.hpp" #include "itt.hpp" #include "op/device_subgraph.hpp" #include "openvino/op/util/op_types.hpp" @@ -268,7 +267,6 @@ std::shared_ptr ov::hetero::CompiledModel::get_hetero_ } ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { - OPENVINO_SUPPRESS_DEPRECATED_START const auto& add_ro_properties = [](const std::string& name, std::vector& properties) { properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO}); }; @@ -280,13 +278,6 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { ov::hetero::number_of_submodels}; return ro_properties; }; 
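    // With the legacy SUPPORTED_METRICS / SUPPORTED_CONFIG_KEYS branches removed
    // below, property discovery goes through the single API 2.0 query. A hedged
    // caller-side sketch (variable names illustrative, not taken from the tests):
    //     auto props = compiled_model.get_property(ov::supported_properties);
    //     for (const auto& p : props)
    //         std::cout << p << " mutable=" << p.is_mutable() << "\n";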
- const auto& to_string_vector = [](const std::vector& properties) { - std::vector ret; - for (const auto& property : properties) { - ret.emplace_back(property); - } - return ret; - }; if (ov::supported_properties == name) { auto supported_properties = default_ro_properties(); @@ -294,13 +285,6 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { add_ro_properties(ov::device::properties.name(), supported_properties); add_ro_properties(ov::device::priorities.name(), supported_properties); return decltype(ov::supported_properties)::value_type(supported_properties); - } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { - auto metrics = default_ro_properties(); - add_ro_properties(METRIC_KEY(SUPPORTED_METRICS), metrics); - add_ro_properties(METRIC_KEY(SUPPORTED_CONFIG_KEYS), metrics); - return to_string_vector(metrics); - } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - return to_string_vector(m_cfg.get_supported()); } else if (ov::device::properties == name) { ov::AnyMap all_devices = {}; for (const auto& comp_model_desc : m_compiled_submodels) { @@ -340,7 +324,6 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { return decltype(ov::hetero::number_of_submodels)::value_type{m_compiled_submodels.size()}; } return m_cfg.get(name); - OPENVINO_SUPPRESS_DEPRECATED_END } const std::vector>& ov::hetero::CompiledModel::inputs() const { diff --git a/src/plugins/hetero/src/config.cpp b/src/plugins/hetero/src/config.cpp index 35cae2b56d87da..d182a684d9e4c1 100644 --- a/src/plugins/hetero/src/config.cpp +++ b/src/plugins/hetero/src/config.cpp @@ -4,25 +4,21 @@ #include "config.hpp" -#include "ie/ie_plugin_config.hpp" #include "openvino/runtime/internal_properties.hpp" #include "openvino/runtime/properties.hpp" using namespace ov::hetero; -Configuration::Configuration() : dump_graph(false) {} +Configuration::Configuration() {} Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) { - OPENVINO_SUPPRESS_DEPRECATED_START *this = defaultCfg; for (const auto& it : config) { const auto& key = it.first; const auto& value = it.second; - if (HETERO_CONFIG_KEY(DUMP_GRAPH_DOT) == key) { - dump_graph = value.as(); - } else if ("TARGET_FALLBACK" == key || ov::device::priorities == key) { + if (ov::device::priorities == key) { device_priorities = value.as(); } else { if (throwOnUnsupported) @@ -30,36 +26,23 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defa device_properties.emplace(key, value); } } - OPENVINO_SUPPRESS_DEPRECATED_END } ov::Any Configuration::get(const std::string& name) const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)) { - return {dump_graph}; - } else if (name == "TARGET_FALLBACK" || name == ov::device::priorities) { + if (name == ov::device::priorities) { return {device_priorities}; } else { OPENVINO_THROW("Property was not found: ", name); } - OPENVINO_SUPPRESS_DEPRECATED_END } std::vector Configuration::get_supported() const { - OPENVINO_SUPPRESS_DEPRECATED_START - static const std::vector names = {HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), - "TARGET_FALLBACK", - ov::device::priorities}; + static const std::vector names = {ov::device::priorities}; return names; - OPENVINO_SUPPRESS_DEPRECATED_END } ov::AnyMap Configuration::get_hetero_properties() const { - OPENVINO_SUPPRESS_DEPRECATED_START - return {{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), dump_graph}, - {"TARGET_FALLBACK", 
device_priorities}, - {ov::device::priorities.name(), device_priorities}}; - OPENVINO_SUPPRESS_DEPRECATED_END + return {{ov::device::priorities.name(), device_priorities}}; } ov::AnyMap Configuration::get_device_properties() const { @@ -67,8 +50,5 @@ ov::AnyMap Configuration::get_device_properties() const { } bool Configuration::dump_dot_files() const { - bool res = dump_graph; - if (std::getenv("OPENVINO_HETERO_VISUALIZE")) - res = true; - return res; + return std::getenv("OPENVINO_HETERO_VISUALIZE") != NULL; } \ No newline at end of file diff --git a/src/plugins/hetero/src/config.hpp b/src/plugins/hetero/src/config.hpp index 65606011976cb3..92878e9c785782 100644 --- a/src/plugins/hetero/src/config.hpp +++ b/src/plugins/hetero/src/config.hpp @@ -35,9 +35,6 @@ struct Configuration { std::string device_priorities; ov::AnyMap device_properties; - -private: - bool dump_graph; }; } // namespace hetero } // namespace ov \ No newline at end of file diff --git a/src/plugins/hetero/src/graph_debug_dump.hpp b/src/plugins/hetero/src/graph_debug_dump.hpp index 388d2906a21f17..14e153290a3362 100644 --- a/src/plugins/hetero/src/graph_debug_dump.hpp +++ b/src/plugins/hetero/src/graph_debug_dump.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/openvino.hpp" namespace ov { namespace hetero { diff --git a/src/plugins/hetero/src/plugin.cpp b/src/plugins/hetero/src/plugin.cpp index f5be5822c49b40..91eceb66da4a81 100644 --- a/src/plugins/hetero/src/plugin.cpp +++ b/src/plugins/hetero/src/plugin.cpp @@ -13,7 +13,6 @@ #include #include "compiled_model.hpp" -#include "ie/ie_plugin_config.hpp" #include "itt.hpp" #include "openvino/runtime/device_id_parser.hpp" #include "openvino/runtime/internal_properties.hpp" @@ -129,11 +128,6 @@ void ov::hetero::Plugin::set_property(const ov::AnyMap& properties) { } ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyMap& properties) const { - OPENVINO_SUPPRESS_DEPRECATED_START - const auto& add_ro_properties = [](const std::string& name, std::vector& properties) { - properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO}); - }; - const auto& default_ro_properties = []() { std::vector ro_properties{ov::supported_properties, ov::device::full_name, @@ -144,25 +138,9 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyM std::vector rw_properties{ov::device::priorities}; return rw_properties; }; - const auto& to_string_vector = [](const std::vector& properties) { - std::vector ret; - for (const auto& property : properties) { - ret.emplace_back(property); - } - return ret; - }; Configuration full_config{properties, m_cfg}; - if (METRIC_KEY(SUPPORTED_METRICS) == name) { - auto metrics = default_ro_properties(); - - add_ro_properties(METRIC_KEY(SUPPORTED_METRICS), metrics); - add_ro_properties(METRIC_KEY(SUPPORTED_CONFIG_KEYS), metrics); - add_ro_properties(METRIC_KEY(IMPORT_EXPORT_SUPPORT), metrics); - return to_string_vector(metrics); - } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - return to_string_vector(full_config.get_supported()); - } else if (ov::supported_properties == name) { + if (ov::supported_properties == name) { auto ro_properties = default_ro_properties(); auto rw_properties = default_rw_properties(); @@ -176,8 +154,6 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyM ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}; } else if (ov::device::full_name == name) { return 
decltype(ov::device::full_name)::value_type{get_device_name()}; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { return decltype(ov::internal::caching_properties)::value_type{ov::hetero::caching_device_properties.name()}; } else if (ov::hetero::caching_device_properties == name) { @@ -187,7 +163,6 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyM } else { return full_config.get(name); } - OPENVINO_SUPPRESS_DEPRECATED_END } ov::Any ov::hetero::Plugin::caching_device_properties(const std::string& device_priorities) const { diff --git a/src/plugins/hetero/tests/functional/hetero_tests.cpp b/src/plugins/hetero/tests/functional/hetero_tests.cpp index 55440556f0552b..3e2a64ee103584 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.cpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.cpp @@ -8,7 +8,6 @@ #include #include "common_test_utils/file_utils.hpp" -#include "ie_plugin_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/opsets/opset11.hpp" @@ -593,7 +592,6 @@ class MockPluginReshape : public MockPluginBase { RO_property(ov::available_devices.name()), RO_property(ov::loaded_from_cache.name()), RO_property(ov::device::uuid.name()), - RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)), }; // the whole config is RW before network is loaded. const static std::vector rwProperties{ @@ -635,23 +633,9 @@ class MockPluginReshape : public MockPluginBase { std::vector capabilities; capabilities.push_back(ov::device::capability::EXPORT_IMPORT); return decltype(ov::device::capabilities)::value_type(capabilities); - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { std::vector caching_properties = {ov::device::uuid}; return decltype(ov::internal::caching_properties)::value_type(caching_properties); - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::loaded_from_cache.name()) { return m_loaded_from_cache; } else if (name == ov::enable_profiling.name()) { @@ -695,7 +679,6 @@ class MockPluginSubtract : public MockPluginBase { RO_property(ov::available_devices.name()), RO_property(ov::loaded_from_cache.name()), RO_property(ov::device::uuid.name()), - RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)), }; // the whole config is RW before network is loaded. 
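        // The same trimming is applied to both mock plugins: the boolean
        // METRIC_KEY(IMPORT_EXPORT_SUPPORT) metric is gone, and import/export
        // support is advertised only through ov::device::capabilities. A sketch
        // of the surviving branch (mirroring the one kept in MockPluginReshape):
        //     if (name == ov::device::capabilities)
        //         return decltype(ov::device::capabilities)::value_type{
        //             ov::device::capability::EXPORT_IMPORT};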
const static std::vector rwProperties{ @@ -737,23 +720,9 @@ class MockPluginSubtract : public MockPluginBase { return m_loaded_from_cache; } else if (name == ov::enable_profiling.name()) { return decltype(ov::enable_profiling)::value_type{m_profiling}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { std::vector caching_properties = {ov::device::uuid}; return decltype(ov::internal::caching_properties)::value_type(caching_properties); - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } OPENVINO_THROW("Unsupported property: ", name); } diff --git a/src/plugins/hetero/tests/functional/properties_tests.cpp b/src/plugins/hetero/tests/functional/properties_tests.cpp index d72a4237134cdd..b4518d5707218f 100644 --- a/src/plugins/hetero/tests/functional/properties_tests.cpp +++ b/src/plugins/hetero/tests/functional/properties_tests.cpp @@ -21,35 +21,6 @@ TEST_F(HeteroTests, get_property_supported_properties) { } } -TEST_F(HeteroTests, get_property_supported_metrics) { - const std::vector supported_metrics = {ov::supported_properties.name(), - ov::device::full_name.name(), - ov::device::capabilities.name(), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(IMPORT_EXPORT_SUPPORT)}; - auto actual_supported_metrics = - core.get_property("HETERO", METRIC_KEY(SUPPORTED_METRICS)).as>(); - EXPECT_EQ(supported_metrics.size(), actual_supported_metrics.size()); - for (auto& supported_metric : supported_metrics) { - ASSERT_TRUE(std::find(actual_supported_metrics.begin(), actual_supported_metrics.end(), supported_metric) != - actual_supported_metrics.end()); - } -} - -TEST_F(HeteroTests, get_property_supported_configs) { - const std::vector supported_configs = {"HETERO_DUMP_GRAPH_DOT", - "TARGET_FALLBACK", - ov::device::priorities.name()}; - auto actual_supported_configs = - core.get_property("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); - EXPECT_EQ(supported_configs.size(), actual_supported_configs.size()); - for (auto& supported_config : supported_configs) { - ASSERT_TRUE(std::find(actual_supported_configs.begin(), actual_supported_configs.end(), supported_config) != - actual_supported_configs.end()); - } -} - TEST_F(HeteroTests, get_property_internal_supported_properties) { const std::vector supported_properties = {ov::internal::caching_properties}; auto actual_supported_properties = core.get_property("HETERO", ov::internal::supported_properties); @@ -71,8 +42,4 @@ TEST_F(HeteroTests, set_property_device_priorities) { EXPECT_EQ("", core.get_property("HETERO", ov::device::priorities)); core.set_property("HETERO", ov::device::priorities("MOCK0,MOCK1")); EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", ov::device::priorities)); - EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", "TARGET_FALLBACK").as()); - core.set_property("HETERO", {{"TARGET_FALLBACK", "MOCK1,MOCK0"}}); - EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", ov::device::priorities)); - EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", "TARGET_FALLBACK").as()); } \ No newline at end of file diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp index 7e3ab594a90e50..0139db99ac116d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp @@ -7,7 +7,7 @@ namespace { const Params params[] = { std::tuple{ov::test::utils::DEVICE_CPU, {{ov::enable_profiling(true)}}}, - std::tuple{ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}, + std::tuple{ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), ov::test::utils::DEVICE_CPU}}}, }; const Params paramsStreams[] = { diff --git a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp index 7e479025dab3da..ddc47d19f96b1d 100644 --- a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp @@ -119,7 +119,7 @@ auto config = []() { }; auto hetero_config = []() { - return ov::AnyMap{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}}; + return ov::AnyMap{{ov::device::priorities.name(), ov::test::utils::DEVICE_GPU}}; }; const std::vector input_shapes = { diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp index 858ba1f0d7b254..8ae5896dd84498 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -65,54 +65,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(ov::test::utils::DEVICE_TEMPLATE)); -// -// IE Class SetConfig -// - -class IEClassSetConfigTestHETERO : public BehaviorTestsUtils::IEClassNetworkTest, - public BehaviorTestsUtils::IEPluginTestBase { - void SetUp() override { - IEClassNetworkTest::SetUp(); - IEPluginTestBase::SetUp(); - } -}; - -TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) { - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = p.as(); - - ASSERT_TRUE(dump); - } - - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = p.as(); - - ASSERT_FALSE(dump); - } - - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = 
p.as(); - - ASSERT_TRUE(dump); - } -} - // // IE Class GetConfig // diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp index cf55d7514b2330..5f9fa25bf2c65b 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp @@ -351,20 +351,4 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThro std::cout << "Exe network name: " << std::endl << networkname << std::endl; } - -TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - setHeteroNetworkAffinity(target_device); - - InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(actualCnnNetwork, heteroDeviceName); - - ASSERT_NO_THROW(p = exeNetwork.GetConfig("TARGET_FALLBACK")); - auto targets = p.as(); - auto expectedTargets = target_device; - - std::cout << "Exe network fallback targets: " << targets << std::endl; - ASSERT_EQ(expectedTargets, targets); -} } // namespace BehaviorTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index 4271c923ad5419..fdca29e5522b2d 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -308,26 +308,6 @@ TEST_P(IEClassBasicTestP, SetGetConfigForTbbTerminateThrows) { ASSERT_FALSE(value); } -TEST_P(IEClassBasicTestP, SetConfigHeteroTargetFallbackThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - ASSERT_NO_THROW(ie.SetConfig({{"TARGET_FALLBACK", target_device}}, ov::test::utils::DEVICE_HETERO)); -} - -TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - bool value = false; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::YES}}, - ov::test::utils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); - ASSERT_TRUE(value); - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::NO}}, - ov::test::utils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); - ASSERT_FALSE(value); -} - TEST_P(IEClassSpecificDeviceTestSetConfig, SetConfigSpecificDeviceNoThrow) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); @@ -509,13 +489,6 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) { } } -TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::QueryNetworkResult res; - ASSERT_NO_THROW(res = ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", target_device}})); - ASSERT_LT(0, res.supportedLayersMap.size()); -} - TEST_P(IEClassNetworkTestP, DISABLED_QueryNetworkMultiThrows) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); try { @@ -771,14 +744,6 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, 
GetConfigHeteroThrow) { ASSERT_THROW(p = ie.GetConfig(ov::test::utils::DEVICE_HETERO, "unsupported_config"), InferenceEngine::Exception); } -TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_THROW(p = ie.GetConfig(ov::test::utils::DEVICE_HETERO + std::string(":") + target_device, HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)), - InferenceEngine::Exception); -} - TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); InferenceEngine::Parameter p; @@ -841,19 +806,6 @@ TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) { // QueryNetwork with HETERO on particular device // -TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - auto deviceIDs = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - if (deviceIDs.empty()) - GTEST_FAIL() << "Incorrect DeviceID number" << std::endl; - ASSERT_NO_THROW(ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + "." + deviceIDs[0] + "," + target_device}})); -} - TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); @@ -884,16 +836,6 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) { ASSERT_THROW(ie.QueryNetwork(actualCnnNetwork, target_device + ".l0"), InferenceEngine::Exception); } -TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + ".100," + target_device}}), InferenceEngine::Exception); -} - // // LoadNetwork // @@ -930,11 +872,6 @@ TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) { ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO + std::string(":") + target_device)); } -TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", target_device}})); -} - TEST_P(IEClassNetworkTestP, LoadNetworkCreateDefaultExecGraphResult) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); auto net = ie.LoadNetwork(actualCnnNetwork, target_device); @@ -1044,150 +981,6 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) { ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, target_device + ".l0"), InferenceEngine::Exception); } -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, "HETERO", - {{"TARGET_FALLBACK", target_device + ".100," + ov::test::utils::DEVICE_CPU}}), InferenceEngine::Exception); 
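// (The tests deleted above drove HETERO through the legacy {"TARGET_FALLBACK", ...}
// config map. Their API 2.0 counterparts configure the fallback order with
// ov::device::priorities instead; a hedged sketch of the replacement pattern,
// not copied verbatim from the remaining tests:
//     ov::Core core;
//     auto compiled = core.compile_model(model, "HETERO",
//                                        ov::device::priorities("GPU", "CPU"));
// )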
-} - -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + "," + ov::test::utils::DEVICE_CPU}, - {CONFIG_KEY(DEVICE_ID), "110"}}), InferenceEngine::Exception); -} - -// -// LoadNetwork with HETERO on MULTI combinations particular device -// - -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + '.' + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - std::string targetFallback(ov::test::utils::DEVICE_MULTI + std::string(",") + target_device); - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", targetFallback}})); -} - -TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + std::string(".") + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_HETERO}, - {"TARGET_FALLBACK", devices}})); -} - -// -// QueryNetwork with HETERO on MULTI combinations particular device -// - -TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + '.' 
+ device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - auto function = multinputCnnNetwork.getFunction(); - ASSERT_NE(nullptr, function); - std::unordered_set expectedLayers; - for (auto &&node : function->get_ops()) { - expectedLayers.emplace(node->get_friendly_name()); - } - InferenceEngine::QueryNetworkResult result; - std::string targetFallback(ov::test::utils::DEVICE_MULTI + std::string(",") + target_device); - ASSERT_NO_THROW(result = ie.QueryNetwork(multinputCnnNetwork, ov::test::utils::DEVICE_HETERO, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", targetFallback}})); - - std::unordered_set actualLayers; - for (auto &&layer : result.supportedLayersMap) { - actualLayers.emplace(layer.first); - } - ASSERT_EQ(expectedLayers, actualLayers); -} - -TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + "." + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - auto function = multinputCnnNetwork.getFunction(); - ASSERT_NE(nullptr, function); - std::unordered_set expectedLayers; - for (auto &&node : function->get_ops()) { - expectedLayers.emplace(node->get_friendly_name()); - } - InferenceEngine::QueryNetworkResult result; - ASSERT_NO_THROW(result = ie.QueryNetwork(multinputCnnNetwork, ov::test::utils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_HETERO}, - {"TARGET_FALLBACK", devices}})); - - std::unordered_set actualLayers; - for (auto &&layer : result.supportedLayersMap) { - actualLayers.emplace(layer.first); - } - ASSERT_EQ(expectedLayers, actualLayers); -} - TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); { From 3adf60ccd5e91406eccf2e62437f1e4cdfb14b86 Mon Sep 17 00:00:00 2001 From: Chenhu Wang Date: Thu, 18 Jan 2024 17:24:56 +0800 Subject: [PATCH 059/122] [CPU] Reshape in place memory sharing test (#21952) * reshape in place is not applicable when input is shared * subgraph test case * refine subgraph test for specific case * comments apply --- .../subgraph_tests/src/reshape_inplace.cpp | 61 ++++++++++++++++++- 1 file changed, 59 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp index 1fe39571e2843e..7a6ed5be8aa8b5 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp @@ -9,8 +9,11 @@ namespace ov { namespace test { -// Subgraph: +// These tests are designed for correctness of reshape's in-place implementation. /* + * Case 1: + * Subgraph + * * params[0] params[1] * | | * constant shapeOf / @@ -22,7 +25,7 @@ namespace test { * | * result * - * This test is designed for correctness of reshape's in-place implementation. 
+ * * Due to the non-const target shape parameter (params[1]), the reshape node * is a non-constant node even though the input tensor is a constant node. @@ -81,5 +84,59 @@ class InPlaceReshapeFromConstantCheck : public SubgraphBaseTest { TEST_F(InPlaceReshapeFromConstantCheck, smoke_CPU_InPlaceReshapeFromConstantCheck) { run(); } + +/* Case 2: + * Subgraph + * + * params[0] params[1] + * \ / + * \ / + * add---reshape2---result2 + * | + * reshape1 + * | + * MVN + * | + * result1 + * + * The same memory is shared between the `result2` input and `MVN` output. The CPU graph in-place memory conflict + * resolution logic must prevent the `result2` data from being overwritten by the MVN node. + */ + +class InPlaceReshapeShareInputCheck : public SubgraphBaseTest { +protected: + void SetUp() override { + const auto rtPrc = ov::element::f32; + const ov::Shape inpShape = {1, 16, 16}; + targetStaticShapes = {{inpShape, inpShape}}; + targetDevice = ov::test::utils::DEVICE_CPU; + ov::ParameterVector params{std::make_shared(rtPrc, inpShape), + std::make_shared(rtPrc, inpShape)}; + + auto add = std::make_shared(params[0], params[1]); + std::vector newShape1 = {1, 1, 16, 16}; + auto targetShape1 = std::make_shared(ov::element::i64, ov::Shape{4}, newShape1); + auto reshape1 = std::make_shared(add, targetShape1, false); + auto mvn = std::make_shared(reshape1, + ov::op::v0::Constant::create(ov::element::i32, ov::Shape{2}, {2, 3}), + true, + 0.1, + ov::op::MVNEpsMode::INSIDE_SQRT); + auto res1 = std::make_shared(mvn); + + std::vector newShape2 = {1, 4, 8, 8}; + auto targetShape2 = std::make_shared(ov::element::i64, ov::Shape{4}, newShape2); + auto reshape2 = std::make_shared(add, targetShape2, false); + + auto res2 = std::make_shared(reshape2); + + function = std::make_shared(ov::ResultVector{res1, res2}, params, "reshape_share_input_check"); + } +}; + +TEST_F(InPlaceReshapeShareInputCheck, smoke_CPU_InPlaceReshapeShareInputCheck) { + run(); +} + } // namespace test } // namespace ov From cc78c3397a9785b6a298feae2220af31d16ee50a Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Thu, 18 Jan 2024 11:34:28 +0100 Subject: [PATCH 060/122] [DOCS] Removal of docs for GNA (#22213) * GNA docs removal * legacy features tab fix * Update Device_Plugins.rst * Update openvino_legacy_features.rst * Delete deployment-manager-tool.rst --- .../Supported_Devices.rst | 40 +- ...supported_operations_inference_devices.rst | 296 +++++------ .../openvino_legacy_features.rst | 22 +- .../get_started/configurations-header.rst | 16 +- .../configurations-for-intel-gna.rst | 91 ---- .../installing-openvino-overview.rst | 1 - .../installing-openvino-apt.rst | 2 +- ...installing-openvino-from-archive-linux.rst | 22 +- .../installing-openvino-yum.rst | 2 +- .../installing-openvino-brew.rst | 22 +- .../installing-openvino-conan.rst | 24 +- .../installing-openvino-conda.rst | 2 +- .../installing-openvino-pip.rst | 20 +- .../installing-openvino-vcpkg.rst | 22 +- .../openvino_samples/benchmark_tool.rst | 4 +- .../openvino_samples/hello_query_device.rst | 53 +- .../openvino_workflow/deployment_intro.rst | 4 +- .../deployment_intro/local-distribution.rst | 24 +- .../running_inference_with_openvino.rst | 6 +- .../Device_Plugins.rst | 42 +- .../Device_Plugins/GNA.rst | 466 ------------------ .../Device_Plugins/GPU.rst | 54 +- .../precision_control.rst | 10 +- .../auto_device_selection.rst | 56 +-- .../hetero_execution.rst | 6 +- ...ss-lingual-books-alignment-with-output.rst | 228 ++++----- ...le-diffusion-text-to-image-with-output.rst | 218 ++++----
...diffusion-v2-infinite-zoom-with-output.rst | 258 +++++----- ...diffusion-v2-text-to-image-with-output.rst | 202 ++++---- .../253-zeroscope-text2video-with-output.rst | 118 ++--- ...sively-multilingual-speech-with-output.rst | 170 +++---- docs/snippets/gna/__init__.py | 2 - docs/snippets/gna/configure.cpp | 17 - docs/snippets/gna/configure.py | 23 - docs/snippets/gna/import_export.cpp | 29 -- docs/snippets/gna/import_export.py | 31 -- docs/snippets/gna/set_batch.cpp | 29 -- docs/snippets/gna/set_batch.py | 33 -- 38 files changed, 936 insertions(+), 1729 deletions(-) delete mode 100644 docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst delete mode 100644 docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst delete mode 100644 docs/snippets/gna/__init__.py delete mode 100644 docs/snippets/gna/configure.cpp delete mode 100644 docs/snippets/gna/configure.py delete mode 100644 docs/snippets/gna/import_export.cpp delete mode 100644 docs/snippets/gna/import_export.py delete mode 100644 docs/snippets/gna/set_batch.cpp delete mode 100644 docs/snippets/gna/set_batch.py diff --git a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst index 54c827ffd5f89a..22a24c8858a992 100644 --- a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst +++ b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst @@ -3,58 +3,56 @@ Supported Devices ================= - + .. meta:: - :description: Check the list of officially supported models in Intel® + :description: Check the list of officially supported models in Intel® Distribution of OpenVINO™ toolkit. OpenVINO enables you to implement its inference capabilities in your own software, -utilizing various hardware. It currently supports the following processing units +utilizing various hardware. It currently supports the following processing units (for more details, see :doc:`system requirements `): -* :doc:`CPU ` -* :doc:`GPU ` -* :doc:`GNA ` - +* :doc:`CPU ` +* :doc:`GPU ` + .. note:: - GNA, currently available in the Intel® Distribution of OpenVINO™ toolkit, - will be deprecated together with the hardware being discontinued - in future CPU solutions. - + With OpenVINO 2024 release, GNA has been deprecated together + with the hardware being discontinued in future CPU solutions. + With OpenVINO™ 2023.0 release, support has been cancelled for: - Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X - Intel® Vision Accelerator Design with Intel® Movidius™ - + To keep using the MYRIAD and HDDL plugins with your hardware, revert to the OpenVINO 2022.3 LTS release. -Beside running inference with a specific device, +Beside running inference with a specific device, OpenVINO offers automated inference management with the following inference modes: -* :doc:`Automatic Device Selection ` - automatically selects the best device - available for the given task. It offers many additional options and optimizations, including inference on +* :doc:`Automatic Device Selection ` - automatically selects the best device + available for the given task. It offers many additional options and optimizations, including inference on multiple devices at the same time. -* :doc:`Multi-device Inference ` - executes inference on multiple devices. +* :doc:`Multi-device Inference ` - executes inference on multiple devices. 
Currently, this mode is considered a legacy solution. Using Automatic Device Selection is advised. -* :doc:`Heterogeneous Inference ` - enables splitting inference among several devices +* :doc:`Heterogeneous Inference ` - enables splitting inference among several devices automatically, for example, if one device doesn’t support certain operations. -Devices similar to the ones used for benchmarking can be accessed using `Intel® DevCloud for the Edge `__, -a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution +Devices similar to the ones used for benchmarking can be accessed using `Intel® DevCloud for the Edge `__, +a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution of OpenVINO™ Toolkit. `Learn more `__ or `Register here `__. To learn more about each of the supported devices and modes, refer to the sections of: -* :doc:`Inference Device Support ` +* :doc:`Inference Device Support ` * :doc:`Inference Modes ` For setting up a relevant configuration, refer to the -:doc:`Integrate with Customer Application ` +:doc:`Integrate with Customer Application ` topic (step 3 "Configure input and output"). diff --git a/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst b/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst index e6a5a3bbd44289..9939c5ffc572f4 100644 --- a/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst +++ b/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst @@ -14,154 +14,154 @@ for a more detailed and most recent listing of operations that are implemented a See the full conformance report table -================================= =============== ============== ================ ================== - Operations CPU (x86) GPU GNA CPU (Arm®) -================================= =============== ============== ================ ================== - Abs Supported** Supported Not Supported Supported - Acos Supported** Supported Not Supported Supported**** - Acosh Supported** Supported Not Supported Supported**** - Activation-Clamp Supported*** Supported Supported Supported - Activation-ELU Supported*** Supported Not Supported Supported - Activation-Exp Supported*** Supported Supported Supported - Activation-Leaky ReLU Supported*** Supported Supported Not Supported - Activation-Not Supported*** Supported Not Supported Not Supported - Activation-PReLU Supported*** Supported Not Supported Supported - Activation-ReLU Supported*** Supported Supported Supported - Activation-ReLU6 Supported*** Supported Not Supported Not Supported - Activation-Sigmoid/Logistic Supported*** Supported Supported Supported - Activation-TanH Supported*** Supported Supported Supported - ArgMax Supported** Supported Not Supported Not Supported - Asin Supported** Supported Not Supported Supported**** - Asinh Supported** Supported Not Supported Supported**** - Atan Supported** Supported Not Supported Supported**** - Atanh Supported** Supported Not Supported Supported**** - BatchNormalization Supported Supported Not Supported Supported - BinaryConvolution Supported Supported Not Supported Not Supported - Broadcast Supported** Supported Not Supported Supported - Ceil Supported** Supported Not Supported Supported - Concat Supported*** Supported Supported Supported - Const Supported Supported Supported Supported - 
Convolution-Dilated Supported Supported Not Supported Supported - Convolution-Dilated 3D Supported Supported Not Supported Not Supported - Convolution-Grouped Supported Supported Not Supported Supported - Convolution-Grouped 3D Supported Supported Not Supported Not Supported - Convolution-Ordinary Supported Supported Supported* Supported - Convolution-Ordinary 3D Supported Supported Not Supported Not Supported - Cos Supported** Supported Not Supported Supported**** - Cosh Supported** Supported Not Supported Supported**** - Crop Supported Supported Supported Not Supported - CTCGreedyDecoder Supported** Supported** Not Supported Supported**** - Deconvolution Supported Supported Not Supported Not Supported - Deconvolution 3D Supported Supported Not Supported Not Supported - DeformableConvolution Supported Supported Not Supported Not Supported - DepthToSpace Supported** Supported Not Supported Supported* - DetectionOutput Supported** Supported Not Supported Supported**** - Eltwise-And Supported*** Supported Not Supported Supported - Eltwise-Add Supported*** Supported Not Supported Supported - Eltwise-Div Supported*** Supported Not Supported Supported - Eltwise-Equal Supported*** Supported Not Supported Supported* - Eltwise-FloorMod Supported*** Supported Not Supported Supported**** - Eltwise-Greater Supported*** Supported Not Supported Supported - Eltwise-GreaterEqual Supported*** Supported Not Supported Supported - Eltwise-Less Supported*** Supported Not Supported Supported* - Eltwise-LessEqual Supported*** Supported Not Supported Supported* - Eltwise-LogicalAnd Supported*** Supported Not Supported Supported - Eltwise-LogicalOr Supported*** Supported Not Supported Supported - Eltwise-LogicalXor Supported*** Supported Not Supported Supported - Eltwise-Max Supported*** Supported Not Supported Supported - Eltwise-Min Supported*** Supported Not Supported Supported - Eltwise-Mul Supported*** Supported Supported Supported - Eltwise-NotEqual Supported*** Supported Not Supported Supported* - Eltwise-Pow Supported*** Supported Not Supported Supported - Eltwise-Prod Supported*** Supported Supported Not Supported - Eltwise-SquaredDiff Supported*** Supported Not Supported Supported - Eltwise-Sub Supported*** Supported Supported Supported - Eltwise-Sum Supported*** Supported Supported Supported**** - Erf Supported** Supported Not Supported Supported**** - Exp Supported Supported Supported Supported - FakeQuantize Supported Not Supported Not Supported Supported* - Fill Supported** Not Supported Not Supported Not Supported - Flatten Supported Supported Not Supported Not Supported - Floor Supported** Supported Not Supported Supported - FullyConnected (Inner Product) Supported*** Supported Supported Supported - Gather Supported** Supported Not Supported Supported* - GatherTree Supported** Not Supported Not Supported Supported**** - Gemm Supported Supported Not Supported Not Supported - GRN Supported** Supported** Not Supported Supported - HardSigmoid Supported** Supported Not Supported Supported**** - Interp Supported** Supported** Not Supported Supported* - Log Supported** Supported Supported Supported - LRN (Norm) Supported Supported Not Supported Supported* - LSTMCell Supported Supported Supported Supported - GRUCell Supported Supported Supported Supported - RNNCell Supported Supported Not Supported Supported - LSTMSequence Supported Supported Supported Supported**** - GRUSequence Supported Supported Supported Supported**** - RNNSequence Supported Supported Not Supported Supported**** - LogSoftmax 
Supported** Supported Not Supported Supported - Memory Supported Not Supported Supported Not Supported - MVN Supported** Supported Not Supported Supported* - Neg Supported** Supported Not Supported Supported - NonMaxSuppression Supported** Not Supported Not Supported Supported**** - Normalize Supported** Supported Not Supported Supported* - OneHot Supported** Supported Not Supported Supported**** - Pad Supported** Supported Not Supported Supported* - Permute Supported Supported Supported* Not Supported - Pooling(AVG,MAX) Supported Supported Supported Supported - Pooling(AVG,MAX) 3D Supported Supported Not Supported Supported* - Power Supported** Supported Supported* Supported - PowerFile Supported** Not Supported Not Supported Not Supported - PriorBox Supported** Supported Not Supported Supported - PriorBoxClustered Supported** Supported** Not Supported Supported - Proposal Supported** Supported Not Supported Supported**** - PSROIPooling Supported** Supported Not Supported Supported**** - Range Supported** Not Supported Not Supported Not Supported - Reciprocal Supported** Supported Not Supported Not Supported - ReduceAnd Supported** Supported Not Supported Supported**** - ReduceL1 Supported** Supported Not Supported Supported - ReduceL2 Supported** Supported Not Supported Supported - ReduceLogSum Supported** Supported Not Supported Supported - ReduceLogSumExp Supported** Supported Not Supported Not Supported - ReduceMax Supported** Supported Not Supported Supported - ReduceMean Supported** Supported Not Supported Supported - ReduceMin Supported** Supported Not Supported Supported - ReduceOr Supported** Supported Not Supported Supported**** - ReduceProd Supported** Supported Not Supported Supported - ReduceSum Supported** Supported Not Supported Supported - ReduceSumSquare Supported** Supported Not Supported Not Supported - RegionYolo Supported** Supported Not Supported Supported**** - ReorgYolo Supported** Supported Not Supported Supported - Resample Supported** Supported Not Supported Not Supported - Reshape Supported*** Supported Supported Supported - ReverseSequence Supported** Supported Not Supported Supported**** - RNN Supported Not Supported Not Supported Supported - ROIPooling Supported Supported* Not Supported Supported**** - ScaleShift Supported*** Supported Supported Not Supported - ScatterUpdate Supported** Not Supported Not Supported Not Supported - Select Supported Supported Not Supported Supported - Selu Supported** Supported Not Supported Supported**** - ShuffleChannels Supported** Supported Not Supported Supported - Sign Supported** Supported Not Supported Supported - Sin Supported** Supported Not Supported Supported - Sinh Supported** Supported Not Supported Supported**** - SimplerNMS Supported** Supported Not Supported Not Supported - Slice Supported*** Supported Supported Not Supported - SoftMax Supported*** Supported Not Supported Supported - Softplus Supported** Supported Not Supported Supported - Softsign Supported** Supported Supported Not Supported - SpaceToDepth Supported** Not Supported Not Supported Supported* - SpatialTransformer Supported** Not Supported Not Supported Not Supported - Split Supported*** Supported Supported Supported - Squeeze Supported** Supported Supported Supported - StridedSlice Supported** Supported Not Supported Supported* - Tan Supported** Supported Not Supported Supported**** - TensorIterator Supported Not Supported Supported Supported - Tile Supported*** Supported** Not Supported Supported - TopK Supported** Supported Not Supported 
Supported**** - Unpooling Not Supported Supported Not Supported Not Supported - Unsqueeze Supported** Supported Supported Supported - Upsampling Not Supported Supported Not Supported Not Supported -================================= =============== ============== ================ ================== +================================= =============== ============== ================== + Operations CPU (x86) GPU CPU (Arm®) +================================= =============== ============== ================== + Abs Supported** Supported Supported + Acos Supported** Supported Supported**** + Acosh Supported** Supported Supported**** + Activation-Clamp Supported*** Supported Supported + Activation-ELU Supported*** Supported Supported + Activation-Exp Supported*** Supported Supported + Activation-Leaky ReLU Supported*** Supported Not Supported + Activation-Not Supported*** Supported Not Supported + Activation-PReLU Supported*** Supported Supported + Activation-ReLU Supported*** Supported Supported + Activation-ReLU6 Supported*** Supported Not Supported + Activation-Sigmoid/Logistic Supported*** Supported Supported + Activation-TanH Supported*** Supported Supported + ArgMax Supported** Supported Not Supported + Asin Supported** Supported Supported**** + Asinh Supported** Supported Supported**** + Atan Supported** Supported Supported**** + Atanh Supported** Supported Supported**** + BatchNormalization Supported Supported Supported + BinaryConvolution Supported Supported Not Supported + Broadcast Supported** Supported Supported + Ceil Supported** Supported Supported + Concat Supported*** Supported Supported + Const Supported Supported Supported + Convolution-Dilated Supported Supported Supported + Convolution-Dilated 3D Supported Supported Not Supported + Convolution-Grouped Supported Supported Supported + Convolution-Grouped 3D Supported Supported Not Supported + Convolution-Ordinary Supported Supported Supported + Convolution-Ordinary 3D Supported Supported Not Supported + Cos Supported** Supported Supported**** + Cosh Supported** Supported Supported**** + Crop Supported Supported Not Supported + CTCGreedyDecoder Supported** Supported** Supported**** + Deconvolution Supported Supported Not Supported + Deconvolution 3D Supported Supported Not Supported + DeformableConvolution Supported Supported Not Supported + DepthToSpace Supported** Supported Supported* + DetectionOutput Supported** Supported Supported**** + Eltwise-And Supported*** Supported Supported + Eltwise-Add Supported*** Supported Supported + Eltwise-Div Supported*** Supported Supported + Eltwise-Equal Supported*** Supported Supported* + Eltwise-FloorMod Supported*** Supported Supported**** + Eltwise-Greater Supported*** Supported Supported + Eltwise-GreaterEqual Supported*** Supported Supported + Eltwise-Less Supported*** Supported Supported* + Eltwise-LessEqual Supported*** Supported Supported* + Eltwise-LogicalAnd Supported*** Supported Supported + Eltwise-LogicalOr Supported*** Supported Supported + Eltwise-LogicalXor Supported*** Supported Supported + Eltwise-Max Supported*** Supported Supported + Eltwise-Min Supported*** Supported Supported + Eltwise-Mul Supported*** Supported Supported + Eltwise-NotEqual Supported*** Supported Supported* + Eltwise-Pow Supported*** Supported Supported + Eltwise-Prod Supported*** Supported Not Supported + Eltwise-SquaredDiff Supported*** Supported Supported + Eltwise-Sub Supported*** Supported Supported + Eltwise-Sum Supported*** Supported Supported**** + Erf Supported** Supported Supported**** + Exp 
Supported Supported Supported + FakeQuantize Supported Not Supported Supported* + Fill Supported** Not Supported Not Supported + Flatten Supported Supported Not Supported + Floor Supported** Supported Supported + FullyConnected (Inner Product) Supported*** Supported Supported + Gather Supported** Supported Supported* + GatherTree Supported** Not Supported Supported**** + Gemm Supported Supported Not Supported + GRN Supported** Supported** Supported + HardSigmoid Supported** Supported Supported**** + Interp Supported** Supported** Supported* + Log Supported** Supported Supported + LRN (Norm) Supported Supported Supported* + LSTMCell Supported Supported Supported + GRUCell Supported Supported Supported + RNNCell Supported Supported Supported + LSTMSequence Supported Supported Supported**** + GRUSequence Supported Supported Supported**** + RNNSequence Supported Supported Supported**** + LogSoftmax Supported** Supported Supported + Memory Supported Not Supported Not Supported + MVN Supported** Supported Supported* + Neg Supported** Supported Supported + NonMaxSuppression Supported** Not Supported Supported**** + Normalize Supported** Supported Supported* + OneHot Supported** Supported Supported**** + Pad Supported** Supported Supported* + Permute Supported Supported Not Supported + Pooling(AVG,MAX) Supported Supported Supported + Pooling(AVG,MAX) 3D Supported Supported Supported* + Power Supported** Supported Supported + PowerFile Supported** Not Supported Not Supported + PriorBox Supported** Supported Supported + PriorBoxClustered Supported** Supported** Supported + Proposal Supported** Supported Supported**** + PSROIPooling Supported** Supported Supported**** + Range Supported** Not Supported Not Supported + Reciprocal Supported** Supported Not Supported + ReduceAnd Supported** Supported Supported**** + ReduceL1 Supported** Supported Supported + ReduceL2 Supported** Supported Supported + ReduceLogSum Supported** Supported Supported + ReduceLogSumExp Supported** Supported Not Supported + ReduceMax Supported** Supported Supported + ReduceMean Supported** Supported Supported + ReduceMin Supported** Supported Supported + ReduceOr Supported** Supported Supported**** + ReduceProd Supported** Supported Supported + ReduceSum Supported** Supported Supported + ReduceSumSquare Supported** Supported Not Supported + RegionYolo Supported** Supported Supported**** + ReorgYolo Supported** Supported Supported + Resample Supported** Supported Not Supported + Reshape Supported*** Supported Supported + ReverseSequence Supported** Supported Supported**** + RNN Supported Not Supported Supported + ROIPooling Supported Supported* Supported**** + ScaleShift Supported*** Supported Not Supported + ScatterUpdate Supported** Not Supported Not Supported + Select Supported Supported Supported + Selu Supported** Supported Supported**** + ShuffleChannels Supported** Supported Supported + Sign Supported** Supported Supported + Sin Supported** Supported Supported + Sinh Supported** Supported Supported**** + SimplerNMS Supported** Supported Not Supported + Slice Supported*** Supported Not Supported + SoftMax Supported*** Supported Supported + Softplus Supported** Supported Supported + Softsign Supported** Supported Not Supported + SpaceToDepth Supported** Not Supported Supported* + SpatialTransformer Supported** Not Supported Not Supported + Split Supported*** Supported Supported + Squeeze Supported** Supported Supported + StridedSlice Supported** Supported Supported* + Tan Supported** Supported Supported**** + TensorIterator 
Supported Not Supported Supported + Tile Supported*** Supported** Supported + TopK Supported** Supported Supported**** + Unpooling Not Supported Supported Not Supported + Unsqueeze Supported** Supported Supported + Upsampling Not Supported Supported Not Supported +================================= =============== ============== ================== | `*` - support is limited to the specific parameters. Refer to "Known Layer Limitations" section for the device :doc:`from the list of supported `. | `**` - support is implemented via :doc:`Extensibility mechanism `. diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst index 1b1e00db3e232b..311e1bc2529bd9 100644 --- a/docs/articles_en/documentation/openvino_legacy_features.rst +++ b/docs/articles_en/documentation/openvino_legacy_features.rst @@ -93,21 +93,21 @@ offering. | Compile tool is now deprecated. If you need to compile a model for inference on a specific device, use the following script: - .. tab-set:: + .. tab-set:: - .. tab-item:: Python - :sync: py + .. tab-item:: Python + :sync: py - .. doxygensnippet:: docs/snippets/export_compiled_model.py - :language: python - :fragment: [export_compiled_model] + .. doxygensnippet:: docs/snippets/export_compiled_model.py + :language: python + :fragment: [export_compiled_model] - .. tab-item:: C++ - :sync: cpp + .. tab-item:: C++ + :sync: cpp - .. doxygensnippet:: docs/snippets/export_compiled_model.cpp - :language: cpp - :fragment: [export_compiled_model] + .. doxygensnippet:: docs/snippets/export_compiled_model.cpp + :language: cpp + :fragment: [export_compiled_model] | :doc:`see which devices support import / export ` | :doc:`Learn more on preprocessing steps ` diff --git a/docs/articles_en/get_started/configurations-header.rst b/docs/articles_en/get_started/configurations-header.rst index cab6ad46b8d6ac..b92039c2ae6886 100644 --- a/docs/articles_en/get_started/configurations-header.rst +++ b/docs/articles_en/get_started/configurations-header.rst @@ -5,7 +5,7 @@ Additional Configurations For Hardware .. meta:: - :description: Learn how to create additional configurations for your devices + :description: Learn how to create additional configurations for your devices to work with Intel® Distribution of OpenVINO™ toolkit. .. _additional configurations: @@ -13,13 +13,11 @@ Additional Configurations For Hardware .. toctree:: :maxdepth: 2 :hidden: - + For GPU For NPU - For GNA - -For certain use cases, you may need to install additional software, to use the full +For certain use cases, you may need to install additional software, to use the full potential of OpenVINO™. Check the following list for components used in your workflow: @@ -35,15 +33,9 @@ your workflow: See the :doc:`guide on NPU configuration ` for details. -| **GNA drivers** -| If you want to run inference on a GNA (note that it is currently being deprecated and will no longer - be supported beyond 2023.2), make sure your GPU's drivers are properly installed. See the - :doc:`guide on GNA configuration ` - for details. - | **Open Computer Vision Library** | OpenCV is used to extend the capabilities of some models, for example enhance some of - OpenVINO samples, when used as a dependency in compilation. To install OpenCV for OpenVINO, see the + OpenVINO samples, when used as a dependency in compilation. To install OpenCV for OpenVINO, see the `instructions on GitHub `__.
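A quick way to confirm that a configured device is actually visible to OpenVINO is to query the Core for available devices. The following is a minimal sketch, assuming the OpenVINO Python package is already installed; a device name such as ``GPU`` appears in the list only once the corresponding drivers are set up:

.. code-block:: py

   import openvino as ov

   core = ov.Core()
   # Lists the devices OpenVINO can currently use, e.g. ['CPU', 'GPU']
   print(core.available_devices)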
diff --git a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst b/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst deleted file mode 100644 index 606572e97284af..00000000000000 --- a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. {#openvino_docs_install_guides_configurations_for_intel_gna} - -Configurations for Intel® Gaussian & Neural Accelerator (GNA) with OpenVINO™ -============================================================================ - - -.. meta:: - :description: Learn how to provide additional configuration for Intel® - Gaussian & Neural Accelerator (GNA) to work with Intel® - Distribution of OpenVINO™ toolkit on your system. - - -.. note:: - - On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only. - - -Drivers and Dependencies -######################## - - -Intel® GNA hardware requires a driver to be installed on the system. - -.. _gna guide: - -Linux -#################### - -Prerequisites -++++++++++++++++++++ - -Ensure that make, gcc, and Linux kernel headers are installed. Use the following command to install required software: - -.. code-block:: sh - - sudo apt-get install gcc make linux-headers-generic - - -Configuration steps -++++++++++++++++++++ - -1. Download `Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.4+) `__ -2. Run the sample_install.sh script provided in the installation package: - - .. code-block:: sh - - prompt$ ./scripts/sample_install.sh - - -You can also build and install the driver manually by using the following commands: - -.. code-block:: sh - - prompt$ cd src/ - prompt$ make - prompt$ sudo insmod intel_gna.ko - - -To unload the driver: - -.. code-block:: sh - - prompt$ sudo rmmod intel_gna - - -.. _gna guide windows: - - -Windows -#################### - -Intel® GNA driver for Windows is available through Windows Update. - -What’s Next? -#################### - -Now you are ready to try out OpenVINO™. You can use the following tutorials to write your applications using Python and C/C++. 
- -* Developing in Python: - - * `Start with tensorflow models with OpenVINO™ `__ - * `Start with ONNX and PyTorch models with OpenVINO™ `__ - * `Start with PaddlePaddle models with OpenVINO™ `__ - -* Developing in C/C++: - - * :doc:`Image Classification Async C++ Sample ` - * :doc:`Hello Classification C++ Sample ` - * :doc:`Hello Reshape SSD C++ Sample ` - - diff --git a/docs/articles_en/get_started/installing-openvino-overview.rst b/docs/articles_en/get_started/installing-openvino-overview.rst index 37e8701b7fd5f6..f4688f9f766f09 100644 --- a/docs/articles_en/get_started/installing-openvino-overview.rst +++ b/docs/articles_en/get_started/installing-openvino-overview.rst @@ -54,7 +54,6 @@ Install OpenVINO™ 2024.0 =============== ========== ====== ========= ======== ============ ========== ========== CPU V V V V V V V GPU V V V V V V V - GNA V n/a n/a n/a n/a n/a n/a NPU V n/a n/a n/a n/a n/a n/a =============== ========== ====== ========= ======== ============ ========== ========== diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst index b1b4c7fe8600e8..056bfed3e861b6 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst @@ -13,7 +13,7 @@ Install Intel® Distribution of OpenVINO™ Toolkit for Linux Using APT Reposito Note that the APT distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to Linux users only * additionally includes code samples diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst index a4311c04e280a1..3e39eb243968ce 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst @@ -21,17 +21,17 @@ Install OpenVINO™ Runtime on Linux from an Archive File .. dropdown:: Inference Options - =================== ===== ===== ===== ===== - Operating System CPU GPU GNA NPU - =================== ===== ===== ===== ===== - Debian9 armhf V n/a n/a n/a - Ubuntu18 arm64 V n/a n/a n/a - CentOS7 x86_64 V V n/a n/a - Ubuntu18 x86_64 V V V n/a - Ubuntu20 x86_64 V V V V - Ubuntu22 x86_64 V V V V - RHEL8 x86_64 V V V n/a - =================== ===== ===== ===== ===== + =================== ===== ===== ===== + Operating System CPU GPU NPU + =================== ===== ===== ===== + Debian9 armhf V n/a n/a + Ubuntu18 arm64 V n/a n/a + CentOS7 x86_64 V V n/a + Ubuntu18 x86_64 V V n/a + Ubuntu20 x86_64 V V V + Ubuntu22 x86_64 V V V + RHEL8 x86_64 V V n/a + =================== ===== ===== ===== .. 
tab-set:: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst index 89cce25a142d4f..d03895cd1d4932 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst @@ -13,7 +13,7 @@ Install OpenVINO™ Runtime on Linux From YUM Repository Note that the YUM distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to Linux users only * additionally includes code samples diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst index 6b6b9fcea70066..0940d68969a3c8 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst @@ -5,15 +5,15 @@ Install OpenVINO™ Runtime via Homebrew .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Linux and macOS + :description: Learn how to install OpenVINO™ Runtime on Linux and macOS operating systems, using Homebrew. .. note:: - + Note that the `Homebrew `__ distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to macOS (both arm64 and x86_64) and Linux (x86_64 only) users. @@ -24,10 +24,10 @@ Install OpenVINO™ Runtime via Homebrew | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -38,7 +38,7 @@ Install OpenVINO™ Runtime via Homebrew .. tab-item:: Linux :sync: linux - + * `Homebrew `_ * `CMake 3.13 or higher, 64-bit `__ * GCC 7.5.0 (for Ubuntu 18.04), GCC 9.3.0 (for Ubuntu 20.04) or GCC 11.3.0 (for Ubuntu 22.04) @@ -46,13 +46,13 @@ Install OpenVINO™ Runtime via Homebrew .. tab-item:: macOS :sync: macos - + * `Homebrew `_ - * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default installation). + * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default installation). * `Python 3.8 - 3.11 `__ . Install and add it to path. * Apple Xcode Command Line Tools. In the terminal, run ``xcode-select --install`` from any directory to install it. * (Optional) Apple Xcode IDE (not required for OpenVINO™, but useful for development) - + Installing OpenVINO Runtime ########################### @@ -72,8 +72,8 @@ Installing OpenVINO Runtime brew list -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. 
Check the :doc:`list of additional configurations ` to see if your case needs any of them. diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst index fbc8bfa87aa622..8bb322092f32da 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime from Conan Package Manager .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using Conan Package Manager. .. note:: - + Note that the Conan Package Manager distribution: * offers C/C++ API only - * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * does not offer support for NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) @@ -25,10 +25,10 @@ Install OpenVINO™ Runtime from Conan Package Manager Full requirement listing is available in: :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: `Product Specifications `__ @@ -64,14 +64,14 @@ Installing OpenVINO Runtime with Conan Package Manager .. code-block:: sh conan install conanfile.txt --build=missing - - By default, OpenVINO is statically compiled, together with all available + + By default, OpenVINO is statically compiled, together with all available plugins and frontends. To build a version tailored to your needs, check - what options there are on the `Conan Package Manager page for OpenVINO `__ + what options there are on the `Conan Package Manager page for OpenVINO `__ and extend the command, like so: - + .. code-block:: sh - + conan install conanfile.txt --build=missing -o:h 'openvino/*:enable_intel_gpu=False' -o:h 'openvino/*:enable_onnx_frontend=False' -o:h 'openvino/*:shared=True' 3. Configure and compile your project with OpenVINO: @@ -82,7 +82,7 @@ Installing OpenVINO Runtime with Conan Package Manager cmake --build --parallel .. note:: - + OpenVINO can be used with any build interface, as long as it is supported by Conan 2.0. Read `more `__. 
Additional Resources diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst index a65e6669018668..37cca12efa23b0 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst @@ -14,7 +14,7 @@ Install OpenVINO™ Runtime from Conda Forge Note that the Conda Forge distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst index 5185fc192d8e9b..badaa9fb433222 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst @@ -5,18 +5,18 @@ Install Intel® Distribution of OpenVINO™ Toolkit from PyPI Repository .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using a PyPi package. .. note:: - + Note that the PyPi distribution: - + * offers the Python API only - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) - * Windows and Linux do not offer support for GNA and NPU inference + * Windows and Linux do not offer support for NPU inference * macOS offers support only for CPU inference .. tab-set:: @@ -27,11 +27,11 @@ Install Intel® Distribution of OpenVINO™ Toolkit from PyPI Repository | Full requirement listing is available in: | :doc:`System Requirements Page ` | `PyPi OpenVINO page `__ - - + + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -120,8 +120,8 @@ Run the command below: If installation was successful, you will see the list of available devices. -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. 
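Beyond listing devices, a short smoke test can compile a model and run a single inference through the pip-installed package. This is a hedged sketch: ``model.xml`` is a placeholder for any OpenVINO IR model you have locally (none ships with the package), and a static input shape is assumed:

.. code-block:: py

   import numpy as np
   import openvino as ov

   core = ov.Core()
   model = core.read_model("model.xml")  # placeholder: any local IR model
   compiled = core.compile_model(model, "CPU")

   # Generate random data matching the first input's (static) shape and infer
   data = np.random.rand(*compiled.input(0).shape).astype(np.float32)
   results = compiled([data])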
diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst index fbd7035e083354..e7bfba2850eff0 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime via vcpkg .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using vcpkg. .. note:: - + Note that the vcpkg distribution: * offers C/C++ API only - * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * does not offer support for NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) .. tab-set:: @@ -24,10 +24,10 @@ Install OpenVINO™ Runtime via vcpkg | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -41,7 +41,7 @@ Install OpenVINO™ Runtime via vcpkg Installing OpenVINO Runtime ########################### -1. Make sure that you have installed vcpkg on your system. If not, follow the +1. Make sure that you have installed vcpkg on your system. If not, follow the `vcpkg installation instructions `__. @@ -52,7 +52,7 @@ Installing OpenVINO Runtime vcpkg install openvino vcpkg also enables you to install only selected components, by specifying them in the command. - See the list of `available features `__, for example: + See the list of `available features `__, for example: .. code-block:: sh @@ -64,8 +64,8 @@ Installing OpenVINO Runtime vcpkg install 'openvino:x64-windows-static' -Note that the vcpkg installation means building all packages and dependencies from source, -which means the compiler stage will require additional time to complete the process. +Note that the vcpkg installation means building all packages and dependencies from source, +which means the compiler stage will require additional time to complete the process. After installation, you can use OpenVINO in your product's cmake scripts: @@ -80,7 +80,7 @@ And running from terminal: cmake -B -S -DCMAKE_TOOLCHAIN_FILE=/scripts/buildsystems/vcpkg.cmake Congratulations! You've just Installed and used OpenVINO in your project! For some use cases you may still -need to install additional components. Check the +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. diff --git a/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst b/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst index aaf029273f75fb..eb746da602d9a4 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst @@ -195,7 +195,7 @@ Device To set which device benchmarking runs on, use the ``-d `` argument. This will tell ``benchmark_app`` to run benchmarking on that specific device. 
The benchmark -app supports CPU, GPU, and GNA devices. In order to use GPU, the system +app supports CPU and GPU devices. In order to use GPU, the system must have the appropriate drivers installed. If no device is specified, ``benchmark_app`` will default to using ``CPU``. @@ -454,7 +454,7 @@ following usage message: Device-specific performance options: -nthreads NUMBER_THREADS, --number_threads NUMBER_THREADS - Number of threads to use for inference on the CPU, GNA (including HETERO and MULTI cases). + Number of threads to use for inference on the CPU (including HETERO and MULTI cases). -pin {YES,NO,NUMA,HYBRID_AWARE}, --infer_threads_pinning {YES,NO,NUMA,HYBRID_AWARE} Optional. Enable threads->cores ('YES' which is OpenVINO runtime's default for conventional CPUs), threads->(NUMA)nodes ('NUMA'), diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst index 73294f95a49747..ccb14bc3293c80 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst @@ -100,32 +100,7 @@ For example: [ INFO ] PERFORMANCE_HINT: [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 [ INFO ] PERF_COUNT: NO - [ INFO ] - [ INFO ] GNA : - [ INFO ] SUPPORTED_METRICS: - [ INFO ] AVAILABLE_DEVICES: GNA_SW - [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 1 - [ INFO ] FULL_DEVICE_NAME: GNA_SW - [ INFO ] GNA_LIBRARY_FULL_VERSION: 3.0.0.1455 - [ INFO ] IMPORT_EXPORT_SUPPORT: True - [ INFO ] - [ INFO ] SUPPORTED_CONFIG_KEYS (default values): - [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO - [ INFO ] GNA_COMPACT_MODE: YES - [ INFO ] GNA_COMPILE_TARGET: - [ INFO ] GNA_DEVICE_MODE: GNA_SW_EXACT - [ INFO ] GNA_EXEC_TARGET: - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE: - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE_GENERATION: - [ INFO ] GNA_LIB_N_THREADS: 1 - [ INFO ] GNA_PRECISION: I16 - [ INFO ] GNA_PWL_MAX_ERROR_PERCENT: 1.000000 - [ INFO ] GNA_PWL_UNIFORM_DESIGN: NO - [ INFO ] GNA_SCALE_FACTOR: 1.000000 - [ INFO ] GNA_SCALE_FACTOR_0: 1.000000 - [ INFO ] LOG_LEVEL: LOG_NONE - [ INFO ] PERF_COUNT: NO - [ INFO ] SINGLE_THREAD: YES + .. 
tab-item:: C++ :sync: cpp @@ -155,31 +130,7 @@ For example: [ INFO ] PERFORMANCE_HINT : "" [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS : 0 [ INFO ] PERF_COUNT : NO - [ INFO ] - [ INFO ] GNA - [ INFO ] SUPPORTED_METRICS: - [ INFO ] AVAILABLE_DEVICES : [ GNA_SW_EXACT ] - [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS : 1 - [ INFO ] FULL_DEVICE_NAME : GNA_SW_EXACT - [ INFO ] GNA_LIBRARY_FULL_VERSION : 3.0.0.1455 - [ INFO ] IMPORT_EXPORT_SUPPORT : true - [ INFO ] SUPPORTED_CONFIG_KEYS (default values): - [ INFO ] EXCLUSIVE_ASYNC_REQUESTS : NO - [ INFO ] GNA_COMPACT_MODE : YES - [ INFO ] GNA_COMPILE_TARGET : "" - [ INFO ] GNA_DEVICE_MODE : GNA_SW_EXACT - [ INFO ] GNA_EXEC_TARGET : "" - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE : "" - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE_GENERATION : "" - [ INFO ] GNA_LIB_N_THREADS : 1 - [ INFO ] GNA_PRECISION : I16 - [ INFO ] GNA_PWL_MAX_ERROR_PERCENT : 1.000000 - [ INFO ] GNA_PWL_UNIFORM_DESIGN : NO - [ INFO ] GNA_SCALE_FACTOR : 1.000000 - [ INFO ] GNA_SCALE_FACTOR_0 : 1.000000 - [ INFO ] LOG_LEVEL : LOG_NONE - [ INFO ] PERF_COUNT : NO - [ INFO ] SINGLE_THREAD : YES + Additional Resources #################### diff --git a/docs/articles_en/openvino_workflow/deployment_intro.rst b/docs/articles_en/openvino_workflow/deployment_intro.rst index 2ff005c91e9587..d446af92915f0f 100644 --- a/docs/articles_en/openvino_workflow/deployment_intro.rst +++ b/docs/articles_en/openvino_workflow/deployment_intro.rst @@ -12,7 +12,7 @@ Deploy Locally Optimize Binaries Size .. meta:: - :description: There are several ways of deploying OpenVINO™ application once + :description: There are several ways of deploying OpenVINO™ application once its development has been finished. @@ -74,5 +74,5 @@ Building a local distribution will require more detailed information, and you wi .. note:: - Depending on your target OpenVINO devices, the following configurations might be needed for deployed machines: :doc:`Configurations for GPU `, :doc:`Configurations for GNA `. + Depending on your target OpenVINO devices, the following configuration might be needed for deployed machines: :doc:`Configurations for GPU `. diff --git a/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.rst b/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.rst index 04c4c71e52a552..d1b3cc898241e2 100644 --- a/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.rst +++ b/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.rst @@ -5,14 +5,14 @@ Libraries for Local Distribution .. meta:: - :description: A local distribution will have its own copies of OpenVINO - Runtime binaries along with a set of required libraries + :description: A local distribution will have its own copies of OpenVINO + Runtime binaries along with a set of required libraries needed to deploy the application. With local distribution, each C or C++ application/installer has its own copies of OpenVINO Runtime binaries. However, OpenVINO has a scalable plugin-based architecture, which means that some components can be loaded in runtime only when they are really needed. This guide helps you understand what minimal set of libraries is required to deploy the application. -Local distribution is also suitable for OpenVINO binaries built from source using `Build instructions `__, +Local distribution is also suitable for OpenVINO binaries built from source using `Build instructions `__, but this guide assumes that OpenVINO Runtime is built dynamically. 
For `Static OpenVINO Runtime `__, select the required OpenVINO capabilities at the CMake configuration stage using `CMake Options for Custom Compilation `__, then build and link the OpenVINO components to the final application. .. note:: @@ -44,7 +44,6 @@ For each inference device, OpenVINO Runtime has its own plugin library: - ``openvino_intel_cpu_plugin`` for :doc:`Intel® CPU devices ` - ``openvino_intel_gpu_plugin`` for :doc:`Intel® GPU devices ` -- ``openvino_intel_gna_plugin`` for :doc:`Intel® GNA devices ` - ``openvino_arm_cpu_plugin`` for :doc:`ARM CPU devices ` Depending on which devices are used in the app, the corresponding libraries should be included in the distribution package. @@ -63,10 +62,7 @@ As shown in the picture above, some plugin libraries may have OS-specific depend +--------------+-------------------------+-------------------------------------------------------+ | GPU | | OpenCL.dll | | ``C:\Windows\System32\opencl.dll`` | | | | cache.json | | ``.\runtime\bin\intel64\Release\cache.json`` or | - | | | | ``.\runtime\bin\intel64\Debug\cache.json`` | - +--------------+-------------------------+-------------------------------------------------------+ - | GNA | gna.dll | | ``.\runtime\bin\intel64\Release\gna.dll`` or | - | | | | ``.\runtime\bin\intel64\Debug\gna.dll`` | + | | | | | ``.\runtime\bin\intel64\Debug\cache.json`` | +--------------+-------------------------+-------------------------------------------------------+ | Arm® CPU | — | — | +--------------+-------------------------+-------------------------------------------------------+ @@ -91,8 +87,6 @@ As shown in the picture above, some plugin libraries may have OS-specific depend | GPU | | libOpenCL.so | | ``/usr/lib/x86_64-linux-gnu/libOpenCL.so.1`` | | | | cache.json | | ``./runtime/lib/intel64/cache.json`` | +--------------+-------------------------+-------------------------------------------------------+ - | GNA | libgna.so | ``./runtime/lib/intel64/libgna.so.3`` | - +--------------+-------------------------+-------------------------------------------------------+ .. tab-item:: macOS arm64 :sync: macos-arm-64 @@ -152,7 +146,7 @@ Examples **CPU + OpenVINO IR in C application** -In this example, the application is written in C, performs inference on CPU, and reads models stored in the OpenVINO IR format. +In this example, the application is written in C, performs inference on CPU, and reads models stored in the OpenVINO IR format. The following libraries are used: ``openvino_c``, ``openvino``, ``openvino_intel_cpu_plugin``, and ``openvino_ir_frontend``. @@ -163,9 +157,9 @@ The following libraries are used: ``openvino_c``, ``openvino``, ``openvino_intel **MULTI execution on GPU and CPU in `tput` mode** -In this example, the application is written in C++, performs inference :doc:`simultaneously on GPU and CPU devices ` with the `ov::hint::PerformanceMode::THROUGHPUT `__ property set, and reads models stored in the ONNX format. +In this example, the application is written in C++, performs inference :doc:`simultaneously on GPU and CPU devices ` with the `ov::hint::PerformanceMode::THROUGHPUT `__ property set, and reads models stored in the ONNX format. -The following libraries are used: ``openvino``, ``openvino_intel_gpu_plugin``, ``openvino_intel_cpu_plugin``, ``openvino_auto_plugin``, ``openvino_auto_batch_plugin``, and ``openvino_onnx_frontend``. 
+The following libraries are used: ``openvino``, ``openvino_intel_gpu_plugin``, ``openvino_intel_cpu_plugin``, ``openvino_auto_plugin``, ``openvino_auto_batch_plugin``, and ``openvino_onnx_frontend``.

- The ``openvino`` library is a main dependency of the application. The app links against this library.
- ``openvino_intel_gpu_plugin`` and ``openvino_intel_cpu_plugin`` are used for inference.
@@ -175,9 +169,9 @@ The following libraries are used: ``openvino``, ``openvino_intel_gpu_plugin``, `

**Auto-Device Selection between GPU and CPU**

-In this example, the application is written in C++, performs inference with the :doc:`Automatic Device Selection ` mode, limiting device list to GPU and CPU, and reads models :doc:`created using C++ code `.
+In this example, the application is written in C++, performs inference with the :doc:`Automatic Device Selection ` mode, limiting the device list to GPU and CPU, and reads models :doc:`created using C++ code `.

-The following libraries are used: ``openvino``, ``openvino_auto_plugin``, ``openvino_intel_gpu_plugin``, and ``openvino_intel_cpu_plugin``.
+The following libraries are used: ``openvino``, ``openvino_auto_plugin``, ``openvino_intel_gpu_plugin``, and ``openvino_intel_cpu_plugin``.

- The ``openvino`` library is a main dependency of the application. The app links against this library.
- ``openvino_auto_plugin`` is used to enable Automatic Device Selection.
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst
index 04557711952299..17595ffdae3692 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst
@@ -21,7 +21,7 @@ Running Inference with OpenVINO™

 OpenVINO Runtime is a set of C++ libraries with C and Python bindings providing a common API
-to deploy inference on the platform of your choice. You can run any of the
+to deploy inference on the platform of your choice. You can run any of the
 :doc:`supported model formats ` directly or convert the model and save it to the :doc:`OpenVINO IR ` format, for maximum performance.

@@ -38,13 +38,13 @@ OpenVINO IR provides by far the best first-inference latency scores.

 For more detailed information on how to convert, read, and compile supported model formats see the :doc:`Model Preparation article `.
-
+
 Note that TensorFlow models can be run using the :doc:`torch.compile feature `, as well as the standard ways of :doc:`converting TensorFlow ` or running its inference.

-OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain complete implementation for inference on a particular Intel® hardware device: CPU, GPU, GNA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices or API interoperability between OpenVINO Runtime and underlying plugin backend.
+OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain a complete implementation of inference on a particular Intel® hardware device: CPU, GPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices, and for API interoperability between OpenVINO Runtime and the underlying plugin backend.
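To make the plugin mechanism concrete, here is a minimal sketch of the unified API (the ``model.xml`` file name is illustrative, not part of the original text): switching the target plugin only requires changing the device string passed to the same call.

.. code-block:: py

   import openvino as ov

   core = ov.Core()
   model = core.read_model("model.xml")  # illustrative IR model path

   # The same unified call is dispatched to a different plugin library,
   # selected solely by the device string.
   compiled_on_cpu = core.compile_model(model, "CPU")
   compiled_on_gpu = core.compile_model(model, "GPU")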
The scheme below illustrates the typical workflow for deploying a trained deep learning model: diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst index c3c6b27d2ba9ac..be9e210702de1b 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst @@ -5,8 +5,8 @@ Inference Device Support .. meta:: - :description: The list of types of devices and corresponding plugins which - are compatible with OpenVINO Runtime and support inference + :description: The list of types of devices and corresponding plugins which + are compatible with OpenVINO Runtime and support inference of deep learning models. @@ -17,7 +17,6 @@ Inference Device Support openvino_docs_OV_UG_supported_plugins_CPU openvino_docs_OV_UG_supported_plugins_GPU openvino_docs_OV_UG_supported_plugins_NPU - openvino_docs_OV_UG_supported_plugins_GNA openvino_docs_OV_UG_query_api @@ -26,7 +25,6 @@ OpenVINO™ Runtime can infer deep learning models using the following device ty * :doc:`CPU ` * :doc:`GPU ` * :doc:`NPU ` -* :doc:`GNA ` * :doc:`Arm® CPU ` For a more detailed list of hardware, see :doc:`Supported Devices `. @@ -39,20 +37,20 @@ Feature Support Matrix The table below demonstrates support of key features by OpenVINO device plugins. -========================================================================================= ============================ ========== =========== =========== - Capability CPU GPU NPU GNA -========================================================================================= ============================ ========== =========== =========== - :doc:`Heterogeneous execution ` Yes Yes No - :doc:`Multi-device execution ` Yes Yes Partial - :doc:`Automatic batching ` No Yes No - :doc:`Multi-stream execution ` Yes (Intel® x86-64 only) Yes No - :doc:`Models caching ` Yes Partial Yes - :doc:`Dynamic shapes ` Yes Partial No - :doc:`Import/Export ` Yes No Yes - :doc:`Preprocessing acceleration ` Yes Yes No - :doc:`Stateful models ` Yes No Yes - :doc:`Extensibility ` Yes Yes No -========================================================================================= ============================ ========== =========== =========== +========================================================================================= ============================ ========== =========== + Capability CPU GPU NPU +========================================================================================= ============================ ========== =========== + :doc:`Heterogeneous execution ` Yes Yes No + :doc:`Multi-device execution ` Yes Yes Partial + :doc:`Automatic batching ` No Yes No + :doc:`Multi-stream execution ` Yes (Intel® x86-64 only) Yes No + :doc:`Models caching ` Yes Partial Yes + :doc:`Dynamic shapes ` Yes Partial No + :doc:`Import/Export ` Yes No Yes + :doc:`Preprocessing acceleration ` Yes Yes No + :doc:`Stateful models ` Yes No Yes + :doc:`Extensibility ` Yes Yes No +========================================================================================= ============================ ========== =========== For more details on plugin-specific feature limitations, see the corresponding plugin pages. @@ -70,8 +68,6 @@ The OpenVINO Runtime API features dedicated methods of enumerating devices and t Device: GPU.0 ... Device: GPU.1 - ... 
- Device: GNA A simple programmatic way to enumerate the devices and use with the multi-device is as follows: @@ -80,14 +76,14 @@ A simple programmatic way to enumerate the devices and use with the multi-device .. tab-item:: C++ :sync: cpp - + .. doxygensnippet:: docs/snippets/MULTI2.cpp :language: cpp :fragment: [part2] -Beyond the typical "CPU", "GPU", and so on, when multiple instances of a device are available, the names are more qualified. +Beyond the typical "CPU", "GPU", and so on, when multiple instances of a device are available, the names are more qualified. For example, this is how two GPUs can be listed (iGPU is always GPU.0): .. code-block:: sh @@ -104,7 +100,7 @@ So, the explicit configuration to use both would be "MULTI:GPU.1,GPU.0". Accordi .. tab-item:: C++ :sync: cpp - + .. doxygensnippet:: docs/snippets/MULTI3.cpp :language: cpp :fragment: [part3] diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst deleted file mode 100644 index 2169d1269393f9..00000000000000 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst +++ /dev/null @@ -1,466 +0,0 @@ -.. {#openvino_docs_OV_UG_supported_plugins_GNA} - -GNA Device -========== - - - - - -.. meta:: - :description: The GNA plugin in OpenVINO™ Runtime enables running inference - on Intel® Gaussian & Neural Accelerator (GNA) and in the - software execution mode on CPU. - - -The Intel® Gaussian & Neural Accelerator (GNA) is a low-power neural coprocessor for continuous inference at the edge. - -Intel® GNA is not intended to replace typical inference devices such as the CPU and GPU. It is designed for offloading -continuous inference workloads including but not limited to noise reduction or speech recognition -to save power and free CPU resources. It lets you run inference on Intel® GNA, as well as the CPU, in the software execution mode. -For more details on how to configure a system to use GNA, see the :doc:`GNA configuration page `. - -.. note:: - - Intel's GNA is being discontinued and Intel® Core™ Ultra (formerly known as Meteor Lake) - will be the last generation of hardware to include it. - For this reason, the GNA plugin will soon be discontinued. - Consider Intel's new Neural Processing Unit as a low-power solution for offloading - neural network computation, for processors offering the technology. - - - -Intel® GNA Generational Differences -########################################################### - -The first (1.0) and second (2.0) versions of Intel® GNA found in 10th and 11th generation Intel® Core™ Processors may be considered -functionally equivalent. Intel® GNA 2.0 provided performance improvement with respect to Intel® GNA 1.0. - -======================= ======================== - Intel CPU generation GNA HW Version -======================= ======================== -10th, 11th GNA 2.0 -12th, 13th GNA 3.0 -14th GNA 3.5 -======================= ======================== - -In this documentation, "GNA 2.0" refers to Intel® GNA hardware delivered on 10th and 11th generation Intel® Core™ processors, -and the term "GNA 3.0" refers to GNA hardware delivered on 12th, 13th generation Intel® Core™ processors, and the term -"GNA 3.5" refers to GNA hardware delivered on 14th generation of Intel® Core™ processors. 
- -Intel® GNA Forward and Backward Compatibility -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -When a model is run, using the GNA plugin, it is compiled internally for the specific hardware target. It is possible to export a compiled model, -using `Import/Export <#import-export>`__ functionality to use it later. In general, there is no guarantee that a model compiled and -exported for GNA 2.0 runs on GNA 3.0 or vice versa. - -================== ======================== ======================================================= ======================================================= - Hardware Compile target 2.0 Compile target 3.0 Compile target 3.5 -================== ======================== ======================================================= ======================================================= - GNA 2.0 Supported Not supported (incompatible layers emulated on CPU) Not supported (incompatible layers emulated on CPU) - GNA 3.0 Partially supported Supported Not supported (incompatible layers emulated on CPU) - GNA 3.5 Partially supported Partially supported Supported -================== ======================== ======================================================= ======================================================= - -.. note:: - - In most cases, a network compiled for GNA 2.0 runs as expected on GNA 3.0. However, performance may be worse - compared to when a network is compiled specifically for the latter. The exception is a network with convolutions - with the number of filters greater than 8192 (see the `Model and Operation Limitations <#model-and-operation-limitations>`__ section). - - -For optimal work with POT quantized models, which include 2D convolutions on GNA 3.0/3.5 hardware, the following requirements should be satisfied: - -* Choose a compile target with priority on: cross-platform execution, performance, memory, or power optimization. -* To check interoperability in your application use: ``ov::intel_gna::execution_target`` and ``ov::intel_gna::compile_target``. - -Software Emulation Mode -########################################################### - -Software emulation mode is used by default on platforms without GNA hardware support. Therefore, model runs even if there is no GNA HW within your platform. -GNA plugin enables switching the execution between software emulation mode and hardware execution mode once the model has been loaded. -For details, see a description of the ``ov::intel_gna::execution_mode`` property. - -Recovery from Interruption by High-Priority Windows Audio Processes -############################################################################ - -GNA is designed for real-time workloads i.e., noise reduction. For such workloads, processing should be time constrained. -Otherwise, extra delays may cause undesired effects such as *audio glitches*. The GNA driver provides a Quality of Service (QoS) -mechanism to ensure that processing can satisfy real-time requirements. The mechanism interrupts requests that might cause -high-priority Windows audio processes to miss the schedule. As a result, long running GNA tasks terminate early. - -To prepare the applications correctly, use Automatic QoS Feature described below. 
- -Automatic QoS Feature on Windows -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -Starting with the 2021.4.1 release of OpenVINO™ and the 03.00.00.1363 version of Windows GNA driver, the execution mode of -``ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK`` has been available to ensure that workloads satisfy real-time execution. -In this mode, the GNA driver automatically falls back on CPU for a particular infer request if the HW queue is not empty. -Therefore, there is no need for explicitly switching between GNA and CPU. - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/gna/configure.py - :language: py - :fragment: [import] - - .. doxygensnippet:: docs/snippets/gna/configure.py - :language: py - :fragment: [ov_gna_exec_mode_hw_with_sw_fback] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/gna/configure.cpp - :language: cpp - :fragment: [include] - - .. doxygensnippet:: docs/snippets/gna/configure.cpp - :language: cpp - :fragment: [ov_gna_exec_mode_hw_with_sw_fback] - - -.. note:: - - Due to the "first come - first served" nature of GNA driver and the QoS feature, this mode may lead to increased - CPU consumption if there are several clients using GNA simultaneously. Even a lightweight competing infer request, - not cleared at the time when the user's GNA client process makes its request, can cause the user's request to be - executed on CPU, unnecessarily increasing CPU utilization and power. - - -Supported Inference Data Types -########################################################### - -Intel® GNA essentially operates in the low-precision mode which represents a mix of 8-bit (``i8``), 16-bit (``i16``), and 32-bit (``i32``) -integer computations. Unlike other OpenVINO devices supporting low-precision execution, it can calculate quantization factors at the -model loading time. Therefore, a model can be run without calibration. However, this mode may not provide satisfactory accuracy -because the internal quantization algorithm is based on heuristics, the efficiency of which depends on the model and dynamic range of input data. -This mode is going to be deprecated soon. GNA supports the ``i16`` and ``i8`` quantized data types as inference precision of internal primitives. - -:doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices. - -For POT quantized models, the ``ov::hint::inference_precision`` property has no effect except in cases described in the -`Model and Operation Limitations section <#model-and-operation-limitations>`__. - - -Supported Features -########################################################### - -Model Caching -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -Due to import/export functionality support (see below), cache for GNA plugin may be enabled via common ``ov::cache_dir`` property of OpenVINO™. - -For more details, see the :doc:`Model caching overview `. - - -Import/Export -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -The GNA plugin supports import/export capability, which helps decrease first inference time significantly. -The model compile target is the same as the execution target by default. If there is no GNA HW in the system, -the default value for the execution target corresponds to available hardware or latest hardware version, -supported by the plugin (i.e., GNA 3.0). 
- -To export a model for a specific version of GNA HW, use the ``ov::intel_gna::compile_target`` property and then export the model: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/gna/import_export.py - :language: py - :fragment: [ov_gna_export] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/gna/import_export.cpp - :language: cpp - :fragment: [ov_gna_export] - - -Import model: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/gna/import_export.py - :language: py - :fragment: [ov_gna_import] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/gna/import_export.cpp - :language: cpp - :fragment: [ov_gna_import] - - -To compile a model, use :ref:`compile Tool `. - - -Stateful Models -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -GNA plugin natively supports stateful models. For more details on such models, refer to the :doc:`Stateful models `. - -.. note:: - - The GNA is typically used in streaming scenarios when minimizing latency is important. Taking into account that POT does not - support the ``TensorIterator`` operation, the recommendation is to use the ``transform`` option of model conversion API - to apply ``LowLatency2`` transformation when converting an original model. - -Profiling -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -The GNA plugin allows turning on profiling, using the ``ov::enable_profiling`` property. -With the following methods, you can collect profiling information with various performance data about execution on GNA: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - ``openvino.InferRequest.get_profiling_info`` - - .. tab-item:: C++ - :sync: cpp - - ``ov::InferRequest::get_profiling_info`` - - -The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. -The API enables you to retrieve counter units in cycles. You can convert cycles to seconds as follows: - -.. 
code-block:: sh - - seconds = cycles / frequency - - -Refer to the table below for the frequency of Intel® GNA inside particular processors: - -========================================================== ================================== - Processor Frequency of Intel® GNA, MHz -========================================================== ================================== -Intel® Core™ processors 400 -Intel® processors formerly codenamed Elkhart Lake 200 -Intel® processors formerly codenamed Gemini Lake 200 -========================================================== ================================== - - -Inference request performance counters provided for the time being: - -* The number of total cycles spent on scoring in hardware, including compute and memory stall cycles -* The number of stall cycles spent in hardware - - -Supported Properties -########################################################### - -Read-write Properties -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -In order to take effect, the following parameters must be set before model compilation or passed as additional arguments to ``ov::Core::compile_model()``: - -- ``ov::cache_dir`` -- ``ov::enable_profiling`` -- ``ov::hint::inference_precision`` -- ``ov::hint::num_requests`` -- ``ov::intel_gna::compile_target`` -- ``ov::intel_gna::firmware_model_image_path`` -- ``ov::intel_gna::execution_target`` -- ``ov::intel_gna::pwl_design_algorithm`` -- ``ov::intel_gna::pwl_max_error_percent`` -- ``ov::intel_gna::scale_factors_per_input`` - -These parameters can be changed after model compilation ``ov::CompiledModel::set_property``: - -- ``ov::hint::performance_mode`` -- ``ov::intel_gna::execution_mode`` -- ``ov::log::level`` - -Read-only Properties -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -- ``ov::available_devices`` -- ``ov::device::capabilities`` -- ``ov::device::full_name`` -- ``ov::intel_gna::library_full_version`` -- ``ov::optimal_number_of_infer_requests`` -- ``ov::range_for_async_infer_requests`` -- ``ov::supported_properties`` - -Limitations -########################################################### - -Model and Operation Limitations -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -Due to the specification of hardware architecture, Intel® GNA supports a limited set of operations (including their kinds and combinations). -For example, GNA Plugin should not be expected to run computer vision models because the plugin does not fully support 2D convolutions. -The exception are the models specifically adapted for the GNA Plugin. - -Limitations include: - -- Prior to GNA 3.0, only 1D convolutions are natively supported on the HW; 2D convolutions have specific limitations (see the table below). -- The number of output channels for convolutions must be a multiple of 4. -- The maximum number of filters is 65532 for GNA 2.0 and 8192 for GNA 3.0. -- Starting with Intel® GNA 3.5 the support for Int8 convolution weights has been added. Int8 weights can be used in models quantized by POT. -- *Transpose* layer support is limited to the cases where no data reordering is needed or when reordering is happening for two dimensions, at least one of which is not greater than 8. -- Splits and concatenations are supported for continuous portions of memory (e.g., split of 1,2,3,4 to 1,1,3,4 and 1,1,3,4 or concats of 1,2,3,4 and 1,2,3,5 to 2,2,3,4). -- For *Multiply*, *Add* and *Subtract* layers, auto broadcasting is only supported for constant inputs. 
- - -Support for 2D Convolutions up to GNA 3.0 ------------------------------------------------------------ - -The Intel® GNA 1.0 and 2.0 hardware natively supports only 1D convolutions. However, 2D convolutions can be mapped to 1D when -a convolution kernel moves in a single direction. Initially, a limited subset of Intel® GNA 3.0 features are added to the -previous feature set including: - -* **2D VALID Convolution With Small 2D Kernels:** Two-dimensional convolutions with the following kernel dimensions - [``H``,``W``] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3]. - Input tensor dimensions are limited to [1,8,16,16] <= [``N``,``C``,``H``,``W``] <= [1,120,384,240]. Up to 384 ``C`` - channels may be used with a subset of kernel sizes (see the table below). Up to 256 kernels (output channels) - are supported. Pooling is limited to pool shapes of [1,1], [2,2], or [3,3]. Not all combinations of kernel - shape and input tensor shape are supported (see the tables below for exact limitations). - -The tables below show that the exact limitation on the input tensor width W depends on the number of input channels -*C* (indicated as *Ci* below) and the kernel shape. There is much more freedom to choose the input tensor height and number of output channels. - -The following tables provide a more explicit representation of the Intel(R) GNA 3.0 2D convolution operations -initially supported. The limits depend strongly on number of input tensor channels (*Ci*) and the input tensor width (*W*). -Other factors are kernel height (*KH*), kernel width (*KW*), pool height (*PH*), pool width (*PW*), horizontal pool step (*SH*), -and vertical pool step (*PW*). For example, the first table shows that for a 3x3 kernel with max pooling, only square pools are supported, -and *W* is limited to 87 when there are 64 input channels. - - -:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i16) <../../../docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/assets/GNA_Maximum_Input_Tensor_Widths_i16.csv>` - -:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/assets/GNA_Maximum_Input_Tensor_Widths_i8.csv>` - - -.. note:: - - The above limitations only apply to the new hardware 2D convolution operation. For GNA 3.0, when possible, the Intel® GNA - plugin graph compiler flattens 2D convolutions so that the second generation Intel® GNA 1D convolution operations - (without these limitations) may be used. The plugin will also flatten 2D convolutions regardless of the sizes if GNA 2.0 - compilation target is selected (see below). -Support for Convolutions since GNA 3.5 --------------------------------------------------------------------------------------------------------------------------------------- - -Starting from Intel® GNA 3.5, 1D convolutions are handled in a different way than in GNA 3.0. 
Convolutions have the following limitations: - -============================ ======================= ================= - Limitation Convolution 1D Convolution 2D -============================ ======================= ================= -Input height 1 1-65535 -Input Width 1-65535 1-65535 -Input channel number 1 1-1024 -Kernel number 1-8192 1-8192 -Kernel height 1 1-255 -Kernel width 1-2048 1-256 -Stride height 1 1-255 -Stride width 1-2048 1-256 -Dilation height 1 1 -Dilation width 1 1 -Pooling window height 1-1 1-255 -Pooling window width 1-255 1-255 -Pooling stride height 1 1-255 -Pooling stride width 1-255 1-255 -============================ ======================= ================= - - -Limitations for GNA 3.5 refers to the specific dimension. The full range of parameters is not always fully supported, -e.g. where Convolution 2D Kernel can have height 255 and width 256, it may not work with Kernel with shape 255x256. - -Support for 2D Convolutions using POT ------------------------------------------------------------ - -For POT to successfully work with the models including GNA3.0 2D convolutions, the following requirements must be met: - -* All convolution parameters are natively supported by HW (see tables above). -* The runtime precision is explicitly set by the ``ov::hint::inference_precision`` property as ``i8`` for the models produced by - the ``performance mode`` of POT, and as ``i16`` for the models produced by the ``accuracy mode`` of POT. - - -Batch Size Limitation -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -Intel® GNA plugin supports processing of context-windowed speech frames in batches of 1-8 frames. -Refer to the :doc:`Layout API overview ` to determine batch dimension. -To set the layout of model inputs in runtime, use the :doc:`Optimize Preprocessing ` guide: - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/gna/set_batch.py - :language: py - :fragment: [import] - - .. doxygensnippet:: docs/snippets/gna/set_batch.py - :language: py - :fragment: [ov_gna_set_nc_layout] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/gna/set_batch.cpp - :language: cpp - :fragment: [include] - - .. doxygensnippet:: docs/snippets/gna/set_batch.cpp - :language: cpp - :fragment: [ov_gna_set_nc_layout] - - -then set batch size: - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/gna/set_batch.py - :language: py - :fragment: [ov_gna_set_batch_size] - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/gna/set_batch.cpp - :language: cpp - :fragment: [ov_gna_set_batch_size] - - -Increasing batch size only improves efficiency of ``MatMul`` layers. - -.. note:: - - For models with ``Convolution``, ``LSTMCell``, ``GRUCell``, or ``ReadValue`` / ``Assign`` operations, the only supported batch size is 1. - - -Compatibility with Heterogeneous mode -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -:doc:`Heterogeneous execution ` is currently not supported by GNA plugin. 
- -See Also -########################################################### - -* :doc:`Supported Devices ` -* :doc:`Converting Model ` diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst index ed7889e996471c..348a8f4570880f 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst @@ -11,8 +11,8 @@ GPU Device openvino_docs_OV_UG_supported_plugins_GPU_RemoteTensor_API .. meta:: - :description: The GPU plugin in the Intel® Distribution of OpenVINO™ toolkit - is an OpenCL based plugin for inference of deep neural + :description: The GPU plugin in the Intel® Distribution of OpenVINO™ toolkit + is an OpenCL based plugin for inference of deep neural networks on Intel® GPus. @@ -38,7 +38,7 @@ Device Naming Convention For demonstration purposes, see the :doc:`Hello Query Device C++ Sample ` that can print out the list of available devices with associated indices. Below is an example output (truncated to the device names only): .. code-block:: sh - + ./hello_query_device Available devices: Device: CPU @@ -46,14 +46,12 @@ For demonstration purposes, see the :doc:`Hello Query Device C++ Sample ` can be used to print out the supported data types for all detected devices. @@ -177,7 +175,7 @@ Alternatively, it can be enabled explicitly via the device notion, for example ` .. tab-set:: - + .. tab-item:: Batching via BATCH plugin .. tab-set:: @@ -224,9 +222,9 @@ If either the ``ov::num_streams(n_streams)`` with ``n_streams > 1`` or the ``ov: multiple streams are created for the model. In the case of GPU plugin each stream has its own host thread and an associated OpenCL queue which means that the incoming infer requests can be processed simultaneously. -.. note:: +.. note:: - Simultaneous scheduling of kernels to different queues does not mean that the kernels are actually executed in parallel on the GPU device. + Simultaneous scheduling of kernels to different queues does not mean that the kernels are actually executed in parallel on the GPU device. The actual behavior depends on the hardware architecture and in some cases the execution may be serialized inside the GPU driver. When multiple inferences of the same model need to be executed in parallel, the multi-stream feature is preferred to multiple instances of the model or application. @@ -241,30 +239,30 @@ Dynamic Shapes .. note:: Currently, dynamic shape support for GPU is a preview feature and has the following limitations: - + - It mainly supports NLP models (Natural Language Processing). Not all operations and optimization passes support dynamic shapes. - As a result, a given model may crash or experience significant performance drops. + As a result, a given model may crash or experience significant performance drops. - Due to the dominant runtime overhead on the host device, dynamic shapes may perform worse than static shapes on a discrete GPU. - Dynamic rank is not supported. The general description of what dynamic shapes are and how they are used can be found in -:doc:`dynamic shapes guide `. +:doc:`dynamic shapes guide `. To support dynamic shape execution, the following basic infrastructures are implemented: - Runtime shape inference: infers output shapes of each primitive for a new input shape at runtime. 
-- Shape agnostic kernels: new kernels that can run arbitrary shapes. If a shape-agnostic kernel is not available,
+- Shape agnostic kernels: new kernels that can run arbitrary shapes. If a shape-agnostic kernel is not available,
   the required kernel is compiled at runtime for each shape.
-- Asynchronous kernel compilation: even when a shape-agnostic kernel is available,
+- Asynchronous kernel compilation: even when a shape-agnostic kernel is available,
   the GPU plugin compiles an optimal kernel for the given shape and preserves it in the in-memory cache for future use.
- In-memory cache: preserves kernels compiled at runtime and weights reordered for the specific kernels.

Bounded dynamic batch
-----------------------------------------------------------

It is worth noting that the internal behavior differs in the case of bounded-batch dynamic shapes, which means that only the batch dimension is dynamic and it has a fixed upper bound.
-While general dynamic shapes can run on one compiled model, for the bounded dynamic batch the GPU plugin creates ``log2(N)``
+While general dynamic shapes can run on one compiled model, for the bounded dynamic batch the GPU plugin creates ``log2(N)``
low-level execution graphs in batch sizes equal to the powers of 2, to emulate the dynamic behavior (``N`` is the upper bound for the batch dimension here). As a result, the incoming infer request with a specific batch size is executed via the minimal combination of internal networks. For example, a batch size of 33 may be executed via two internal networks with batch sizes of 32 and 1.
@@ -315,15 +313,15 @@ Recommendations for performance improvement

- Use bounded dynamic shapes whenever possible

  - The GPU plugin needs to reallocate memory if the current shape is larger than the maximum of the previous shapes,
    which causes additional overhead.
-  - Using a bounded dynamic shape will help to reduce such overhead. For example, use ``{ov::Dimension(1, 10), ov::Dimension(1, 384)}``
+  - Using a bounded dynamic shape will help to reduce such overhead. For example, use ``{ov::Dimension(1, 10), ov::Dimension(1, 384)}``
     instead of ``{ov::Dimension(-1), ov::Dimension(-1)}``.
  - Note that a bounded dynamic *batch* is handled differently as mentioned above.

- Use permanent cache, e.g., OpenVINO model_cache, to reduce the runtime re-compilation overhead

  - GPU plugin deploys in-memory cache to store compiled kernels for previously used shapes,
-   but the size of such an in-memory cache is limited. Therefore, it is recommended to use
-   a permanent cache such as OpenVino model_cache. For more details, See
+   but the size of such an in-memory cache is limited. Therefore, it is recommended to use
+   a permanent cache such as OpenVINO model_cache. For more details, see
    :doc:`Model caching overview `.

- The longer the inference sequence, the better throughput can be obtained, because it can
@@ -336,8 +334,8 @@ Recommendations for performance improvement
  and the GPU plugin is unusable, any not-yet-started compilation tasks for optimal kernels will be canceled. However, if the application process allows enough time for the enqueued asynchronous compilation tasks, the more optimal kernels become available, enabling better
- throughput.
For example, running 200 inputs of + ``{[1, 1], ..., [1, 50], [1, 1], ... , [1, 50], [1, 1], ..., [1, 50], [1, 1], ..., [1, 50]}`` may achieve better throughput than running 100 inputs of ``{[1, 1], ..., [1, 50], [1, 1], ... , [1,50]}``. @@ -374,15 +372,15 @@ For more details, see the :doc:`preprocessing API`. diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst index 63de6309809943..25d5535808f378 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst @@ -12,7 +12,7 @@ The choice of data types is essential to the inference runtime, which can have a Inference precision no longer depends on the precision of IR, which means that users have several options to find the balance between model performance and accuracy. -Essentially, the IR precision becomes a way of compressing the model by reducing the precision of the weights, and it does not affect how the devices execute the model. This change clears up a lot of confusion where, for example, you couldn't execute a high-performance model on the GPU by default, and the behavior between devices was different. +Essentially, the IR precision becomes a way of compressing the model by reducing the precision of the weights, and it does not affect how the devices execute the model. This change clears up a lot of confusion where, for example, you couldn't execute a high-performance model on the GPU by default, and the behavior between devices was different. This guide will focus on how to control inference precision. And using lower precision is important for performance because compute bandwidth tends to be higher for smaller data types, and hardware often has special blocks for efficient multiply-accumulate operations with smaller data types only (e.g. Intel Xᵉ Matrix Extensions (XMX) on GPU and Intel Advanced Matrix Extensions (AMX) on CPU do not support ``f32``). Also, I/O operations requires less memory due to the smaller tensor byte size. This guide will focus on how to control inference precision. @@ -33,14 +33,14 @@ Code examples: .. tab-item:: Python :sync: py - + .. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.py :language: python :fragment: [ov:execution_mode:part0] .. tab-item:: C++ :sync: cpp - + .. doxygensnippet:: docs/snippets/cpu/ov_execution_mode.cpp :language: cpp :fragment: [ov:execution_mode:part0] @@ -49,11 +49,11 @@ Code examples: Inference Precision ################### -``ov::hint::inference_precision`` precision is a lower-level property that allows you to specify the exact precision the user wants, but is less portable. For example, CPU supports ``f32`` inference precision and ``bf16`` on some platforms, GPU supports ``f32`` and ``f16`` while GNA supports ``i8`` and ``i16``, so if a user wants to an application that uses multiple devices, they have to handle all these combinations manually or let OV do it automatically by using higher level ``execution_mode`` property. Another thing is that ``inference_precision`` is also a hint, so the value provided is not guaranteed to be used by Runtime (mainly in cases where the current device does not have the required hardware capabilities). 
+``ov::hint::inference_precision`` is a lower-level property that allows you to specify the exact precision the user wants, but is less portable. For example, CPU supports ``f32`` inference precision and ``bf16`` on some platforms, GPU supports ``f32`` and ``f16``, so if a user wants an application that uses multiple devices, they have to handle all these combinations manually or let OV do it automatically by using the higher-level ``execution_mode`` property. Note that ``inference_precision`` is also a hint, so the value provided is not guaranteed to be used by Runtime (mainly in cases where the current device does not have the required hardware capabilities).

.. note::

-   All devices (except GNA) only support floating-point data types (``f32``, ``f16``, ``bf16``) as a value for ``inference_precision`` attribute, because quantization cannot be done in Runtime. The GNA plugin has the ability to perform model quantization on ``core.compile_model()`` call, so it supports integer data types in addition to ``f32``.
+   All devices only support floating-point data types (``f32``, ``f16``, ``bf16``) as a value for the ``inference_precision`` attribute, because quantization cannot be done in Runtime.


Additional Resources
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
index 4f07e650af8223..5667203e2fe11b 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
@@ -4,8 +4,8 @@ Automatic Device Selection
 ==========================

 .. meta::
-   :description: The Automatic Device Selection mode in OpenVINO™ Runtime
-                 detects available devices and selects the optimal processing
+   :description: The Automatic Device Selection mode in OpenVINO™ Runtime
+                 detects available devices and selects the optimal processing
                  unit for inference automatically.


@@ -53,11 +53,11 @@ The logic behind the choice is as follows:
 |          | (e.g. Intel® Core™ Ultra)                           |                                    |
 +----------+-----------------------------------------------------+------------------------------------+

-.. note::
+.. note::

    Note that NPU is currently excluded from the default priority list. To use it for inference, you
    need to specify it explicitly.
-
+

 How AUTO Works
 ##############
@@ -71,14 +71,14 @@ For example, if you use a CPU and a GPU, the first-inference latency of AUTO wil

 Note that if you choose to exclude CPU from the priority list or disable the initial CPU acceleration feature via ``ov::intel_auto::enable_startup_fallback``, it will be unable to support the initial model compilation stage. The models with dynamic
-input/output or stateful :doc:`stateful`
-operations will be loaded to the CPU if it is in the candidate list. Otherwise,
+input/output or :doc:`stateful`
+operations will be loaded to the CPU if it is in the candidate list. Otherwise,
 these models will follow the normal flow and be loaded to the device based on priority.

..
image:: _static/images/autoplugin_accelerate.svg


-This mechanism can be easily observed in the :ref:`Using AUTO with Benchmark app sample `
+This mechanism can be easily observed in the :ref:`Using AUTO with Benchmark app sample `
 section, showing how the first-inference latency (the time it takes to compile the
 model and perform the first inference) is reduced when using AUTO. For example:

@@ -102,7 +102,7 @@ model and perform the first inference) is reduced when using AUTO. For example:
 Using AUTO
 ##########

-Following the OpenVINO™ naming convention, the Automatic Device Selection mode is assigned the label of "AUTO".
+Following the OpenVINO™ naming convention, the Automatic Device Selection mode is assigned the label of "AUTO".
 It may be defined with no additional parameters, resulting in defaults being used, or configured further with
 the following setup options:

@@ -197,7 +197,7 @@ the following setup options:

 Inference with AUTO is configured similarly to when device plugins are used:
 you compile the model on the plugin with configuration and execute inference.

-The code samples on this page assume following import(Python)/using (C++) are included at the beginning of code snippets.
+The code samples on this page assume the following import (Python) / using (C++) statements are included at the beginning of code snippets.

 .. tab-set::

@@ -244,7 +244,7 @@ See the following code for using AUTO and specifying devices:

          .. doxygensnippet:: docs/snippets/AUTO0.cpp
             :language: cpp
             :fragment: [part0]
-
+

Note that OpenVINO Runtime lets you use "GPU" as an alias for "GPU.0" in function calls. More details on enumerating devices can be found in :doc:`Working with devices `.

@@ -258,20 +258,20 @@ To check what devices are present in the system, you can use Device API, as list

    .. tab-item:: Python
       :sync: py
-
+
       .. code-block:: sh
-
+
          openvino.runtime.Core.available_devices
-
+
       See the Hello Query Device Python Sample for reference.

    .. tab-item:: C++
       :sync: cpp
-
+
       .. code-block:: sh
-
+
          ov::Core::get_available_devices()
-
+
       See the Hello Query Device C++ Sample for reference.


@@ -284,16 +284,16 @@ You can also exclude hardware devices from AUTO, for example, to reserve CPU for

    .. tab-item:: Python
      :sync: py
-
+
      .. code-block:: sh
-
+
         compiled_model = core.compile_model(model=model, device_name="AUTO:-CPU")

    .. tab-item:: C++
      :sync: cpp
-
+
      .. code-block:: sh
-
+
         ov::CompiledModel compiled_model = core.compile_model(model, "AUTO:-CPU");


@@ -342,38 +342,38 @@ CUMULATIVE_THROUGHPUT has similar behavior as :doc:`the Multi-Device execution m

 If device priority is specified when using CUMULATIVE_THROUGHPUT, AUTO will run inference requests on devices based on the priority. In the following example, AUTO will always try to use GPU first, and then use CPU if GPU is busy:

 .. tab-set::
-
    .. tab-item:: Python
       :sync: py

      .. code-block:: sh

         compiled_model = core.compile_model(model, "AUTO:GPU,CPU", {hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT})

    .. tab-item:: C++
       :sync: cpp

      .. code-block:: sh

         ov::CompiledModel compiled_model = core.compile_model(model, "AUTO:GPU,CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
-
 If AUTO is used without specifying any device names, and if there are multiple GPUs in the system, CUMULATIVE_THROUGHPUT mode will use all of the GPUs by default. If the system has more than two GPU devices, AUTO will remove CPU from the device candidate list to keep the GPUs running at full capacity.
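As an illustrative sketch (not one of the official snippets: it assumes ``model`` has already been read with ``core.read_model()`` and that at least one GPU is present), the GPU-only candidate list can also be built programmatically before compiling with CUMULATIVE_THROUGHPUT:

.. code-block:: py

   import openvino as ov
   import openvino.properties.hint as hints

   core = ov.Core()
   # For example ['GPU.0', 'GPU.1'] on a system with two GPUs
   gpu_devices = [d for d in core.available_devices if d.startswith("GPU")]
   compiled_model = core.compile_model(
       model,  # assumed to be read earlier with core.read_model()
       "AUTO:" + ",".join(gpu_devices),
       {hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT},
   )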
A full list of system devices and their unique identifiers can be queried using ov::Core::get_available_devices (for more information, see :doc:`Query Device Properties `). To explicitly specify which GPUs to use, set their priority when compiling with AUTO:

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. code-block:: sh

         compiled_model = core.compile_model(model, "AUTO:GPU.1,GPU.0", {hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT})

   .. tab-item:: C++
      :sync: cpp

      .. code-block:: sh

         ov::CompiledModel compiled_model = core.compile_model(model, "AUTO:GPU.1,GPU.0", ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));

@@ -490,7 +490,7 @@ For limited device choice:

 .. code-block:: sh

-   benchmark_app –d AUTO:CPU,GPU,GNA –m -i -niter 1000
+   benchmark_app -d AUTO:CPU,GPU -m -i -niter 1000

For more information, refer to the :doc:`Benchmark Tool ` article.
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst
index 03035947649802..b7d62da2c3701b 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst
@@ -5,7 +5,7 @@ Heterogeneous execution

 .. meta::
-   :description: Heterogeneous execution mode in OpenVINO Runtime enables
+   :description: Heterogeneous execution mode in OpenVINO Runtime enables
                  the inference of one model on several computing devices.


@@ -68,7 +68,7 @@ Randomly selecting operations and setting affinities may lead to decrease in mod
 The Automatic Mode
 --------------------

-It decides automatically which operation is assigned to which device according to the support from dedicated devices (``GPU``, ``CPU``, ``GNA``, etc.) and query model step is called implicitly by Hetero device during model compilation.
+It decides automatically which operation is assigned to which device according to the support from dedicated devices (``GPU``, ``CPU``, etc.), and the query model step is called implicitly by the Hetero device during model compilation.

 The automatic mode causes "greedy" behavior and assigns all operations that can be executed on a given device to it, according to the priorities you specify (for example, ``ov::device::priorities("GPU,CPU")``).
 It does not take into account device peculiarities such as the inability to infer certain operations without other special operations placed before or after that layer. If the device plugin does not support the subgraph topology constructed by the HETERO device, then you should set affinity manually.
@@ -199,7 +199,7 @@ where:

 * ``HETERO`` stands for the Heterogeneous execution
 * ``GPU,CPU`` points to a fallback policy with the priority on GPU and fallback to CPU

-You can also point to more than two devices: ``-d HETERO:GNA,GPU,CPU``
+You can also point to more than two devices, for example: ``-d HETERO:GPU.1,GPU.0,CPU``

 Additional Resources
 ####################
diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst
index 059c581f3fd1f3..23cfd4c0a13ead 100644
--- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst
+++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst
@@ -82,23 +82,23 @@ To get the texts, we will pass the IDs to the
..
code:: ipython3 import requests - - + + def get_book_by_id(book_id: int, gutendex_url: str = "https://gutendex.com/") -> str: book_metadata_url = gutendex_url + "/books/" + str(book_id) request = requests.get(book_metadata_url, timeout=30) request.raise_for_status() - + book_metadata = request.json() text_format_key = "text/plain" text_plain = [k for k in book_metadata["formats"] if k.startswith(text_format_key)] book_url = book_metadata["formats"][text_plain[0]] return requests.get(book_url).text - - + + en_book_id = 1399 de_book_id = 44956 - + anna_karenina_en = get_book_by_id(en_book_id) anna_karenina_de = get_book_by_id(de_book_id) @@ -112,7 +112,7 @@ Let’s check that we got the right books by showing a part of the texts: .. parsed-literal:: The Project Gutenberg eBook of Anna Karenina - + This ebook is for the use of anyone anywhere in the United States and most other parts of the world at no cost and with almost no restrictions whatsoever. You may copy it, give it away or re-use it under the terms @@ -120,36 +120,36 @@ Let’s check that we got the right books by showing a part of the texts: at www.gutenberg.org. If you are not located in the United States, you will have to check the laws of the country where you are located before using this eBook. - + Title: Anna Karenina - - + + Author: graf Leo Tolstoy - + Translator: Constance Garnett - + Release date: July 1, 1998 [eBook #1399] Most recently updated: April 9, 2023 - + Language: English - - - + + + *** START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA *** [Illustration] - - - - - ANNA KARENINA - - by Leo Tolstoy - - Translated by Constance Garnett - + + + + + ANNA KARENINA + + by Leo Tolstoy + + Translated by Constance Garnett + Contents - - + + PART ONE PART TWO PART THREE @@ -158,18 +158,18 @@ Let’s check that we got the right books by showing a part of the texts: PART SIX PART SEVEN PART EIGHT - - - - + + + + PART ONE - + Chapter 1 - - + + Happy families are all alike; every unhappy family is unhappy in its own way. - + Everything was in confusion in the Oblonskys’ house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced @@ -240,11 +240,11 @@ the last occurrence of these asterisks. import re from contextlib import contextmanager from tqdm.auto import tqdm - - + + start_pattern_en = r"\nPART ONE" anna_karenina_en = re.split(start_pattern_en, anna_karenina_en)[1].strip() - + end_pattern_en = "*** END OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA ***" anna_karenina_en = anna_karenina_en.split(end_pattern_en)[0].strip() @@ -277,12 +277,12 @@ Let’s cut it out and define some cleaning functions. def remove_single_newline(text: str) -> str: return re.sub(r"\n(?!\n)", " ", text) - - + + def unify_quotes(text: str) -> str: return re.sub(r"['\"»«“”]", '"', text) - - + + def remove_markup(text: str) -> str: text = text.replace(">=", "").replace("=<", "") return re.sub(r"_\w|\w_", "", text) @@ -295,29 +295,29 @@ needed. .. 
code:: ipython3 disable_tqdm = False - - + + @contextmanager def disable_tqdm_context(): global disable_tqdm disable_tqdm = True yield disable_tqdm = False - - + + def clean_text(text: str) -> str: text_cleaning_pipeline = [ remove_single_newline, unify_quotes, remove_markup, - ] + ] progress_bar = tqdm(text_cleaning_pipeline, disable=disable_tqdm) for clean_func in progress_bar: progress_bar.set_postfix_str(clean_func.__name__) text = clean_func(text) return text - - + + chapter_1_en = clean_text(chapter_1_en) chapter_1_de = clean_text(chapter_1_de) @@ -356,15 +356,15 @@ languages. .. code:: ipython3 import pysbd - - + + splitter_en = pysbd.Segmenter(language="en", clean=True) splitter_de = pysbd.Segmenter(language="de", clean=True) - - + + sentences_en = splitter_en.segment(chapter_1_en) sentences_de = splitter_de.segment(chapter_1_de) - + len(sentences_en), len(sentences_de) @@ -414,8 +414,8 @@ different language pairs still producing good results. import torch from openvino.runtime import CompiledModel as OVModel import openvino as ov - - + + model_id = "rasa/LaBSE" pt_model = AutoModel.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) @@ -450,8 +450,8 @@ best fit. for sent in tqdm(sentences, disable=disable_tqdm) ] return torch.vstack(embeddings) - - + + embeddings_en_pt = get_embeddings(sentences_en, pt_model) embeddings_de_pt = get_embeddings(sentences_de, pt_model) @@ -491,10 +491,10 @@ The converted model must be compiled for the target device using the example_input=tokenizer("test", return_tensors="pt").data, input=inputs_info, ) - + core = ov.Core() compiled_model = core.compile_model(ov_model, "CPU") - + embeddings_en = get_embeddings(sentences_en, compiled_model) embeddings_de = get_embeddings(sentences_de, compiled_model) @@ -550,40 +550,40 @@ the converted model is the same as the original one. import seaborn as sns import matplotlib.pyplot as plt - - + + sns.set_style("whitegrid") - - + + def transform(x): x = x - np.mean(x) return x / np.var(x) - - + + def calculate_alignment_matrix( first: np.ndarray, second: np.ndarray, threshold: float = 1e-3 ) -> np.ndarray: similarity = first @ second.T # 1 similarity_en_to_de = np.apply_along_axis(transform, -1, similarity) # 2 similarity_de_to_en = np.apply_along_axis(transform, -2, similarity) # 2 - + both_one = (similarity_en_to_de > threshold) * ( similarity_de_to_en > threshold ) # 3 and 4 return both_one - - + + threshold = 0.028 - + alignment_matrix = calculate_alignment_matrix(embeddings_en, embeddings_de, threshold) alignment_matrix_pt = calculate_alignment_matrix( embeddings_en_pt.detach().numpy(), embeddings_de_pt.detach().numpy(), threshold, ) - + graph, axis = plt.subplots(1, 2, figsize=(10, 5), sharey=True) - + for matrix, ax, title in zip( (alignment_matrix, alignment_matrix_pt), axis, ("OpenVINO", "PyTorch") ): @@ -592,7 +592,7 @@ the converted model is the same as the original one. plot.set_xlabel("German") if title == "OpenVINO": plot.set_ylabel("English") - + graph.tight_layout() @@ -612,8 +612,8 @@ will be lists of German sentence numbers. for en_idx, de_idx in zip(*np.nonzero(alignment_matrix)): aligned[en_idx].append(de_idx) return aligned - - + + aligned = make_alignment(alignment_matrix) aligned @@ -698,22 +698,22 @@ and JS. 
from IPython.display import display, HTML from itertools import zip_longest from io import StringIO - - + + def create_interactive_table( list1: List[str], list2: List[str], mapping: Dict[int, List[int]] ) -> str: def inverse_mapping(mapping): inverse_map = {idx: [] for idx in range(len(list2))} - + for key, values in mapping.items(): for value in values: inverse_map[value].append(key) - + return inverse_map - + inversed_mapping = inverse_mapping(mapping) - + table_html = StringIO() table_html.write( '' @@ -729,9 +729,9 @@ and JS. else: table_html.write("") table_html.write("") - + table_html.write("
<tr><th>Sentences EN</th><th>Sentences DE</th></tr>
") - + hover_script = ( """ diff --git a/docs/sphinx_setup/_templates/layout.html b/docs/sphinx_setup/_templates/layout.html index e85f73556ddcd0..d227c9e56a8675 100644 --- a/docs/sphinx_setup/_templates/layout.html +++ b/docs/sphinx_setup/_templates/layout.html @@ -10,6 +10,7 @@ + @@ -24,11 +25,5 @@ {% block docs_navbar %} {{ super() }} -
-

OpenVINO 2022.1 has introduced OpenVINO API 2.0. For more information on transition steps from the previous API, see the transition guide

- -
- +{% include 'baner.html' %} {% endblock %} diff --git a/docs/sphinx_setup/_templates/search.html b/docs/sphinx_setup/_templates/search.html index 9902e9fe71b1a4..d87f6d44423f8e 100644 --- a/docs/sphinx_setup/_templates/search.html +++ b/docs/sphinx_setup/_templates/search.html @@ -2,9 +2,6 @@ {% set title = _('Search') %} {%- block content %} - {# Added to support a banner with an alert #} - - {% block docs_navbar %} {{ super() }}
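The alignment recipe from the cross-lingual notebook above condenses into a
small, self-contained helper. The sketch below assumes ``embeddings_en`` and
``embeddings_de`` are the LaBSE sentence-embedding matrices computed earlier;
the function name ``align_sentences`` and the random demo inputs at the end
are illustrative stand-ins, not part of the notebook.

.. code:: ipython3

    import numpy as np


    def align_sentences(emb_a: np.ndarray, emb_b: np.ndarray, threshold: float = 0.028) -> dict:
        # Dot-product similarity between every sentence pair.
        similarity = emb_a @ emb_b.T

        def transform(x):
            # Same standardization as calculate_alignment_matrix():
            # center the scores, then divide by the variance.
            x = x - np.mean(x)
            return x / np.var(x)

        sim_a_to_b = np.apply_along_axis(transform, -1, similarity)
        sim_b_to_a = np.apply_along_axis(transform, -2, similarity)

        # Keep a pair only if it clears the threshold in both directions.
        matrix = (sim_a_to_b > threshold) * (sim_b_to_a > threshold)

        aligned = {idx: [] for idx in range(matrix.shape[0])}
        for a_idx, b_idx in zip(*np.nonzero(matrix)):
            aligned[a_idx].append(int(b_idx))
        return aligned


    # Shape-level smoke test with random stand-ins for real embeddings.
    rng = np.random.default_rng(0)
    print(align_sentences(rng.normal(size=(5, 8)), rng.normal(size=(6, 8))))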
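The conversion-and-compile pattern from the same notebook likewise reduces to
a few lines. This is a minimal sketch assuming the same ``rasa/LaBSE``
checkpoint as above; the explicit ``input`` shape specification used in the
notebook is omitted here for brevity, so the converted model keeps the
dynamic shapes inferred during tracing.

.. code:: ipython3

    import openvino as ov
    from transformers import AutoModel, AutoTokenizer

    model_id = "rasa/LaBSE"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    pt_model = AutoModel.from_pretrained(model_id)

    # Trace the PyTorch model with a tiny example input, then compile the
    # resulting ov.Model for CPU, mirroring the cells above.
    example_input = tokenizer("test", return_tensors="pt").data
    ov_model = ov.convert_model(pt_model, example_input=example_input)
    compiled_model = ov.Core().compile_model(ov_model, "CPU")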
From 00d50dc69edffe03fa045b7a84a1c6ff2db24772 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Thu, 18 Jan 2024 13:17:31 +0100 Subject: [PATCH 067/122] [DOCS] Updating templates for Binder and Google Colab buttons (#21888) * Update for Binder and Google Colab buttons * change image source --- docs/nbdoc/consts.py | 82 ++++++-------------------------------------- docs/nbdoc/nbdoc.py | 49 +++++++++++++++++--------- 2 files changed, 43 insertions(+), 88 deletions(-) diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 22a1fda6418f49..3a8ca27255bf19 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -13,90 +13,28 @@ file_with_binder_notebooks = Path('../../docs/notebooks/notebooks_with_binder_buttons.txt').resolve(strict=True) file_with_colab_notebooks = Path('../../docs/notebooks/notebooks_with_colab_buttons.txt').resolve(strict=True) openvino_notebooks_ipynb_list = Path('../../docs/notebooks/all_notebooks_paths.txt').resolve(strict=True) - +binder_image_source = "https://mybinder.org/badge_logo.svg" +colab_image_source = "https://colab.research.google.com/assets/colab-badge.svg" +github_image_source = "https://badgen.net/badge/icon/github?icon=github&label" # Templates -binder_template = """ -This tutorial is also available as a Jupyter notebook that can be cloned directly from GitHub. -See the |installation_link| for instructions to run this tutorial locally on Windows, Linux or macOS. -To run without installing anything, click the "launch binder" button. - -|binder_link| |github_link| - -.. |installation_link| raw:: html - - installation guide - -.. |binder_link| raw:: html - - Binder - -.. |github_link| raw:: html - - Github - -\n -""" -colab_template = """ -This tutorial is also available as a Jupyter notebook that can be cloned directly from GitHub. -See the |installation_link| for instructions to run this tutorial locally on Windows, Linux or macOS. -To run without installing anything, click the "Open in Colab" button. - -|colab_link| |github_link| - -.. |installation_link| raw:: html - - installation guide - -.. |colab_link| raw:: html - - Google Colab - -.. |github_link| raw:: html - - Github - -\n -""" binder_colab_template = """ -This tutorial is also available as a Jupyter notebook that can be cloned directly from GitHub. -See the |installation_link| for instructions to run this tutorial locally on Windows, Linux or macOS. -To run without installing anything, click the "launch binder" or "Open in Colab" button. - -|binder_link| |colab_link| |github_link| - -.. |installation_link| raw:: html - - installation guide +This Jupyter notebook can be launched on-line, opening an interactive environment in a browser window. +You can also make a |installation_link|. Choose one of the following options: -.. |binder_link| raw:: html +{{ link_binder }}{{ link_colab }}{{ link_git }} - Binder - -.. |colab_link| raw:: html - - Google Colab - -.. |github_link| raw:: html - - Github +{{ installation_link }} \n """ no_binder_template = """ -This tutorial is also available as a Jupyter notebook that can be cloned directly from GitHub. -See the |installation_link| for instructions to run this tutorial locally on Windows, Linux or macOS. - -|github_link| - -.. |installation_link| raw:: html - - installation guide +This Jupyter notebook can be launched after a |installation_link| only. -.. 
|github_link| raw:: html +{{ link_git }} - Github +{{ installation_link }} \n """ diff --git a/docs/nbdoc/nbdoc.py b/docs/nbdoc/nbdoc.py index 63de5d47aeb426..5261940cbaaa72 100644 --- a/docs/nbdoc/nbdoc.py +++ b/docs/nbdoc/nbdoc.py @@ -8,8 +8,6 @@ ) from consts import ( artifacts_link, - binder_template, - colab_template, binder_colab_template, blacklisted_extensions, notebooks_path, @@ -23,8 +21,11 @@ notebooks_repo, notebooks_binder, notebooks_colab, - + binder_image_source, + colab_image_source, + github_image_source, ) + from notebook import Notebook from section import Section from glob import glob @@ -150,37 +151,53 @@ def __init__(self, nb_path: str = notebooks_path): for n in matching_notebooks: matching_notebooks_paths.append(n) - def add_binder(self, buttons_list: list, cbuttons_list: list, template_with_colab_and_binder: str = binder_colab_template, template_with_binder: str = binder_template, template_with_colab: str = colab_template, template_without_binder: str = no_binder_template): - """Function working as an example how to add binder button to existing rst files + def add_binder(self, buttons_list: list, cbuttons_list: list, template_with_colab_and_binder: str = binder_colab_template, template_without_binder: str = no_binder_template): + """A function working as an example of how to add Binder or Google Colab buttons to existing RST files. - :param buttons_list: List of notebooks that work on Binder. + :param buttons_list: A list of notebooks that work on Binder. :type buttons_list: list - :param template_with_binder: Template of button added to rst file if Binder is available. Defaults to binder_template. - :type template_with_binder: str - :param template_without_binder: Template of button added to rst file if Binder isn't available. Defaults to no_binder_template. + :param cbuttons_list: A list of notebooks that work on Google Colab. + :type cbuttons_list: list + :param template_with_colab_and_binder: A template with buttons added to an RST file if Binder and/or Google Colab are available. Defaults to template_with_colab_and_binder. + :type template_with_colab_and_binder: str + :param template_without_binder: A template with buttons added to an RST file if neither Binder nor Google Colab are available. Defaults to no_binder_template. :type template_without_binder: str - :raises FileNotFoundError: In case of failure of adding content, error will appear + :raises FileNotFoundError: In case of a failure in adding the content, an error will appear. """ + for notebook_file, nb_path in zip([ nb for nb in os.listdir(self.nb_path) if verify_notebook_name(nb) ], matching_notebooks_paths): notebook_item = '-'.join(notebook_file.split('-')[:-2]) + local_install = ".. |installation_link| raw:: html\n\n local installation \n\n" + binder_badge = ".. raw:: html\n\n Binder\n\n" + colab_badge = ".. raw:: html\n\n Google Colab\n\n" + github_badge = ".. raw:: html\n\n Github

\n\n" binder_data = { "owner": repo_owner, "repo": repo_name, "folder": repo_directory, - "link_git": notebooks_repo + nb_path, - "link_binder": notebooks_binder + nb_path, - "link_colab": notebooks_colab + nb_path, + "link_git": github_badge, + "link_binder": binder_badge if notebook_item in buttons_list else "", + "link_colab": colab_badge if notebook_item in cbuttons_list else "", + "installation_link": local_install } - if notebook_item in buttons_list: - template = template_with_colab_and_binder if notebook_item in cbuttons_list else template_with_binder + if notebook_item in buttons_list or notebook_item in cbuttons_list: + template = template_with_colab_and_binder else: - template = template_with_colab if notebook_item in cbuttons_list else template_without_binder + template = template_without_binder button_text = create_content(template, binder_data, notebook_file) if not add_content_below(button_text, f"{self.nb_path}/{notebook_file}"): From 3d68791d885b0920b1fc4d3329686ed82428209c Mon Sep 17 00:00:00 2001 From: Kelvin Choi Date: Thu, 18 Jan 2024 21:44:56 +0900 Subject: [PATCH 068/122] [GPU] gemm op tile should be invalidate for non-simple layout format (#22005) --- .../src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp index de93e27a3810b0..e62eb5c04426c8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp @@ -261,6 +261,9 @@ bool GemmKernelTiledOpt::Validate(const Params& params, const optional_params& o for (size_t input_idx = 0; input_idx < gmm_params.inputs.size(); ++input_idx) { auto& input = gmm_params.inputs[input_idx]; + if (!Tensor::SimpleLayout(input.GetLayout())) { + return false; + } // Supports outer padding as first element offset and dynamic padding for Batch, Feature, X, Y dimensions for first and second inputs // in case of shape agnostic kernel bool proper_pad_f = input.Feature().pad.is_dynamic ? 
false : input.Feature().pad.Total() == 0; From 8211b849ce3aa809640fadd60577e625d009b59f Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Thu, 18 Jan 2024 13:53:28 +0100 Subject: [PATCH 069/122] Fix C API reference (#22237) --- .../learn_openvino/openvino_samples/hello_classification.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst index 52fe5fa1b5aa99..b6eef4b762a031 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst @@ -261,7 +261,7 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples ` - :doc:`Convert a Model ` -- :doc:`OpenVINO Runtime C API ` +- `OpenVINO Runtime C API `__ - `Hello Classification Python Sample on Github `__ - `Hello Classification C++ Sample on Github `__ - `Hello Classification C Sample on Github `__ From 96efa9b3418a5c0c377ba3e886f0ac06f08ded8f Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Thu, 18 Jan 2024 17:23:48 +0400 Subject: [PATCH 070/122] [GPU] Remove some old code (#22229) * [GPU] Remove split node * [GPU] Remove strided slice opt pass * [GPU] Remove unused LSTM primitives * [GPU] Remove pyramid roi align * [GPU] Removed unused header * [GPU] Disable few tests due to StridedSlice issue --- .../include/intel_gpu/primitives/lstm.hpp | 287 -- .../intel_gpu/primitives/lstm_dynamic.hpp | 167 -- .../primitives/lstm_dynamic_input.hpp | 88 - .../primitives/lstm_dynamic_timeloop.hpp | 145 - .../primitives/pyramid_roi_align.hpp | 100 - .../include/intel_gpu/primitives/split.hpp | 85 - .../include/intel_gpu/runtime/format.hpp | 2 - .../include/intel_gpu/runtime/kernel_args.hpp | 3 - .../graph_optimizer/graph_initializations.cpp | 394 --- .../graph_optimizer/post_optimize_weights.cpp | 8 - .../graph_optimizer/prepare_buffer_fusing.cpp | 1 + .../graph_optimizer/prepare_quantization.cpp | 3 +- .../reverse_optional_nodes_outputs.cpp | 24 - .../strided_slice_optimize.cpp | 86 - .../impls/ocl/kernel_selector_helper.cpp | 4 - .../graph/impls/ocl/lstm_dynamic_input.cpp | 79 - .../graph/impls/ocl/lstm_dynamic_timeloop.cpp | 100 - .../src/graph/impls/ocl/lstm_elt.cpp | 1 - .../src/graph/impls/ocl/lstm_gemm.cpp | 104 - .../src/graph/impls/ocl/pyramid_roi_align.cpp | 75 - .../src/graph/impls/ocl/register.cpp | 4 - .../src/graph/impls/ocl/register.hpp | 9 - .../graph/include/lstm_dynamic_input_inst.h | 63 - .../src/graph/include/lstm_dynamic_inst.h | 86 - .../include/lstm_dynamic_timeloop_inst.h | 90 - .../src/graph/include/lstm_gemm_inst.h | 54 - .../intel_gpu/src/graph/include/lstm_inst.h | 73 - .../src/graph/include/pass_manager.h | 18 - .../graph/include/pyramid_roi_align_inst.h | 48 - .../intel_gpu/src/graph/include/split_inst.h | 28 - src/plugins/intel_gpu/src/graph/lstm.cpp | 69 - .../intel_gpu/src/graph/lstm_dynamic.cpp | 57 - .../src/graph/lstm_dynamic_input.cpp | 109 - .../src/graph/lstm_dynamic_timeloop.cpp | 199 -- src/plugins/intel_gpu/src/graph/lstm_gemm.cpp | 62 - src/plugins/intel_gpu/src/graph/program.cpp | 33 - .../intel_gpu/src/graph/pyramid_roi_align.cpp | 43 - src/plugins/intel_gpu/src/graph/split.cpp | 87 - .../cl_kernels/lstm_dynamic_input_bfyx_opt.cl | 113 - .../cl_kernels/lstm_dynamic_input_ref.cl | 40 - .../cl_kernels/lstm_dynamic_timeloop_ref.cl | 150 - .../cl_kernels/lstm_gemm_gpu_bfyx_ref.cl | 55 - 
...tm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl | 119 - ...tm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl | 121 - .../cl_kernels/pyramid_roi_align_gpu_ref.cl | 149 - .../src/kernel_selector/common_types.h | 4 - .../kernel_selector/kernel_runner_interface.h | 17 - .../kernel_selector_common.cpp | 1 - .../kernel_selector/kernel_selector_params.h | 12 - .../kernels/lstm/lstm_gemm_kernel_base.cpp | 68 - .../kernels/lstm/lstm_gemm_kernel_base.h | 81 - .../kernels/lstm/lstm_gemm_kernel_ref.cpp | 34 - .../kernels/lstm/lstm_gemm_kernel_ref.h | 19 - .../lstm/lstm_gemm_kernel_selector.cpp | 20 - .../kernels/lstm/lstm_gemm_kernel_selector.h | 23 - ...m_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp | 60 - ...stm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h | 20 - ...m_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp | 60 - ...stm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h | 20 - .../lstm_dynamic_input_bfyx_opt.cpp | 114 - .../lstm_dynamic_input_bfyx_opt.h | 26 - .../lstm_dynamic_input_kernel_base.cpp | 76 - .../lstm_dynamic_input_kernel_base.h | 52 - .../lstm_dynamic_input_kernel_selector.cpp | 19 - .../lstm_dynamic_input_kernel_selector.h | 23 - .../lstm_dynamic_input_ref_kernel.cpp | 38 - .../lstm_dynamic_input_ref_kernel.h | 21 - .../lstm_dynamic_timeloop_kernel_base.cpp | 131 - .../lstm_dynamic_timeloop_kernel_base.h | 105 - .../lstm_dynamic_timeloop_kernel_selector.cpp | 17 - .../lstm_dynamic_timeloop_kernel_selector.h | 23 - .../lstm_dynamic_timeloop_ref_kernel.cpp | 36 - .../lstm_dynamic_timeloop_ref_kernel.h | 21 - .../pyramid_roi_align_kernel_base.cpp | 56 - .../pyramid_roi_align_kernel_base.h | 48 - .../pyramid_roi_align_kernel_ref.cpp | 59 - .../pyramid_roi_align_kernel_ref.h | 19 - .../pyramid_roi_align_kernel_selector.cpp | 15 - .../pyramid_roi_align_kernel_selector.h | 20 - .../src/kernel_selector/tensor_type.cpp | 1 - .../src/kernel_selector/tensor_type.h | 11 - src/plugins/intel_gpu/src/plugin/graph.cpp | 3 - src/plugins/intel_gpu/src/runtime/format.cpp | 1 - .../intel_gpu/src/runtime/ocl/ocl_stream.cpp | 6 - .../single_layer_tests/strided_slice.cpp | 4 + .../skip_tests_config.cpp | 2 + .../tests/unit/module_tests/format_test.cpp | 1 - .../passes/prepare_buffer_fusing_test.cpp | 48 +- .../unit/test_cases/convolution_gpu_test.cpp | 2 - .../unit/test_cases/lstm_dynamic_gpu_test.cpp | 1005 ------- .../tests/unit/test_cases/lstm_gpu_test.cpp | 2411 ----------------- .../test_cases/pyramid_roi_align_gpu_test.cpp | 137 - .../test_cases/removing_output_node_test.cpp | 4 +- .../tests/unit/test_cases/split_gpu_test.cpp | 825 ------ .../test_cases/strided_slice_gpu_test.cpp | 2 +- 95 files changed, 12 insertions(+), 9444 deletions(-) delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp delete mode 100644 src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp delete mode 
100644 src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/lstm_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/include/split_inst.h delete mode 100644 src/plugins/intel_gpu/src/graph/lstm.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/lstm_gemm.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp delete mode 100644 src/plugins/intel_gpu/src/graph/split.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h delete mode 100644 
src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp delete mode 100644 src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h delete mode 100644 src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp delete mode 100644 src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp delete mode 100644 src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp delete mode 100644 src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp index ae979d17fb3d37..6451e0daf0b4d4 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp @@ -25,293 +25,6 @@ enum class lstm_weights_order { fizo }; -/// @brief LSTM Output selection -/// @details The current implementation allows the use to select the output -/// of an LSTM node by specifing any of the following options -enum class lstm_output_selection { - /// output the entire hidden sequence - sequence = 0, - /// output just the last hidden value - hidden, - /// output the last hidden and last cell values - hidden_cell, - /// output the hidden sequence concatenated with the last cell - sequence_cell -}; - -/// @brief Performs forward Long Short-Term Memory (LSTM) layer. -/// @details The current implementation of LSTM is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. 
-struct lstm : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm) - - lstm() : primitive_base("", {}) {} - - /// @brief Constructs lstm layer. - /// @param id This primitive id. - /// @param input Vector of primitive id. - /// @param weights Primitive id containing weights data. - /// @param bias Primitive id containing bias data. Provide empty string if using lstm without bias. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm without initial_cell values. - /// @param peepholes Primitive id containing peepholes data. Provide empty string if using lstm without peepholes. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. - /// @param activations Vector of activations. Specify [f, g, h]. Default are [sigmoid, tanh, tanh] - /// @param activation_params Vector of ativation params. Specify params for each [f, g, h] activation. - /// @brief Output selection. Default the entire hidden sequence is returned. - /// @param offset_order Order of the concatenated weights, recurrent, and bias. ONNX default is iofz [input, output, forget, block]. - lstm(const primitive_id& id, - const std::vector& input, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& bias = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const primitive_id& peepholes = "", - const float clip = 0, - const bool input_forget = 0, - const std::vector& activations = {}, - const std::vector activation_params = {}, - const lstm_output_selection output_selection = lstm_output_selection::sequence, - const lstm_weights_order offset_order = lstm_weights_order::iofz, - const padding& output_padding = padding()) - : primitive_base(id, input, {output_padding}), - weights(weights), - recurrent(recurrent), - bias(bias), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - peepholes(peepholes), - clip(clip), - input_forget(input_forget), - activations(activations), - activation_params(activation_params), - output_selection(output_selection), - offset_order(offset_order) {} - - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Primitive id containing the initial value of the cell state data. - primitive_id initial_cell; - /// @brief Primitive id containing peepholes data. - primitive_id peepholes; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. - bool input_forget = 0; - /// @brief A list of 3 activation functions for the input, output, forget, cell, and hidden. - std::vector activations; - /// @brief Optional scaling values used by some activation functions. The values are consumed in the order of activation functions. - std::vector activation_params; - /// @brief Output selection. Default the entire hidden sequence is returned. 
- lstm_output_selection output_selection = lstm_output_selection::sequence; - /// @brief Weights, recurrent weights, and biases order. [iofz] : ONNX, [ifoz] : Caffe - lstm_weights_order offset_order = lstm_weights_order::izof; - - // NOT SUPPORTED YET - // /// @brief Optional tensor specifying lengths of the sequences in a batch. - // /// If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`. - // tensor sequence_lens; - // /// @brief The sequence output for the hidden. - // uint32_t output_sequence; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, peepholes.empty()); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_range(seed, activations.begin(), activations.end()); - for (auto& act_param : activation_params) { - seed = hash_combine(seed, act_param.a); - seed = hash_combine(seed, act_param.b); - } - seed = hash_combine(seed, output_selection); - seed = hash_combine(seed, offset_order); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - bool act_params_eq = activation_params.size() == rhs_casted.activation_params.size(); - for (size_t i = 0; i < activation_params.size(); ++i) { - act_params_eq &= activation_params[i].a == rhs_casted.activation_params[i].a && - activation_params[i].b == rhs_casted.activation_params[i].b; - } - - #define cmp_fields(name) name == rhs_casted.name - return act_params_eq && - cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(activations) && - cmp_fields(output_selection) && - cmp_fields(offset_order) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()) && - cmp_fields(peepholes.empty()) && - cmp_fields(bias.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << weights; - ob << recurrent; - ob << bias; - ob << initial_hidden; - ob << initial_cell; - ob << peepholes; - ob << clip; - ob << input_forget; - ob << activations; - ob << activation_params; - ob << make_data(&output_selection, sizeof(lstm_output_selection)); - ob << make_data(&offset_order, sizeof(lstm_weights_order)); - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> weights; - ib >> recurrent; - ib >> bias; - ib >> initial_hidden; - ib >> initial_cell; - ib >> peepholes; - ib >> clip; - ib >> input_forget; - ib >> activations; - ib >> activation_params; - ib >> make_data(&output_selection, sizeof(lstm_output_selection)); - ib >> make_data(&offset_order, sizeof(lstm_weights_order)); - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(weights); - ret.push_back(recurrent); - if (!bias.empty()) { - ret.push_back(bias); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; - -struct lstm_gemm : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_gemm) - - lstm_gemm() : primitive_base("", {}), - direction(0) {} - - /// @brief Constructs lstm layer. - /// @param id This primitive id. - /// @param input input primitive id. - /// @param input weights Primitive id containing weights data. 
- /// @param input recurrent Primitive id containing recurrent data. It is required even for no hidden values. - /// @param input bias Primitive id containing bias data. Provide empty string if using lstm without bias. - /// @param input hidden Primitive id containing hidden data. Provide empty string if using lstm without hidden values. - /// @param direction default = 0, bidirectional = 1. - lstm_gemm(const primitive_id& id, - const input_info& input, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& bias = "", - const primitive_id& hidden = "", - const uint32_t direction = 0, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - weights(weights), - recurrent(recurrent), - bias(bias), - hidden(hidden), - direction(direction) {} - - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id hidden; - /// @brief direction default = 0, bidirectional = 1. - uint32_t direction; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, direction); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, hidden.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return direction == rhs_casted.direction && - bias.empty() == rhs_casted.bias.empty() && - hidden.empty() == rhs_casted.hidden.empty(); - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << weights; - ob << recurrent; - ob << bias; - ob << hidden; - ob << direction; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> weights; - ib >> recurrent; - ib >> bias; - ib >> hidden; - ib >> direction; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(weights); - ret.push_back(recurrent); - if (!bias.empty()) - ret.push_back(bias); - if (!hidden.empty()) - ret.push_back(hidden); - return ret; - } -}; - struct lstm_elt : public primitive_base { CLDNN_DECLARE_PRIMITIVE(lstm_elt) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp deleted file mode 100644 index d459754ebca509..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward Long Short-Term Memory (LSTM_DYNAMIC) layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic) - - lstm_dynamic() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. 
- /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of layer containg dynamic length values (shape: 1D). - /// @param weights Primitive id containing weights data. - /// @param recurrent Primitive id containing recurrent data. - /// @param last_hidden_output Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. - /// @param last_cell_output Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - /// @param bias Primitive id containing bias data. Provide empty string if using lstm_dynamic without bias. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm_dynamic without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm_dynamic without initial_cell values. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. - lstm_dynamic(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& last_hidden_state = "", - const primitive_id& last_cell_state = "", - const primitive_id& bias = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const float clip = 0.0f, - const bool input_forget = false, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - dyn_length(dyn_length), - weights(weights), - recurrent(recurrent), - last_hidden_state(last_hidden_state), - last_cell_state(last_cell_state), - bias(bias), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - clip(clip), - input_forget(input_forget) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. - primitive_id last_hidden_state; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - primitive_id last_cell_state; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Primitive id containing the initial value of the cell state data. - primitive_id initial_cell; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. 
- bool input_forget = false; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_combine(seed, last_hidden_state.empty()); - seed = hash_combine(seed, last_cell_state.empty()); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - #define cmp_fields(name) name == rhs_casted.name - return cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(last_hidden_state.empty()) && - cmp_fields(last_cell_state.empty()) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()) && - cmp_fields(bias.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << weights; - ob << recurrent; - ob << last_hidden_state; - ob << last_cell_state; - ob << bias; - ob << initial_hidden; - ob << initial_cell; - ob << clip; - ob << input_forget; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> weights; - ib >> recurrent; - ib >> last_hidden_state; - ib >> last_cell_state; - ib >> bias; - ib >> initial_hidden; - ib >> initial_cell; - ib >> clip; - ib >> input_forget; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(weights); - ret.push_back(recurrent); - - if (!last_hidden_state.empty()) { - ret.push_back(last_hidden_state); - } - if (!last_cell_state.empty()) { - ret.push_back(last_cell_state); - } - if (!bias.empty()) { - ret.push_back(bias); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp deleted file mode 100644 index 6a8e90d9a494ba..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward calcaulations of input gates for dynamic lstm layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic_input : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic_input) - - lstm_dynamic_input() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. - /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of ilayer containg dynamic length values (shape: 1D). - /// @param weights Primitive id containing weights data. - /// @param recurrent Primitive id containing recurrent data. 
- /// @param bias Primitive id containing bias data. Provide empty string if using lstm_dynamic without bias. - lstm_dynamic_input(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& weights, - const primitive_id& bias = "", - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), dyn_length(dyn_length), weights(weights), bias(bias) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing bias data. - primitive_id bias; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, bias.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return bias.empty() == rhs_casted.bias.empty(); - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << weights; - ob << bias; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> weights; - ib >> bias; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(weights); - - if (!bias.empty()) { - ret.push_back(bias); - } - return ret; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp deleted file mode 100644 index ef184707f94db7..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward calcaulations of input gates for dynamic lstm layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic_timeloop - : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic_timeloop) - - lstm_dynamic_timeloop() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. - /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of ilayer containg dynamic length values (shape: 1D). - /// @param recurrent Primitive id containing recurrent data. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm_dynamic without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm_dynamic without initial_cell values. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. 
- lstm_dynamic_timeloop(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& recurrent, - const primitive_id& last_hidden_state = "", - const primitive_id& last_cell_state = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const float clip = 0.0f, - const bool input_forget = 0, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - dyn_length(dyn_length), - recurrent(recurrent), - last_hidden_state(last_hidden_state), - last_cell_state(last_cell_state), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - clip(clip), - input_forget(input_forget) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. - primitive_id last_hidden_state; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - primitive_id last_cell_state; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Array of primitive ids containing the initial value of the hidden state data (Ht-1). - primitive_id initial_cell; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. - bool input_forget = 0; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_combine(seed, last_hidden_state.empty()); - seed = hash_combine(seed, last_cell_state.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - #define cmp_fields(name) name == rhs_casted.name - return cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(last_hidden_state.empty()) && - cmp_fields(last_cell_state.empty()) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << recurrent; - ob << last_hidden_state; - ob << last_cell_state; - ob << initial_hidden; - ob << initial_cell; - ob << clip; - ob << input_forget; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> recurrent; - ib >> last_hidden_state; - ib >> last_cell_state; - ib >> initial_hidden; - ib >> initial_cell; - ib >> clip; - ib >> input_forget; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(recurrent); - - if (!last_hidden_state.empty()) { - ret.push_back(last_hidden_state); - } - if (!last_cell_state.empty()) { - ret.push_back(last_cell_state); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; -} // namespace cldnn diff --git 
a/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp deleted file mode 100644 index 123faf83f29332..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "primitive.hpp" -#include -#include -#include - -namespace cldnn { - -/// @brief Performs RoI Align using image pyramid. -/// @details Applies RoI Align to layer from the image pyramid. -/// @par Level of the pyramid is selected by equation: -/// floor(START_LEVEL + log2(sqrt(w * h) / IMAGE_SIZE) -/// @par Where: -/// @li w, h - width and heigt of the region -/// @li START_LEVEL - scale of first level of the pyramid -/// @li IMAGE_SIZE - original image size -/// @par RoI Align algorithm performs max-pooling on region of interest -/// using billinear interpolation of surrounding values to avoid quantization. -struct pyramid_roi_align : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(pyramid_roi_align) - - pyramid_roi_align() : primitive_base("", {}) {} - - /// @param id This primitive id. - /// @param rois Input RoI boxes as tuple [x1, y1, x2, y2] describing two opposite corners of the region. - /// @param P2 First level of the image pyramid. - /// @param P3 Second level of the image pyramid. - /// @param P4 Third level of the image pyramid. - /// @param P5 Fourth level of the image pyramid. - /// @param output_size Output pooling size from the region pooling. - /// @param sampling_ratio Number of sampling points per output value. - /// @param pyramid_scales Scales of each level of pyramid in relation to original image. - /// @param pyramid_starting_level Starting level of the pyramid that should be used for region of whole image. 
- pyramid_roi_align(const primitive_id& id, - const input_info& rois, - const input_info& P2, - const input_info& P3, - const input_info& P4, - const input_info& P5, - int output_size, - int sampling_ratio, - std::vector pyramid_scales, - int pyramid_starting_level, - const padding &output_padding = padding()) - : primitive_base(id, - { rois, P2, P3, P4, P5 }, - {output_padding}) - , output_size(output_size) - , sampling_ratio(sampling_ratio) - , pyramid_scales(std::move(pyramid_scales)) - , pyramid_starting_level(pyramid_starting_level) - {} - - int output_size = 0; - int sampling_ratio = 0; - std::vector pyramid_scales; - int pyramid_starting_level = 0; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, sampling_ratio); - seed = hash_range(seed, pyramid_scales.begin(), pyramid_scales.end()); - seed = hash_combine(seed, pyramid_starting_level); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return output_size == rhs_casted.output_size && - sampling_ratio == rhs_casted.sampling_ratio && - pyramid_scales == rhs_casted.pyramid_scales && - pyramid_starting_level == rhs_casted.pyramid_starting_level; - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << output_size; - ob << sampling_ratio; - ob << pyramid_scales; - ob << pyramid_starting_level; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> output_size; - ib >> sampling_ratio; - ib >> pyramid_scales; - ib >> pyramid_starting_level; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp deleted file mode 100644 index a31e0c765c2d32..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "primitive.hpp" -#include -#include - -namespace cldnn { - -/// @brief Performs split operation on input. -/// @details splits the input data into n parts, for each user provides name and offsets. -/// @n User cannot use split primitive directly. -/// @n It is needed to refer to the output ids with the name ":". -/// @n -/// @n\b Assumptions -/// @n - offsets1 < offsets2 < offsets3 < ... -/// @n - size[n] = offsets[n+1] - offsets[n]; -/// @n - last element: size[n] = split_input.size - offsets[n]; -/// @n - no buffer overlapping, as the output size is calculated using offset and input size -/// @n - split primitive id cannot be used by any other primitive (user needs to use output_ids only) -/// @n Breaking any of this conditions will cause exeption throw. -/// @n -/// @n\b Example: -/// @n Splitting output to 2 parts by the features: -/// @n input_size = { 2, 4, 3, 5 }; -/// @n split_id = "split"; -/// @n output_ids_offsets[0] = { "out0", { 0,0,0,0 } }; -/// @n output_ids_offsets[1] = { "out1", { 0,2,0,0 } }; -/// @n After split there would be 2 primitives: "split:out0" and "split:out1" which contain 2 feature maps (lower and upper) -struct split : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(split) - - /// @brief Constructs split primitive. - /// @param id This primitive id. - /// @param input Input primitive id. 
- /// @param output_ids_offsets Pairs of output_ids and offsets - split(const primitive_id& id, - const input_info& input, - const std::vector >& output_ids_offsets, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - output_offsets(extract_tensor_vector(output_ids_offsets)), - output_ids(extract_primitive_vector(output_ids_offsets)) {} - - /// @brief Array of tensors with offsets. - std::vector output_offsets; - /// @brief List of output_ids. - const primitive_id_arr output_ids; - - size_t hash() const override { - size_t seed = primitive::hash(); - for (auto& offset : output_offsets) { - seed = hash_combine(seed, offset.hash()); - } - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return output_offsets == rhs_casted.output_offsets; - } - -protected: - static std::vector extract_primitive_vector( - const std::vector >& stor) { - std::vector res; - for (auto& stor_pair : stor) res.push_back(stor_pair.first); - - return res; - } - - static std::vector extract_tensor_vector(const std::vector >& stor) { - std::vector res; - for (auto& stor_pair : stor) res.push_back(stor_pair.second); - - return res; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp index ec7276ebcc1f34..77b49d794bab75 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp @@ -218,8 +218,6 @@ struct format { os_is_yx_osv32_isv4_swizzled_by_2, ///< format for weights for IMAD convolutions os_is_yx_osv32_isv4, ///< format for weights for IMAD convolutions os_is_zyx_osv32_isv4, ///< format for weights for IMAD convolutions - lstm_weights_dio, ///< dynamic_lstm, direction, - ///< than IO (I - input size, O - 4 * hidden_size) os_is_osv32_isv32_swizzled_by_4, ///< format for weights for 1x1 IMAD convolution os_iyx_osv8, os_iyx_osv32__ai32, diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp index d68cfcd7edbae0..8a4fddecdd4fb6 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp @@ -66,10 +66,7 @@ struct argument_desc { SLOPE, INTERNAL_BUFFER, SCALAR, - RECURRENT, // RNN/LSTM/GRU recurrent weights - HIDDEN, // RNN/LSTM/GRU hidden input CELL, // LSTM cell input - LSTM_PACK, // LSTM packed output WEIGHTS_ZERO_POINTS, ACTIVATIONS_ZERO_POINTS, COMPENSATION, diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp index cf9f44a9a59686..0b557579674885 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp @@ -5,19 +5,6 @@ #include "pass_manager.h" #include "program_node.h" -#include "split_inst.h" -#include "convolution_inst.h" -#include "crop_inst.h" -#include "lstm_inst.h" -#include "reshape_inst.h" -#include "resample_inst.h" -#include "depth_to_space_inst.h" -#include "lstm_dynamic_inst.h" -#include "lstm_dynamic_input_inst.h" -#include "lstm_dynamic_timeloop_inst.h" -#include "mutable_data_inst.h" -#include "arg_max_min_inst.h" - #include #include #include @@ -28,375 +15,6 @@ using 
namespace cldnn; namespace cldnn { -namespace { -std::string get_id_string(size_t i) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << i; - return ss.str(); -} -} // namespace - -void graph_initializations::handle_split_node(program& p, split_node& node) { - if (!node.get_users().empty()) { - throw std::logic_error("Split layer cannot be used directly! Please use split output \"" + node.id() + - ":\"!"); - } - // get_output size and validate split primitive inputs - layout output_layout = node.get_output_layout(); - tensor output_layout_size = output_layout.get_tensor(); - - auto split_prim = node.typed_desc(); - std::size_t split_num = split_prim->output_offsets.size(); - - std::vector transformed_ids; - - // create crop for each split output provided - for (std::size_t i = 0; i < split_num; i++) { - primitive_id output_id = node.id() + ":" + split_prim->output_ids[i]; - - auto output_node_itr = p.nodes_map.find(output_id); - if (output_node_itr == p.nodes_map.end()) { - continue; - } - - transformed_ids.push_back(std::move(output_id)); - - auto node_ptr = output_node_itr->second; - - // calculate crop reference input size - tensor reference_input_size; - - // For all the split offsets before the last split offset, the size can be calculated - // size_of_offset[n] = offset[n + 1] - offset[n]; - if (i != (split_num - 1)) { - reference_input_size += split_prim->output_offsets[i + 1] - split_prim->output_offsets[i]; - } else { // For the last split i.e. size[split_num - 1] = split_input.size - offsets[n]; - reference_input_size += output_layout_size - split_prim->output_offsets[i]; - } - - // For all the other dimensions, copy from the split_input - for (int32_t dimension = 0; dimension < tensor_dim_max; dimension++) { - if (reference_input_size.raw[dimension] == 0) { - reference_input_size.raw[dimension] = output_layout_size.raw[dimension]; - } - } - - // update crop primitive - node_ptr->set_output_padding(output_layout.data_padding); - auto crop_prim = node_ptr->as().typed_desc(); - crop_prim->reference_input = reference_input_size; - } - - // remove input->split connection and remove original split node - p.remove_connection(node.input(), node); - - p.add_optimized_primitive_info(node.id(), transformed_ids); - p.optimized_out.push_back(node.id()); - p.nodes_map.erase(node.id()); -} - -void graph_initializations::handle_lstm_node(program& p, lstm_node& node) { - // lstm_node& lstm_node = node->as(); - bool initial_hidden_term = node.initial_hidden_term(); - bool initial_cell_term = node.initial_cell_term(); - bool bias_term = node.bias_term(); - auto lstm_prim = node.typed_desc(); - primitive_id weights_id = lstm_prim->weights; - primitive_id recurrent_id = lstm_prim->recurrent; - primitive_id bias_id = bias_term ? lstm_prim->bias : ""; - primitive_id initial_hidden_id = initial_hidden_term ? lstm_prim->initial_hidden : ""; - primitive_id initial_cell_id = initial_cell_term ? 
lstm_prim->initial_cell : ""; - - // removing connection with weights to get proper dependency order for next operations - p.remove_connection(p.get_node(weights_id), node); - p.remove_connection(p.get_node(recurrent_id), node); - if (bias_term) - p.remove_connection(p.get_node(bias_id), node); - if (initial_hidden_term) - p.remove_connection(p.get_node(initial_hidden_id), node); - if (initial_cell_term) - p.remove_connection(p.get_node(initial_cell_id), node); - - // calculating sizes - program_node& input = node.input(); - layout input_layout = input.get_output_layout(); - tensor recurrent_size = p.get_node(recurrent_id).get_output_layout().get_tensor(); - - // hidden tensor size = [batch, seq, hidden_size, direction] - // the output of the element wise operation is cropped and used in the next time step - // sequence_len = 1 and direction = 1. The backward pass is separated from the forward pass - auto hidden_size = tensor(input_layout.batch(), 1, recurrent_size.spatial[0], 1); - - size_t directions = recurrent_size.feature[0]; - size_t num_input_dependencies = node.get_dependencies().size(); - size_t sequence_len = node.sequence_len(); - - // Calculate the input sequence length for the lstm node - // Case 1: If the input comes in as a concatenated input i.e. the - // input is not divided into sequence elements - if (sequence_len == 1 && num_input_dependencies == 1) { - // Get the sequence length from the input to LSTM - sequence_len = input_layout.feature(); - - // If the input's feature/sequence length field is > 1, i.e. If - // the sequence elements are concatenated into one single input - // then it has to be split into individual sequence elements - if (sequence_len > 1) { - for (size_t sequence_element = 0; sequence_element < sequence_len; sequence_element++) { - primitive_id crop_id = input.id() + ":crop:" + get_id_string(sequence_element); - tensor crop_tensor{input_layout.batch(), 1, input_layout.spatial(0), input_layout.spatial(1)}; - tensor offset_tensor{0, static_cast(sequence_element), 0, 0}; - auto input_crop = std::make_shared(crop_id, input.id(), crop_tensor, offset_tensor); - auto& input_crop_node = p.get_or_create(input_crop); - - // Add the crop nodes as user for input - p.add_connection(input, input_crop_node); - - // Connect crop with lstm - p.add_connection(input_crop_node, node); - } - - // We have the sequence elements (cropped inputs) as input to LSTM. - // The original input is no longer a dependency to LSTM. - // Remove the input node as a dependency to LSTM - p.remove_connection(input, node); - - // Update the total no. of input dependecies - num_input_dependencies = node.get_dependencies().size(); - } - // if the sequence has a single element but it has multiple inputs then - // the parent of this lstm is an lstm node. If this is a bidirectional lstm - // then the sequence length is the number of dependencies divided by 2. - } else if (sequence_len == 1 && num_input_dependencies > 1) { - sequence_len = (directions == 1) ? 
num_input_dependencies : num_input_dependencies / 2; - } - - // check if this lstm node has an lstm child - bool has_lstm_children = false; - for (auto& user : node.get_users()) { - if (user->is_type()) { - has_lstm_children = true; - } - } - - bool emit_last_cell = lstm_prim->output_selection == lstm_output_selection::hidden_cell || - lstm_prim->output_selection == lstm_output_selection::sequence_cell; - bool emit_sequence = lstm_prim->output_selection == lstm_output_selection::sequence_cell || - lstm_prim->output_selection == lstm_output_selection::sequence; - - std::vector cell_list(directions * sequence_len); - std::vector hidden_list(directions * sequence_len); - std::map> output_map; - size_t input_directions = input_layout.spatial(1); - - // lstm expanding - for (size_t dir = 0; dir < directions; ++dir) { - auto hidden_id = initial_hidden_id; - auto cell_id = initial_cell_id; - for (size_t i = 0; i < sequence_len; ++i) { - size_t idx = i + dir * sequence_len; - primitive_id lstm_gemm_id = node.id() + ":lstm_gemm" + get_id_string(idx); - primitive_id lstm_elt_id = node.id() + ":lstm_elt" + get_id_string(idx); - primitive_id crop_id = node.id() + ":crop" + get_id_string(idx); - - size_t input_idx = i; - // for bidirectional lstms, if first LSTM layer then reverse input - // for subsequent stacked layers the input is strided on the dir dimension - if (num_input_dependencies > sequence_len) { // stacked layer - input_idx = dir * sequence_len + i; - } else if ((input_directions < 2) && dir > 0) { // first layer - input_idx = sequence_len - i - 1; - } - - // primitive_id lstm_gemm_input_id = node->get_dependency(input_idx).get_primitive()->id; - // the line below requires an attention: get_org_primitive_id() might not be an actual id of a node - // (see rename method) ToDO: ensure that get_org_primitive_id() is suitable here - primitive_id lstm_gemm_input_id = node.get_dependency(input_idx).get_org_primitive_id(); - - auto lstm_gemm_node = std::make_shared(lstm_gemm_id, - lstm_gemm_input_id, - weights_id, - recurrent_id, - bias_id, - hidden_id, - (uint32_t)dir); - auto& n1 = p.get_or_create(lstm_gemm_node); - - auto lstm_elt_node = std::make_shared(lstm_elt_id, - lstm_gemm_id, - cell_id, - lstm_prim->clip, - lstm_prim->input_forget, - lstm_prim->activations, - lstm_prim->activation_params, - lstm_prim->offset_order, - (uint32_t)dir); - auto& n2 = p.get_or_create(lstm_elt_node); - // adding lstm_elt as user - p.add_connection(n1, n2); - // adding dependecy to lstm_gemm node - // input - p.add_connection(node.get_dependency(input_idx), n1); - // adding weights and initial values to lstm_gemm - p.add_connection(p.get_node(weights_id), n1); - p.add_connection(p.get_node(recurrent_id), n1); - if (bias_term) - p.add_connection(p.get_node(bias_id), n1); - - // adding cell and hiddens as dependencies - if (i > 0) { - p.add_connection(*cell_list[(i - 1) * directions + dir], n2); - p.add_connection(*hidden_list[(i - 1) * directions + dir], n1); - } else { // if initial values are present - if (initial_hidden_term) - p.add_connection(p.get_node(hidden_id), n1); - if (initial_cell_term) - p.add_connection(p.get_node(cell_id), n2); - } - - // lstm_hidden - { - hidden_id = crop_id + ":hidden"; - auto crop_hidden = - std::make_shared(hidden_id, lstm_elt_id, hidden_size, tensor{0, 0, 0, 0}); - auto& n3 = p.get_or_create(crop_hidden); - // adding eltwise as dependency to hidden - p.add_connection(n2, n3); - - // if parent is lstm adding hiddens as dependency - if (has_lstm_children) { - for (auto& user : 
node.get_users()) { - p.add_connection(n3, *user); - } - } - hidden_list[i * directions + dir] = &n3; - if (i == sequence_len - 1 || emit_sequence) { - output_map[i * directions + dir] = {hidden_id, &n3}; - } - } - - // lstm_cell - if (i < sequence_len - 1 || emit_last_cell) { - cell_id = crop_id + ":cell"; - auto crop_cell = std::make_shared(cell_id, lstm_elt_id, hidden_size, tensor{0, 1, 0, 0}); - auto& n4 = p.get_or_create(crop_cell); - p.add_connection(n2, n4); - cell_list[i * directions + dir] = &n4; - if (i == sequence_len - 1) { - output_map[sequence_len * directions + dir] = {cell_id, &n4}; - } - } - } - } - // if there is no next lstm, concatenation is created - if (!has_lstm_children) { - std::vector output_ids_offsets; - for (auto& e : output_map) { - output_ids_offsets.push_back(input_info(e.second.first)); - } - primitive_id concatenation_id = node.id() + ":concat"; - auto concatenation_primitive = std::make_shared(concatenation_id, output_ids_offsets, 1); - auto& concatenation_node = p.get_or_create(concatenation_primitive); - for (auto& e : output_map) { - p.add_connection(*e.second.second, concatenation_node); - } - if (directions == 2) { - // bidirectional support requires concatenations along the direction and sequence axis - // instead we can concatenate along the sequence axis and reshape the tensor to the account - // for the direction - size_t concatenate_len = emit_sequence ? sequence_len : 1; - if (emit_last_cell) - concatenate_len++; - - tensor output_size{input_layout.batch(), - static_cast(concatenate_len), - hidden_size.spatial[0], - (int32_t)directions}; - auto reshape_primitive = std::make_shared(node.id() + ":reshape", concatenation_id, output_size); - auto& reshape_node = p.get_or_create(reshape_primitive); - p.add_connection(concatenation_node, reshape_node); - p.replace_all_usages(node, reshape_node); - } else { - p.replace_all_usages(node, concatenation_node); - } - } - // removing expanded node - p.remove_all_connections(node); - p.nodes_map.erase(node.id()); -} - -void graph_initializations::handle_dynamic_lstm_node(program& p, lstm_dynamic_node& node) { - // [0] Prepare helper temp variables. 
-    // auto& lstm_dynamic_node = node->as<lstm_dynamic>();
-    auto& node_id = node.id();
-    auto input_id = node.get_primitive()->input.at(0);
-    auto dyn_length_id = node.dyn_length_id();
-    auto weights_id = node.weights_id();
-    auto bias_id = node.bias_id();
-    std::string suffix = "__cldnn_";
-
-    // [1] Add lstm_dynamic_input
-    auto lstm_dynamic_input_primitive =
-        std::make_shared<lstm_dynamic_input>(node_id + suffix + "input",
-                                             input_id,
-                                             dyn_length_id,
-                                             weights_id,
-                                             bias_id,
-                                             node.get_primitive()->output_paddings[0]);
-    auto& lstm_dynamic_input_node = p.get_or_create(lstm_dynamic_input_primitive);
-    p.add_connection(node.input(), lstm_dynamic_input_node);  // connect real input to dlstm_input
-    // connect other deps
-    p.add_connection(p.get_node(dyn_length_id), lstm_dynamic_input_node);
-    p.add_connection(p.get_node(weights_id), lstm_dynamic_input_node);
-    if (!bias_id.empty())
-        p.add_connection(p.get_node(bias_id), lstm_dynamic_input_node);
-    lstm_dynamic_input_node.get_output_layout();  // calc out layout
-
-    auto recurrent_id = node.recurrent_id();
-    auto init_hidden_id = node.initial_hidden_id();
-    auto init_cell_id = node.initial_cell_id();
-    auto last_hidden_id = node.last_hidden_state_id();
-    auto last_cell_id = node.last_cell_state_id();
-    auto lstm_dynamic_timeloop_primitive =
-        std::make_shared<lstm_dynamic_timeloop>(node_id + suffix + "timeloop",
-                                                lstm_dynamic_input_node.id(),
-                                                dyn_length_id,
-                                                recurrent_id,
-                                                last_hidden_id,
-                                                last_cell_id,
-                                                init_hidden_id,
-                                                init_cell_id,
-                                                node.clip(),
-                                                node.input_forget(),
-                                                lstm_dynamic_input_primitive->output_paddings[0]);
-    auto& lstm_dynamic_timeloop_node = p.get_or_create(lstm_dynamic_timeloop_primitive);
-    p.add_connection(lstm_dynamic_input_node, lstm_dynamic_timeloop_node);  // connect dlstm_input to dlstm_timeloop
-    // connect other deps
-    p.add_connection(p.get_node(dyn_length_id), lstm_dynamic_timeloop_node);
-    p.add_connection(p.get_node(recurrent_id), lstm_dynamic_timeloop_node);
-
-    // [hack] reversed dependencies so the processing/execution order will be valid (from the user perspective)
-    // It means that these optional outputs for sure will be "executed" layers.
-    // This connection will be reversed (to normal state) later in program.cpp (right after calculating processing order)!
-    if (!last_hidden_id.empty())
-        p.add_connection(lstm_dynamic_timeloop_node, p.get_node(last_hidden_id));
-    if (!last_cell_id.empty())
-        p.add_connection(lstm_dynamic_timeloop_node, p.get_node(last_cell_id));
-    // [hack end]
-    if (!init_hidden_id.empty())
-        p.add_connection(p.get_node(init_hidden_id), lstm_dynamic_timeloop_node);
-    if (!init_cell_id.empty())
-        p.add_connection(p.get_node(init_cell_id), lstm_dynamic_timeloop_node);
-    lstm_dynamic_timeloop_node.get_output_layout();  // calc out layout
-
-    // [2] Finally replace original node with the new ones.
- p.replace_all_usages(node, lstm_dynamic_timeloop_node); - p.remove_all_connections(node); - p.remove_if_dangling(node); - p.rename(lstm_dynamic_timeloop_node, node_id); // get original id - - // we dont have to set output since it will be done in next graph_opts step -} void graph_initializations::set_outputs(program& p) { auto custom_outputs = p.get_config().get_property(ov::intel_gpu::custom_outputs); @@ -417,18 +35,6 @@ void graph_initializations::set_outputs(program& p) { } void graph_initializations::run(program& p) { - auto itr = p.nodes_map.begin(); - while (itr != p.nodes_map.end()) { - auto node_itr = itr++; - auto& node = node_itr->second; - if (node->is_type()) { - handle_split_node(p, node->as()); - } else if (node->is_type()) { - handle_lstm_node(p, node->as()); - } else if (node->is_type()) { - handle_dynamic_lstm_node(p, node->as()); - } - } set_outputs(p); p.get_processing_order().calc_processing_order(p); } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp index 3f568b96ebacda..f8d3ed08139817 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp @@ -10,7 +10,6 @@ #include "deconvolution_inst.h" #include "deformable_convolution_inst.h" #include "fully_connected_inst.h" -#include "lstm_dynamic_input_inst.h" namespace cldnn { @@ -21,11 +20,6 @@ template post_optimize_weights::weights_bias_offset post_optimize_we return weights_bias_offset(node.get_primitive()->input.size(), program_helpers::wrap_if_single(node.get_primitive()->weights).size()); } -template <> -post_optimize_weights::weights_bias_offset post_optimize_weights::get_weights_bias_offset(const lstm_dynamic_input_node& node) { - return weights_bias_offset(node.get_primitive()->input.size() + 1, program_helpers::wrap_if_single(node.get_primitive()->weights).size()); -} - // function which prepares given primitive for weights optimization template void post_optimize_weights::optimize_weights(T& node, program& p) { @@ -129,8 +123,6 @@ void post_optimize_weights::run(program& p) { optimize_weights(node->as(), p); } else if (node->is_type()) { optimize_weights(node->as(), p); - } else if (node->is_type()) { - optimize_weights(node->as(), p); } } } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 90565ef9ceaa26..db9216202cfb55 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -15,6 +15,7 @@ #include "depth_to_space_inst.h" #include "resample_inst.h" #include "loop_inst.h" +#include "lstm_elt_inst.h" #include "strided_slice_inst.h" #include "shape_of_inst.h" #include "non_max_suppression_inst.h" diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp index 893d35395077bb..a3f43332cf7a95 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp @@ -316,8 +316,7 @@ void prepare_quantization::prepare_dequantize_merge(program& p, eltwise_node& el } auto get_scale_shift_mem = [](const cldnn::eltwise_node& eltw, size_t dep_id) -> memory::ptr { - if (dep_id >= 
eltw.get_dependencies().size())
-            CLDNN_ERROR_MESSAGE(eltw.id(), "Invalid dependency id in dequantize optimization");
+        OPENVINO_ASSERT(dep_id < eltw.get_dependencies().size(), "[GPU] ", eltw.id(), ": Invalid dependency id in dequantize optimization");
 
         return eltw.get_dependency(dep_id).as<data>().get_attached_memory_ptr();
     };
 
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp
deleted file mode 100644
index 41df4c0ce5920b..00000000000000
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "pass_manager.h"
-#include "program_helpers.h"
-#include "lstm_dynamic_timeloop_inst.h"
-
-#include <vector>
-
-using namespace cldnn;
-
-/*
-    Pass made for nodes which have optional outputs (and had to reverse connections so
-    the processing order was valid).
-*/
-void reverse_optional_nodes_outputs::run(program& p) {
-    for (auto& node : p.get_processing_order()) {
-        if (node->is_type<lstm_dynamic_timeloop>()) {
-            auto& typed_node = node->as<lstm_dynamic_timeloop>();
-            typed_node.reverse_optional_outputs_connections();
-        }
-    }
-}
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp
deleted file mode 100644
index c35d5e37233daf..00000000000000
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "intel_gpu/runtime/error_handler.hpp"
-#include "pass_manager.h"
-#include "program_helpers.h"
-#include "strided_slice_inst.h"
-#include "reshape_inst.h"
-#include "data_inst.h"
-#include <vector>
-#include <memory>
-
-using namespace cldnn;
-
-void strided_slice_optimize::run(program& p) {
-    auto node_itr = p.get_processing_order().begin();
-    while (node_itr != p.get_processing_order().end()) {
-        auto& node = (*node_itr++);
-        if (node->is_type<strided_slice>() && node->get_output_layout().is_static()) {
-            auto& strided_slice_node = node->as<strided_slice>();
-            auto& new_axis_mask = strided_slice_node.get_primitive()->new_axis_mask;
-
-            if (std::find(new_axis_mask.begin(), new_axis_mask.end(), 1) == new_axis_mask.end())
-                continue;
-
-            auto node_layout = strided_slice_node.get_output_layout();
-            // only 4D or less dimension output runs optimization
-            if (node_layout.get_rank() > 4)
-                continue;
-
-            auto& deps = node->get_dependencies();
-            auto is_other_deps_constant = [deps]() {
-                for (size_t i = 1; i < deps.size(); i++) {
-                    if (!deps[i].first->is_type<data>()) return false;
-                }
-                return true;
-            };
-            if (!is_other_deps_constant())
-                continue;
-
-            for (size_t i = deps.size(); i--;)
-                if (deps[i].first->is_type<data>())
-                    node->remove_dependency(i);
-
-            auto node_size = node_layout.get_tensor().sizes(format::bfyx);
-
-            auto is_shift_possible = [&](const std::vector<int32_t>& dims) -> bool {
-                if (dims.empty())
-                    CLDNN_ERROR_MESSAGE(node->id(), "Error while adding new axis: node has incorrect dimensions");
-
-                if (dims[dims.size() - 1] == 1)
-                    return true;
-                else
-                    CLDNN_ERROR_MESSAGE(node->id(), "Not supported yet: too many axes for adding");
-                return false;
-            };
-
-            std::vector<int32_t> output_dims_sizes = node_size;
-            if (std::find(new_axis_mask.begin(), new_axis_mask.end(), 1) != new_axis_mask.end()) {
-                for (size_t i = 0; i < new_axis_mask.size(); ++i) {
-                    if
(new_axis_mask[new_axis_mask.size() - i - 1] == 1) { - if (is_shift_possible(output_dims_sizes)) { - for (size_t j = output_dims_sizes.size() - 1; j > i; --j) - output_dims_sizes[j] = output_dims_sizes[j - 1]; - output_dims_sizes[i] = 1; - } - } - } - } - - auto reshape_prim = std::make_shared( - "reshape_" + node->id(), - node->get_dependency(0).get_primitive()->id, - tensor(output_dims_sizes[0], output_dims_sizes[1], output_dims_sizes[3], output_dims_sizes[2])); - - auto& reshape_prim_node = p.get_or_create(reshape_prim); - - layout output_layout = { node_layout.data_type, node_layout.format, reshape_prim->output_shape }; - reshape_prim_node.set_output_layout(output_layout); - - p.add_intermediate(reshape_prim_node, *node, 0, true); - p.extract_and_remove(*node); - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index bb891c0ab8f99f..d83f40748123d8 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -722,8 +722,6 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::g_os_y_is_x_osv8_isv4; case format::g_os_is_yx_isv16_osv16: return kernel_selector::weights_layout::g_os_is_yx_isv16_osv16; - case format::lstm_weights_dio: - return kernel_selector::weights_layout::dlstm_dir_io; case format::os_i_yxs_osv4_yxsv4: return kernel_selector::weights_layout::os_i_yxs_osv4_yxsv4; default: @@ -1002,8 +1000,6 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::os_is_zyx_osv64_isv16; case kernel_selector::weights_layout::os_is_yx_isv8_osv16_isv2: return cldnn::format::os_is_yx_isv8_osv16_isv2; - case kernel_selector::weights_layout::dlstm_dir_io: - return cldnn::format::lstm_weights_dio; case kernel_selector::weights_layout::os_iyx_osv16_rotate_180: return cldnn::format::os_iyx_osv16; case kernel_selector::weights_layout::os_i_yxs_osv4_yxsv4: diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp deleted file mode 100644 index 8bf8ba0a8fcedc..00000000000000 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "lstm_dynamic_input_inst.h" -#include "lstm_dynamic/lstm_dynamic_input_kernel_selector.h" -#include "lstm_dynamic/lstm_dynamic_input_kernel_base.h" - -namespace cldnn { -namespace ocl { - -struct lstm_dynamic_input_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_dynamic_input_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_dynamic_input_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args; - args.inputs = { instance.input_memory_ptr(), instance.dyn_length_memory()}; - args.outputs = { instance.output_memory_ptr() }; - args.weights = instance.weights_memory(); - args.bias = instance.bias_term() ? 
instance.bias_memory() : nullptr; - return args; - } - -public: - static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param) { - const auto& primitive = impl_param.typed_desc(); - auto params = get_default_params(impl_param); - - const auto dyn_len_idx = 1; - const auto weights_idx = 2; - const auto bias_idx = 3; - - const auto& weights_layout = impl_param.get_input_layout(weights_idx); - params.weights = convert_weights_tensor(weights_layout); - - auto has_bias = !primitive->bias.empty(); - if (has_bias) { - const auto& bias_layout = impl_param.get_input_layout(bias_idx); - params.bias.push_back(convert_data_tensor(bias_layout)); - } - - const auto& dyn_length_tensor = impl_param.input_layouts[dyn_len_idx]; - params.inputs.push_back(convert_data_tensor(dyn_length_tensor)); - - params.direction = weights_layout.feature(); - - auto optional_params = get_default_weights_bias_optional_params(impl_param.get_program()); - return {params, optional_params}; - } -}; - -namespace detail { - -attach_lstm_dynamic_input_impl::attach_lstm_dynamic_input_impl() { - implementation_map::add(impl_types::ocl, typed_primitive_impl_ocl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f16, format::bfyx), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_dynamic_input_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic_input) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp deleted file mode 100644 index 18e39d0e7615ec..00000000000000 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "lstm_dynamic_timeloop_inst.h" -#include "lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h" -#include "lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h" - -namespace cldnn { -namespace ocl { - -struct lstm_dynamic_timeloop_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_dynamic_timeloop_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_dynamic_timeloop_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args; - args.inputs = {instance.input_memory_ptr(), instance.dyn_length_memory()}; - if (instance.last_hidden_output_term()) - args.inputs.push_back(instance.last_hidden_output_memory()); - if (instance.last_cell_output_term()) - args.inputs.push_back(instance.last_cell_output_memory()); - args.outputs = { instance.output_memory_ptr() }; - args.recurrent = instance.recurrent_memory(); - args.hidden = instance.initial_hidden_term() ? instance.initial_hidden_memory() : nullptr; - args.cell = instance.initial_cell_term() ? 
instance.initial_cell_memory() : nullptr;
-        return args;
-    }
-
-public:
-    static std::unique_ptr<primitive_impl> create(const lstm_dynamic_timeloop_node& arg, const kernel_impl_params& impl_param) {
-        auto dlstm_timeloop_params = get_default_params<kernel_selector::lstm_dynamic_timeloop_params>(impl_param);
-
-        // dyn length
-        const auto& dyn_length_tensor = impl_param.input_layouts[arg.get_dependency_idx("dyn_length")];
-        dlstm_timeloop_params.inputs.push_back(convert_data_tensor(dyn_length_tensor));
-
-        // recurrent
-        const auto& recurrent_layout = impl_param.input_layouts[arg.get_dependency_idx("recurrent")];
-        dlstm_timeloop_params.recurrent = convert_data_tensor(recurrent_layout);
-
-        dlstm_timeloop_params.direction = arg.direction();
-
-        if (arg.initial_cell_term()) {
-            const auto& cell_layout = impl_param.input_layouts[arg.get_dependency_idx("initial_cell")];
-            dlstm_timeloop_params.set_cell(convert_data_tensor(cell_layout));
-        }
-
-        if (arg.last_hidden_output_term()) {
-            const auto& last_hidden_output_layout = impl_param.input_layouts[arg.get_dependency_idx("last_hidden_output")];
-            dlstm_timeloop_params.set_last_hidden_output(convert_data_tensor(last_hidden_output_layout));
-        }
-
-        if (arg.initial_hidden_term()) {
-            const auto& hidden_layout = impl_param.input_layouts[arg.get_dependency_idx("initial_hidden")];
-            dlstm_timeloop_params.set_hidden(convert_data_tensor(hidden_layout));
-        }
-
-        if (arg.last_cell_output_term()) {
-            const auto& last_cell_state_layout = impl_param.input_layouts[arg.get_dependency_idx("last_cell_output")];
-            dlstm_timeloop_params.set_last_cell_output(convert_data_tensor(last_cell_state_layout));
-        }
-        dlstm_timeloop_params.set_dynamic_shape_offsets();
-        // finally get best kernel
-        auto dlstm_timeloop_optional_params =
-            get_default_optional_params<kernel_selector::lstm_dynamic_timeloop_optional_params>(impl_param.get_program());
-
-        auto& kernel_selector = kernel_selector::lstm_dynamic_timeloop_kernel_selector::Instance();
-        auto best_kernel = kernel_selector.get_best_kernel(dlstm_timeloop_params, dlstm_timeloop_optional_params);
-
-        return make_unique<lstm_dynamic_timeloop_impl>(best_kernel);
-    }
-};
-
-namespace detail {
-
-attach_lstm_dynamic_timeloop_impl::attach_lstm_dynamic_timeloop_impl() {
-    implementation_map<lstm_dynamic_timeloop>::add(impl_types::ocl, lstm_dynamic_timeloop_impl::create, {
-        std::make_tuple(data_types::f32, format::bfyx),
-        std::make_tuple(data_types::f16, format::bfyx),
-    });
-}
-
-} // namespace detail
-} // namespace ocl
-} // namespace cldnn
-
-BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_dynamic_timeloop_impl)
-BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic_timeloop)
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp
index 0bccdd999b2889..9ebf715bbc3112 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp
@@ -136,4 +136,3 @@ attach_lstm_elt_impl::attach_lstm_elt_impl() {
 
 BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_elt_impl)
 BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_elt)
-BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm)
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp
deleted file mode 100644
index 47809d63dc1bf4..00000000000000
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "primitive_base.hpp"
-
-#include "lstm_gemm_inst.h"
-#include "lstm/lstm_gemm_kernel_selector.h"
-#include "lstm/lstm_gemm_kernel_base.h"
-
-namespace
cldnn { -namespace ocl { - -struct lstm_gemm_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_gemm_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_gemm_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args = parent::get_arguments(instance); - - args.outputs = { instance.output_memory_ptr() }; - args.weights = instance.weights_memory(); - args.recurrent = instance.recurrent_memory(); - args.bias = instance.bias_term() ? instance.bias_memory() : nullptr; - args.hidden = instance.hidden_term() ? instance.hidden_memory() : nullptr; - - return args; - } - -public: - static std::unique_ptr create(const lstm_gemm_node& arg, const kernel_impl_params& impl_param) { - const auto input_idx = 0; - const auto weight_idx = 1; - const auto recurrent_idx = 2; - const auto bias_idx = 3; - const auto hidden_idx = arg.bias_term() ? 4 : 3; - - const auto& weights_layout = impl_param.input_layouts[weight_idx]; - auto lstm_gemm_params = get_default_params(impl_param); - lstm_gemm_params.weights = convert_data_tensor(weights_layout); - - if (arg.bias_term()) { - const auto& bias_layout = impl_param.input_layouts[bias_idx]; - lstm_gemm_params.SetBias(convert_data_tensor(bias_layout)); - } - if (arg.hidden_term()) { - const auto& recurrent_layout = impl_param.input_layouts[recurrent_idx]; - lstm_gemm_params.recurrent = convert_data_tensor(recurrent_layout); - - const auto& hidden_layout = impl_param.input_layouts[hidden_idx]; - lstm_gemm_params.SetHidden(convert_data_tensor(hidden_layout)); - // TODO: make a generic function to get the direction - if (hidden_layout.spatial(1) > 1) { - lstm_gemm_params.hidden_direction = arg.direction(); - } - } - lstm_gemm_params.direction = arg.direction(); - - // Update the direction of the input for the gemm kernel - const auto& input_layout = impl_param.input_layouts[input_idx]; - size_t input_directions = input_layout.spatial(1); - - if (input_directions > 1) { // For bidirection input, input direction can be 1 or 0 - lstm_gemm_params.input_direction = arg.direction(); - } else { // For unidirectional input - lstm_gemm_params.input_direction = 0; - } - lstm_gemm_params.set_dynamic_shape_offsets(); - auto lstm_gemm_optional_params = - get_default_optional_params(impl_param.get_program()); - - auto& kernel_selector = kernel_selector::lstm_gemm_kernel_selector::Instance(); - auto best_kernel = kernel_selector.get_best_kernel(lstm_gemm_params, lstm_gemm_optional_params); - - return make_unique(best_kernel); - } -}; - -namespace detail { - -attach_lstm_gemm_impl::attach_lstm_gemm_impl() { - implementation_map::add(impl_types::ocl, lstm_gemm_impl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f16, format::bfyx), - std::make_tuple(data_types::f32, format::fyxb), - std::make_tuple(data_types::f16, format::fyxb), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_gemm_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_gemm) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp deleted file mode 100644 index cf0b2f7e4794f9..00000000000000 --- 
a/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "pyramid_roi_align_inst.h" -#include "pyramid_roi_align/pyramid_roi_align_kernel_selector.h" -#include "pyramid_roi_align/pyramid_roi_align_kernel_base.h" - -#include - -namespace cldnn { -namespace ocl { - -struct pyramid_roi_align_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::PyramidROIAlign_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::pyramid_roi_align_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - - static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param) { - const auto& primitive = impl_param.typed_desc(); - auto params = get_default_params(impl_param); - auto optional_params = get_default_optional_params(impl_param.get_program()); - - const auto P2_idx = 1; - const auto P3_idx = 2; - const auto P4_idx = 3; - const auto P5_idx = 4; - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P2_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P3_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P4_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P5_idx))); - - params.sampling_ratio_x = primitive->sampling_ratio; - params.sampling_ratio_y = primitive->sampling_ratio; - - auto first_layer_scale = primitive->pyramid_scales[0]; - auto image_size_x = impl_param.get_input_layout(P2_idx).spatial(0) * first_layer_scale; - auto image_size_y = impl_param.get_input_layout(P2_idx).spatial(1) * first_layer_scale; - params.image_size_x = image_size_x; - params.image_size_y = image_size_y; - - params.pyramid_starting_level = primitive->pyramid_starting_level; - - return {params, optional_params}; - } -}; - -namespace detail { - -attach_pyramid_roi_align_impl::attach_pyramid_roi_align_impl() { - implementation_map::add(impl_types::ocl, typed_primitive_impl_ocl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f32, format::yxfb), - std::make_tuple(data_types::f32, format::byxf), - std::make_tuple(data_types::f16, format::bfyx), - std::make_tuple(data_types::f16, format::yxfb), - std::make_tuple(data_types::f16, format::byxf), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::pyramid_roi_align_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::pyramid_roi_align) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp index a1f81551305f97..bb2dba327f15b7 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp @@ -42,7 +42,6 @@ void register_implementations() { REGISTER_OCL(group_normalization); REGISTER_OCL(kv_cache); REGISTER_OCL(lrn); - REGISTER_OCL(lstm_gemm); REGISTER_OCL(lstm_elt); REGISTER_OCL(multiclass_nms); REGISTER_OCL(multinomial); @@ -55,7 +54,6 @@ void register_implementations() { REGISTER_OCL(permute); REGISTER_OCL(pooling); REGISTER_OCL(prior_box); - REGISTER_OCL(pyramid_roi_align); REGISTER_OCL(quantize); REGISTER_OCL(random_uniform); REGISTER_OCL(range); @@ -82,8 +80,6 @@ void 
register_implementations() { REGISTER_OCL(slice); REGISTER_OCL(strided_slice); REGISTER_OCL(tile); - REGISTER_OCL(lstm_dynamic_input); - REGISTER_OCL(lstm_dynamic_timeloop); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); REGISTER_OCL(grn); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp index 6c16814916ac67..6c27c72dc4caae 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp @@ -36,10 +36,6 @@ #include "intel_gpu/primitives/grn.hpp" #include "intel_gpu/primitives/group_normalization.hpp" #include "intel_gpu/primitives/lrn.hpp" -#include "intel_gpu/primitives/lstm.hpp" -#include "intel_gpu/primitives/lstm_dynamic.hpp" -#include "intel_gpu/primitives/lstm_dynamic_input.hpp" -#include "intel_gpu/primitives/lstm_dynamic_timeloop.hpp" #include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/multinomial.hpp" #include "intel_gpu/primitives/mvn.hpp" @@ -48,7 +44,6 @@ #include "intel_gpu/primitives/one_hot.hpp" #include "intel_gpu/primitives/permute.hpp" #include "intel_gpu/primitives/pooling.hpp" -#include "intel_gpu/primitives/pyramid_roi_align.hpp" #include "intel_gpu/primitives/quantize.hpp" #include "intel_gpu/primitives/random_uniform.hpp" #include "intel_gpu/primitives/range.hpp" @@ -125,7 +120,6 @@ REGISTER_OCL(grid_sample); REGISTER_OCL(group_normalization); REGISTER_OCL(kv_cache); REGISTER_OCL(lrn); -REGISTER_OCL(lstm_gemm); REGISTER_OCL(lstm_elt); REGISTER_OCL(multiclass_nms); REGISTER_OCL(multinomial); @@ -138,7 +132,6 @@ REGISTER_OCL(one_hot); REGISTER_OCL(permute); REGISTER_OCL(pooling); REGISTER_OCL(prior_box); -REGISTER_OCL(pyramid_roi_align); REGISTER_OCL(quantize); REGISTER_OCL(random_uniform); REGISTER_OCL(range); @@ -165,8 +158,6 @@ REGISTER_OCL(space_to_batch); REGISTER_OCL(space_to_depth); REGISTER_OCL(strided_slice); REGISTER_OCL(tile); -REGISTER_OCL(lstm_dynamic_input); -REGISTER_OCL(lstm_dynamic_timeloop); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); REGISTER_OCL(grn); diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h deleted file mode 100644 index 0dece56e7d2c96..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm_dynamic_input.hpp" -#include "primitive_inst.h" -#include "intel_gpu/runtime/error_handler.hpp" - -#include -#include - -namespace cldnn { - -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - program_node& dyn_length() const { return get_dependency(1); } - program_node& weights() const { return get_dependency(2); } - - program_node& bias() const { - CLDNN_ERROR_BOOL(id(), "Bias term", !bias_term(), "Trying to get non existing bias."); - return get_dependency(3); - } - - int32_t direction() const { return weights().get_output_layout().feature(); } - bool dyn_length_term() const { return !get_primitive()->dyn_length.empty(); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool weights_term() const { return !get_primitive()->weights.empty(); 
} -}; - -using lstm_dynamic_input_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_input_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_input_node const& node); - -public: - typed_primitive_inst(network& network, lstm_dynamic_input_node const& node); - - memory::ptr dyn_length_memory() const { return dep_memory_ptr(1); } - memory::ptr weights_memory() const { return dep_memory_ptr(2); } - memory::ptr bias_memory() const { - CLDNN_ERROR_BOOL(id(), "Bias term", !bias_term(), "Trying to get non existing bias memory."); - return dep_memory_ptr(3); - } - int32_t direction() const { return node->direction(); } - bool bias_term() const { return node->bias_term(); } -}; - -using lstm_dynamic_input_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h deleted file mode 100644 index 011c226cda9de5..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm_dynamic.hpp" -#include "primitive_inst.h" -#include "intel_gpu/runtime/error_handler.hpp" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - float clip() const { return get_primitive()->clip; } - bool input_forget() const { return get_primitive()->input_forget; } - primitive_id bias_id() const { return get_primitive()->bias; } - primitive_id weights_id() const { return get_primitive()->weights; } - primitive_id recurrent_id() const { return get_primitive()->recurrent; } - primitive_id initial_hidden_id() const { return get_primitive()->initial_hidden; } - primitive_id initial_cell_id() const { return get_primitive()->initial_cell; } - primitive_id dyn_length_id() const { return get_primitive()->dyn_length; } - primitive_id last_hidden_state_id() const { return get_primitive()->last_hidden_state; } - primitive_id last_cell_state_id() const { return get_primitive()->last_cell_state; } -}; - -using lstm_dynamic_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_node const& node); - - typed_primitive_inst(network& network, lstm_dynamic_node const& node); - - static void check_direction(program_node& node, int32_t direction, std::string name) { - if (node.get_output_layout().spatial(1) != direction) - CLDNN_ERROR_MESSAGE(node.id(), name + " directions size need to equal 1 or 2 (bidrectional) !"); - } - - static void check_common_lstm_dynamic_sizes(program_node& node, - int32_t batch_size, - int32_t hidden_size, - int32_t direction, - std::string name) { - auto node_layout = node.get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - name + " format", - 
node.get_output_layout().format.value, - "expected bfyx format", - format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " batch size", - node_layout.batch(), - "input batch size", - batch_size, - "Sizes mismatch, " + name + ": " + node.id()); - check_direction(node, direction, name); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " x size", - node_layout.spatial(0), - "input_size", - hidden_size, - "Sizes mismatch, " + name + ": " + node.id()); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " f size", - node_layout.feature(), - "1", - 1, - "Sizes mismatch, " + name + ": " + node.id()); - } -}; - -using lstm_dynamic_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h deleted file mode 100644 index 0d1aea8f9b2e64..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm_dynamic_timeloop.hpp" -#include "primitive_inst.h" - -#include -#include -#include - -namespace cldnn { - -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -private: - std::vector _param_list; - program_node& get_dependency_by_name(std::string val) const; - void init_params_list(); - inline size_t get_param_list_index(const std::string& dependency_tag) const { - return static_cast(std::distance(_param_list.begin(), std::find_if( - _param_list.begin(), _param_list.end(), [&](const std::string& tag) { return tag == dependency_tag; }))); - } - -public: - typed_program_node(std::shared_ptr prim, program& prog) - : parent(std::move(prim), prog) { - init_params_list(); - can_share_buffer(false); - } - - void reverse_optional_outputs_connections(); - size_t get_dependency_idx(std::string val) const; - - program_node& input() const { return get_dependency_by_name("input"); } - program_node& dyn_length() const { return get_dependency_by_name("dyn_length"); } - program_node& recurrent() const { return get_dependency_by_name("recurrent"); } - program_node& last_hidden_state() const { return get_dependency_by_name("last_hidden_output"); } - program_node& last_cell_state() const { return get_dependency_by_name("last_cell_output"); } - program_node& initial_hidden() const { return get_dependency_by_name("initial_hidden"); } - program_node& initial_cell() const { return get_dependency_by_name("initial_cell"); } - - float clip() const { return get_primitive()->clip; } - int32_t direction() const { return recurrent().get_output_layout().feature(); } - bool input_forget() const { return get_primitive()->input_forget; } - bool dyn_length_term() const { return !get_primitive()->dyn_length.empty(); } - bool recurrent_term() const { return !get_primitive()->recurrent.empty(); } - bool initial_hidden_term() const { return !get_primitive()->initial_hidden.empty(); } - bool initial_cell_term() const { return !get_primitive()->initial_cell.empty(); } - bool last_hidden_output_term() const { return !get_primitive()->last_hidden_state.empty(); } - bool last_cell_output_term() const { return !get_primitive()->last_cell_state.empty(); } -}; - -using lstm_dynamic_timeloop_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using 
parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_timeloop_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_timeloop_node const& node); - -public: - typed_primitive_inst(network& network, lstm_dynamic_timeloop_node const& node); - - memory::ptr dyn_length_memory() const { return get_dependency_memory("dyn_length"); } - memory::ptr recurrent_memory() const { return get_dependency_memory("recurrent"); } - memory::ptr last_hidden_output_memory() const { return get_dependency_memory("last_hidden_output"); } - memory::ptr last_cell_output_memory() const { return get_dependency_memory("last_cell_output"); } - memory::ptr initial_hidden_memory() const { return get_dependency_memory("initial_hidden"); } - memory::ptr initial_cell_memory() const { return get_dependency_memory("initial_cell"); } - - bool dyn_length_term() const { return node->dyn_length_term(); } - bool initial_hidden_term() const { return node->initial_hidden_term(); } - bool initial_cell_term() const { return node->initial_cell_term(); } - bool last_hidden_output_term() const { return node->last_hidden_output_term(); } - bool last_cell_output_term() const { return node->last_cell_output_term(); } - -private: - memory::ptr get_dependency_memory(std::string val) const { return dep_memory_ptr(node->get_dependency_idx(val)); } -}; - -using lstm_dynamic_timeloop_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h deleted file mode 100644 index 5ddeb6051bc8c6..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm.hpp" -#include "primitive_inst.h" - -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - using parent::parent; - - program_node& input() const { return get_dependency(0); } - program_node& weights() const { return get_dependency(1); } - program_node& recurrent() const { return get_dependency(2); } - program_node& bias() const { return get_dependency(3); } - program_node& hidden() const { return bias_term() ? get_dependency(4) : get_dependency(3); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool hidden_term() const { return !get_primitive()->hidden.empty(); } - uint32_t direction() const { return get_primitive()->direction; } -}; - -using lstm_gemm_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_gemm_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_gemm_node const& node); - -public: - typed_primitive_inst(network& network, lstm_gemm_node const& node); - - memory::ptr weights_memory() const { return dep_memory_ptr(1); } - memory::ptr recurrent_memory() const { return dep_memory_ptr(2); } - memory::ptr bias_memory() const { return dep_memory_ptr(3); } - memory::ptr hidden_memory() const { return bias_term() ? 
dep_memory_ptr(4) : dep_memory_ptr(3); } - bool bias_term() const { return !get_typed_desc()->bias.empty(); } - bool hidden_term() const { return !get_typed_desc()->hidden.empty(); } - uint32_t direction() const { return get_typed_desc()->direction; } -}; - -using lstm_gemm_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_inst.h deleted file mode 100644 index 2cad108fdd8128..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_inst.h +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm.hpp" -#include "primitive_inst.h" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - using parent::parent; - - program_node& input() const { return get_dependency(0); } - program_node& weights() const { return get_dependency(1); } - program_node& recurrent() const { return get_dependency(2); } - program_node& bias() const { return get_dependency(3); } - program_node& inital_hidden() const { return get_dependency(bias_term() ? 4 : 3); } - program_node& inital_cell() const { - // This doesn't scale. We should use a map to get the dependencies index at primitive level - return get_dependency(bias_term() ? (initial_hidden_term() ? 5 : 4) : (initial_hidden_term() ? 4 : 2)); - } - program_node& peepholes() const { return get_dependency(6); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool peepholes_term() const { return !get_primitive()->peepholes.empty(); } - bool initial_hidden_term() const { return !get_primitive()->initial_hidden.empty(); } - bool initial_cell_term() const { return !get_primitive()->initial_cell.empty(); } - std::vector activations() const { return get_primitive()->activations; } - std::vector activation_params() const { - return get_primitive()->activation_params; - } - size_t sequence_len() const { return get_primitive()->input.size(); } -}; - -using lstm_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_node const& node); - -public: - typed_primitive_inst(network& network, lstm_node const& node); - - memory& weights_memory() const { return dep_memory(1); } - memory& recurrent_memory() const { return dep_memory(2); } - memory& bias_memory() const { return dep_memory(3); } - memory& initial_hidden_memory() const { return dep_memory(bias_term() ? 4 : 3); } - memory& initial_cell_memory() const { - return dep_memory(bias_term() ? (initial_hidden_term() ? 5 : 4) : (initial_hidden_term() ? 
4 : 2)); - } - memory& peepholes_memory() const { return dep_memory(6); } - bool bias_term() const { return !argument->bias.empty(); } - bool peepholes_term() const { return !argument->peepholes.empty(); } - bool initial_hidden_term() const { return !argument->initial_hidden.empty(); } - bool initial_cell_term() const { return !argument->initial_cell.empty(); } - std::vector activations() const { return argument->activations; } - std::vector activation_params() const { return argument->activation_params; } -}; - -using lstm_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/pass_manager.h b/src/plugins/intel_gpu/src/graph/include/pass_manager.h index 0020eee07c6233..ba393760a07962 100644 --- a/src/plugins/intel_gpu/src/graph/include/pass_manager.h +++ b/src/plugins/intel_gpu/src/graph/include/pass_manager.h @@ -6,9 +6,6 @@ #include "intel_gpu/graph/program.hpp" #include "layout_optimizer.h" -#include "split_inst.h" -#include "lstm_inst.h" -#include "lstm_dynamic_inst.h" #include "quantize_inst.h" #include "eltwise_inst.h" #include "convolution_inst.h" @@ -82,9 +79,6 @@ class graph_initializations : public base_pass { private: void run(program& p) override; - void handle_split_node(program& p, split_node& node); - void handle_lstm_node(program& p, lstm_node& node); - void handle_dynamic_lstm_node(program& p, lstm_dynamic_node& node); void set_outputs(program& p); }; @@ -316,18 +310,6 @@ class trim_to_outputs : public base_pass { void run(program& p) override; }; -class strided_slice_optimize : public base_pass { -public: - strided_slice_optimize() : base_pass("strided_slice_optimize") {} - void run(program& p) override; -}; - -class reverse_optional_nodes_outputs : public base_pass { -public: - reverse_optional_nodes_outputs() : base_pass("reverse_optional_nodes_outputs") {} - void run(program& p) override; -}; - class concat_input_order : public base_pass { // This optimization changes order of inputs for concatenation to provide // better alignment for execution and allow for optimizing out in some cases. 
diff --git a/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h b/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h deleted file mode 100644 index 71126be00b3933..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/pyramid_roi_align.hpp" -#include "primitive_inst.h" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - // program_node& boxes() const { return get_dependency(0); } - program_node& P2() const { return get_dependency(1); } - program_node& P3() const { return get_dependency(2); } - program_node& P4() const { return get_dependency(3); } - program_node& P5() const { return get_dependency(4); } -}; - -using pyramid_roi_align_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(pyramid_roi_align_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(pyramid_roi_align_node const& node); - typed_primitive_inst(network& network, pyramid_roi_align_node const& node); - - memory& input() const { return dep_memory(0); } - memory& P2() const { return dep_memory(1); } - memory& P3() const { return dep_memory(2); } - memory& P4() const { return dep_memory(3); } - memory& P5() const { return dep_memory(4); } -}; - -using pyramid_roi_align_inst = typed_primitive_inst; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/split_inst.h b/src/plugins/intel_gpu/src/graph/include/split_inst.h deleted file mode 100644 index 0c3fb839f0e086..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/split_inst.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "intel_gpu/primitives/split.hpp" -#include "primitive_inst.h" - -#include - -namespace cldnn { - -using split_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(split_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(split_node const& node); - typed_primitive_inst(network& network, split_node const& node); -}; - -using split_inst = typed_primitive_inst; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm.cpp b/src/plugins/intel_gpu/src/graph/lstm.cpp deleted file mode 100644 index fa5e64e1fa33cf..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm.cpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm) - -layout lstm_inst::calc_output_layout(lstm_node const& node, kernel_impl_params const& impl_param) { - 
assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_node!"); - auto input_layout = impl_param.get_input_layout(); - auto hidden_layout = node.inital_hidden().get_output_layout(); - - // input = [ batch, sequence, direction, input_size ] - // weights = [ 1, direction, 4 * hidden_size, input_size ] - // recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] - // biases = [ 1, 1, direction, 4 * hidden_size ] - // hidden = [ batch, 1, direction, hidden_size ] - // cell = [ batch, 1, direction, hidden_size ] - // output = [ batch, sequence, direction, hidden_size ] - auto result = layout(input_layout.data_type, - format::bfyx, - tensor(hidden_layout.feature(), - input_layout.feature(), - hidden_layout.spatial(0), - hidden_layout.spatial(1))); - return result; -} - -std::string lstm_inst::to_string(lstm_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto peepholes_id = desc->peepholes != "" ? desc->peepholes : "no peepholes"; - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no inital hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no initial cell"; - - std::stringstream primitive_description; - - json_composite lstm_info; - lstm_info.add("weights id", weights_id); - lstm_info.add("recurrent id", recurrent_id); - lstm_info.add("bias id", std::move(bias_id)); - lstm_info.add("peepholes id", std::move(peepholes_id)); - lstm_info.add("initial_hidden id", std::move(initial_hidden_id)); - lstm_info.add("initial_cell id", std::move(initial_cell_id)); - node_info->add("lstm info", lstm_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_inst::typed_primitive_inst(network& network, lstm_node const& node) : parent(network, node) { - auto input_layout = node.input().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp deleted file mode 100644 index 44ee3720a1a142..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic) - -// input_tensor: [b: batch, f: max_sequence_length, x: input_size, y: direction] -// weights_tensor: [b: 1, f: direction, x: input_size, y: 4 * hidden_size] -// recurr_tensor: [b: 1, f: direction, x: hidden_size, y: 4 * hidden_size] -// init_hidden: [b: batch, f: 1, x: hidden_size, y: direction] -// init_cell: [b: batch, f: 1, x: hidden_size, y: direction] -// output_tensor: [b: batch, f: max_sequence_length, x: hidden_size, y: direction] -layout lstm_dynamic_inst::calc_output_layout(lstm_dynamic_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - /* - This program node is just placeholder for 
input + timeloop combinations, thus this is returning dummy layout. - */ - return impl_param.get_input_layout(); -} - -std::string lstm_dynamic_inst::to_string(lstm_dynamic_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no initial hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no initial cell"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_info; - lstm_dynamic_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_info.add("weights id", std::move(weights_id)); - lstm_dynamic_info.add("recurrent id", recurrent_id); - lstm_dynamic_info.add("bias id", bias_id); - lstm_dynamic_info.add("initial_hidden id", std::move(initial_hidden_id)); - lstm_dynamic_info.add("initial_cell id", initial_cell_id); - node_info->add("lstm_dynamic info", lstm_dynamic_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_inst::typed_primitive_inst(network& network, lstm_dynamic_node const& node) : parent(network, node) { - CLDNN_ERROR_MESSAGE(node.id(), - std::string("This primitive_inst should never be created. It should be replaced by ") - .append("lstm_dynamic_input + lstm_dynamic_timeloop combinations.")); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp deleted file mode 100644 index 0633c949f13ba5..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_input_inst.h" -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include <string> - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic_input) -// input_tensor: [b: batch, f: max_sequence_length, x: input_size, y: direction] -// weights_tensor: [b: 1, f: direction, x: input_size, y: 4 * hidden_size] -// output_tensor: [b: batch, f: max_sequence_length, x: 4 * hidden_size, y: direction] -layout lstm_dynamic_input_inst::calc_output_layout(lstm_dynamic_input_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast<bool>(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - auto input_layout = impl_param.get_input_layout(0); - auto weight_layout = impl_param.get_input_layout(2); - auto batch = input_layout.batch(); - auto direction = weight_layout.feature(); - auto output_sequence = input_layout.feature(); - return layout(input_layout.data_type, - input_layout.format, - tensor(batch, output_sequence, weight_layout.spatial(1), direction)); -} - -std::string lstm_dynamic_input_inst::to_string(lstm_dynamic_input_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto bias_id = desc->bias != "" ? 
desc->bias : "no bias"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_input_info; - lstm_dynamic_input_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_input_info.add("weights id", desc->weights); - lstm_dynamic_input_info.add("bias id", bias_id); - lstm_dynamic_input_info.add("max seq len", node.input().get_output_layout().feature()); - lstm_dynamic_input_info.add("hidden size", node.weights().get_output_layout().spatial(1) / 4); - lstm_dynamic_input_info.add("direction", node.weights().get_output_layout().feature()); - node_info->add("lstm_dynamic_input info", lstm_dynamic_input_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_input_inst::typed_primitive_inst(network& network, lstm_dynamic_input_node const& node) - : parent(network, node) { - // Check input - auto input_layout = node.input().get_output_layout(); - auto direction = node.direction(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); - lstm_dynamic_inst::check_direction(node.input(), direction, "input"); - - // check dynamic length - CLDNN_ERROR_BOOL(node.id(), - "Dynamic length memory", - !node.dyn_length_term(), - "Id of dynamic length memory is not set."); - auto dyn_length_size = node.dyn_length().get_output_layout().count(); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Batch", - node.get_output_layout().batch(), - "Dynamic tensor elements count.", - dyn_length_size, - "Should be equal."); - - // check weights - CLDNN_ERROR_BOOL(node.id(), "Weights memory", !node.weights_term(), "Id of weights memory is not set."); - auto weights_id = node.weights().id(); - auto weights_layout = node.weights().get_output_layout(); - auto hidden_size = weights_layout.spatial(1) / 4; - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "weights format", - node.weights().get_output_layout().format.value, - "expected bfyx format", - format::oiyx, format::lstm_weights_dio, format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Weights batch size", - weights_layout.batch(), - "1", - 1, - "Sizes mismatch, weights_id: " + weights_id); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Weights x size", - weights_layout.spatial(0), - "input_size", - input_layout.spatial(0), - "Sizes mismatch, weights_id: " + weights_id); - - // check bias - if (node.bias_term()) { - auto bias_id = node.id(); - auto bias_tensor = node.bias().get_output_layout().get_tensor(); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Bias count", - bias_tensor.count(), - "direction * 4 * hidden_size", - direction * 4 * hidden_size, - "Bias count mismtach, bias_id: " + bias_id); - lstm_dynamic_inst::check_direction(node.bias(), direction, "bias"); - } -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp deleted file mode 100644 index 6ec45a35e72e3b..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_timeloop_inst.h" -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic_timeloop) - -program_node& lstm_dynamic_timeloop_node::get_dependency_by_name(std::string val) const { - return 
get_dependency(get_dependency_idx(val)); -} - -void lstm_dynamic_timeloop_node::init_params_list() { - _param_list.push_back("input"); - _param_list.push_back("dyn_length"); - _param_list.push_back("recurrent"); - if (last_hidden_output_term()) - _param_list.push_back("last_hidden_output"); - if (last_cell_output_term()) - _param_list.push_back("last_cell_output"); - if (initial_hidden_term()) - _param_list.push_back("initial_hidden"); - if (initial_cell_term()) - _param_list.push_back("initial_cell"); -} - -void lstm_dynamic_timeloop_node::reverse_optional_outputs_connections() { - auto reverse_connections = [&](program_node& mutable_data_node, const std::string& dependency_tag) { - auto index_to_insert = get_param_list_index(dependency_tag); - mutable_data_node.dependencies.erase(std::remove_if(mutable_data_node.dependencies.begin(), mutable_data_node.dependencies.end(), - [&](const std::pair& dep) { - return this == dep.first; - })); - mutable_data_node.users.push_back(this); - users.remove(&mutable_data_node); - auto port_idx = get_port_from_deps(mutable_data_node.id()); - dependencies.insert(dependencies.begin() + index_to_insert, {&mutable_data_node, port_idx}); - // fix inputs/outputs - if (mutable_data_node.get_dependencies().empty()) { - myprog.get_inputs().push_back(&mutable_data_node); - } - if (mutable_data_node.is_output()) { - mutable_data_node.set_output(false); - auto& program_output = myprog.get_outputs(); - program_output.erase(std::remove(program_output.begin(), program_output.end(), &mutable_data_node)); - } - }; - - if (last_hidden_output_term()) { - reverse_connections(myprog.get_node(get_primitive()->last_hidden_state), "last_hidden_output"); - } - if (last_cell_output_term()) { - reverse_connections(myprog.get_node(get_primitive()->last_cell_state), "last_cell_output"); - } - - // moved mutable data do deps, try to set this node at output if no users - auto& outputs = myprog.get_outputs(); - if (users.empty() && std::find(outputs.begin(), outputs.end(), this) == outputs.end()) { - output = true; - myprog.get_outputs().push_back(this); - } -} - -size_t lstm_dynamic_timeloop_node::get_dependency_idx(std::string val) const { - auto ret = get_param_list_index(val); - CLDNN_ERROR_EQUAL(id(), - "Dependency index", - ret, - "out of range number", - _param_list.size(), - "Trying to get non-exsisting param!"); - return ret; -} - -// input_tensor: [b: batch, f: max_sequence_length, x: 4 * hiden_size, y: direction] -// recurr_tensor: [b: 1, f: direction, x: hidden_size, y: 4 * hidden_size] -// init_cell: [b: batch, f: 1, x: hidden_size, y: direction] -// output_tensor: [b: batch, f: max_sequence_length, x: hidden_size, y: direction] -layout lstm_dynamic_timeloop_inst::calc_output_layout(lstm_dynamic_timeloop_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - auto input_layout = impl_param.get_input_layout(); - auto batch = input_layout.batch(); - auto output_sequence = input_layout.feature(); - auto reccurent_layout = node.recurrent().get_output_layout(); - auto hidden_size = reccurent_layout.spatial(0); - auto direction = reccurent_layout.feature(); - return layout(input_layout.data_type, input_layout.format, tensor(batch, output_sequence, hidden_size, direction)); -} - -std::string lstm_dynamic_timeloop_inst::to_string(lstm_dynamic_timeloop_node const& node) { - auto desc = node.get_primitive(); - auto node_info = 
node.desc_to_json(); - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no initial hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no initial cell"; - auto last_cell_id = desc->last_cell_state != "" ? desc->last_cell_state : "no initial cell"; - auto last_hidden_id = desc->last_hidden_state != "" ? desc->last_hidden_state : "no initial hidden"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_input_info; - lstm_dynamic_input_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_input_info.add("recurrent id", desc->recurrent); - lstm_dynamic_input_info.add("initial cell id", std::move(initial_cell_id)); - lstm_dynamic_input_info.add("initial hidden id", initial_hidden_id); - lstm_dynamic_input_info.add("last cell id", last_cell_id); - lstm_dynamic_input_info.add("last hidden id", std::move(last_hidden_id)); - lstm_dynamic_input_info.add("max seq len", node.input().get_output_layout().feature()); - lstm_dynamic_input_info.add("hidden size", node.recurrent().get_output_layout().spatial(0)); - lstm_dynamic_input_info.add("direction", node.recurrent().get_output_layout().feature()); - node_info->add("lstm_dynamic_timeloop info", lstm_dynamic_input_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_timeloop_inst::typed_primitive_inst(network& network, lstm_dynamic_timeloop_node const& node) - : parent(network, node) { - auto batch_size = node.get_output_layout().batch(); - auto direction = node.direction(); - - // TODO: check input sizes - auto input_id = node.input().id(); - auto input_layout = node.input().get_output_layout(); - auto hidden_size = input_layout.spatial(0) / 4; - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); - lstm_dynamic_inst::check_direction(node.input(), direction, "input"); - - // check recurrent - CLDNN_ERROR_BOOL(node.id(), "Recurrent memory", !node.recurrent_term(), "Id of recurrent memory is not set."); - auto recurrent_id = node.recurrent().id(); - auto recurrent_layout = node.recurrent().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "recurrent format", - node.recurrent().get_output_layout().format.value, - "expected bfyx format", - format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent batch size", - recurrent_layout.batch(), - "1", - 1, - "Sizes mismatch, recurrent_id: " + recurrent_id); - if (recurrent_layout.feature() != direction) - CLDNN_ERROR_MESSAGE(node.id(), "Recurrent directions size needs to be equal to 1 or 2 (bidirectional)!"); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent x size", - recurrent_layout.spatial(0), - "hidden_size", - hidden_size, - "Sizes mismatch, recurrent_id: " + recurrent_id); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent y size", - recurrent_layout.spatial(1), - "4 * hidden_size", - 4 * hidden_size, - "Sizes mismatch, recurrent_id: " + recurrent_id); - - if (initial_cell_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.initial_cell(), - batch_size, - hidden_size, - direction, - "initial_cell"); - } - - if (initial_hidden_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.initial_hidden(), - batch_size, - hidden_size, - direction, - "initial_hidden"); - } - - if (node.last_hidden_output_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.last_hidden_state(), - batch_size, - hidden_size, - direction, - "optional_hidden_output"); - } - 
- if (node.last_cell_output_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.last_cell_state(), - batch_size, - hidden_size, - direction, - "optional_cell_output"); - } -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp b/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp deleted file mode 100644 index b4d98cd28898ff..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "intel_gpu/runtime/error_handler.hpp" -#include "lstm_gemm_inst.h" -#include "primitive_type_base.h" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_gemm) - -layout lstm_gemm_inst::calc_output_layout(lstm_gemm_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_gemm_node!"); - auto input_layout = impl_param.get_input_layout(0); - auto weights_layout = impl_param.get_input_layout(1); - - // input{bfyx} = [b: batch, f: sequence, x: input_size, y: 1] - // weights{bfyx} = [b: 1, f: direction, x: 4 * hidden_size, y: input_size ] - // recurrent{bfyx} = [b: 1, f: direction, x: 4 * hidden_size, y: hidden_size ] - // biases{bfyx} = [b: 1, f:1 , x: direction, y: 4 * hidden_size ] - // hidden{bfyx} = [b: batch, f: direction, x: 1 , y: hidden_size ] optional - // tempGEMM{bfyx} = [b: batch, f: direction, x: 4*hidden_size, y: 1] output - auto result = - layout(input_layout.data_type, - input_layout.format, - tensor(input_layout.batch(), weights_layout.feature(), weights_layout.spatial(1), 1)); - return result; -} - -std::string lstm_gemm_inst::to_string(lstm_gemm_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto hidden_id = desc->hidden != "" ? 
desc->hidden : "no inital hidden"; - - std::stringstream primitive_description; - - json_composite lstm_gemm_info; - lstm_gemm_info.add("weights id", weights_id); - lstm_gemm_info.add("recurrent id", recurrent_id); - lstm_gemm_info.add("bias id", std::move(bias_id)); - lstm_gemm_info.add("hidden id", hidden_id); - node_info->add("lstm gemm info", lstm_gemm_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_gemm_inst::typed_primitive_inst(network& network, lstm_gemm_node const& node) : parent(network, node) { - auto input_layout = node.input().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx, - format::fyxb); -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 43d0efc3dce4cd..aad680e3bd1a0e 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -45,9 +45,6 @@ #include "shuffle_channels_inst.h" #include "arg_max_min_inst.h" #include "dft_inst.h" -#include "lstm_inst.h" -#include "lstm_elt_inst.h" -#include "lstm_gemm_inst.h" #include "multiclass_nms_inst.h" #include "mutable_data_inst.h" #include "pooling_inst.h" @@ -56,7 +53,6 @@ #include "prior_box_inst.h" #include "proposal_inst.h" #include "reorder_inst.h" -#include "split_inst.h" #include "mvn_inst.h" #include "gemm_inst.h" #include "adaptive_pooling_inst.h" @@ -423,7 +419,6 @@ void program::prepare_nodes(topology const& topology) { for (const auto& prim : topo_map) { get_or_create(prim.second); } - add_split_outputs(); for (const auto& node : nodes_map) { auto node_ptr = node.second.get(); if (node_ptr == nullptr) @@ -534,8 +529,6 @@ void program::pre_optimize_graph(bool is_internal) { processing_order.calculate_BFS_processing_order(); // this method makes sense only for OOOQ (out of order execution queue) - apply_opt_pass(); - bool output_size_handling_enabled = analyze_output_size_handling_need(); for (auto& node : processing_order) { if (!node->is_type()) @@ -582,8 +575,6 @@ void program::pre_optimize_graph(bool is_internal) { apply_opt_pass(); } - apply_opt_pass(); - apply_opt_pass(); apply_opt_pass(output_size_handling_enabled); @@ -722,30 +713,6 @@ void program::transfer_memory_to_device() { } } -void program::add_split_outputs() { - auto itr = nodes_map.begin(); - while (itr != nodes_map.end()) { - auto node_itr = itr++; - auto& node = (*node_itr).second; - - if (node->is_type()) { - auto split_prim = node->as().typed_desc(); - input_info input(split_prim->input[0]); - auto split_num = split_prim->output_offsets.size(); - - // create crop for each split output provided - for (decltype(split_num) i = 0; i < split_num; i++) { - primitive_id output_id = node->id() + ":" + split_prim->output_ids[i]; - - // create dummy crop primitive and add it to nodes map - auto crop_prim = - std::make_shared(output_id, input, tensor{1, 1, 1, 1}, split_prim->output_offsets[i]); - get_or_create(crop_prim); - } - } - } -} - program::nodes_ordering& program::get_processing_order() { return processing_order; } const program::nodes_ordering& program::get_processing_order() const { return processing_order; } diff --git a/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp b/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp deleted file mode 100644 index 9b38b477f3d947..00000000000000 --- a/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp +++ /dev/null @@ -1,43 
+0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "pyramid_roi_align_inst.h" -#include "primitive_type_base.h" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(pyramid_roi_align) - -layout pyramid_roi_align_inst::calc_output_layout(pyramid_roi_align_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for " - "pyramid_roi_align node!"); - - auto desc = impl_param.typed_desc(); - - auto boxes_layout = impl_param.get_input_layout(0); - auto P2_layout = impl_param.get_input_layout(1); - - int32_t output_b = boxes_layout.batch(); - int32_t output_f = P2_layout.feature(); - - int32_t output_x = desc->output_size; - int32_t output_y = desc->output_size; - - return layout{P2_layout.data_type, P2_layout.format, {output_b, output_f, output_x, output_y}}; -} - -std::string pyramid_roi_align_inst::to_string(pyramid_roi_align_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - std::stringstream primitive_description; - json_composite pyramid_roi_align_info; - node_info->add("pyramid_roi_align_info", std::move(pyramid_roi_align_info)); - node_info->dump(primitive_description); - return primitive_description.str(); -} - -pyramid_roi_align_inst::typed_primitive_inst(network& network, pyramid_roi_align_node const& node) - : parent(network, node) {} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/split.cpp b/src/plugins/intel_gpu/src/graph/split.cpp deleted file mode 100644 index 5edf7eb0135b04..00000000000000 --- a/src/plugins/intel_gpu/src/graph/split.cpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "split_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/memory.hpp" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(split) - -layout split_inst::calc_output_layout(split_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for split_node!"); - auto desc = impl_param.typed_desc(); - auto output_ids = desc->output_ids; - auto output_offsets = desc->output_offsets; - auto param_num = output_ids.size(); - auto input_sizes = impl_param.get_non_padded_input_layout().get_tensor(); - tensor null_tensor { 0, 0, 0, 0 }; - - // check if output_ids count equals output_offsets count - CLDNN_ERROR_NOT_EQUAL(desc->id, - "Output_ids count", - param_num, - "output_offsets count", - output_offsets.size(), - "Output_ids count/ output_offsets count mismatch"); - - for (decltype(param_num) i = 0; i < param_num; i++) { - if (i != param_num - 1) - // check if output offset sizes is less than next output offset sizes - CLDNN_ERROR_TENSOR_SIZES_GREATER_THAN(desc->id, - "output_offsets", - output_offsets[i], - "next output_offsets", - output_offsets[i + 1], - "Output_offsets tensor/ next input output_offsets tensor mismatch"); - else - // check if output offset sizes matches output offsets sizes - CLDNN_ERROR_TENSOR_SIZES_GREATER_THAN(desc->id, - "Output_offsets", - output_offsets[i], - "input sizes", - input_sizes, - "Output_offsets tensor/ input tensor mismatch"); - - // check if offsets do not extend input sizes and if match the output sizes 
- CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(desc->id, - "Output_offsets", - output_offsets[i], - "0 value", - null_tensor, - "Invalid output_offsets: dims cannot be less than 0"); - } - - return impl_param.get_non_padded_input_layout(); -} - -std::string split_inst::to_string(split_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto output_ids = desc->output_ids; - auto output_offsets = desc->output_offsets; - auto& input = node.input(); - - std::stringstream primitive_description; - - json_composite split_info; - split_info.add("input id", input.id()); - split_info.add("output ids count", output_ids.size()); - split_info.add("offset count", output_offsets.size()); - - node_info->add("split info", split_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -split_inst::typed_primitive_inst(network& network, split_node const& node) : parent(network, node) { - CLDNN_ERROR_MESSAGE(node.id(), "Split primitive instance should not be created!"); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl deleted file mode 100644 index 00952a89514de9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_block_read.cl" -#include "include/batch_headers/sub_group_block_write.cl" -#include "include/batch_headers/fetch_data.cl" -#include "include/unit_type.cl" -#include "include/sub_group.cl" - -#if FP16_UNIT_USED - #define MAD_1X8(_result_block, _input_value, _weights_block) \ - { \ - _result_block.s0 = fma(_input_value, _weights_block.s0, _result_block.s0); \ - _result_block.s1 = fma(_input_value, _weights_block.s1, _result_block.s1); \ - _result_block.s2 = fma(_input_value, _weights_block.s2, _result_block.s2); \ - _result_block.s3 = fma(_input_value, _weights_block.s3, _result_block.s3); \ - _result_block.s4 = fma(_input_value, _weights_block.s4, _result_block.s4); \ - _result_block.s5 = fma(_input_value, _weights_block.s5, _result_block.s5); \ - _result_block.s6 = fma(_input_value, _weights_block.s6, _result_block.s6); \ - _result_block.s7 = fma(_input_value, _weights_block.s7, _result_block.s7); \ - } -#else - #define MAD_1X8(_result_block, _input_value, _weights_block) \ - { \ - _result_block.s0 = mad(_input_value, _weights_block.s0, _result_block.s0); \ - _result_block.s1 = mad(_input_value, _weights_block.s1, _result_block.s1); \ - _result_block.s2 = mad(_input_value, _weights_block.s2, _result_block.s2); \ - _result_block.s3 = mad(_input_value, _weights_block.s3, _result_block.s3); \ - _result_block.s4 = mad(_input_value, _weights_block.s4, _result_block.s4); \ - _result_block.s5 = mad(_input_value, _weights_block.s5, _result_block.s5); \ - _result_block.s6 = mad(_input_value, _weights_block.s6, _result_block.s6); \ - _result_block.s7 = mad(_input_value, _weights_block.s7, _result_block.s7); \ - } -#endif - -#define INC_OFFSET(_offset, _value) _offset += _value -#define SIMD_SIZE 8 - -REQD_SUB_GROUP_SIZE(SIMD_SIZE) -KERNEL(lstm_dynamic_input_bfyx_opt)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if BIAS_TERM - , const __global 
BIAS_TYPE* biases -#endif - ) -{ - const uint batch = (uint)get_global_id(1) % INPUT0_BATCH_NUM; - const uint dir = (uint)get_global_id(1) / INPUT0_BATCH_NUM; - const uint timestep = get_global_id(2); - if(timestep > (uint)dyn_lengths[batch]) - return; - // which general local work item within work group we have - const uint local_work_item_id = get_local_id(0); - // which id in SUBGROUP we have (0..7) - const uint sub_group_local_id = get_sub_group_local_id(); - // which SUBGROUP we have - const uint sub_group_id = local_work_item_id / SIMD_SIZE;//get_sub_group_id(); - const uint dir_sub_group_id = sub_group_id % SIMD_SIZE; - //which workgroup we have <0,1> - const uint wg_id = get_group_id(0); - const uint wg_offset = wg_id * (uint)get_local_size(0) * SIMD_SIZE; - //Subgroups have region of calcuations (ROC) within each local work item calculate simd_size values across y spatial. - //i.e sub_group_id = 1 have ROC, which starts at 64th y'th position - const uint sub_group_offset = SIMD_SIZE * 8; - const uint weights_single_dir_size = WEIGHTS_SIZE_X * WEIGHTS_SIZE_Y; - const uint dir_offset_for_weights = dir * weights_single_dir_size; - uint calcuation_offset = dir_offset_for_weights + wg_offset + dir_sub_group_id * sub_group_offset; - uint input_offset = GET_DATA_INDEX(INPUT0, batch, timestep, dir, sub_group_local_id); - const uint output_offset = GET_DATA_INDEX(OUTPUT, batch, timestep, dir, wg_offset + dir_sub_group_id * sub_group_offset); - -#if BIAS_TERM - //preload output with biases - const uint bias_calcuation_offset = dir * BIAS_SIZE_X + wg_offset + dir_sub_group_id * sub_group_offset; - UNIT_TYPE8 dot_prod = UNIT_BLOCK_READ8(biases, bias_calcuation_offset); -#else - UNIT_TYPE8 dot_prod = UNIT_VAL_ZERO; -#endif - - for(uint x = 0; x < INPUT0_SIZE_X / SIMD_SIZE; ++x) - { - UNIT_TYPE8 BLOCK_W0 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W1 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W2 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W3 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W4 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W5 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W6 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W7 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - - UNIT_TYPE input_value = input[input_offset]; - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 0), BLOCK_W0); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 1), BLOCK_W1); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 2), BLOCK_W2); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 3), BLOCK_W3); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 4), BLOCK_W4); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 5), BLOCK_W5); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 6), BLOCK_W6); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 7), BLOCK_W7); - - input_offset += SIMD_SIZE; - } - - UNIT_BLOCK_WRITE8(output, output_offset, dot_prod); -} - -#undef SIMD_SIZE -#undef INC_OFFSET -#undef MAD_1X8 -#undef OPT diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl deleted file mode 100644 index 7985508b4e3b7c..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" -#include "include/batch_headers/fetch_weights.cl" -#include "include/acc_type.cl" - -KERNEL(lstm_dynamic_input_ref)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint y = get_global_id(0); - const uint batch = (uint)get_global_id(1) % INPUT0_BATCH_NUM; - const uint dir = (uint)get_global_id(1) / INPUT0_BATCH_NUM; - const uint timestep = get_global_id(2); - - if(timestep > (uint)dyn_lengths[batch]) - return; - - ACCUMULATOR_TYPE dot_prod = 0; - for(uint x = 0; x < INPUT0_SIZE_X; ++x ) - { - const uint input_idx = GET_DATA_INDEX(INPUT0, batch, timestep, dir, x); - const uint weights_idx = GET_FILTER_INDEX(WEIGHTS, 0, 0, dir, y, x); - dot_prod += (ACCUMULATOR_TYPE)(input[input_idx] * weights[weights_idx]); - } - -#if BIAS_TERM - dot_prod += (ACCUMULATOR_TYPE)biases[GET_DATA_INDEX(BIAS, 0, 0, dir, y)]; -#endif - - output[GET_DATA_INDEX(OUTPUT, batch, timestep, dir, y)] = (OUTPUT_TYPE)dot_prod; -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl deleted file mode 100644 index 7beec1f77dec99..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#define ACTIVATION_LOGISTIC(input) (UNIT_VAL_ONE/(UNIT_VAL_ONE + exp(-input))) -#define ACTIVATION_HYPERBOLIC_TAN(input) (tanh(input)) - -KERNEL(lstm_dynamic_timeloop_ref)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global RECURRENT_TYPE* recurrent -#if INIT_HIDDEN_TERM - , const __global INIT_HIDDEN_TYPE* hidden -#endif -#if INIT_CELL_TERM - , const __global INIT_CELL_TYPE* cell -#endif -#if LAST_HIDDEN_TERM - , __global LAST_HIDDEN_TYPE* last_hidden -#endif -#if LAST_CELL_TERM - , __global LAST_CELL_TYPE* last_cell -#endif - ) -{ - const uint y_offset = (uint)get_global_id(0) * ELEMENTS_TO_COUNT; - const uint b = get_global_id(1); - const uint dir = get_global_id(2); - uint unroll_timesteps = dyn_lengths[b]; - - //if hidden_size is bigger then 256, then ELEMENTS_TO_COUNT will be hidden_size/256 - ACCUMULATOR_TYPE it[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE ot[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE zt[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE ft[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE eltiwse_vals[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE cell_vals[ELEMENTS_TO_COUNT]; - OUTPUT_TYPE output_value = UNIT_VAL_ZERO; - #if INIT_HIDDEN_TERM - bool use_hidden = true; - #else - bool use_hidden = false; - #endif //hidden_term - - #if INIT_CELL_TERM - bool use_cell = true; - #else - bool use_cell = false; - #endif //cell_term - - for(int timestep = 0; timestep 
< MAX_SEQUENCE_LENGTH; timestep++) - { - //not all workitems will do computations - if(timestep < unroll_timesteps) - { - for(uint element_idx = 0; element_idx < ELEMENTS_TO_COUNT; element_idx++) - { - const uint y = y_offset + element_idx; - // [f, i, z, o] - ft[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_F)]; - it[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_I)]; - zt[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_Z)]; - ot[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_O)]; - if(use_hidden) - { - for(uint x = 0; x < OUTPUT_SIZE_X; ++x) - { - if(timestep == 0) - { - #if INIT_HIDDEN_TERM - uint hidden_idx = GET_DATA_INDEX(INIT_HIDDEN, b, 0, dir, x); - ft[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_F, x)]); - it[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_I, x)]); - zt[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_Z, x)]); - ot[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_O, x)]); - #endif //INIT_HIDDEN_TERM - } - else - { - uint hidden_idx = GET_DATA_INDEX(OUTPUT, b, timestep - 1, dir, x); - ft[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_F, x)]); - it[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_I, x)]); - zt[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_Z, x)]); - ot[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_O, x)]); - } //else timesteo ==0 - }//for(uint x = 0; x < OUTPUT_SIZE_X; ++x) - }//if(use_hidden) - - //eltwise operation - eltiwse_vals[element_idx] = ACTIVATION_LOGISTIC(CLIP(it[element_idx])) * ACTIVATION_HYPERBOLIC_TAN(CLIP(zt[element_idx])); - #if INPUT_FORGET - eltiwse_vals[element_idx] *= ((ACCUMULATOR_TYPE)1 - ft[element_idx]); - #endif //INPUT_FORGET - - if(use_cell) - { - if(timestep == 0) - { - #if INIT_CELL_TERM - eltiwse_vals[element_idx] += cell[GET_DATA_INDEX(INIT_CELL, b, 0, dir, y)] * ACTIVATION_LOGISTIC(CLIP(ft[element_idx])); - #endif //INIT_CELL_TERM - } - else - { - eltiwse_vals[element_idx] += cell_vals[element_idx] * ACTIVATION_LOGISTIC(CLIP(ft[element_idx])); - } - } - //end of eltwise operation - }//for(uint cell_element = 0; cell_element < ELEMENTS_TO_COUNT; cell_element++) - } //first if(timestep < unroll_timesteps) - - //all workitems needs to hit the barrier before writing to global output memory - barrier(CLK_GLOBAL_MEM_FENCE); - - //not all workitems will do computations - if(timestep < unroll_timesteps) - { - for(uint element_idx = 0; element_idx < ELEMENTS_TO_COUNT; element_idx++) - { - const uint y = y_offset + element_idx; - output_value = (OUTPUT_TYPE)(ACTIVATION_HYPERBOLIC_TAN(eltiwse_vals[element_idx]) * ACTIVATION_LOGISTIC(ot[element_idx])); // hidden - output[GET_DATA_INDEX(OUTPUT, b, timestep, dir, y)] = output_value; - #if LAST_HIDDEN_TERM - if(timestep == unroll_timesteps - 1) - { - last_hidden[GET_DATA_INDEX(LAST_HIDDEN, b, 0, dir, y)] = output_value; - } - #endif //LAST_HIDDEN_TERM - cell_vals[element_idx] = (OUTPUT_TYPE)eltiwse_vals[element_idx]; - 
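// Descriptive note, not original kernel code: assuming the conventional LSTM
// formulation, the loop body above evaluates, per gate slice selected by the
// GEMM_OFFSET_F/I/Z/O offsets,
//   c_t = sigmoid(clip(f)) * c_{t-1} + sigmoid(clip(i)) * tanh(clip(z))   -> eltiwse_vals / cell_vals
//   h_t = sigmoid(o) * tanh(c_t)                                          -> output_value
// with the INPUT_FORGET build option additionally scaling the candidate
// contribution by (1 - f). Here c_{t-1} comes from cell_vals (or the
// INIT_CELL input at timestep 0), and h_t is written to the output tensor,
// which doubles as the recurrent hidden state read back on the next timestep.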
#if LAST_CELL_TERM - if(timestep == unroll_timesteps - 1) - { - last_cell[GET_DATA_INDEX(LAST_CELL, b, 0, dir, y)] = cell_vals[element_idx]; - } - #endif //LAST_CELL_TERM - //cleanup loop - use_hidden = true; - use_cell = true; - eltiwse_vals[element_idx] = UNIT_VAL_ZERO; - } - } //second if(timestep < unroll_timesteps) - - //all workitems needs to hit the barrier after writing to global output memory - barrier(CLK_GLOBAL_MEM_FENCE); - } -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl deleted file mode 100644 index 8f9157a9521746..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint y = get_global_id(0); - const uint b = get_global_id(1); - - ACCUMULATOR_TYPE dotProd = 0; - for(uint x = 0; x < INPUT0_SIZE_X; ++x ) { - const uint input_idx = GET_DATA_INDEX(INPUT0, b, 0, INPUT_DIRECTION, x); - const uint weights_idx = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, x); - dotProd += (ACCUMULATOR_TYPE)(input[input_idx] * weights[weights_idx]); - } - -#if HIDDEN_TERM - for(uint x = 0; x < HIDDEN_SIZE_X; ++x ) { - const uint hidden_idx = GET_DATA_INDEX(HIDDEN, b, 0, HIDDEN_DIRECTION, x); - const uint recurrent_idx = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, x); - dotProd += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[recurrent_idx]); - } -#endif - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - dotProd += (ACCUMULATOR_TYPE)biases[bias_idx]; -#endif - const uint output_idx = GET_DATA_INDEX(OUTPUT, b, 0, 0, y); - output[output_idx] = (OUTPUT_TYPE)dotProd; -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl deleted file mode 100644 index 9536c81e5ee23b..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_shuffle.cl" -#include "include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -#ifndef SIMD -#define SIMD 16 -#endif - -// Sums value of result across all subgroups. 
-#define SUM_ACROSS_SUB_GROUP(val) \ - \ -{ \ - val += _sub_group_shuffle(val, x+1); \ - val += _sub_group_shuffle(val, x+2); \ - val += _sub_group_shuffle(val, x+4); \ - val += (SIMD > 8) ? _sub_group_shuffle(val, x+8) : 0; \ - val += (SIMD > 16) ? _sub_group_shuffle(val, x+16) : 0; \ -} - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output - -__attribute__((reqd_work_group_size(SIMD, 1, 1))) -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint x = get_local_id(0); - const uint y = get_global_id(1); - const int local_sz = get_local_size(0); - const int weight_num_rows = get_global_size(1); - - uint K; - int start_offset; - int end_offset; - int matrix_offset; - int vector_offset; - float4 sum; - float result; - - K = INPUT0_SIZE_X; // Width of weight matrix - start_offset = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, 0); // set as the starting offset of the weight matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // Weight offset for the work item to work on - vector_offset = GET_DATA_INDEX(INPUT0, 0, 0, INPUT_DIRECTION, (x*4)); // Input offset for the work item to work on - sum = (float4)(0.f); - result = 0; - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - float4 mask = (float4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - float4 m = (float4) (weights[matrix_offset], weights[matrix_offset + 1], weights[matrix_offset + 2], weights[matrix_offset + 3]); - m = m * mask; - - const float4 v = (float4) (input[vector_offset], input[vector_offset + 1], input[vector_offset + 2], input[vector_offset + 3]); - - sum = mad(m, v, sum); - } - - result = sum.x + sum.y + sum.z + sum.w; - -#if HIDDEN_TERM - K = HIDDEN_SIZE_X; // width of recurrent matrix - start_offset = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, 0); // set as the starting offset of the recurrent matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // recurrent offset for the work item to work on - vector_offset = GET_DATA_INDEX(HIDDEN, 0, 0, HIDDEN_DIRECTION, (x*4)); // hidden vector offset for the work item to work on - sum = (float4)(0.f); - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - float4 mask = (float4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - float4 m = (float4) (recurrent[matrix_offset], recurrent[matrix_offset + 1], recurrent[matrix_offset + 2], recurrent[matrix_offset + 3]); - m = m * mask; - - const float4 v = (float4) (hidden[vector_offset], hidden[vector_offset + 1], hidden[vector_offset + 2], hidden[vector_offset + 3]); - - sum = mad(m, v, sum); - } - - result += sum.x + sum.y + sum.z + sum.w; -#endif - - // Add together partial sums contained in each work item's "result" variable - SUM_ACROSS_SUB_GROUP(result); - - if(x == 0) - { - output[y] = 
(OUTPUT_TYPE)result; - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - float bias = (ACCUMULATOR_TYPE)biases[bias_idx]; - output[y] += (OUTPUT_TYPE)bias; -#endif - } -} - -#undef SUM_ACROSS_SUB_GROUP -#undef SIMD diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl deleted file mode 100644 index 15c68604ce5442..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_shuffle.cl" -#include "include/batch_headers/fetch_data.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -#ifndef SIMD -#define SIMD 16 -#endif - -// Sums value of result across all subgroups. -#define SUM_ACROSS_SUB_GROUP(val) \ - \ -{ \ - val += _sub_group_shuffle(val, x+1); \ - val += _sub_group_shuffle(val, x+2); \ - val += _sub_group_shuffle(val, x+4); \ - val += _sub_group_shuffle(val, x+8); \ -} - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output - -__attribute__((reqd_work_group_size(SIMD, 1, 1))) -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint x = get_local_id(0); - const uint y = get_global_id(1); - const int local_sz = get_local_size(0); - - uint K; - int start_offset; - int end_offset; - int matrix_offset; - int vector_offset; - float4 sum; - float result; - - K = INPUT0_SIZE_X; // Width of weight matrix - start_offset = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, 0); // set as the starting offset of the weight matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // Weight offset for the work item to work on - vector_offset = GET_DATA_INDEX(INPUT0, 0, 0, INPUT_DIRECTION, (x*4)); // Input offset for the work item to work on - sum = (float4)(0.f); - result = 0; - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - half4 mask = (half4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - half4 m = (half4) (weights[matrix_offset], weights[matrix_offset + 1], weights[matrix_offset + 2], weights[matrix_offset + 3]); - m = m * mask; - - const half4 v = (half4)(input[vector_offset], input[vector_offset + 1], input[vector_offset + 2], input[vector_offset + 3]); - - sum = mad(convert_float4(m), convert_float4(v), sum); - } - - result = sum.x + sum.y + sum.z + sum.w; - -#if HIDDEN_TERM - K = HIDDEN_SIZE_X; // width of recurrent matrix - start_offset = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, 0); // set as the starting offset of the recurrent matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // recurrent offset for the work item to work on - vector_offset = 
GET_DATA_INDEX(HIDDEN, 0, 0, HIDDEN_DIRECTION, (x*4)); // hidden vector offset for the work item to work on - sum = (float4)(0.f); - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - half4 mask = (half4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - half4 m = (half4) (recurrent[matrix_offset], recurrent[matrix_offset + 1], recurrent[matrix_offset + 2], recurrent[matrix_offset + 3]); - m = m * mask; - - const half4 v = (half4) (hidden[vector_offset], hidden[vector_offset + 1], hidden[vector_offset + 2], hidden[vector_offset + 3]); - - sum = mad(convert_float4(m), convert_float4(v), sum); - } - - result += sum.x + sum.y + sum.z + sum.w; -#endif - - // Add together partial sums contained in each work item's "result" variable - SUM_ACROSS_SUB_GROUP(result); - - if(x == 0) - { - output[y] = 0;// (half)result; - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - half bias = biases[bias_idx]; - result += (float)bias; -#endif - - output[y] = (half)result; - //output[y] = convert_half_rte(result); - - - } -} - -#undef SUM_ACROSS_SUB_GROUP -#undef SIMD diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl deleted file mode 100644 index 4294b1244f8104..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" - -#define PYRAMID_LEVELS 4 - -struct Parameters -{ - int size_y, size_x, f_pitch, x_pitch, y_pitch, offset; -}; - -__constant struct Parameters parameters [PYRAMID_LEVELS] = - { - { INPUT1_SIZE_Y, INPUT1_SIZE_X, INPUT1_FEATURE_PITCH, INPUT1_X_PITCH, INPUT1_Y_PITCH, INPUT1_OFFSET }, - { INPUT2_SIZE_Y, INPUT2_SIZE_X, INPUT2_FEATURE_PITCH, INPUT2_X_PITCH, INPUT2_Y_PITCH, INPUT2_OFFSET }, - { INPUT3_SIZE_Y, INPUT3_SIZE_X, INPUT3_FEATURE_PITCH, INPUT3_X_PITCH, INPUT3_Y_PITCH, INPUT3_OFFSET }, - { INPUT4_SIZE_Y, INPUT4_SIZE_X, INPUT4_FEATURE_PITCH, INPUT4_X_PITCH, INPUT4_Y_PITCH, INPUT4_OFFSET } - }; - -inline INPUT1_TYPE FUNC(accumulate)(INPUT1_TYPE acc, INPUT1_TYPE val) { - return max(acc, val); -} - -#define ACCUMULATOR_INIT_VAL INPUT1_VAL_MIN - -KERNEL(pyramidROIAlign_gpu_ref)( - const __global INPUT0_TYPE *boxes, - const __global INPUT1_TYPE *P2, - const __global INPUT2_TYPE *P3, - const __global INPUT3_TYPE *P4, - const __global INPUT4_TYPE *P5, - __global OUTPUT_TYPE *output) -{ - const uint oyx = get_global_id(0); - const uint ox = oyx % OUTPUT_SIZE_X; - const uint oy = oyx / OUTPUT_SIZE_X; - const uint of = get_global_id(1); - const uint kerNum = (uint) get_global_id(2); - - INPUT0_TYPE hU = boxes[GET_DATA_INDEX(INPUT0, kerNum, 3, 0, 0)]; - INPUT0_TYPE hL = boxes[GET_DATA_INDEX(INPUT0, kerNum, 1, 0, 0)]; - INPUT0_TYPE h = hU - hL; - INPUT0_TYPE wU = boxes[GET_DATA_INDEX(INPUT0, kerNum, 2, 0, 0)]; - INPUT0_TYPE wL = boxes[GET_DATA_INDEX(INPUT0, kerNum, 0, 0, 0)]; - INPUT0_TYPE w = wU - wL; - - // TODO This scale could be used when box coordinates are not normalized, but in pixel coordinates. 
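// Context for the level selection below (descriptive comment, not kernel
// code): each box is mapped to one pyramid level via
//   roi_level = round(PYRAMID_STARTING_LEVEL + log2(sqrt(h * w) * scale)),
// clamped to [0, PYRAMID_LEVELS - 1], so larger boxes sample coarser feature
// maps. This appears to follow the FPN level-assignment heuristic
// (k = k0 + log2(sqrt(w * h) / 224) in the original paper); with normalized
// box coordinates the scale factor stays 1, and the #ifdef below would
// restore the 1 / sqrt(image_area) normalization for pixel-space boxes.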
-#ifdef PYRAMID_ROI_ALIGN_PIXEL_BOXES
-    float image_area = IMAGE_SIZE_X * IMAGE_SIZE_Y;
-    float scale = 1.f / sqrt(image_area);
-#else
-    float scale = 1.f;
-#endif
-
-    int roi_level = (int)round(PYRAMID_STARTING_LEVEL + log2(sqrt(h*w) * scale));
-    // 0 <= roi_level < PYRAMID_LEVELS
-    roi_level = min(PYRAMID_LEVELS - 1, max(0, roi_level));
-
-    const __global INPUT1_TYPE* feature_map_ptrs[PYRAMID_LEVELS];
-
-    feature_map_ptrs[0] = P2;
-    feature_map_ptrs[1] = P3;
-    feature_map_ptrs[2] = P4;
-    feature_map_ptrs[3] = P5;
-
-    const __global INPUT1_TYPE* feature_map_ptr = feature_map_ptrs[roi_level];
-
-    const uint sampling_ratio_x = SAMPLING_RATIO_X != 0 ? SAMPLING_RATIO_X : (uint)ceil(1.f * w * IMAGE_SIZE_X / OUTPUT_SIZE_X);
-    const uint sampling_ratio_y = SAMPLING_RATIO_Y != 0 ? SAMPLING_RATIO_Y : (uint)ceil(1.f * h * IMAGE_SIZE_Y / OUTPUT_SIZE_Y);
-
-    // calculate coefficients for transformation
-    INPUT0_TYPE y1 = hL * (parameters[roi_level].size_y - 1);
-    INPUT0_TYPE x1 = wL * (parameters[roi_level].size_x - 1);
-    INPUT0_TYPE y2 = hU * (parameters[roi_level].size_y - 1);
-    INPUT0_TYPE x2 = wU * (parameters[roi_level].size_x - 1);
-    INPUT0_TYPE deltaX = (x2 - x1) / (OUTPUT_SIZE_X);
-    INPUT0_TYPE deltaY = (y2 - y1) / (OUTPUT_SIZE_Y);
-    INPUT0_TYPE pool_deltaX = deltaX / sampling_ratio_x;
-    INPUT0_TYPE pool_deltaY = deltaY / sampling_ratio_y;
-
-    uint data_base_offset = parameters[roi_level].offset + parameters[roi_level].f_pitch * of;
-
-    INPUT0_TYPE y_base = y1 + oy * deltaY + TO_INPUT0_TYPE(0.5f) * pool_deltaY;
-    INPUT0_TYPE x_base = x1 + ox * deltaX + TO_INPUT0_TYPE(0.5f) * pool_deltaX;
-
-    INPUT1_TYPE accumulator = ACCUMULATOR_INIT_VAL;
-
-    // transformation
-    for (int yi = 0; yi < sampling_ratio_y; ++yi) {
-        INPUT0_TYPE y = y_base + yi * pool_deltaY;
-        int y_low = (int)floor(y);
-        int y_high = (int)ceil(y);
-
-        y_low = clamp(y_low, 0, parameters[roi_level].size_y - 1);
-        y_high = clamp(y_high, 0, parameters[roi_level].size_y - 1);
-
-        if (y_low == y_high) {
-            if (y_high + 1 <= parameters[roi_level].size_y)
-                y_high += 1;
-            else
-                y_low -= 1;
-        }
-
-        INPUT0_TYPE y_high_coeff = y - y_low;
-        INPUT0_TYPE y_low_coeff = y_high - y;
-
-        for (int xi = 0; xi < sampling_ratio_x; ++xi) {
-            INPUT0_TYPE x = x_base + xi * pool_deltaX;
-
-            int x_left = (int)floor(x);
-            int x_right = (int)ceil(x);
-
-            x_left = clamp(x_left, 0, parameters[roi_level].size_x - 1);
-            x_right = clamp(x_right, 0, parameters[roi_level].size_x - 1);
-
-            if (x_left == x_right) {
-                if (x_right + 1 <= parameters[roi_level].size_x)
-                    x_right += 1;
-                else
-                    x_left -= 1;
-            }
-
-            INPUT0_TYPE x_right_coeff = x - x_left;
-            INPUT0_TYPE x_left_coeff = x_right - x;
-
-            uint low_left_idx = data_base_offset + parameters[roi_level].x_pitch * x_left + parameters[roi_level].y_pitch * y_low;
-            uint high_left_idx = data_base_offset + parameters[roi_level].x_pitch * x_left + parameters[roi_level].y_pitch * y_high;
-            uint low_right_idx = data_base_offset + parameters[roi_level].x_pitch * x_right + parameters[roi_level].y_pitch * y_low;
-            uint high_right_idx = data_base_offset + parameters[roi_level].x_pitch * x_right + parameters[roi_level].y_pitch * y_high;
-
-            INPUT1_TYPE low_left_val = feature_map_ptr[low_left_idx];
-            INPUT1_TYPE high_left_val = feature_map_ptr[high_left_idx];
-            INPUT1_TYPE low_right_val = feature_map_ptr[low_right_idx];
-            INPUT1_TYPE high_right_val = feature_map_ptr[high_right_idx];
-
-            INPUT1_TYPE left_val = y_low_coeff * low_left_val + y_high_coeff * high_left_val;
-            INPUT1_TYPE right_val = y_low_coeff * low_right_val + 
y_high_coeff * high_right_val; - - INPUT1_TYPE interpolated_val = x_left_coeff * left_val + x_right_coeff * right_val; - - accumulator = FUNC_CALL(accumulate)(accumulator, interpolated_val); - } - } - - uint output_idx = GET_DATA_INDEX(OUTPUT, kerNum, of, oy, ox); - output[output_idx] = TO_OUTPUT_TYPE(accumulator); -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index fc946d9e4272bb..2b6f7be857be8c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -39,7 +39,6 @@ enum class KernelType { REGION_YOLO, REORG_YOLO, MVN, - LSTM_GEMM, LSTM_ELT, BORDER, TILE, @@ -48,7 +47,6 @@ enum class KernelType { BUCKETIZE, GEMM, GRID_SAMPLE, - PYRAMID_ROI_ALIGN, CONTRACT, ONE_HOT, GATHER, @@ -65,8 +63,6 @@ enum class KernelType { STRIDED_SLICE, REVERSE_SEQUENCE, QUANTIZE, - LSTM_DYNAMIC_INPUT, - LSTM_DYNAMIC_TIMELOOP, REDUCE, GATHER_TREE, SPACE_TO_DEPTH, diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h deleted file mode 100644 index bfb4575248c6d3..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include -#include - -namespace kernel_selector { -class KernelRunnerInterface { -public: - // Gets a list of kernels, executes them and returns the run time of each kernel (in nano-seconds). - virtual std::vector run_kernels(const kernel_selector::KernelsData& kernelsData) = 0; - - virtual ~KernelRunnerInterface() = default; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp index fbf33dbedd750c..b2bb2f59f06efa 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp @@ -340,7 +340,6 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::winograd_6x3_s1_fused_weights: return "WINOGRAD_6x3_S1_FUSED_WEIGHTS"; case WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb: return "IMAGE_2D_WEIGHTS_WINOGRAD_6x3_S1_FBXYB"; case WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb: return "IMAGE_2D_WEIGHTS_WINOGRAD_6x3_S1_XFBYB"; - case WeightsLayout::dlstm_dir_io: return "DLSTM_DIR_IO"; case WeightsLayout::os_is_yx_isa8_osv8_isv4: return "OS_IS_YX_ISA8_OSV8_ISV4"; case WeightsLayout::os_is_yx_isa8_osv16_isv4: return "OS_IS_YX_ISA8_OSV16_ISV4"; case WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4: return "OS_IS_YX_ISA8_OSV8_ISV4_SWIZZLED_BY_4"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h index c8ba581f51c696..dec0d3476ce9a5 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h @@ -228,14 +228,6 @@ class ParamsKey { uint32_t stride : 1; uint32_t broadcast : 1; } eltwise; - struct lstm_gemm_t { - uint32_t bias : 1; - uint32_t hidden : 1; - } lstm_gemm; - struct lstm_dynamic_t { - uint32_t last_hidden : 1; - uint32_t last_cell : 1; - } lstm_dynamic; struct lstm_elt_t { uint32_t cell : 1; } lstm_elt; @@ -338,11 +330,7 @@ class ParamsKey 
{ void EnableEltwiseStride(); void EnableEltwiseBroadcast() { key.restrict.val.dedicated.eltwise.broadcast = 1; } - void EnableLSTMGEMMBias() { key.restrict.val.dedicated.lstm_gemm.bias = 1; } - void EnableLSTMGEMMHidden() { key.restrict.val.dedicated.lstm_gemm.hidden = 1; } void EnableLSTMEltCell() { key.restrict.val.dedicated.lstm_elt.cell = 1; } - void EnableLSTMDyanmicOptionalHiddenOutput() { key.restrict.val.dedicated.lstm_dynamic.last_hidden = 1; } - void EnableLSTMDyanmicOptionalCellOutput() { key.restrict.val.dedicated.lstm_dynamic.last_cell = 1; } void EnableConcatKernelPerInput() { key.restrict.val.dedicated.concat.kernelPerInput = 1; } void EnableConcatOneKernel() { key.restrict.val.dedicated.concat.oneKernel = 1; } void EnableArgMaxMinAxis(ArgMaxMinAxis a); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp deleted file mode 100644 index 3f3e5c11fd2936..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemm_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" - -namespace kernel_selector { -JitConstants LSTMGemmKernelBase::GetJitConstants(const lstm_gemm_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - const auto& weights = params.weights; - const auto& recurrent = params.recurrent; - const auto& hidden = params.hidden; - const auto& bias = params.bias; - if (params.hasBias) { - jit.AddConstants({MakeJitConstant("BIAS", bias), MakeJitConstant("BIAS_TERM", true)}); - } - if (params.hasHidden) { - jit.AddConstants({MakeJitConstant("HIDDEN", hidden), - MakeJitConstant("HIDDEN_TERM", true), - MakeJitConstant("RECURRENT", recurrent), - MakeJitConstant("HIDDEN_DIRECTION", params.hidden_direction)}); - } - jit.AddConstants({MakeJitConstant("WEIGHTS", weights)}); - jit.AddConstants({MakeJitConstant("DIRECTION", params.direction)}); - jit.AddConstants({MakeJitConstant("INPUT_DIRECTION", params.input_direction)}); - - return jit; -} - -KernelsData LSTMGemmKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_gemm_params& orgParams = static_cast(params); - - KernelData kd = KernelData::Default(params, orgParams.inputs.size()); - - const auto& input = orgParams.inputs[0]; - - auto newParams = orgParams; - newParams.inputs.resize(1); - newParams.inputs[0] = input; - auto out = newParams.outputs[0]; - // TODO: reorder weights if needed - auto& kernel = kd.kernels[0]; - auto cldnnJit = GetJitConstants(newParams); - auto entryPoint = GetEntryPoint(kernelName, newParams.layerID, params, options); - auto jit = CreateJit(kernelName, cldnnJit, entryPoint); - - kernel.params.workGroups.global = {out.X().v, out.Batch().v, 1}; - kernel.code.kernelString = GetKernelString(kernelName, jit, entryPoint, params.engineInfo); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::OUTPUT, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::WEIGHTS, 0}); - if (orgParams.hasHidden) { - kernel.params.arguments.push_back({ArgumentDescriptor::Types::HIDDEN, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::RECURRENT, 
0}); - } - if (orgParams.hasBias) { - kernel.params.arguments.push_back({ArgumentDescriptor::Types::BIAS, 0}); - } - - return {kd}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h deleted file mode 100644 index 116d9426929b9b..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_gemm_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_gemm_params : public base_params { - lstm_gemm_params() : base_params(KernelType::LSTM_GEMM) {} - - DataTensor weights; - DataTensor recurrent; - DataTensor bias; - DataTensor hidden; - bool hasBias = false; - bool hasHidden = false; - uint32_t direction = 0; - uint32_t input_direction = 0; // for bidirectional node fusion in stacked LSTMs - uint32_t hidden_direction = 0; - - void SetBias(const DataTensor& v) { - bias = v; - hasBias = true; - } - - void SetHidden(const DataTensor& v) { - hidden = v; - hasHidden = true; - } - - ParamsKey GetParamsKey() const override { - ParamsKey k = base_params::GetParamsKey(); - - if (hasBias) { - k.EnableLSTMGEMMBias(); - } - - if (hasHidden) { - k.EnableLSTMGEMMHidden(); - } - - return k; - } -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_gemm_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_gemm_optional_params : optional_params { - lstm_gemm_optional_params() : optional_params(KernelType::LSTM_GEMM) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTMGemmKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTMGemmKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTMGemmKernelBase() {} - - struct DispatchData : public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_gemm_params& params) const; - KernelsData GetCommonKernelsData(const Params& params, const optional_params& optParams) const; - - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_GEMM) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp deleted file mode 100644 index 3d1ee9175dfd7c..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemm_kernel_ref.h" -#include "kernel_selector_utils.h" - -namespace 
kernel_selector {
-
-ParamsKey LSTMGemmKernelRef::GetSupportedKey() const {
-    ParamsKey k;
-    k.EnableInputDataType(Datatype::F16);
-    k.EnableInputDataType(Datatype::F32);
-    k.EnableOutputDataType(Datatype::F16);
-    k.EnableOutputDataType(Datatype::F32);
-    k.EnableDifferentTypes();
-    k.EnableAllInputLayout();
-    k.EnableAllOutputLayout();
-    k.EnableTensorOffset();
-    k.EnableTensorPitches();
-    k.EnableBatching();
-    k.EnableLSTMGEMMBias();
-    k.EnableLSTMGEMMHidden();
-    return k;
-}
-
-KernelsData LSTMGemmKernelRef::GetKernelsData(const Params& params, const optional_params& options) const {
-    return GetCommonKernelsData(params, options);
-}
-
-KernelsPriority LSTMGemmKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const {
-    return FORCE_PRIORITY_9;
-}
-}  // namespace kernel_selector
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h
deleted file mode 100644
index b729ab05f0d58e..00000000000000
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "lstm_gemm_kernel_base.h"
-
-namespace kernel_selector {
-class LSTMGemmKernelRef : public LSTMGemmKernelBase {
-public:
-    LSTMGemmKernelRef() : LSTMGemmKernelBase("lstm_gemm_gpu_bfyx_ref") {}
-    virtual ~LSTMGemmKernelRef() {}
-
-    KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
-    KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
-    ParamsKey GetSupportedKey() const override;
-};
-}  // namespace kernel_selector
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp
deleted file mode 100644
index b997572d754c37..00000000000000
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "lstm_gemm_kernel_selector.h"
-#include "lstm_gemm_kernel_ref.h"
-#include "lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h"
-#include "lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h"
-
-namespace kernel_selector {
-lstm_gemm_kernel_selector::lstm_gemm_kernel_selector() {
-    Attach<LSTMGemmKernelRef>();
-    Attach<LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16>();
-    Attach<LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16>();
-}
-
-KernelsData lstm_gemm_kernel_selector::GetBestKernels(const Params& params, const optional_params& options) const {
-    return GetNaiveBestKernel(params, options, KernelType::LSTM_GEMM);
-}
-}  // namespace kernel_selector
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h
deleted file mode 100644
index f878705ceace2c..00000000000000
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "kernel_selector.h"
-
-namespace kernel_selector {
-class lstm_gemm_kernel_selector : public kernel_selector_base {
-public:
-    static lstm_gemm_kernel_selector& Instance() {
-        static lstm_gemm_kernel_selector instance_;
-        return instance_;
-    }
-
-    
lstm_gemm_kernel_selector(); - - virtual ~lstm_gemm_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp deleted file mode 100644 index 18d57c4920a248..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F32); - k.EnableDifferentTypes(); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableLSTMGEMMHidden(); - return k; -} - -DeviceFeaturesKey LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::get_required_device_features_key(const Params& params, const optional_params& options) const { - DeviceFeaturesKey k; - k.requires_subgroups(); - k.requires_subgroup_shuffle(); - - return k; -} - -KernelsData LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetKernelsData(const Params& params, - const optional_params& options) const { - KernelsData kernelsData = GetCommonKernelsData(params, options); - auto& kernel = kernelsData[0].kernels[0]; - - // This kernel is good if - // 1) Batch size is 1 - // 2) The input size y-x size is 64x1 - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - const auto& out = orgParams.outputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - kernel.params.workGroups.global = {16, out.X().v, out.Batch().v}; - - return kernelsData; -} - -KernelsPriority LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetKernelsPriority(const Params& params, const optional_params& /*options*/) const { - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - return FORCE_PRIORITY_1; - else - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h deleted file mode 100644 index ef9ba3e46fb8e9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_gemm_kernel_base.h" - -namespace kernel_selector { -class LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16 : public LSTMGemmKernelBase { -public: - LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16() : LSTMGemmKernelBase("lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16") {} - virtual ~LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16() {} - - KernelsData GetKernelsData(const Params& params, const optional_params& options) 
const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp deleted file mode 100644 index baa58ebde3fd9e..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F16); - k.EnableDifferentTypes(); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableLSTMGEMMHidden(); - return k; -} - -DeviceFeaturesKey LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::get_required_device_features_key(const Params& params, const optional_params& options) const { - DeviceFeaturesKey k; - k.requires_subgroups(); - k.requires_subgroup_shuffle(); - - return k; -} - -KernelsData LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetKernelsData(const Params& params, - const optional_params& options) const { - KernelsData kernelsData = GetCommonKernelsData(params, options); - auto& kernel = kernelsData[0].kernels[0]; - - // This kernel is good if - // 1) Batch size is 1 - // 2) The input size y-x size is 64x1 - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - const auto& out = orgParams.outputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - kernel.params.workGroups.global = {16, out.X().v, out.Batch().v}; - - return kernelsData; -} - -KernelsPriority LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetKernelsPriority(const Params& params, const optional_params& /*options*/) const { - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - return FORCE_PRIORITY_1; - else - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h deleted file mode 100644 index 795d4d1a70725d..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_gemm_kernel_base.h" - -namespace kernel_selector { -class LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16 : public LSTMGemmKernelBase { -public: - LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16() : LSTMGemmKernelBase("lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16") {} - virtual 
~LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16() {} - - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp deleted file mode 100644 index a7fbdf9c603ded..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_input_bfyx_opt.h" -#include "kernel_selector_utils.h" - -#include - -namespace kernel_selector { - -ParamsKey LSTM_DynamicInputKernelBfyxOpt::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputWeightsType(WeightsType::F32); - k.EnableInputWeightsType(WeightsType::F16); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableNonBiasTerm(); - k.EnableBiasPerFeature(); - k.EnableBiasPerOutput(); - return k; -} - -DeviceFeaturesKey LSTM_DynamicInputKernelBfyxOpt::get_required_device_features_key(const Params& params, const optional_params& options) const { - auto k = get_common_subgroups_device_features_key(params, options); - k.requires_subgroup_shuffle(); - - return k; -} - -bool LSTM_DynamicInputKernelBfyxOpt::Validate(const Params & p, const optional_params & o) const { - if (!LSTM_DynamicInputKernelBase::Validate(p, o)) { - return false; - } - - const auto& params = static_cast(p); - - const auto& weights = params.weights; - const auto weights_x = weights.X().v; - const auto weights_y = weights.Y().v; - const auto& input = params.inputs[0]; - const auto& out = params.outputs[0]; - - bool input_X_div_by_8 = input.X().v % 8 == 0; - bool weights_X_div_by_8 = weights_x % 8 == 0; - bool weights_Y_div_by_8_x_simd_size = weights_y % (8 * simd_size) == 0; - bool gws0_size = out.X().v / simd_size <= 512; // ToDo remove condition and update .cl code for bigger gws0 - - if (!input_X_div_by_8 || - !weights_X_div_by_8 || - !weights_Y_div_by_8_x_simd_size || - !gws0_size) - return false; - return true; -} - -KernelsData LSTM_DynamicInputKernelBfyxOpt::GetKernelsData(const Params& params, const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - DispatchData dispatchData; - - KernelData kd = KernelData::Default(params); - lstm_dynamic_input_params& dlstm_params = *static_cast(kd.params.get()); - - auto in_layout = dlstm_params.inputs[0].GetLayout(); - auto out_layout = dlstm_params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X }, - { Tensor::DataChannelName::Y, Tensor::DataChannelName::BATCH }, - { Tensor::DataChannelName::FEATURE }}; - - const auto& out = dlstm_params.outputs[0]; - auto hidden_size = out.X().v; - - 
dispatchData.gws = { hidden_size / simd_size, out.Batch().v * out.Y().v, out.Feature().v }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - bool succeed = UpdateWeightsParams(dlstm_params, - options, - WeightsLayout::dlstm_dir_io, - kd.weightsReorderParams, - GetSupportedKey()); - - if (!succeed) { - return {}; - } - - auto cldnn_jit = GetJitConstants(dlstm_params); - auto entry_point = GetEntryPoint(kernelName, dlstm_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = kd.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.params.workGroups.local = dispatchData.lws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(dlstm_params, kernel); - - return { kd }; -} - -KernelsPriority LSTM_DynamicInputKernelBfyxOpt::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return FORCE_PRIORITY_5; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h deleted file mode 100644 index ed989beaba814f..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_input_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicInputKernelBfyxOpt : public LSTM_DynamicInputKernelBase { -public: - LSTM_DynamicInputKernelBfyxOpt() : LSTM_DynamicInputKernelBase("lstm_dynamic_input_bfyx_opt") {} - - virtual ~LSTM_DynamicInputKernelBfyxOpt() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; - -protected: - bool Validate(const Params& p, const optional_params& o) const override; - -private: - const uint32_t simd_size = 8; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp deleted file mode 100644 index 4c47aed7d50ba9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_input_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" -#include - -namespace kernel_selector { -JitConstants LSTM_DynamicInputKernelBase::GetJitConstants(const lstm_dynamic_input_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - jit.AddConstants({MakeJitConstant("WEIGHTS", params.weights), - MakeJitConstant("DYN_LENGTH", params.inputs.at(1)), - MakeJitConstant("MAX_SEQUENCE_LENGTH", params.inputs.at(0).Feature().v)}); - - // [2] Optionals - if (!params.bias.empty()) { - 
jit.AddConstants({MakeJitConstant("BIAS", params.bias[0]), MakeJitConstant("BIAS_TERM", true)}); - } - - return jit; -} - -LSTM_DynamicInputKernelBase::DispatchData LSTM_DynamicInputKernelBase::SetDefault( - const lstm_dynamic_input_params& params) { - DispatchData dispatchData; - auto in_layout = params.inputs[0].GetLayout(); - auto out_layout = params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X }, - { Tensor::DataChannelName::Y, Tensor::DataChannelName::BATCH }, - { Tensor::DataChannelName::FEATURE }}; - - const auto& out = params.outputs[0]; - - // 4 * hidden, batch * dir, seq_len - dispatchData.gws = { out.X().v, out.Batch().v * out.Y().v, out.Feature().v }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - return dispatchData; -} - -void kernel_selector::LSTM_DynamicInputKernelBase::SetKernelArguments(const lstm_dynamic_input_params& params, clKernelData& kernel) const { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, 1 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::OUTPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::WEIGHTS, 0 }); - if (!params.bias.empty()) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::BIAS, 0 }); - } -} - -KernelsData LSTM_DynamicInputKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_dynamic_input_params& orgParams = static_cast(params); - - auto dispatchData = SetDefault(orgParams); - KernelData k_data = KernelData::Default(params, 1); - - auto cldnn_jit = GetJitConstants(orgParams); - auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(orgParams, kernel); - - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h deleted file mode 100644 index 069942d657d479..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "weight_bias_params.h" -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_input_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_input_params : public weight_bias_params { - lstm_dynamic_input_params() : weight_bias_params(KernelType::LSTM_DYNAMIC_INPUT) {} - - int32_t direction = 1; -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_input_optional_params 
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_input_optional_params : weight_bias_optional_params { - lstm_dynamic_input_optional_params() : weight_bias_optional_params(KernelType::LSTM_DYNAMIC_INPUT) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTM_DynamicInputKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTM_DynamicInputKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTM_DynamicInputKernelBase() {} - - struct DispatchData : public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_dynamic_input_params& params) const; - static DispatchData SetDefault(const lstm_dynamic_input_params& params); - KernelsData GetCommonKernelsData(const Params& params, - const optional_params& optParams) const; - void SetKernelArguments(const lstm_dynamic_input_params& params, clKernelData& k_data) const; - - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_DYNAMIC_INPUT) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp deleted file mode 100644 index 8d013efc8f56dc..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_input_kernel_selector.h" -#include "lstm_dynamic_input_ref_kernel.h" -#include "lstm_dynamic_input_bfyx_opt.h" - -namespace kernel_selector { -lstm_dynamic_input_kernel_selector::lstm_dynamic_input_kernel_selector() { - Attach(); - Attach(); -} - -KernelsData lstm_dynamic_input_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::LSTM_DYNAMIC_INPUT); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h deleted file mode 100644 index 57f7571bb395bb..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class lstm_dynamic_input_kernel_selector : public kernel_selector_base { -public: - static lstm_dynamic_input_kernel_selector& Instance() { - static lstm_dynamic_input_kernel_selector instance_; - return instance_; - } - - lstm_dynamic_input_kernel_selector(); - - virtual ~lstm_dynamic_input_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp 
b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp deleted file mode 100644 index 1b5214da61e31e..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_input_ref_kernel.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTM_DynamicInputKernelRef::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputWeightsType(WeightsType::F16); - k.EnableInputWeightsType(WeightsType::F32); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableNonBiasTerm(); - k.EnableBiasPerFeature(); - k.EnableBiasPerOutput(); - return k; -} - -KernelsData LSTM_DynamicInputKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority LSTM_DynamicInputKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return DONT_USE_IF_HAVE_SOMETHING_ELSE; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h deleted file mode 100644 index 5c711c2662b764..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_input_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicInputKernelRef : public LSTM_DynamicInputKernelBase { -public: - LSTM_DynamicInputKernelRef() : LSTM_DynamicInputKernelBase("lstm_dynamic_input_ref") {} - - virtual ~LSTM_DynamicInputKernelRef() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - -protected: - ParamsKey GetSupportedKey() const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp deleted file mode 100644 index 2b3a4cebe0532f..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_timeloop_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" -#include -#include - -namespace kernel_selector { -JitConstants LSTM_DynamicTimeloopKernelBase::GetJitConstants(const lstm_dynamic_timeloop_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - const auto& out = params.outputs[0]; - size_t hidden_size = 
out.X().v; - - // [1] Certainties - jit.AddConstants({ - // IE default: fizo - MakeJitConstant("GEMM_OFFSET_I", 1 * hidden_size), - MakeJitConstant("GEMM_OFFSET_O", 3 * hidden_size), - MakeJitConstant("GEMM_OFFSET_F", 0 * hidden_size), - MakeJitConstant("GEMM_OFFSET_Z", 2 * hidden_size), - }); - - jit.AddConstants({MakeJitConstant("RECURRENT", params.recurrent), - MakeJitConstant("DYN_LENGTH", params.inputs.at(1)), - MakeJitConstant("HIDDEN_SIZE", hidden_size), - MakeJitConstant("MAX_SEQUENCE_LENGTH", params.inputs.at(0).Feature().v), - MakeJitConstant("ELEMENTS_TO_COUNT", hidden_size > 256 ? hidden_size / 256 : 1)}); - - if (params.has_hidden) { - const auto& hidden = params.hidden; - jit.AddConstants({ - MakeJitConstant("INIT_HIDDEN_TERM", true), - MakeJitConstant("INIT_HIDDEN", hidden), - }); - } - - if (params.has_cell) { - const auto& cell = params.cell; - jit.AddConstants({ - MakeJitConstant("INIT_CELL_TERM", true), - MakeJitConstant("INIT_CELL", cell), - }); - } - - if (params.clip > 0) { - std::string psclip = toCodeString(params.clip); - std::string nsclip = toCodeString(-params.clip); - jit.AddConstants( - {MakeJitConstant("CLIP(x)", - "((x > " + psclip + ") ? " + psclip + ": (x < " + nsclip + ") ? " + nsclip + " : (x))")}); - } else { - jit.AddConstants({MakeJitConstant("CLIP(x)", "(x)")}); - } - if (params.input_forget) { - jit.AddConstants({MakeJitConstant("INPUT_FORGET", true)}); - } - - if (params.has_last_hidden_output) { - jit.AddConstants( - {MakeJitConstant("LAST_HIDDEN", params.last_hidden_output), MakeJitConstant("LAST_HIDDEN_TERM", true)}); - } - - if (params.has_last_cell_output) { - jit.AddConstants( - {MakeJitConstant("LAST_CELL", params.last_cell_output), MakeJitConstant("LAST_CELL_TERM", true)}); - } - - return jit; -} - -LSTM_DynamicTimeloopKernelBase::DispatchData LSTM_DynamicTimeloopKernelBase::SetDefault( - const lstm_dynamic_timeloop_params& params) { - DispatchData dispatchData; - const auto& out = params.outputs[0]; - - auto out_x_size = out.X().v; - auto gws0 = out_x_size > 256 ? 
256 : out_x_size; - dispatchData.gws = { gws0, out.Batch().v, static_cast(params.direction) }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - - return dispatchData; -} - -void kernel_selector::LSTM_DynamicTimeloopKernelBase::SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& kernel) const { - uint32_t input_idx = 0; - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::OUTPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::RECURRENT, 0 }); - if (params.has_hidden) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::HIDDEN, 0 }); - } - if (params.has_cell) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::CELL, 0 }); - } - if (params.has_last_hidden_output) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - } - if (params.has_last_cell_output) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - } -} - - -KernelsData LSTM_DynamicTimeloopKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_dynamic_timeloop_params& org_params = static_cast(params); - - auto dispatchData = SetDefault(org_params); - KernelData k_data = KernelData::Default(params, 1); - - auto cldnn_jit = GetJitConstants(org_params); - auto entry_point = GetEntryPoint(kernelName, org_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.params.workGroups.local = dispatchData.lws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(org_params, kernel); - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h deleted file mode 100644 index 6bd56ed8f91c65..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_timeloop_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_timeloop_params : public base_params { - lstm_dynamic_timeloop_params() : base_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {} - - DataTensor recurrent; - DataTensor hidden; - DataTensor cell; - DataTensor last_hidden_output; - DataTensor last_cell_output; - - float clip = 0.0f; - bool input_forget = false; - bool has_hidden = false; - bool has_cell = false; - bool has_last_hidden_output = false; - bool has_last_cell_output = false; - int32_t direction = 1; - - void set_hidden(const DataTensor& v) { - hidden = v; - has_hidden = true; 
- } - - void set_cell(const DataTensor& v) { - cell = v; - has_cell = true; - } - - void set_last_hidden_output(const DataTensor& v) { - last_hidden_output = v; - has_last_hidden_output = true; - } - - void set_last_cell_output(const DataTensor& v) { - last_cell_output = v; - has_last_cell_output = true; - } - - ParamsKey GetParamsKey() const override { - ParamsKey k = base_params::GetParamsKey(); - - if (has_hidden) { - k.EnableLSTMGEMMHidden(); - } - - if (has_cell) { - k.EnableLSTMEltCell(); - } - - if (has_last_hidden_output) { - k.EnableLSTMDyanmicOptionalHiddenOutput(); - } - - if (has_last_cell_output) { - k.EnableLSTMDyanmicOptionalCellOutput(); - } - - return k; - } -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_timeloop_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_optional_params : optional_params { - lstm_dynamic_optional_params() : optional_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTM_DynamicTimeloopKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTM_DynamicTimeloopKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTM_DynamicTimeloopKernelBase() {} - - struct DispatchData : public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_dynamic_timeloop_params& params) const; - static DispatchData SetDefault(const lstm_dynamic_timeloop_params& params); - KernelsData GetCommonKernelsData(const Params& params, - const optional_params& optParams) const; - void SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& k_data) const; - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_DYNAMIC_TIMELOOP) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp deleted file mode 100644 index 0fe024ce448249..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_timeloop_kernel_selector.h" -#include "lstm_dynamic_timeloop_ref_kernel.h" - -namespace kernel_selector { -lstm_dynamic_timeloop_kernel_selector::lstm_dynamic_timeloop_kernel_selector() { - Attach(); -} - -KernelsData lstm_dynamic_timeloop_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::LSTM_DYNAMIC_TIMELOOP); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h deleted file mode 100644 index 45e02422ec5e89..00000000000000 --- 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class lstm_dynamic_timeloop_kernel_selector : public kernel_selector_base { -public: - static lstm_dynamic_timeloop_kernel_selector& Instance() { - static lstm_dynamic_timeloop_kernel_selector instance_; - return instance_; - } - - lstm_dynamic_timeloop_kernel_selector(); - - virtual ~lstm_dynamic_timeloop_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp deleted file mode 100644 index ce120ad1091328..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTM_DynamicTimeloopKernelRef::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMEltCell(); - k.EnableLSTMGEMMHidden(); - k.EnableLSTMDyanmicOptionalCellOutput(); - k.EnableLSTMDyanmicOptionalHiddenOutput(); - return k; -} - -KernelsData LSTM_DynamicTimeloopKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority LSTM_DynamicTimeloopKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return DONT_USE_IF_HAVE_SOMETHING_ELSE; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h deleted file mode 100644 index 215f985503cdf3..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_timeloop_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicTimeloopKernelRef : public LSTM_DynamicTimeloopKernelBase { -public: - LSTM_DynamicTimeloopKernelRef() : LSTM_DynamicTimeloopKernelBase("lstm_dynamic_timeloop_ref") {} - - virtual ~LSTM_DynamicTimeloopKernelRef() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - -protected: - ParamsKey GetSupportedKey() const override; -}; -} // namespace kernel_selector diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp deleted file mode 100644 index 28c4e94ba88603..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_base.h" -#include "kernel_selector_utils.h" -#include - -namespace kernel_selector { - -JitConstants PyramidROIAlignKernelBase::GetJitConstants(const PyramidROIAlign_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - jit.AddConstant(MakeJitConstant("IMAGE_SIZE_X", params.image_size_x)); - jit.AddConstant(MakeJitConstant("IMAGE_SIZE_Y", params.image_size_y)); - jit.AddConstant(MakeJitConstant("SAMPLING_RATIO_X", params.sampling_ratio_x)); - jit.AddConstant(MakeJitConstant("SAMPLING_RATIO_Y", params.sampling_ratio_y)); - jit.AddConstant(MakeJitConstant("PYRAMID_STARTING_LEVEL", params.pyramid_starting_level)); - - return jit; -} - -PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelBase::SetDefault(const PyramidROIAlign_params& params) const { - DispatchData dispatchData; - dispatchData.gws = {1, 1, 1}; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return dispatchData; -} - -KernelsData PyramidROIAlignKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - assert(params.GetType() == KernelType::PYRAMID_ROI_ALIGN); - - const auto& prim_params = - static_cast(params); - auto dispatchData = SetDefault(prim_params); - KernelData k_data = KernelData::Default(params); - auto cldnn_jit = GetJitConstants(prim_params); - auto entry_point = GetEntryPoint(kernelName, prim_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - FillCLKernelData(kernel, - dispatchData, - params.engineInfo, - kernelName, - jit, - entry_point, - "", - false, - false, - (uint32_t)prim_params.inputs.size()); - - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h deleted file mode 100644 index 52c1d810a56a54..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// PyramidROIAlign_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct PyramidROIAlign_params : public base_params { - PyramidROIAlign_params() : base_params(KernelType::PYRAMID_ROI_ALIGN), - image_size_x(1), image_size_y(1), sampling_ratio_x(1), sampling_ratio_y(1), - pyramid_starting_level(0) {} - - int image_size_x; - int image_size_y; - int sampling_ratio_x; - int sampling_ratio_y; - int pyramid_starting_level; -}; 
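-
-// These fields map one-to-one onto the JIT constants emitted by
-// GetJitConstants() above (IMAGE_SIZE_X, SAMPLING_RATIO_X, ...). A
-// hypothetical host-side setup for a 1024x1024 input image could look like:
-//     PyramidROIAlign_params p;
-//     p.image_size_x = 1024;
-//     p.image_size_y = 1024;
-//     p.sampling_ratio_x = 2;   // 0 would mean: derive from the box size
-//     p.sampling_ratio_y = 2;
-//     p.pyramid_starting_level = 2;   // e.g. when P2 is the first input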
- -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// index_select_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct PyramidROIAlign_optional_params : optional_params { - PyramidROIAlign_optional_params() : optional_params(KernelType::PYRAMID_ROI_ALIGN) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// PyramidROIAlignKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class PyramidROIAlignKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~PyramidROIAlignKernelBase() {} - - using DispatchData = CommonDispatchData; - -protected: - JitConstants GetJitConstants(const PyramidROIAlign_params& params) const; - virtual DispatchData SetDefault(const PyramidROIAlign_params& params) const; - KernelsData GetCommonKernelsData(const Params& params, const optional_params&) const; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp deleted file mode 100644 index 5492dd48c7e530..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_ref.h" -#include "kernel_selector_utils.h" - -#include - -namespace kernel_selector { -ParamsKey PyramidROIAlignKernelRef::GetSupportedKey() const { - ParamsKey k; - - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - - k.EnableOutputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - - k.EnableInputLayout(DataLayout::bfyx); - k.EnableInputLayout(DataLayout::yxfb); - k.EnableInputLayout(DataLayout::byxf); - - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::yxfb); - k.EnableOutputLayout(DataLayout::byxf); - - k.EnableBatching(); - k.EnableDifferentTypes(); - - return k; -} - -PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelRef::SetDefault(const PyramidROIAlign_params& params) const { - auto dispatchData = PyramidROIAlignKernelBase::SetDefault(params); - auto in_layout = params.inputs[0].GetLayout(); - auto out_layout = params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X, Tensor::DataChannelName::Y }, - { Tensor::DataChannelName::FEATURE }, - { Tensor::DataChannelName::BATCH }}; - - dispatchData.gws = { - params.outputs[0].X().v * params.outputs[0].Y().v, - params.outputs[0].Feature().v, - params.outputs[0].Batch().v }; - - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - return dispatchData; -} - -KernelsData PyramidROIAlignKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority PyramidROIAlignKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h deleted file mode 100644 index fcbcba2cf05155..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "pyramid_roi_align_kernel_base.h" - -namespace kernel_selector { -class PyramidROIAlignKernelRef : public PyramidROIAlignKernelBase { -public: - PyramidROIAlignKernelRef() : PyramidROIAlignKernelBase("pyramid_roi_align_gpu_ref") {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; -protected: - DispatchData SetDefault(const PyramidROIAlign_params& params) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp deleted file mode 100644 index 296b8ef986b071..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_selector.h" -#include "pyramid_roi_align_kernel_ref.h" - -namespace kernel_selector { -PyramidROIAlign_kernel_selector::PyramidROIAlign_kernel_selector() { Attach(); } - -KernelsData PyramidROIAlign_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::PYRAMID_ROI_ALIGN); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h deleted file mode 100644 index 42b807d792cd13..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class PyramidROIAlign_kernel_selector : public kernel_selector_base { -public: - static PyramidROIAlign_kernel_selector& Instance() { - static PyramidROIAlign_kernel_selector instance; - return instance; - } - - PyramidROIAlign_kernel_selector(); - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp index 719ff94aa32ca4..e6189addaf42fa 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp @@ -109,7 +109,6 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::winograd_6x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1 } }, { 
WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb, { 1, 0, -1, 3, 2, -1 } }, { WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb, { 3, 0, -1, 2, 1, -1 } }, - { WeightsLayout::dlstm_dir_io, { 1, 0, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } }, diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index 745c54e5689791..f78a1afe2a83be 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -134,7 +134,6 @@ enum WeightsLayout { // 3x3 with stride 1 image_2d_weights_winograd_6x3_s1_xfbyb, // image 2d winograd convolution weights for fused kernel, F(2, 3) --filter // 3x3 with stride 1 - dlstm_dir_io, // dlstm weights layout direction, input_size, 4* hiden_size os_is_yx_isa8_osv8_isv4, // for MMAD convolution os_is_zyx_isa8_osv8_isv4, // for MMAD convolution os_is_yx_isa8_osv16_isv4, // for fully connected MMAD @@ -316,7 +315,6 @@ inline bool SimpleLayout(WeightsLayout l) { case WeightsLayout::yxio: case WeightsLayout::oizyx: case WeightsLayout::iozyx: - case WeightsLayout::dlstm_dir_io: return true; default: return false; @@ -390,15 +388,6 @@ inline bool IsImageType(WeightsLayout l) { } } -inline bool IsDynamicLSTMType(WeightsLayout l) { - switch (l) { - case WeightsLayout::dlstm_dir_io: - return true; - default: - return false; - } -} - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Tensor Explanation //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/plugin/graph.cpp b/src/plugins/intel_gpu/src/plugin/graph.cpp index 1194f659d89ff6..0b1748b36ab76d 100644 --- a/src/plugins/intel_gpu/src/plugin/graph.cpp +++ b/src/plugins/intel_gpu/src/plugin/graph.cpp @@ -185,9 +185,7 @@ std::shared_ptr Graph::get_runtime_model(std::vector Graph::get_runtime_model(std::vector format_traits_map { FMT_TRAITS(image_2d_weights_winograd_6x3_s1_xfbyb, 1, 1, 2, 0, {3, 1, 0, 2}, "xioy", "oixy", {}), FMT_TRAITS(image_2d_weights_c4_fyx_b, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {}), FMT_TRAITS(image_2d_weights_c1_b_fyx, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {}), - FMT_TRAITS(lstm_weights_dio, 1, 1, 2, 0, {0, 1, 3, 2}, "oixy", "oixy", {}), FMT_TRAITS(os_is_yx_isa8_osv16_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 8}, {0, 16}, {1, 4}}), FMT_TRAITS(os_is_yx_osa4_isa8_osv8_isv2, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 8}, {0, 8}, {1, 2}}), FMT_TRAITS(os_is_yx_osa4_isa8_osv8_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 8}, {0, 8}, {1, 4}}), diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp index a34f386d0384cc..e89201de456218 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp @@ -172,12 +172,6 @@ void set_arguments_impl(ocl_kernel_type& kernel, } } break; - case args_t::RECURRENT: - status = set_kernel_arg(kernel, i, data.recurrent); - break; - case args_t::HIDDEN: - status = set_kernel_arg(kernel, i, data.hidden); - break; case args_t::CELL: status = set_kernel_arg(kernel, i, data.cell); break; diff --git 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp index 93ea0f2448d57b..98384ef39cfd7b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp @@ -116,6 +116,10 @@ std::vector ss_only_test_cases_fp32 = { { 5, 5, 5, 5 }})), { -1, 0, -1, 0 }, { -50, 0, -60, 0 }, { -1, 1, -1, 1 }, { 0, 0, 0, 0 }, { 0, 1, 0, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 4, 1 }})), + { 0, 0, 0, 0 }, { 2, 2, 4, 1 }, { 1, 1, 1, 1 }, + { 0 }, { 0 }, { 1 }, { 0 }, {0 } }, StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ { 128, 1, 1024 }})), { -1, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index b19fa7b250e9f8..5ba72f7ac0e99c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -88,5 +88,7 @@ std::vector disabledTestPatterns() { R"(smoke_Nms9LayerTest.*)", // Doesn't match reference results as v6 ref impl behavior is misaligned with expected R"(smoke_MemoryTestV3.*)", + // Issue: 129991 + R"(.*StridedSliceLayerTest.*TS=.*2.2.4.1*.*)", }; } diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp index 98169c608f2bfb..25b11ee28f5987 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp @@ -224,7 +224,6 @@ INSTANTIATE_TEST_SUITE_P(smoke, find_format_test, {{2, 3, 1, 0}, {}, true, false, false, false, false, format::yxio}, {{0, 1, 2, 3}, {{0, 16}}, true, false, false, false, false, format::os_iyx_osv16}, {{0, 1, 2, 3}, {}, true, false, false, true, false, format::winograd_2x3_s1_weights}, - {{0, 1, 3, 2}, {}, true, false, false, false, false, format::lstm_weights_dio}, {{0, 1, 2, 3}, {{1, 8}, {0, 8}, {1, 4}}, true, false, false, false, false, format::os_is_yx_isa8_osv8_isv4}, {{0, 1, 2, 3, 4}, {}, true, true, false, false, false, format::goiyx}, {{0, 2, 1, 3, 4}, {{1, 16}, {0, 16}}, true, true, false, false, false, format::g_is_os_yx_isv16_osv16}, diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index d6a672e1ce2f56..0be5445aa29584 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -118,52 +118,6 @@ TEST(prepare_buffer_fusing, static_node_after_optimized_out_dyn_reshape) { ASSERT_EQ(out_mem->get_layout().get_partial_shape(), expected_shape); } -TEST(prepare_buffer_fusing, propagate_data_padding) { - auto& engine = get_test_engine(); - - auto in_layout = layout{ ov::PartialShape{1, 4, 3, 3}, data_types::f32, format::bfyx }; - - std::vector> offsets; - std::vector inputs; - for (int i = 0; i < 2; i++) { - auto id = "crop_" + std::to_string(i); - 
inputs.push_back(input_info("split:" + id)); - offsets.push_back({ id, {0, (i * 2), 0, 0} }); - } - - topology topology; - topology.add(input_layout("input", in_layout)); - topology.add(split("split", input_info("input"), offsets)); - topology.add(reorder("crop_0_reorder", inputs[0], format::bfzyx, data_types::f32)); - topology.add(reorder("crop_1_reorder", inputs[1], format::bfzyx, data_types::f32)); - topology.add(concatenation("concat", {input_info("crop_0_reorder"), input_info("crop_1_reorder")}, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::f32)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - - cldnn::network net(engine, topology, config); - - auto in_mem = engine.allocate_memory(in_layout); - tests::set_random_values(in_mem); - - net.set_input_data("input", in_mem); - std::map output; - ASSERT_NO_THROW(output = net.execute()); - - auto out_mem = output.at("output").get_memory(); - - ASSERT_NE(out_mem, nullptr); - cldnn::mem_lock output_ptr(out_mem, get_test_stream()); - cldnn::mem_lock input_ptr(in_mem, get_test_stream()); - - ASSERT_EQ(input_ptr.size(), output_ptr.size()); - for (size_t i = 0; i < input_ptr.size(); ++i) - { - ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - TEST(prepare_buffer_fusing, in_place_concat_static) { auto& engine = get_test_engine(); auto in_layout1 = layout{ ov::PartialShape{1, 2, 3, 4}, data_types::f32, format::bfyx }; // => {1, 4, 3, 2} @@ -912,7 +866,7 @@ TEST(prepare_buffer_fusing, skip_in_place_concat_padding_in_non_concat_axis_of_d auto in2 = rg.generate_random_1d(input2_mem->count(), 0, 1); auto in3 = rg.generate_random_1d(input3_mem->count(), 0, 1); auto in4 = rg.generate_random_1d(input4_mem->count(), 0, 1); - + set_values(input1_mem, in1); set_values(input2_mem, in2); set_values(input3_mem, in3); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 414ab37f11295e..2430ad6f995ca3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -8874,8 +8874,6 @@ class convolution_test : public tests::generic_test { } static std::vector> generate_specific_test_params() { - // TODO: check split - // TODO: check convolution without bias const primitive_id& weights = "input1"; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp deleted file mode 100644 index a6a180a1ce7788..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp +++ /dev/null @@ -1,1005 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef _MSC_VER -#pragma warning( disable : 4503 ) -#endif - -#define MEASURE_PERF false -#define MEASURE_LOOP 50 -using namespace cldnn; -using namespace ::tests; - -namespace { - float sigmoid(float x) { - return 1.f / (1.f + (float)std::exp((float)(-x))); - } -} - -struct offset_order_dynamic { - size_t it, ot, ft, zt; - offset_order_dynamic(size_t scale, const lstm_weights_order& t = lstm_weights_order::fizo) { - static const std::map> offset_map{ - { lstm_weights_order::fizo, { 
1, 3, 0, 2 } }, - }; - std::vector v = offset_map.at(t); - it = v[0] * scale; - ot = v[1] * scale; - ft = v[2] * scale; - zt = v[3] * scale; - } -}; -lstm_weights_order default_offset_type_dynamic = lstm_weights_order::fizo; - -namespace dynamic_lstm -{ - template - T clip(T val, T threshold) { - if (threshold > 0) { - if (val > threshold) return threshold; - if (val < -threshold) return -threshold; - } - return val; - } - -template -VVVVF lstm_dynamic_input_ref(VVVVF& input, VVVVF& weights, VVVVF& bias, - VF dynamic_lengths, size_t seq, bool hasBias, size_t dir) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = weights[0][0].size() / 4; - size_t batch_size = input.size(); - - VVVVFoutput(batch_size, VVVF(seq, VVF(dir, VF(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) - { - for (size_t l = 0; l < seq; ++l) - { - if (l > static_cast(dynamic_lengths[b])) - break; - for (size_t d = 0; d < dir; ++d) - { - for (size_t y = 0; y < 4 * hidden_size; ++y) - { - T res = 0; - for (size_t x = 0; x < input_size; ++x) - { - res += (T)weights[0][d][y][x] * (T)input[b][l][d][x]; - } - if (hasBias) - { - res += (T)bias[0][0][d][y]; - } - output[b][l][d][y] = res; - } - } - } - } - return output; -} - - template - VVVVF lstm_gemm_reference(VVVVF& input, VVVVF& weights, VVVVF& recurrent, VVVVF& bias, VVVVF& hidden, - size_t seq, bool hasBias = true, bool hasHidden = true, size_t dir = 0, size_t input_dir = 0) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = hidden[0][0][0].size(); - size_t batch_size = input.size(); - - // Temporary output from GEMM operations [f, i, o, z] - VVVVF tempGEMM(batch_size, VVVF(1, VVF(1, VF(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) { - for (size_t y = 0; y < 4 * hidden_size; ++y) { - T res = 0; - for (size_t x = 0; x < input_size; ++x) { - res += (T)weights[0][dir][y][x] * (T)input[b][seq][input_dir][x]; - } - if (hasHidden) { - for (size_t x = 0; x < hidden_size; ++x) { - auto rec_v = (T)recurrent[0][dir][y][x]; - auto hid_v = (T)hidden[b][0][dir][x]; - auto temp = rec_v * hid_v; - res += temp; - } - } - if (hasBias) { - res += (T)bias[0][0][dir][y]; - } - tempGEMM[b][0][0][y] = res; - } - } - return tempGEMM; - } - - template - VVVVF lstm_elt_reference(VVVVF& tempGEMM, VVVVF& cell, - bool hasCell = true, float clip_threshold = 0, - bool input_forget = false, size_t dir = 0) - { - size_t hidden_size = tempGEMM[0][0][0].size() / 4; - size_t batch_size = tempGEMM.size(); - VVVVF tempOut(batch_size, VVVF(2, VVF(1, VF(hidden_size)))); - offset_order_dynamic off(hidden_size, default_offset_type_dynamic); - - for (size_t b = 0; b < batch_size; ++b) { - T *it = &tempGEMM[b][0][0][off.it]; - T *ot = &tempGEMM[b][0][0][off.ot]; - T *ft = &tempGEMM[b][0][0][off.ft]; - T *zt = &tempGEMM[b][0][0][off.zt]; - - for (size_t h = 0; h < hidden_size; ++h) { - - // Convert all inputs to float for all the elementwise operations. This is done to immitate - // how lstm kernel is performing the elementwise operations. - float fp32_it = (float)it[h]; - float fp32_ot = (float)ot[h]; - float fp32_ft = (float)ft[h]; - float fp32_zt = (float)zt[h]; - float val = sigmoid(clip(fp32_it, clip_threshold)) * std::tanh(clip(fp32_zt, clip_threshold)); - - if (input_forget) { - val *= (1 - fp32_ft); - } - if (hasCell) { - val += (float)cell[b][0][dir][h] * sigmoid(clip(fp32_ft, clip_threshold)); - } - - // Convert back to output data type before storing it into the output buffer. 
Currently, the output - // data type may be float or ov::float16 (half) - tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); - tempOut[b][1][0][h] = (T)val; - } - } - return tempOut; - } - - template - void lstm_dynamic_reference(VVVVF& input, VVVVF& hidden, VVVVF& cell, - VVVVF& weights, VVVVF& recurrent, VVVVF& bias, - VVVVF& output_hidden, VVVVF& output_cell, - bool hasBias = true, bool hasInitialHidden = true, bool hasInitialCell = true, - float clip_threshold = 0, bool input_forget = false) - { - size_t sequence_len = input[0].size(); - size_t dir_len = weights[0].size(); - size_t batch = input.size(); - for (size_t dir = 0; dir < dir_len; ++dir) { - bool tempHasInitialHidden = hasInitialHidden; - bool tempHasInitialCell = hasInitialCell; - for (size_t seq = 0; seq < sequence_len; ++seq) { - size_t seq_id = seq; - size_t input_direction = dir; - VVVVF tempGEMM = lstm_gemm_reference(input, weights, recurrent, bias, hidden, seq_id, hasBias, tempHasInitialHidden, dir, input_direction); - VVVVF tempOutput = lstm_elt_reference(tempGEMM, cell, tempHasInitialCell, clip_threshold, input_forget, dir); - // tempOutput[batch][0] = hidden and tempOutput[batch][1] = cell - for (size_t i = 0; i < batch; i++) { - output_hidden[i][seq][dir] = tempOutput[i][0][0]; - output_cell[i][seq][dir] = tempOutput[i][1][0]; - hidden[i][0][dir] = tempOutput[i][0][0]; - cell[i][0][dir] = tempOutput[i][1][0]; - } - tempHasInitialHidden = true; - tempHasInitialCell = true; - } - } - } -} -template -struct lstm_dynamic_input_layer_test : public ::testing::Test -{ - tests::random_generator rg; - - void SetUp() override { - rg.set_seed(GET_SUITE_NAME); - } - - void input_single_layer_generic_test(int32_t direction, int32_t batch_size, int32_t max_sequence_len, int32_t input_size, int32_t hidden_size, std::vector dynamic_lengths, - bool has_bias = false) - { - auto min_random = -2, max_random = 2; - VVVVF ref_input = rg.generate_random_4d(batch_size, max_sequence_len, direction, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - - auto& engine = get_test_engine(); - VF ref_dynamic_length; - for (auto& v : dynamic_lengths) - ref_dynamic_length.push_back((T)v); - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, direction } }); - set_values(input_mem, ref_input_vec); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - set_values(weights_mem, ref_weights_vec); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - set_values(dynamic_length_mem, ref_dynamic_length); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - set_values(bias_mem, ref_bias_vec); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - - std::string bias_id = ""; - if (has_bias) { - bias_id = "bias"; - topology.add(data(bias_id, bias_mem)); - } - - topology.add(lstm_dynamic_input("dynamic_lstm_input", - input_info("input"), - "dyn_len", - "weights", - bias_id)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - -#if MEASURE_PERF == true - using clock = std::chrono::high_resolution_clock; - std::vector times(MEASURE_LOOP); - for (uint32_t i = 0; i < MEASURE_LOOP; i++) - { - auto t0 = clock::now(); - network.set_input_data("input", input_mem); - network.set_input_data("dynamic_lstm_input", dynamic_length_mem); - auto real_outs = network.execute(); - real_outs.at("dynamic_lstm_input").get_event().wait(); - auto t1 = clock::now(); - auto exec_time = t1 - t0; - times[i] = exec_time; - } - std::sort(times.begin(), times.end()); - std::nth_element(times.begin(), times.begin() + times.size() / 2, times.end()); - std::cout << "Perf: " << std::chrono::duration_cast(times[times.size() / 2]).count() << " micros. 
" << std::endl; -#else - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - - auto outputs = network.execute(); - auto out = outputs.at("dynamic_lstm_input"); - auto out_layout = out.get_memory()->get_layout(); - cldnn::mem_lock out_ptr(out.get_memory(), get_test_stream()); - - - auto output_ref = dynamic_lstm::lstm_dynamic_input_ref(ref_input, ref_weights, ref_bias, dynamic_lengths, max_sequence_len, has_bias, direction); - - size_t i = 0; - for (auto b = 0; b < out_layout.batch(); b++) - { - for (auto len = 0; len < max_sequence_len; len++) - { - for (auto dir = 0; dir < direction; dir++) - { - for (auto x = 0; x < out_layout.spatial(0); x++) - { - ASSERT_NEAR(output_ref[b][len][dir][x], (float)out_ptr[i++], 1e-3f) - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - } - } - } -#endif - } -}; - -template -struct lstm_dynamic_single_layer_test : public ::testing::Test -{ - tests::random_generator rg; - - void SetUp() override { - rg.set_seed(GET_SUITE_NAME); - } - - void single_layer_generic_test(int32_t direction, int32_t batch_size, int32_t max_sequence_len, int32_t input_size, int32_t hidden_size, std::vector dynamic_lengths, - bool has_bias = false, bool has_initial_hidden = false, bool has_initial_cell = false, bool has_last_hidden_state = false, bool has_last_cell_state = false, float epsilon = 1e-3f) - { - float clip_threshold = 0; - bool input_forget = false; - - auto min_random = 0, max_random = 2; - VVVVF ref_input = rg.generate_random_4d(batch_size, max_sequence_len, direction, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random); - VVVVF ref_output_hidden = VVVVF(batch_size, VVVF(max_sequence_len, VVF(direction, VF(hidden_size)))); - VVVVF ref_output_cell = VVVVF(batch_size, VVVF(max_sequence_len, VVF(direction, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - auto& engine = get_test_engine(); - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - VF ref_dynamic_length; - for (auto& v : dynamic_lengths) - ref_dynamic_length.push_back((T)v); - - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, direction } }); - set_values(input_mem, ref_input_vec); - - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - set_values(weights_mem, ref_weights_vec); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - set_values(recurrent_mem, ref_recurrent_vec); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - set_values(dynamic_length_mem, ref_dynamic_length); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - set_values(bias_mem, ref_bias_vec); - auto initial_hidden_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - set_values(initial_hidden_mem, ref_hidden_vec); - auto initial_cell_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - set_values(initial_cell_mem, ref_cell_vec); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - - std::string bias_id = ""; - if (has_bias) - { - bias_id = "bias"; - topology.add(data(bias_id, bias_mem)); - } - - std::string initial_hidden_id = ""; - if (has_initial_hidden) - { - initial_hidden_id = "initial_hidden"; - topology.add(data(initial_hidden_id, initial_hidden_mem)); - } - - std::string initial_cell_id = ""; - if (has_initial_cell) - { - initial_cell_id = "initial_cell"; - topology.add(data(initial_cell_id, initial_cell_mem)); - } - - std::string last_hidden_state = ""; - auto last_hidden_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - last_hidden_mem->fill(get_test_stream()); - get_test_stream().finish(); - if (has_last_hidden_state) - { - last_hidden_state = "last_hidden_state"; - topology.add(mutable_data(last_hidden_state, last_hidden_mem)); - } - - std::string last_cell_state = ""; - auto last_cell_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - last_cell_mem->fill(get_test_stream()); - get_test_stream().finish(); - if (has_last_cell_state) - { - last_cell_state = "last_cell_state"; - topology.add(mutable_data(last_cell_state, last_cell_mem)); - } - - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent", - last_hidden_state, - last_cell_state, - bias_id, - initial_hidden_id, - initial_cell_id)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - -#if MEASURE_PERF == true - using clock = std::chrono::high_resolution_clock; - std::vector times(MEASURE_LOOP); - for (uint32_t i = 0; i < MEASURE_LOOP; i++) - { - auto t0 = clock::now(); - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - auto real_outs = network.execute(); - real_outs.at("dynamic_lstm").get_event().wait(); - auto t1 = 
clock::now(); - auto exec_time = t1 - t0; - times[i] = exec_time; - } - std::sort(times.begin(), times.end()); - std::nth_element(times.begin(), times.begin() + times.size() / 2, times.end()); - std::cout << "Perf: " << std::chrono::duration_cast(times[times.size() / 2]).count() << " micros. " << std::endl; -#else - dynamic_lstm::lstm_dynamic_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output_hidden, - ref_output_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget); - auto real_outs = network.execute(); - auto out = real_outs.at("dynamic_lstm"); - auto out_layout = out.get_memory()->get_layout(); - - cldnn::mem_lock out_ptr(out.get_memory(), get_test_stream()); - cldnn::mem_lock last_hidden_ptr(last_hidden_mem, get_test_stream()); - cldnn::mem_lock last_cell_ptr(last_cell_mem, get_test_stream()); - size_t i = 0, i_lh = 0, i_lc = 0; - for (auto b = 0; b < out_layout.batch(); b++) - { - for (auto len = 0; len < max_sequence_len; len++) - { - for (auto dir = 0; dir < direction; dir++) - { - for (auto x = 0; x < out_layout.spatial(0); x++) - { - //check hidden - if (len < dynamic_lengths[b]) - { - ASSERT_NEAR((float)ref_output_hidden[b][len][dir][x], (float)out_ptr[i++], epsilon) - << "check hidden, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - else - { - ASSERT_NEAR(0.0f, (float)out_ptr[i++], epsilon) - << "check hidden, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - - //check optional last hidden state output - if(has_last_hidden_state && len == dynamic_lengths[b] - 1) - { - auto ratio = (float)ref_output_hidden[b][len][dir][x] / (float)last_hidden_ptr[i_lh++]; - ASSERT_TRUE(std::abs(1.0f - ratio) < 0.01f) - << "check has_last_hidden_state with ratio: " << ratio << ", " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - - } - else if (has_last_hidden_state && len == 0 && dynamic_lengths[b] == 0) - { - ASSERT_NEAR(0.0f, (float)last_hidden_ptr[i_lh++], epsilon) - << "check has_last_hidden_state, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - - //check optional last cell state output - if(has_last_cell_state && len == dynamic_lengths[b] - 1) - { - auto ratio = (float)ref_output_cell[b][len][dir][x] / (float)last_cell_ptr[i_lc++]; - ASSERT_TRUE(std::abs(1.0f - ratio) < 0.01f) - << "check has_last_cell_state with ratio: " << ratio << ", " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - else if (has_last_cell_state && len == 0 && dynamic_lengths[b] == 0) - { - ASSERT_NEAR(0.0f, (float)last_cell_ptr[i_lc++], epsilon) - << "check has_last_cell_state, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - } - } - } - } -#endif - } - -}; -typedef ::testing::Types lstm_dynamic_test_types; -TYPED_TEST_SUITE(lstm_dynamic_single_layer_test, lstm_dynamic_test_types); -TYPED_TEST_SUITE(lstm_dynamic_input_layer_test, lstm_dynamic_test_types); - -/* ----------------------------------------------- - DYNAMIC_LSTM INPUT TEST ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b1_seq3_is3_hs2) -{ - auto dir = 1, batch_size = 1, max_seq_len = 5, 
input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 3 }; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b3_seq5_is3_hs2) -{ - auto dir = 1, batch_size = 3, max_seq_len = 5, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 3, 4, 2 }; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, b10_seq20_is16_hs64) -{ - auto dir = 1, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b8_seq10_is4_hs16) -{ - auto batch_size = 8, max_seq_len = 10, input_size = 4, hidden_size = 16; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 7, 8}; - auto dir = 1; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir2_b8_seq10_is4_hs16_options) -{ - auto batch_size = 8, max_seq_len = 10, input_size = 4, hidden_size = 16; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 7, 8 }; - auto dir = 2; - std::vector bias_options = { true, false }; - for (auto bias : bias_options) - { - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, bias); - } -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_1b1_seq1_is32_hs_128) -{ - auto dir = 1, batch = 1, max_seq_len = 1, input_size = 32, hidden_size = 128; - std::vector dynamic_lengths = - { - 1 - }; - bool bias = true; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir_b8_seq27_is16_hs_56) -{ - auto dir = 1, batch = 8, max_seq_len = 27, input_size = 16, hidden_size = 56; - std::vector dynamic_lengths = - { - 20, 25, 24, 10, 15, 8, 19, 26 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, false); -} - - -/* ----------------------------------------------- - FULL DYNAMIC_LSTM TESTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq1_is3_hs2) -{ - auto dir = 1, batch = 1, max_seq_len = 1, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq3_is3_hs2_options) -{ - auto dir = 1, batch = 1, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq10_is10_hs32) -{ - auto dir = 1, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - 
this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq10_is10_hs32_options) -{ - auto dir = 1, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b4_seq1_is3_hs2) -{ - auto dir = 1, batch = 2, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1, 2 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b4_seq3_is3_hs2_options) -{ - auto dir = 1, batch = 4, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1, 2, 2, 0 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b10_seq20_is16_hs64) -{ - auto dir = 1, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -// DISABLED beacuse it is veeery long -TYPED_TEST(lstm_dynamic_single_layer_test, DISABLED_b16_seq20_is32_hs32_options) -{ - auto dir = 1, batch = 16, max_seq_len = 20, input_size = 32, hidden_size = 32; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 19, 18 - }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - std::vector last_hidden_state = { true, false }; - std::vector last_cell_state = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - for (auto l_h_s : last_hidden_state) - { - for (auto l_c_s : last_cell_state) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c, l_h_s, l_c_s, 1e-2f); - } - } - } - } - } -} - -/* ----------------------------------------------- - BIDIRECTIONAL TESTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b2_seq7_is3_hs4) -{ - auto dir = 2, batch = 2, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 3, 5 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir_b1_seq1_is32_hs_512) -{ - auto dir = 2, batch = 1, max_seq_len = 1, input_size = 8, hidden_size = 128; - std::vector dynamic_lengths = - { - 1 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, 
dlstm_input_dir_b8_seq5_is32_hs_512) -{ - auto dir = 2, batch = 8, max_seq_len = 5, input_size = 8, hidden_size = 128; - std::vector dynamic_lengths = - { - 3, 4, 5, 1, 3, 2, 2, 3 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b10_seq7_is3_hs4) -{ - auto dir = 2, batch = 10, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 5, 4, 3, 2}; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b2_seq7_is3_hs4_options) -{ - auto dir = 2, batch = 2, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 3, 5 }; - std::vector bias_options = { false, true }; - std::vector init_hidden = { false, true }; - std::vector init_cell = { false, true}; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b1_seq10_is10_hs32) -{ - auto dir = 2, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b1_seq10_is10_hs32_options) -{ - auto dir = 2, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c, false, false, 1e-2f); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b10_seq20_is16_hs64) -{ - auto dir = 2, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b16_seq20_is4_hs8_options) -{ - auto dir = 2, batch = 16, max_seq_len = 20, input_size = 4, hidden_size = 8; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 14, 18 - }; - std::vector bias_options = { false, true }; - std::vector init_hidden = { false, true }; - std::vector init_cell = { false, true }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -/* ----------------------------------------------- - OPTIONAL OUTPUTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, b16_seq20_is4_hs8_dirs_optional_outputs) -{ - auto batch = 16, max_seq_len = 20, input_size = 4, hidden_size = 8; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 14, 18 - }; - this->single_layer_generic_test(1, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, false, false, 
false, true, true, 1e-3f); -} - -/* ----------------------------------------------- - NEGATIVE TESTS ----------------------------------------------- -*/ - -TEST(lstm_dynamic_negative, wrong_weights_size) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx, { batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, wrong_value } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_recurrent_size_0) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, wrong_value, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_recurrent_size_1) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, wrong_value, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - 
topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_dynamic_length_size_0) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, wrong_value, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_dynamic_length_size_1) { - - auto batch_size = 50, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 2; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, wrong_value, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp deleted file mode 100644 index 9d6cbc48aeddbc..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp +++ /dev/null @@ -1,2411 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#ifdef _MSC_VER -# pragma warning(disable: 4503) -#endif - -using namespace cldnn; -using namespace ::tests; 
- -#define FERROR 1E-4 - -namespace { -float sigmoid(float x) { - return 1.f / (1.f + (float)std::exp((float)(-x))); -} -struct offset_order { - size_t it, ot, ft, zt; - offset_order(size_t scale, const lstm_weights_order& t = lstm_weights_order::iofz) { - static const std::map> offset_map{ - { lstm_weights_order::iofz,{ 0, 1, 2, 3 } }, - { lstm_weights_order::ifoz,{ 0, 2, 1, 3 } } - }; - std::vector v = offset_map.at(t); - it = v[0] * scale; - ot = v[1] * scale; - ft = v[2] * scale; - zt = v[3] * scale; - } -}; -lstm_weights_order default_offset_type = lstm_weights_order::iofz; -template -T clip(T val, T threshold) { - if (threshold > 0) { - if (val > threshold) return threshold; - if (val < -threshold) return -threshold; - } - return val; -} - -template -VVVVF lstm_gemm_reference(VVVVF& input, VVVVF& weights, VVVVF& recurrent, VVVVF& bias, VVVVF& hidden, - size_t seq, bool hasBias = true, bool hasHidden = true, size_t dir = 0, size_t input_dir = 0) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = hidden[0][0][0].size(); - size_t batch_size = input.size(); - - // Temporary output from GEMM operations [f, i, o, z] - VVVVF tempGEMM(batch_size, VVVF(1, VVF(1, VF(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) { - for (size_t y = 0; y < 4 * hidden_size; ++y) { - T res = 0; - for (size_t x = 0; x < input_size; ++x) { - res += (T)weights[0][dir][y][x] * (T)input[b][seq][input_dir][x]; - } - if (hasHidden) { - for (size_t x = 0; x < hidden_size; ++x) { - res += (T)recurrent[0][dir][y][x] * (T)hidden[b][0][dir][x]; - } - } - if (hasBias) { - res += (T)bias[0][0][dir][y]; - } - tempGEMM[b][0][0][y] = res; - } - } - return tempGEMM; -} - -template -VVVVF lstm_elt_reference(VVVVF& tempGEMM, VVVVF& cell, - bool hasCell = true, float clip_threshold = 0, - bool input_forget = false, size_t dir = 0) -{ - size_t hidden_size = tempGEMM[0][0][0].size() / 4; - size_t batch_size = tempGEMM.size(); - VVVVF tempOut(batch_size, VVVF(2, VVF(1, VF(hidden_size)))); - offset_order off(hidden_size, default_offset_type); - - for (size_t b = 0; b < batch_size; ++b) { - T *it = &tempGEMM[b][0][0][off.it]; - T *ot = &tempGEMM[b][0][0][off.ot]; - T *ft = &tempGEMM[b][0][0][off.ft]; - T *zt = &tempGEMM[b][0][0][off.zt]; - - for (size_t h = 0; h < hidden_size; ++h) { - - // Convert all inputs to float for all the elementwise operations. This is done to immitate - // how lstm kernel is performing the elementwise operations. - float fp32_it = (float)it[h]; - float fp32_ot = (float)ot[h]; - float fp32_ft = (float)ft[h]; - float fp32_zt = (float)zt[h]; - float val = sigmoid(clip(fp32_it, clip_threshold)) * std::tanh(clip(fp32_zt, clip_threshold)); - - if (input_forget) { - val *= (1 - fp32_ft); - } - if (hasCell) { - val += (float)cell[b][0][dir][h] * sigmoid(clip(fp32_ft, clip_threshold)); - } - - // Convert back to output data type before storing it into the output buffer. 
Currently, the output - // data type may be float or ov::float16 (half) - tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); - tempOut[b][1][0][h] = (T)val; - } - } - return tempOut; -} - -template -void print(const std::string& s, VVVVF& input) { - printf("%s -------------\n", s.c_str()); - printf("Size = [%d, %d, %d, %d]\n", (int)input.size(), (int)input[0].size(), (int)input[0][0].size(), (int)input[0][0][0].size()); - for (size_t b = 0; b < input.size(); ++b) { - for (size_t f = 0; f < input[0].size(); ++f) { - for (size_t y = 0; y < input[0][0].size(); ++y) { - for (size_t x = 0; x < input[0][0][0].size(); ++x) { - printf("%f ", input[b][f][y][x]); - } - printf("\n"); - } - } - } - printf("---------------------------------------\n"); -} - -// input = [ batch, sequence, direction, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// cell = [ batch, direction, 1, hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, 1, 1, 4 * hidden_size ] temporary output -// output = [ batch, sequence, direction, hidden_size ] output -template -void lstm_reference(VVVVF& input, VVVVF& hidden, VVVVF& cell, - VVVVF& weights, VVVVF& recurrent, VVVVF& bias, - VVVVF& output, VVVVF& last_hidden, - VVVVF& last_cell, bool hasBias = true, - bool hasInitialHidden = true, bool hasInitialCell = true, - float clip_threshold = 0, bool input_forget = false, - bool scramble_input = true) -{ - size_t sequence_len = input[0].size(); - size_t dir_len = weights[0].size(); - size_t batch = input.size(); - size_t input_directions = input[0][0].size(); - for (size_t dir = 0; dir < dir_len; ++dir) { - bool tempHasInitialHidden = hasInitialHidden; - bool tempHasInitialCell = hasInitialCell; - for (size_t seq = 0; seq < sequence_len; ++seq) { - size_t seq_id = seq; - size_t input_direction = dir; - if (scramble_input) { - if (dir > 0) { - seq_id = input_directions == 1 ? 
sequence_len - seq - 1 : seq; - input_direction = input_directions - 1; - } - } - VVVVF tempGEMM = lstm_gemm_reference(input, weights, recurrent, bias, hidden, seq_id, hasBias, tempHasInitialHidden, dir, input_direction); - VVVVF tempOutput = lstm_elt_reference(tempGEMM, cell, tempHasInitialCell, clip_threshold, input_forget, dir); - // tempOutput[batch][0] = hidden and tempOutput[batch][1] = cell - for (size_t i = 0; i < batch; i++) { - output[i][seq][dir] = tempOutput[i][0][0]; - hidden[i][0][dir] = tempOutput[i][0][0]; - cell[i][0][dir] = tempOutput[i][1][0]; - } - tempHasInitialHidden = true; - tempHasInitialCell = true; - } - } - last_hidden = hidden; - last_cell = cell; -} - -template -void generic_lstm_gemm_gpu_test(int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasHidden, bool is_caching_test = false) { - int min_random = -2, max_random = 2; - - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - - VVVVF ref_output = lstm_gemm_reference(ref_input, ref_weights, ref_recurrent, ref_bias, ref_hidden, 0, hasBias, hasHidden); - - constexpr auto dt = std::is_same::value ? data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr input = engine.allocate_memory({ dt, format::bfyx, { batch_size, sequence_len, input_size, 1 } }); - memory::ptr weights = engine.allocate_memory({ dt, format::bfyx, { 1, direction, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ dt, format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ dt, format::bfyx, { 1, 1, 4 * hidden_size, direction } }); - memory::ptr hidden = engine.allocate_memory({ dt, format::bfyx, { batch_size, direction, hidden_size, 1 } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - if (hasBias) { - topology.add(data("biases", biases)); - } - if (hasHidden) { - topology.add(input_layout("hidden", hidden->get_layout())); - } - - topology.add(lstm_gemm("lstm_gemm", input_info("input"), "weights", "recurrent", hasBias ? "biases" : "", hasHidden ? 
"hidden" : "")); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - if (hasHidden) { - network->set_input_data("hidden", hidden); - } - - auto outputs = network->execute(); - ASSERT_EQ(outputs.size(), size_t(1)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - int i = 0; - for (int b = 0; b < batch_size; ++b) { - for (int x = 0; x < 4 * hidden_size; ++x) - ASSERT_FLOAT_EQ(ref_output[b][0][0][x], output_ptr[i++]); - } -} - -template -void generic_lstm_elt_gpu_test(int /* sequence_len */, int direction, int batch_size, - int /* input_size */, int hidden_size, bool hasCell, - T clip_threshold, bool input_forget, bool is_caching_test = false) { - // tempGEMM = [ 1, direction, batch, 4 * hidden_size ] input - // cell = [ 1, direction, batch, hidden_size ] optional - // output = [ 2, direction, batch, hidden_size ] output concat[hidden, cell] - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_tempGEMM = rg.generate_random_4d(batch_size, direction, 1, 4 * hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VF ref_tempGEMM_vec = flatten_4d(cldnn::format::bfyx, ref_tempGEMM); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - VVVVF ref_output = lstm_elt_reference(ref_tempGEMM, ref_cell, hasCell, clip_threshold, input_forget); - - // We observe some mismatch in down-converting from fp32 to fp16 - // between the reference implementation and opencl kernel. This can be - // a simple rounding error. Thus, for fp16 we are increasing our tolerance - // to error from 1E-4 to 1E-2 - constexpr float ferror = std::is_same::value ? (float)1E-4 : (float)1E-2; - constexpr auto dt = std::is_same::value ? data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr tempGEMM = engine.allocate_memory({ dt, format::bfyx,{ batch_size, direction, 4 * hidden_size, 1 } }); - memory::ptr cell = engine.allocate_memory({ dt, format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - set_values(tempGEMM, ref_tempGEMM_vec); - set_values(cell, ref_cell_vec); - - topology topology; - topology.add(input_layout("tempGEMM", tempGEMM->get_layout())); - if (hasCell) { - topology.add(input_layout("cell", cell->get_layout())); - } - topology.add(lstm_elt("lstm_elt", input_info("tempGEMM"), hasCell ? 
"cell" : "", clip_threshold, input_forget)); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("tempGEMM", tempGEMM); - if (hasCell) { - network->set_input_data("cell", cell); - } - - auto outputs = network->execute(); - ASSERT_EQ(outputs.size(), size_t(1)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - for (int b = 0; b < batch_size; ++b) { - for (int j = 0; j < 2; ++j) { - for (int x = 0; x < hidden_size; ++x) - { - auto idx = b * 2 * hidden_size + j * hidden_size + x; - ASSERT_NEAR(ref_output[b][j][0][x], output_ptr[idx] , ferror); - } - } - } -} - -std::string get_string_id(size_t i) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << i; - return ss.str(); -} - -// --------------- Manually constructed LSTM ---------------------------------------- -// This function manually generates an lstm node sequence by conbining lstm_gemm and lstm_elt nodes -// it requires that the output of the lstm_elt node is croped to obtain the corresponding hidden and cell outputs -void generate_lstm_topology(topology& t, memory::ptr input, memory::ptr hidden, memory::ptr cell, - memory::ptr weights, memory::ptr recurrent, memory::ptr biases, int sequence_len, - bool hasBias = true, bool hasInitialHidden = true, bool hasInitialCell = true) { - auto hidden_size = hidden->get_layout().get_tensor(); - t.add(input_layout("input", input->get_layout())); - std::vector> input_ids_offsets; - std::vector output_ids_offsets; - for (int i = 0; i < sequence_len; ++i) - input_ids_offsets.push_back({ get_string_id(i),{ 0, i, 0, 0 } }); - t.add(split("inputSplit", input_info("input"), input_ids_offsets)); - t.add(data("weights", weights)); - t.add(data("recurrent", recurrent)); - - std::string biasStr = ""; - std::string hiddenStr = ""; - std::string cellStr = ""; - if (hasBias) - { - t.add(data("biases", biases)); - biasStr = "biases"; - } - if (hasInitialHidden) - { - t.add(input_layout("hidden", hidden->get_layout())); - hiddenStr = "hidden"; - } - if (hasInitialCell) - { - t.add(input_layout("cell", cell->get_layout())); - cellStr = "cell"; - } - for (int i = 0; i < sequence_len; ++i) { - std::string lstm_gemm_id = "lstm_gemm" + get_string_id(i); - std::string lstm_elt_id = "lstm_elt" + get_string_id(i); - std::string crop_id = "crop" + get_string_id(i); - - t.add(lstm_gemm(lstm_gemm_id, input_info("inputSplit:" + get_string_id(i)), "weights", "recurrent", biasStr, hiddenStr)); - t.add(lstm_elt(lstm_elt_id, input_info(lstm_gemm_id), cellStr)); - - hiddenStr = crop_id + ":hidden"; - t.add(crop(hiddenStr, input_info(lstm_elt_id), hidden_size, tensor{ 0,0,0,0 })); - if (i < sequence_len - 1) { - cellStr = crop_id + ":cell"; - t.add(crop(cellStr, input_info(lstm_elt_id), hidden_size, tensor{ 0,1,0,0 })); - } - output_ids_offsets.push_back(input_info(hiddenStr)); - } - t.add(concatenation("concatenation", output_ids_offsets, 1)); -} - -template -void generic_lstm_custom_gpu_test(int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasInitialHidden, bool hasInitialCell, bool is_caching_test = false) { - std::cout << "Input Size = " << input_size << " Hidden Size = " << hidden_size << " Sequence Len = " << sequence_len << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - VVVVF ref_input = 
rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VVVVF ref_output(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size)))); - VVVVF last_hidden(batch_size, VVVF(direction, VVF(1, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(direction, VVF(1, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, last_hidden, last_cell, - hasBias, hasInitialHidden, hasInitialCell); - - auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, sequence_len, input_size, 1 } }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - topology topology; - generate_lstm_topology(topology, input, hidden, cell, weights, recurrent, biases, sequence_len, - hasBias, hasInitialHidden, hasInitialCell); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - if (hasInitialHidden) network->set_input_data("hidden", hidden); - if (hasInitialCell) network->set_input_data("cell", cell); - auto outputs = network->execute(); - - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - int i = 0; - for (int b = 0; b < batch_size; ++b) { - for (int s = 0; s < sequence_len; ++s) { - for (int x = 0; x < hidden_size; ++x) { - for (int d = 0; d < direction; ++d) { - ASSERT_NEAR(ref_output[b][s][d][x], output_ptr[i++], FERROR); - 
} - } - } - } -} - -// ------------------------------------------------------- -template -void generic_lstm_gpu_test(int layers, int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasInitialHidden, bool hasInitialCell, - T clip_threshold, bool input_forget, bool is_caching_test = false) { - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Direction = " << direction << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - - std::vector> ref_weights; - std::vector> ref_recurrent; - std::vector> ref_bias; - std::vector> ref_hidden; - std::vector> ref_cell; - std::vector> ref_output; - - for (int i = 0; i < layers; ++i) { - ref_weights.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, i==0 ? input_size : hidden_size, min_random, max_random)); - ref_recurrent.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random)); - ref_bias.push_back(rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random)); - ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_cell.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size))))); - } - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - std::vector> ref_weights_vec; - std::vector> ref_recurrent_vec; - std::vector> ref_bias_vec; - std::vector> ref_hidden_vec; - std::vector> ref_cell_vec; - for (int i = 0; i < layers; ++i) { - ref_weights_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[i])); - ref_recurrent_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[i])); - ref_bias_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[i])); - ref_hidden_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[i])); - ref_cell_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[i])); - } - - VVVVF last_hidden(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden[0], ref_cell[0], ref_weights[0], ref_recurrent[0], ref_bias[0], ref_output[0], - last_hidden, last_cell, hasBias, hasInitialHidden, hasInitialCell, - clip_threshold, input_forget, true); - - for (int i = 1; i < layers; ++i) { - lstm_reference(ref_output[i - 1], ref_hidden[i], ref_cell[i], ref_weights[i], ref_recurrent[i], - ref_bias[i], ref_output[i], - last_hidden, last_cell, hasBias, hasInitialHidden, hasInitialCell, - clip_threshold, input_forget, false); - } - - // We observe some mismatch in down-converting from fp32 to fp16 - // between the reference implementation and opencl kernel. This can be - // a simple rounding error. Thus, for fp16 we are increasing our tolerance - // to error from 1E-4 to 1E-2 - constexpr float ferror = std::is_same::value ? (float)1E-4 : (float)1E-2; - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr input = engine.allocate_memory({ dt, format::bfyx, {batch_size, sequence_len, input_size, 1} }); - set_values(input, ref_input_vec); - - std::vector weights; - std::vector recurrent; - std::vector biases; - std::vector hidden; - std::vector cell; - for(int i = 0; i < layers; ++i) { - weights.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, direction, i==0 ? input_size : hidden_size, 4 * hidden_size } })); - set_values(weights[i], ref_weights_vec[i]); - recurrent.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); - set_values(recurrent[i], ref_recurrent_vec[i]); - if (hasBias) { - biases.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, 1, 4 * hidden_size, direction } })); - set_values(biases[i], ref_bias_vec[i]); - } - if (hasInitialHidden) { - hidden.push_back(engine.allocate_memory({ dt, format::bfyx, { batch_size, 1, hidden_size, direction } })); - set_values(hidden[i], ref_hidden_vec[i]); - } - if (hasInitialCell) { - cell.push_back(engine.allocate_memory({ dt, format::bfyx, { batch_size, 1, hidden_size, direction} })); - set_values(cell[i], ref_cell_vec[i]); - } - } - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - cldnn::primitive_id prev_lstm_id; - for(int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - std::string lstm_id = "lstm" + sid; - std::string weights_id = "weights" + sid; - std::string recurrent_id = "recurrent" + sid; - std::string biases_id = "biases" + sid; - std::string hidden_id = "hidden" + sid; - std::string cell_id = "cell" + sid; - - topology.add(data(weights_id, weights[i])); - topology.add(data(recurrent_id, recurrent[i])); - if (hasBias) topology.add(data(biases_id, biases[i])); - if (hasInitialHidden) topology.add(input_layout(hidden_id, hidden[i]->get_layout())); - if (hasInitialCell) topology.add(input_layout(cell_id, cell[i]->get_layout())); - if (i == 0) { - topology.add(lstm(lstm_id, lstm_inputs, weights_id, recurrent_id, - hasBias ? biases_id : "", hasInitialHidden ? hidden_id : "", hasInitialCell ? cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence, default_offset_type)); - } - else { - topology.add(lstm(lstm_id, { input_info(prev_lstm_id) }, weights_id, recurrent_id, - hasBias ? biases_id : "", hasInitialHidden ? hidden_id : "", hasInitialCell ? 
cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence, default_offset_type)); - } - prev_lstm_id = lstm_id; - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - if (hasInitialHidden) network->set_input_data("hidden" + sid, hidden[i]); - if (hasInitialCell) network->set_input_data("cell" + sid, cell[i]); - } - auto outputs = network->execute(); - { - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - - // Get the output tensor - cldnn::layout output_layout = output->get_layout(); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(batch_size, output_layout.batch()); - ASSERT_EQ(sequence_len, output_layout.feature()); - ASSERT_EQ(direction, output_layout.spatial(1)); - ASSERT_EQ(hidden_size, output_layout.spatial(0)); - - cldnn::mem_lock output_ptr(output, get_test_stream()); - int32_t i = 0; - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < sequence_len; ++s) { - for (int32_t d = 0; d < direction; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - ASSERT_NEAR(ref_output[layers - 1][b][s][d][x], output_ptr[i++], ferror); - } - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_output_test(const lstm_output_selection& output_selection, int directions, bool is_caching_test = false) { - int layers = 1; - int sequence_len = 4; - int batch_size = 3; - int input_size = 3; - int hidden_size = 4; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, directions, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_output = VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - 
VVVVF last_hidden(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, - last_hidden, last_cell, true, true, true, - (T)0, false, true); - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - bool emit_last_cell = output_selection == lstm_output_selection::hidden_cell || - output_selection == lstm_output_selection::sequence_cell; - bool emit_last_hidden = output_selection == lstm_output_selection::hidden || - output_selection == lstm_output_selection::hidden_cell; - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", hidden->get_layout())); - topology.add(input_layout("cell", cell->get_layout())); - topology.add(lstm("lstm", lstm_inputs, "weights", "recurrent", - "biases", "hidden", "cell", "", 0, false, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection, default_offset_type)); - if (emit_last_cell) - { - int32_t concatenation_len = emit_last_hidden ? 2 : sequence_len + 1; - tensor hidden_tensor {batch_size, concatenation_len - 1, hidden_size, directions}; - tensor cell_tensor {batch_size, 1, hidden_size, directions}; - topology.add(crop(emit_last_hidden ? 
"crop:last_hidden" : "crop:sequence", input_info("lstm"), hidden_tensor, tensor{0, 0, 0, 0})); - topology.add(crop("crop:last_cell", input_info("lstm"), cell_tensor, tensor{0, concatenation_len - 1, 0, 0})); - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - network->set_input_data("hidden", hidden); - network->set_input_data("cell", cell); - - auto outputs = network->execute(); - uint32_t ref_num_output_primitives = 1; // Output will return atleast 1 primitive - - if (emit_last_cell) { - // add another primitve to account for cell state if the output selection includes cell state - ref_num_output_primitives += 1; - } - - // check if the number of returned primitives match the expected number of output primitives - ASSERT_EQ(ref_num_output_primitives, outputs.size()); - - for (auto itr = outputs.begin(); itr != outputs.end(); itr++) - { - auto output_layout = itr->second.get_memory()->get_layout(); - primitive_id primitive_name = itr->first; - - cldnn::memory::ptr output_memory = itr->second.get_memory(); - int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T)); - cldnn::tensor ref_output_tensor; - VVVVF ref_primitive_output; - - int32_t ref_batch_size = batch_size; - int32_t ref_hidden_size = hidden_size; - int32_t ref_directions = directions; - - int32_t ref_seq_len = 1; - // Set the reference output against which the primitive's output will be compared - if (primitive_name.find("crop:last_cell") != std::string::npos) - { - ref_primitive_output = last_cell; - } - else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos) - { - ref_primitive_output = last_hidden; - } - else - { - ref_seq_len = sequence_len; - ref_primitive_output = ref_output; - } - - ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions }; - int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions; - - // The number of elements in reference should match the number of elements in the primitive's output - ASSERT_EQ(ref_output_size , output_size); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(ref_batch_size, output_layout.batch()); - ASSERT_EQ(ref_seq_len, output_layout.feature()); // Sequence length should match - ASSERT_EQ(ref_directions, output_layout.spatial(1)); // directions should match - ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // input size should match - - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - int32_t i = 0; - for (int32_t b = 0; b < ref_batch_size; ++b) { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - for (int32_t x = 0; x < ref_hidden_size; ++x) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_format_test(const cldnn::format& format, int directions, bool is_caching_test = false) { - int layers = 1; - int sequence_len = 6; - int batch_size = 3; - int input_size = 4; - int hidden_size = 5; - - lstm_output_selection output_selection = lstm_output_selection::sequence; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " 
Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, directions, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_output = VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(format, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(format, ref_hidden); - VF ref_cell_vec = flatten_4d(format, ref_cell); - - VVVVF last_hidden(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, - last_hidden, last_cell, true, true, true, - (T)0, false, true); - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(),format, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - bool emit_last_cell = output_selection == lstm_output_selection::hidden_cell || - output_selection == lstm_output_selection::sequence_cell; - bool emit_last_hidden = output_selection == lstm_output_selection::hidden || - output_selection == lstm_output_selection::hidden_cell; - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", 
hidden->get_layout()));
-    topology.add(input_layout("cell", cell->get_layout()));
-    topology.add(lstm("lstm" + get_string_id(0), lstm_inputs, "weights", "recurrent",
-                      "biases", "hidden", "cell", "", 0, false,
-                      { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {},
-                      output_selection, default_offset_type));
-
-    if (emit_last_cell)
-    {
-        int32_t concatenation_len = emit_last_hidden ? 2 : sequence_len + 1;
-        tensor hidden_tensor {batch_size, concatenation_len - 1, hidden_size, directions};
-        tensor cell_tensor {batch_size, 1, hidden_size, directions};
-        topology.add(crop(emit_last_hidden ? "crop:last_hidden" : "crop:sequence", input_info("lstm"), hidden_tensor, tensor{0, 0, 0, 0}));
-        topology.add(crop("crop:last_cell", input_info("lstm"), cell_tensor, tensor{0, concatenation_len - 1, 0, 0}));
-    }
-
-    cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test);
-
-    std::map<primitive_id, network_output> outputs;
-
-    network->set_input_data("input", input);
-    network->set_input_data("hidden", hidden);
-    network->set_input_data("cell", cell);
-    outputs = network->execute();
-
-    uint32_t ref_num_output_primitives = 1;  // Output will return at least 1 primitive
-
-    if (emit_last_cell) {
-        // add another primitive to account for cell state if the output selection includes cell state
-        ref_num_output_primitives += 1;
-    }
-
-    // check that the number of returned primitives matches the expected number of output primitives
-    ASSERT_EQ(ref_num_output_primitives, outputs.size());
-
-    for (auto itr = outputs.begin(); itr != outputs.end(); itr++)
-    {
-        auto output_layout = itr->second.get_memory()->get_layout();
-        primitive_id primitive_name = itr->first;
-
-        cldnn::memory::ptr output_memory = itr->second.get_memory();
-        int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T));
-        cldnn::tensor ref_output_tensor;
-        VVVVF<T> ref_primitive_output;
-
-        int32_t ref_batch_size = batch_size;
-        int32_t ref_hidden_size = hidden_size;
-        int32_t ref_directions = directions;
-
-        int32_t ref_seq_len = 1;
-        // Set the reference output against which the primitive's output will be compared
-        if (primitive_name.find("crop:last_cell") != std::string::npos)
-        {
-            ref_primitive_output = last_cell;
-        }
-        else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos)
-        {
-            ref_primitive_output = last_hidden;
-        }
-        else
-        {
-            ref_seq_len = sequence_len;
-            ref_primitive_output = ref_output;
-        }
-
-        ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions };
-        int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions;
-
-        // The number of elements in reference should match the number of elements in the primitive's output
-        ASSERT_EQ(ref_output_size, output_size);
-
-        // Compare the output tensor configuration against the reference value
-        // Output tensor is configured in bfyx format
-        ASSERT_EQ(ref_batch_size, output_layout.batch());
-        ASSERT_EQ(ref_seq_len, output_layout.feature());      // Sequence length should match
-        ASSERT_EQ(ref_directions, output_layout.spatial(1));  // directions should match
-        ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // input size should match
-
-        cldnn::mem_lock<T> output_ptr(output_memory, get_test_stream());
-
-        int32_t i = 0;
-        if (format == cldnn::format::bfyx) {
-            for (int32_t b = 0; b < ref_batch_size; ++b) {
-                for (int32_t s = 0; s < ref_seq_len; ++s) {
-                    for (int32_t d = 0; d < ref_directions; ++d) {
for (int32_t x = 0; x < ref_hidden_size; ++x) {
-                            ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR);
-                        }
-                    }
-                }
-            }
-        }
-        else if (format == cldnn::format::fyxb)
-        {
-            for (int32_t s = 0; s < ref_seq_len; ++s) {
-                for (int32_t d = 0; d < ref_directions; ++d) {
-                    for (int32_t x = 0; x < ref_hidden_size; ++x) {
-                        for (int32_t b = 0; b < ref_batch_size; ++b) {
-                            ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR);
-                        }
-                    }
-                }
-            }
-        }
-
-    }
-}
-
-// -------------------------------------------------------
-template <typename T>
-void lstm_gpu_users_test(bool is_caching_test = false) {
-    int sequence_len = 2;
-    int batch_size = 1;
-    int input_size = 1;
-    int hidden_size = 1;
-    int directions = 1;
-    int min_random = -2, max_random = 2;
-    tests::random_generator rg(GET_SUITE_NAME);
-
-    // The following test is designed to test the user dependencies of an LSTM node when it is replaced
-    // by its subcomponents by the graph compiler.
-    // The output of the LSTM node is set to last_hidden only. Then we concatenate the last_hidden with the initial_hidden tensor:
-    // (input, weights, recurrent, bias, initial_hidden, initial_cell) -> LSTM -> last_hidden
-    // concatenation(last_hidden, initial_hidden)
-    // If the replacement is done correctly, then the initial_hidden tensor should match the output of the
-    // concatenation at an offset along the sequence (see the index sketch after the input setup below).
-
-    VVVVF<T> ref_input = rg.generate_random_4d<T>(batch_size, sequence_len, 1, input_size, min_random, max_random);
-    VVVVF<T> ref_weights = rg.generate_random_4d<T>(1, directions, 4 * hidden_size, input_size, min_random, max_random);
-    VVVVF<T> ref_recurrent = rg.generate_random_4d<T>(1, directions, 4 * hidden_size, hidden_size, min_random, max_random);
-    VVVVF<T> ref_bias = rg.generate_random_4d<T>(1, 1, directions, 4 * hidden_size, min_random, max_random);
-    VVVVF<T> ref_hidden = rg.generate_random_4d<T>(batch_size, 1, directions, hidden_size, min_random, max_random);
-    VVVVF<T> ref_cell = rg.generate_random_4d<T>(batch_size, 1, directions, hidden_size, min_random, max_random);
-    VVVVF<T> ref_output = VVVVF<T>(batch_size, VVVF<T>(sequence_len, VVF<T>(directions, VF<T>(hidden_size))));
-
-    VF<T> ref_input_vec = flatten_4d(format::bfyx, ref_input);
-    VF<T> ref_weights_vec = flatten_4d(format::bfyx, ref_weights);
-    VF<T> ref_recurrent_vec = flatten_4d(format::bfyx, ref_recurrent);
-    VF<T> ref_bias_vec = flatten_4d(format::bfyx, ref_bias);
-    VF<T> ref_hidden_vec = flatten_4d(format::bfyx, ref_hidden);
-    VF<T> ref_cell_vec = flatten_4d(format::bfyx, ref_cell);
-
-    VVVVF<T> last_hidden(batch_size, VVVF<T>(1, VVF<T>(directions, VF<T>(hidden_size))));
-    VVVVF<T> last_cell(batch_size, VVVF<T>(1, VVF<T>(directions, VF<T>(hidden_size))));
-
-    auto& engine = get_test_engine();
-
-    memory::ptr input = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, {batch_size, sequence_len, input_size, 1} });
-    memory::ptr weights = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, directions, input_size, 4 * hidden_size } });
-    memory::ptr recurrent = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } });
-    memory::ptr biases = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, 1, 4 * hidden_size, directions } });
-    memory::ptr hidden = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { batch_size, 1, hidden_size, directions } });
-    memory::ptr cell = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { batch_size, 1, hidden_size, directions } });
-
-    set_values(input, ref_input_vec);
-    set_values(weights, ref_weights_vec);
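// Illustrative sketch, not from the removed file: the final loop of this test locates
// initial_hidden inside the concatenation output by shifting the feature coordinate by
// one slot (s + 1), because the concatenated tensor is laid out in bfyx as
// [batch, 1 (last_hidden) + 1 (initial_hidden), directions, hidden_size]. The helper
// name concat_offset is hypothetical; the arithmetic mirrors the idx computation below.
#include <cstdint>

inline int32_t concat_offset(int32_t b, int32_t s, int32_t d, int32_t x,
                             int32_t sequence_len, int32_t directions, int32_t hidden_size) {
    // bfyx linearization with the feature coordinate shifted past the LSTM's last_hidden
    return x + hidden_size * (d + directions * ((s + 1) + sequence_len * b));
}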
set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", hidden->get_layout())); - topology.add(input_layout("cell", cell->get_layout())); - topology.add(lstm("lstm", lstm_inputs, "weights", "recurrent", - "biases", "hidden", "cell", "", 0, false, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::hidden, default_offset_type)); - std::vector output_ids_offsets { input_info("lstm"), input_info("hidden") }; - topology.add(concatenation("concatenation", output_ids_offsets, 1)); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - - std::map outputs; - - network->set_input_data("input", input); - network->set_input_data("hidden", hidden); - network->set_input_data("cell", cell); - outputs = network->execute(); - - // check if the number of returned primitives match the expected number of output primitives - ASSERT_EQ(size_t(1), outputs.size()); - cldnn::memory::ptr output_memory = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < 1; ++s) { - for (int32_t d = 0; d < directions; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - int32_t idx = x + hidden_size * (d + directions * ((s+1) + sequence_len * b)); - ASSERT_NEAR(ref_hidden[b][s][d][x], output_ptr[idx], FERROR); - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_concatenated_input_test(int layers, int sequence_len, int direction, - int batch_size, int input_size, int hidden_size, - bool has_bias, bool has_initial_hidden, - bool has_initial_cell, float clip_threshold, - bool input_forget, bool is_caching_test = false) -{ - tests::random_generator rg(GET_SUITE_NAME); - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Direction = " << direction << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - - std::vector> ref_weights; - std::vector> ref_recurrent; - std::vector> ref_bias; - std::vector> ref_hidden; - std::vector> ref_cell; - std::vector> ref_output; - - for (int i = 0; i < layers; ++i) { - ref_weights.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, i == 0 ? 
input_size : hidden_size, min_random, max_random)); - ref_recurrent.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random)); - ref_bias.push_back(rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random)); - ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_cell.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size))))); - } - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - - std::vector> ref_weights_vec; - std::vector> ref_recurrent_vec; - std::vector> ref_bias_vec; - std::vector> ref_hidden_vec; - std::vector> ref_cell_vec; - for (int i = 0; i < layers; ++i) { - ref_weights_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[i])); - ref_recurrent_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[i])); - ref_bias_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[i])); - ref_hidden_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[i])); - ref_cell_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[i])); - } - - VVVVF last_hidden(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden[0], ref_cell[0], ref_weights[0], ref_recurrent[0], ref_bias[0], ref_output[0], - last_hidden, last_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget, true); - - for (int i = 1; i < layers; ++i) { - lstm_reference(ref_output[i - 1], ref_hidden[i], ref_cell[i], ref_weights[i], ref_recurrent[i], - ref_bias[i], ref_output[i], - last_hidden, last_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget, false); - } - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); - set_values(input, ref_input_vec); - - std::vector weights; - std::vector recurrent; - std::vector biases; - std::vector hidden; - std::vector cell; - for (int i = 0; i < layers; ++i) { - weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, i == 0 ? 
input_size : hidden_size, 4 * hidden_size } })); - set_values(weights[i], ref_weights_vec[i]); - recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); - set_values(recurrent[i], ref_recurrent_vec[i]); - if (has_bias) { - biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, direction } })); - set_values(biases[i], ref_bias_vec[i]); - } - if (has_initial_hidden) { - hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction } })); - set_values(hidden[i], ref_hidden_vec[i]); - } - if (has_initial_cell) { - cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction} })); - set_values(cell[i], ref_cell_vec[i]); - } - } - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - cldnn::primitive_id prev_node_id; - - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - std::string lstm_id = "lstm" + sid; - std::string weights_id = "weights" + sid; - std::string recurrent_id = "recurrent" + sid; - std::string biases_id = "biases" + sid; - std::string hidden_id = "hidden" + sid; - std::string cell_id = "cell" + sid; - std::string output_crop_id = "crop:sequence:" + sid; - - topology.add(data(weights_id, weights[i])); - topology.add(data(recurrent_id, recurrent[i])); - if (has_bias) topology.add(data(biases_id, biases[i])); - if (has_initial_hidden) topology.add(input_layout(hidden_id, hidden[i]->get_layout())); - if (has_initial_cell) topology.add(input_layout(cell_id, cell[i]->get_layout())); - if (i == 0) { - topology.add(lstm(lstm_id, { input_info("input") }, weights_id, recurrent_id, - has_bias ? biases_id : "", has_initial_hidden ? hidden_id : "", has_initial_cell ? cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence_cell, default_offset_type)); - } - else { - topology.add(lstm(lstm_id, { input_info(prev_node_id) }, weights_id, recurrent_id, - has_bias ? biases_id : "", has_initial_hidden ? hidden_id : "", has_initial_cell ? 
cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence_cell, default_offset_type)); - } - - // Crop out the whole output sequence element - topology.add(crop(output_crop_id, input_info(lstm_id), {batch_size, sequence_len, hidden_size, direction}, {0, 0, 0, 0})); - - // Save the node id to provide it as input to the next lstm layer - prev_node_id = output_crop_id; - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - if (has_initial_hidden) network->set_input_data("hidden" + sid, hidden[i]); - if (has_initial_cell) network->set_input_data("cell" + sid, cell[i]); - } - auto outputs = network->execute(); - { - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - - // Get the output tensor - cldnn::layout output_layout = output->get_layout(); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(batch_size, output_layout.batch()); - ASSERT_EQ(sequence_len, output_layout.feature()); - ASSERT_EQ(direction, output_layout.spatial(1)); - ASSERT_EQ(hidden_size, output_layout.spatial(0)); - - cldnn::mem_lock output_ptr(output, get_test_stream()); - int32_t i = 0; - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < sequence_len; ++s) { - for (int32_t d = 0; d < direction; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - ASSERT_NEAR(ref_output[layers - 1][b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} - -// This test checks chained and stacked LSTM topology. The configuration allows to create -// LSTM topology with multiple layers and can also be chained together. 
-template -void lstm_gpu_chain_test(int batch_size, int input_size, int hidden_size, - int directions, size_t layers, size_t chains, int sequence_len, - const lstm_output_selection& output_selection, bool is_caching_test = false) -{ - tests::random_generator rg(GET_SUITE_NAME); - int min_random = -2, max_random = 2; - bool has_bias = false; - bool has_initial_hidden = false; - bool has_initial_cell = false; - float clip_threshold = 0; - bool input_forget = false; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - std::vector>> ref_weights; - std::vector>> ref_recurrent; - std::vector>> ref_bias; - std::vector>> ref_hidden; - std::vector>> ref_cell; - std::vector>> ref_output; - - // Create the 4 dimensional weight, bias, hidden, cell state and output vectors - for (size_t chain = 0; chain < chains; chain++) { - - std::vector> per_chain_ref_weights; - std::vector> per_chain_ref_recurrent; - std::vector> per_chain_ref_bias; - std::vector> per_chain_ref_hidden; - std::vector> per_chain_ref_cell; - std::vector> per_chain_ref_output; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_ref_weights.push_back(rg.generate_random_4d(1, directions, 4 * hidden_size, (layer == 0) ? input_size : hidden_size, min_random, max_random)); - per_chain_ref_recurrent.push_back(rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random)); - per_chain_ref_bias.push_back(rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random)); - per_chain_ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random)); - per_chain_ref_cell.push_back(rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random)); - per_chain_ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size))))); - } - - ref_weights.push_back(per_chain_ref_weights); - ref_recurrent.push_back(per_chain_ref_recurrent); - ref_bias.push_back(per_chain_ref_bias); - ref_hidden.push_back(per_chain_ref_hidden); - ref_cell.push_back(per_chain_ref_cell); - ref_output.push_back(per_chain_ref_output); - } - - VF ref_input_vec; - std::vector>> ref_weights_vec; - std::vector>> ref_recurrent_vec; - std::vector>> ref_bias_vec; - std::vector>> ref_hidden_vec; - std::vector>> ref_cell_vec; - std::vector>> ref_output_vec; - - ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - - // flatten all the 4 dimensional vectors across chains and layers - for (size_t chain = 0; chain < chains; chain++) { - - std::vector> per_chain_ref_weights; - std::vector> per_chain_ref_recurrent; - std::vector> per_chain_ref_bias; - std::vector> per_chain_ref_hidden; - std::vector> per_chain_ref_cell; - std::vector> per_chain_ref_output; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_ref_weights.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[chain][layer])); - per_chain_ref_recurrent.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[chain][layer])); - per_chain_ref_bias.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[chain][layer])); - per_chain_ref_hidden.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[chain][layer])); - 
per_chain_ref_cell.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[chain][layer])); - per_chain_ref_output.push_back(flatten_4d(cldnn::format::bfyx, ref_output[chain][layer])); - } - - ref_weights_vec.push_back(per_chain_ref_weights); - ref_recurrent_vec.push_back(per_chain_ref_recurrent); - ref_bias_vec.push_back(per_chain_ref_bias); - ref_hidden_vec.push_back(per_chain_ref_hidden); - ref_cell_vec.push_back(per_chain_ref_cell); - ref_output_vec.push_back(per_chain_ref_output); - } - - std::vector>> last_hidden(chains, std::vector >(layers, VVVVF(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))))); - std::vector>> last_cell(chains, std::vector >(layers, VVVVF(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))))); - - for (size_t chain = 0; chain < chains; chain++) { - lstm_reference(ref_input, ref_hidden[chain][0], ref_cell[chain][0], ref_weights[chain][0], - ref_recurrent[chain][0], ref_bias[chain][0], ref_output[chain][0], - last_hidden[chain][0], last_cell[chain][0], has_bias, - chain == 0 ? has_initial_hidden : true, - chain == 0 ? has_initial_cell : true, - clip_threshold, input_forget, true); - - if (chain < chains - 1) - { - ref_hidden[chain + 1][0] = last_hidden[chain][0]; - ref_cell[chain + 1][0] = last_cell[chain][0]; - } - } - - for (size_t layer = 1; layer < layers; ++layer) { - for (size_t chain = 0; chain < chains; chain++) { - lstm_reference(ref_output[chain][layer - 1], ref_hidden[chain][layer], ref_cell[chain][layer], - ref_weights[chain][layer], ref_recurrent[chain][layer], ref_bias[chain][layer], - ref_output[chain][layer], last_hidden[chain][layer], last_cell[chain][layer], has_bias, - chain == 0 ? has_initial_hidden : true, - chain == 0 ? has_initial_cell : true, - clip_threshold, input_forget, - false); - - if (chain < chains - 1) - { - ref_hidden[chain + 1][layer] = last_hidden[chain][layer]; - ref_cell[chain + 1][layer] = last_cell[chain][layer]; - } - } - } - - auto& engine = get_test_engine(); - tensor input_tensor = { batch_size, sequence_len, input_size, 1 }; - layout layout = { ov::element::from(), cldnn::format::bfyx, input_tensor }; - - memory::ptr input = engine.allocate_memory(layout); - set_values(input, ref_input_vec); - - // 2-dim vectors to support chain and layers - std::vector> weights; - std::vector> recurrent; - std::vector> biases; - std::vector> hidden; - std::vector> cell; - - for (size_t chain = 0; chain < chains; chain++) { - std::vector per_chain_weights; - std::vector per_chain_recurrent; - std::vector per_chain_biases; - std::vector per_chain_hidden; - std::vector per_chain_cell; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, layer == 0 ? 
input_size : hidden_size, 4 * hidden_size} })); - set_values(per_chain_weights[layer], ref_weights_vec[chain][layer]); - - per_chain_recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, hidden_size, 4 * hidden_size} })); - set_values(per_chain_recurrent[layer], ref_recurrent_vec[chain][layer]); - - if (has_bias) - { - per_chain_biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, 4 * hidden_size, directions} })); - set_values(per_chain_biases[layer], ref_bias_vec[chain][layer]); - } - - if (has_initial_hidden) - { - per_chain_hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); - set_values(per_chain_hidden[layer], ref_hidden_vec[chain][layer]); - } - - if (has_initial_cell) - { - per_chain_cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); - set_values(per_chain_cell[layer], ref_cell_vec[chain][layer]); - } - } - - weights.push_back(per_chain_weights); - recurrent.push_back(per_chain_recurrent); - biases.push_back(per_chain_biases); - hidden.push_back(per_chain_hidden); - cell.push_back(per_chain_cell); - } - - // Start creating the topology - cldnn::topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - - for (int feature = 0; feature < sequence_len; feature++) { - input_ids_offsets.push_back({ get_string_id(feature), {0, feature, 0, 0} }); - lstm_inputs.push_back(input_info("inputSplit:" + get_string_id(feature))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - - bool emit_last_hidden = output_selection == lstm_output_selection::hidden - || output_selection == lstm_output_selection::hidden_cell; - - std::vector output_sequence_ids; - std::vector last_hidden_ids; - std::vector last_cell_ids; - - for (size_t chain = 0; chain < chains; chain++) { - - // Add all the primitives to the network - std::vector prev_output_sequence_ids(output_sequence_ids); - std::vector prev_last_hidden_ids(last_hidden_ids); - std::vector prev_last_cell_ids(last_cell_ids); - - // Erase all the temporary primitive id containers - output_sequence_ids.clear(); - last_cell_ids.clear(); - last_hidden_ids.clear(); - - for (size_t layer = 0; layer < layers; layer++) { - std::string chain_id = get_string_id(chain); - std::string layer_id = get_string_id(layer); - std::string lstm_id = "lstm:" + chain_id + ":" + layer_id; - std::string weights_id = "weights:" + chain_id + ":" + layer_id; - std::string recurrent_id = "recurrent:" + chain_id + ":" + layer_id; - std::string biases_id = "biases:" + chain_id + ":" + layer_id; - std::string hidden_id = "hidden:" + chain_id + ":" + layer_id; - std::string cell_id = "cell:" + chain_id + ":" + layer_id; - std::string crop_seq_id = "crop:sequence:" + chain_id + ":" + layer_id; - std::string crop_last_cell_id = "crop:last_cell:" + chain_id + ":" + layer_id; - std::string crop_last_hidden_id = "crop:last_hidden:" + chain_id + ":" + layer_id; - - primitive_id initial_hidden_id; - primitive_id initial_cell_id; - lstm_output_selection output_selection_per_layer; - - topology.add(data(weights_id, weights[chain][layer])); - topology.add(data(recurrent_id, recurrent[chain][layer])); - if (has_bias) topology.add(data(biases_id, biases[chain][layer])); - - if (chain == 0 && layer == 0) - { - if (has_initial_hidden) 
topology.add(input_layout(hidden_id, hidden[chain][layer]->get_layout())); - if (has_initial_cell) topology.add(input_layout(cell_id, cell[chain][layer]->get_layout())); - } - - // Get the initial hidden and initial cell for each layer for each chain link - if (chain == 0) - { - initial_hidden_id = has_initial_hidden ? hidden_id : ""; - initial_cell_id = has_initial_cell ? cell_id : ""; - } - else - { - initial_hidden_id = prev_last_hidden_ids[layer]; - initial_cell_id = prev_last_cell_ids[layer]; - } - - // Output selection for all the layers except the last layer has to have the sequence, - // last hidden and last cell - if (layer < layers - 1) - { - output_selection_per_layer = lstm_output_selection::sequence_cell; - } - else - { - // For the last layer, use the output selection provided by the user - output_selection_per_layer = output_selection; - } - - if (layer == 0) - { - topology.add(lstm(lstm_id, lstm_inputs, weights_id, recurrent_id, - has_bias ? biases_id : "", - initial_hidden_id, initial_cell_id, - "", clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection_per_layer, default_offset_type)); - } - else - { - topology.add(lstm(lstm_id, { input_info(output_sequence_ids[layer - 1]) }, weights_id, recurrent_id, - has_bias ? biases_id : "", - initial_hidden_id, initial_cell_id, - "", clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection_per_layer, default_offset_type)); - } - - tensor sequence_tensor{ batch_size, sequence_len, hidden_size, directions }; - tensor cell_tensor{ batch_size, 1, hidden_size, directions }; - tensor last_hidden_tensor{ batch_size, 1, hidden_size, directions }; - - // For all the layers except the last layer, we need to crop output sequence, - // last hidden and last cell. 
- // The output sequence goes into the next layer of lstm in a chain link - // The last cell state and last hidden go to the lstm node in the same layer - // next in chain - topology.add(crop(crop_seq_id, input_info(lstm_id), sequence_tensor, tensor{ 0, 0, 0, 0 })); // Add crop to get the sequence - topology.add(crop(crop_last_hidden_id, input_info(lstm_id), last_hidden_tensor, tensor{ 0, sequence_len - 1, 0, 0 })); // Add crop to get the last hidden element - topology.add(crop(crop_last_cell_id, input_info(lstm_id), cell_tensor, tensor{ 0, sequence_len, 0, 0 })); // Add crop to get the last cell element - - // Keep a copy of the sequence, last hidden and last cell primitive id for each layer - output_sequence_ids.push_back(crop_seq_id); - last_hidden_ids.push_back(crop_last_hidden_id); - last_cell_ids.push_back(crop_last_cell_id); - } - } - - // Creating network out of the above designed topology - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (size_t layer = 0; layer < layers; layer++) { - std::string sid = get_string_id(layer); - if (has_initial_hidden) network->set_input_data("hidden:000:" + sid, hidden[0][layer]); // 0 is the chain link index - if (has_initial_cell) network->set_input_data("cell:000:" + sid, cell[0][layer]); // 0 is the chain link index - } - - auto outputs = network->execute(); - for (auto itr = outputs.begin(); itr != outputs.end(); itr++) - { - auto output_layout = itr->second.get_memory()->get_layout(); - primitive_id primitive_name = itr->first; - - // Split the primitive id to get the chain id - // E.g. primitive id: crop:last_cell:XXX:YYY - // XXX is the chain id - // YYY is the layer id - std::string chain_str = primitive_name.substr(primitive_name.find(":", primitive_name.find(":") + 1) + 1, 5); - std::string layer_str = primitive_name.substr(primitive_name.find(":", primitive_name.find(":", primitive_name.find(":") + 1) + 1) + 1, 5); - size_t chain_id = stoi(chain_str); - size_t layer_id = stoi(layer_str); - - cldnn::memory::ptr output_memory = itr->second.get_memory(); - int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T)); - cldnn::tensor ref_output_tensor; - VVVVF ref_primitive_output; - - int32_t ref_batch_size = batch_size; - int32_t ref_hidden_size = hidden_size; - int32_t ref_directions = directions; - - int32_t ref_seq_len = 1; - - // Set the reference output against which the primitive's output will be compared - if (primitive_name.find("crop:last_cell") != std::string::npos) - { - ref_primitive_output = last_cell[chain_id][layer_id]; - } - else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos) - { - ref_primitive_output = last_hidden[chain_id][layer_id]; - } - else - { - ref_seq_len = sequence_len; - ref_primitive_output = ref_output[chain_id][layers - 1]; - } - - ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions }; - int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions; - - // The number of elements in reference should match the number of elements in the primitive's output - ASSERT_EQ(ref_output_size, output_size); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(ref_batch_size, output_layout.batch()); - ASSERT_EQ(ref_seq_len, output_layout.feature()); // Sequence length should match - 
ASSERT_EQ(ref_directions, output_layout.spatial(1)); // directions should match - ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // hidden size should match - - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - int32_t i = 0; - for (int32_t b = 0; b < ref_batch_size; ++b) { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - for (int32_t x = 0; x < ref_hidden_size; ++x) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} -} // namespace - -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); -} - -// LSTM GEMM tests to test LSTM GEMMV kernel implementation -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_test_f32) { - generic_lstm_gemm_gpu_test(5, 1, 1, 1024, 1024, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 256, 2, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, true, false); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, false, false); -} - -// LSTM ELT Tests -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_biasf32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, false); -} - -// generic_lstm_gpu_test parameters: -// layers, sequence, dir, batch, input, hidden, bias, initial_h, initial_cell, threshold, coupled_input_forget -TEST(lstm_gpu, generic_lstm_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, 
true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f32) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f32) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); -} - -// bidirectional support -TEST(lstm_gpu, generic_lstm_bi_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, false, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); -} - -// multi-layer support -TEST(lstm_gpu, generic_lstm_stacked_no_seq_f32) { - generic_lstm_gpu_test(4, 1, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f32) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f32) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_bi_f32) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -// optional outputs support -TEST(lstm_gpu, output_test_sequence_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence, 1); -} - -TEST(lstm_gpu, output_test_hidden_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden, 1); -} - -TEST(lstm_gpu, output_test_hidden_cell_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 1); -} - -TEST(lstm_gpu, output_test_sequence_cell_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 1); -} - -TEST(lstm_gpu, output_test_sequence_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence, 2); -} - -TEST(lstm_gpu, output_test_hidden_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden, 2); -} - -TEST(lstm_gpu, output_test_hidden_cell_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 2); -} - -TEST(lstm_gpu, 
output_test_sequence_cell_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 2); -} - -// format tests -TEST(lstm_gpu, lstm_gpu_format_bfyx_f32) { - lstm_gpu_format_test(cldnn::format::bfyx, 1); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_bi_f32) { - lstm_gpu_format_test(cldnn::format::bfyx, 2); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_f32) { - lstm_gpu_format_test(cldnn::format::fyxb, 1); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_bi_f32) { - lstm_gpu_format_test(cldnn::format::fyxb, 2); -} - -// test for LSTM users' dependencies -TEST(lstm_gpu, lstm_users_f32) { - lstm_gpu_users_test(); -} - -// Test for LSTM with concatenated input -TEST(lstm_gpu, generic_lstm_concatenated_input) { - lstm_gpu_concatenated_input_test(1, 2, 2, 1, 1, 1, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_multi_layer) { - lstm_gpu_concatenated_input_test(5, 5, 2, 1, 1, 4, true, true, true, 0, false); -} - -// test for LSTM with chain and stack (multilayer) -TEST(lstm_gpu, generic_lstm_chained_unidirectional_f32) { - // batch size = 1 - // input size = 2 - // hidden size = 4 - // directions = 1 - // layers = 1 - // chains = 2 - // sequence length = 1 - // output selection = output sequence and cell - lstm_gpu_chain_test(1, 2, 4, 1, 1, 2, 1, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_bidirectional_f32) { - // batch size = 1 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 1 - // chains = 1 - // sequence length = 1 - // output selection = output sequence and cell - lstm_gpu_chain_test(1, 2, 4, 2, 1, 1, 1, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_no_stack_bidirectional_f32) { - // batch size = 2 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 1 - // chains = 2 - // sequence length = 5 - // output selection = output sequence and cell - lstm_gpu_chain_test(2, 2, 4, 2, 1, 2, 5, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32) { - // batch size = 2 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 4 - // chains = 2 - // sequence length = 5 - // output selection = output sequence and cell - lstm_gpu_chain_test(2, 2, 4, 2, 4, 2, 5, lstm_output_selection::sequence_cell); -} - -// FP16 Half precision tests -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); -} - -TEST(lstm_gpu, generic_lstm_f16) { - generic_lstm_gpu_test(1, 7, 
1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f16) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); -} - -// bidirectional support -TEST(lstm_gpu, generic_lstm_bi_bias_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); -} - -// multi-layer support -TEST(lstm_gpu, generic_lstm_stacked_seq_f16) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f16) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -// TODO: Add tests for the following: -// integration testing using multi-layer and chained LSTMs -// LSTMs single input -// optional activation list - -#ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_test_f32_cached) { - generic_lstm_gemm_gpu_test(5, 1, 1, 1024, 1024, true, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 1, 256, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_bias_f32_cached) 
{ - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, false, false, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_biasf32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, false, true); -} - -TEST(lstm_gpu, generic_lstm_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0, true); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f32_cached) { - default_offset_type = 
lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f32_cached) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, false, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_no_seq_f32_cached) { - generic_lstm_gpu_test(4, 1, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f32_cached) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f32_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_bi_f32_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, output_test_sequence_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence, 1, true); -} - -TEST(lstm_gpu, output_test_hidden_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden, 1, true); -} - -TEST(lstm_gpu, output_test_hidden_cell_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 1, true); -} - -TEST(lstm_gpu, output_test_sequence_cell_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 1, true); -} - -TEST(lstm_gpu, output_test_sequence_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence, 2, true); -} - -TEST(lstm_gpu, output_test_hidden_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden, 2, true); -} - -TEST(lstm_gpu, output_test_hidden_cell_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 2, true); -} - -TEST(lstm_gpu, output_test_sequence_cell_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 2, true); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_f32_cached) { - lstm_gpu_format_test(cldnn::format::bfyx, 1, true); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_bi_f32_cached) { - lstm_gpu_format_test(cldnn::format::bfyx, 2, true); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_f32_cached) { - lstm_gpu_format_test(cldnn::format::fyxb, 1, true); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_bi_f32_cached) { - lstm_gpu_format_test(cldnn::format::fyxb, 2, true); -} - -TEST(lstm_gpu, lstm_users_f32_cached) { - lstm_gpu_users_test(true); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_cached) { - lstm_gpu_concatenated_input_test(1, 2, 2, 1, 1, 1, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_multi_layer_cached) { - lstm_gpu_concatenated_input_test(5, 5, 2, 1, 1, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_chained_unidirectional_f32_cached) { - lstm_gpu_chain_test(1, 2, 4, 1, 1, 2, 1, lstm_output_selection::sequence_cell, true); -} - -TEST(lstm_gpu, generic_lstm_chained_bidirectional_f32_cached) { - lstm_gpu_chain_test(1, 2, 4, 2, 1, 1, 1, lstm_output_selection::sequence_cell, 
true); -} - -TEST(lstm_gpu, generic_lstm_chained_no_stack_bidirectional_f32_cached) { - lstm_gpu_chain_test(2, 2, 4, 2, 1, 2, 5, lstm_output_selection::sequence_cell, true); -} - -TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32_cached) { - lstm_gpu_chain_test(2, 2, 4, 2, 4, 2, 5, lstm_output_selection::sequence_cell, true); -} - -// FP16 Half precision tests -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); -} - -TEST(lstm_gpu, generic_lstm_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16_cached) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f16_cached) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); -} - -// bidirectional support 
-TEST(lstm_gpu, generic_lstm_bi_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f16_cached) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} -#endif -TEST(lstm_gpu, generic_lstm_stacked_bi_f16_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp deleted file mode 100644 index 4cec9e2b18aac5..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" - -#include -#include -#include - -using namespace cldnn; -using namespace ::tests; - -template -struct pyramid_roi_align_typed_test : testing::Test { - const data_types data_type = ov::element::from(); - using Type = T; - - void execute(bool is_caching_test) { - auto& engine = get_test_engine(); - - const int rois_num = 3; - const int output_size = 2; - const int sampling_points = 2; - const int starting_level = 2; - const int P2_scale = 1; - const int P3_scale = 2; - const int P4_scale = 4; - const int P5_scale = 8; - const int P2_size = 8; - const int P3_size = P2_size * P2_scale / P3_scale; - const int P4_size = P2_size * P2_scale / P4_scale; - const int P5_size = P2_size * P2_scale / P5_scale; - - std::vector rois_data = { - Type(0.f), Type(0.f), Type(1.f), Type(1.f), - Type(0.f), Type(0.f), Type(0.5f), Type(0.5f), - Type(0.5f), Type(0.5f), Type(0.75f), Type(0.75f) - }; - - std::vector P2_data = { - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - }; - - std::vector P3_data = { - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - }; - - std::vector P4_data = { - Type(11.f), Type(19.f), - Type(11.f), Type(19.f), - }; - - std::vector P5_data = { - Type(15.f) - }; - - auto rois_lay = layout(this->data_type, format::bfyx, tensor(batch(rois_num), feature(4))); - auto P2_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P2_size, P2_size)); - auto P3_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P3_size, P3_size)); - auto P4_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P4_size, P4_size)); - auto P5_lay = 
layout(this->data_type, format::bfyx, tensor(1, 1, P5_size, P5_size)); - - auto rois_mem = engine.allocate_memory(rois_lay); - auto P2_mem = engine.allocate_memory(P2_lay); - auto P3_mem = engine.allocate_memory(P3_lay); - auto P4_mem = engine.allocate_memory(P4_lay); - auto P5_mem = engine.allocate_memory(P5_lay); - - tests::set_values(rois_mem, rois_data); - tests::set_values(P2_mem, P2_data); - tests::set_values(P3_mem, P3_data); - tests::set_values(P4_mem, P4_data); - tests::set_values(P5_mem, P5_data); - - topology topo; - topo.add(data("P2", P2_mem)); - topo.add(data("P3", P3_mem)); - topo.add(data("P4", P4_mem)); - topo.add(data("P5", P5_mem)); - topo.add(input_layout("rois", rois_lay)); - topo.add(pyramid_roi_align("pyramid", - input_info("rois"), - input_info("P2"), - input_info("P3"), - input_info("P4"), - input_info("P5"), - output_size, - sampling_points, - { P2_scale, P3_scale, P4_scale, P5_scale }, - starting_level)); - - cldnn::network::ptr net = get_network(engine, topo, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - - net->set_input_data("rois", rois_mem); - - std::vector expected_out = { - // RoI 0,0 - 1,1 from P4 - 14.f, 18.f, 14.f, 18.f, - // RoI 0,0 - 0.5,0.5 from P3 - 11.25f, 14.25f, 11.25f, 14.25f, - // RoI 0.5,0.5 - 0.75,0.75 from P2 - 12.15625f, 13.03125f, 7.40625f, 8.28125f, - }; - - auto result = net->execute(); - - auto out_mem = result.at("pyramid").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); - - ASSERT_EQ(expected_out.size(), out_ptr.size()); - for (size_t i = 0; i < expected_out.size(); ++i) { - ASSERT_EQ(expected_out[i], static_cast(out_ptr[i])) << "at i = " << i; - } - } -}; -using pyramid_roi_align_types = testing::Types; - -TYPED_TEST_SUITE(pyramid_roi_align_typed_test, pyramid_roi_align_types); - -TYPED_TEST(pyramid_roi_align_typed_test, smoke_4levels) { - this->execute(false); -} - -TYPED_TEST(pyramid_roi_align_typed_test, smoke_4levels_cached) { - this->execute(true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp index 194fdb524bf313..d645f46fe080d0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp @@ -89,7 +89,7 @@ void test_multiple_outputs(bool is_caching_test) { ASSERT_EQ(output_ptr2[i], out_vec[i]); } -TEST(removing_output_node, multiple_outputs) { +TEST(removing_output_node, DISABLED_multiple_outputs) { // Issue 129991 test_multiple_outputs(false); } @@ -164,7 +164,7 @@ TEST(removing_output_node, output_node_optimization) { } #ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(removing_output_node, multiple_outputs_cached) { +TEST(removing_output_node, DISABLED_multiple_outputs_cached) { // Issue 129991 test_multiple_outputs(true); } #endif diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp deleted file mode 100644 index 6aea709c0fe496..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include - -#include -#include - -using namespace cldnn; -using namespace ::tests; - -template -void check_feature_map(T* output_ptr, std::vector 
&input_vec, size_t batch_num, size_t feature_num, size_t y_size, size_t x_size, size_t feature_id, size_t factor) -{ - for (size_t b = 0; b < batch_num; ++b) { //B - for (size_t y = 0; y < y_size; ++y) { //Y - for (size_t x = 0; x < x_size; ++x) { //X - auto linear_id = x + x_size * (y + y_size * (feature_id + feature_num * b)); - auto output_linear_id = x + x_size * (y + y_size * b); - ASSERT_EQ(output_ptr[output_linear_id], input_vec[linear_id] * factor); - } - } - } -} - -template -void split_test(int batch_num, int feature_num, int x_size, int y_size, std::vector split_offsets, - bool is_caching_test) -{ - auto& engine = get_test_engine(); - cldnn::tensor reference_input_size = { batch_num, feature_num, x_size, y_size }; - - cldnn::memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, reference_input_size }); - std::vector > input_ids_offsets; - - topology topology; - topology.add(input_layout("input", input->get_layout())); - - // lambda expression to create the primitive id for the splits - auto create_split_id = [](size_t splitNum) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << splitNum; - - return ss.str(); - }; - - // Create the splits with the split ids for the topology - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) - { - input_ids_offsets.push_back({ create_split_id(splitNum), split_offsets[splitNum]}); - } - - topology.add(split("split", input_info("input"), input_ids_offsets)); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - - auto outputs = network->execute(); - - // The number of splits should match the expected number of splits - ASSERT_EQ(outputs.size(), size_t(split_offsets.size())); - - std::vector expected_sizes; - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) // Calculate the expected sizes - { - cldnn::tensor size; - - if (splitNum < (split_offsets.size() - 1)) - { - size = split_offsets[splitNum + 1] - split_offsets[splitNum]; - } - else - { - size = reference_input_size - split_offsets[splitNum]; - } - - // For all the other dimensions, copy from the split_input - for (int dimension = 0; dimension < cldnn::tensor_dim_max; dimension++) - { - size.raw[dimension] - = (size.raw[dimension] == 0) ? 
reference_input_size.raw[dimension] : size.raw[dimension]; - } - - expected_sizes.push_back(size); - } - - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) - { - primitive_id split_id = "split:" + create_split_id(splitNum); - cldnn::memory::ptr output = outputs.at(split_id).get_memory(); - auto prim = output->get_layout(); - ASSERT_EQ(prim.get_tensor(), expected_sizes[splitNum]); - cldnn::mem_lock output_ptr(output, get_test_stream()); - - // Output tensor size - auto output_batch = prim.batch(); - auto output_feature = prim.feature(); - auto output_x = prim.spatial(0); - auto output_y = prim.spatial(1); - - // Input offsets, starting from which we will compare the output - auto input_batch_offset = split_offsets[splitNum].batch[0]; - auto input_feature_offset = split_offsets[splitNum].feature[0]; - auto input_y_offset = split_offsets[splitNum].spatial[1]; - auto input_x_offset = split_offsets[splitNum].spatial[0]; - - // iterator to iterate through input buffer - auto input_batch_itr = input_batch_offset; - auto input_feature_itr = input_feature_offset; - auto input_y_itr = input_y_offset; - auto input_x_itr = input_x_offset; - - for (auto b = 0; b < output_batch; ++b) { // B - - // reset the input feature iterator - input_feature_itr = input_feature_offset; - for (auto f = 0; f < output_feature; f++) { // F - - // reset the input y iterator - input_y_itr = input_y_offset; - for (auto y = 0; y < output_y; y++) { // Y - - // reset the input x iterator - input_x_itr = input_x_offset; - for (auto x = 0; x < output_x; x++) { // X - auto linear_id = input_x_itr + x_size * (input_y_itr + y_size * (input_feature_itr + feature_num * input_batch_itr)); // index in input - auto output_linear_id = x + output_x * (y + output_y * (f + output_feature * b)); // index in output - ASSERT_EQ(output_ptr[output_linear_id], input_vec[linear_id]); - input_x_itr++; // update the input x iterator - } - input_y_itr++; // update the input y iterator - } - input_feature_itr++; // update the input feature iterator - } - input_batch_itr++; // update the input batch iterator - } - } -} - -TEST(split_gpu_f32, split_1d_uneven_2_splits) { - - // Input : 2x4x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_1d_uneven_2_splits) { - - // Input : 2x4x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, basic_split_concat_optimization) { - - auto& engine = get_test_engine(); - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ 1, 25, 1, 256 } }); - tests::set_random_values(input); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - std::vector> offsets; - std::vector inputs; - for (int i = 0; i < 25; i++) - { - auto id = "crop_" + std::to_string(i); - inputs.push_back(input_info("split:" + 
id)); - offsets.push_back({ id, {0, i, 0, 0} }); - } - - topology.add(split("split", input_info("input"), offsets)); - topology.add(concatenation("concat", inputs, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::f32)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - auto output = outputs.at("output").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (int i = 0; i < 25*256; ++i) - { - ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - -TEST(split_gpu_i64, basic_split_concat_optimization) { - - auto& engine = get_test_engine(); - - auto input = engine.allocate_memory({ data_types::i64,format::bfyx,{ 1, 25, 1, 256 } }); - tests::set_random_values(input); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - std::vector> offsets; - std::vector inputs; - for (int i = 0; i < 25; i++) - { - auto id = "crop_" + std::to_string(i); - inputs.push_back(input_info("split:" + id)); - offsets.push_back({ id, {0, i, 0, 0} }); - } - - topology.add(split("split", input_info("input"), offsets)); - topology.add(concatenation("concat", inputs, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::i64)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - auto output = outputs.at("output").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (int i = 0; i < 25*256; ++i) - { - ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - -TEST(split_gpu_f32, split_1d_uneven_3_splits) { - - // Input : 2x8x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 4, 0, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_1d_uneven_3_splits) { - - // Input : 2x8x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 4, 0, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_2d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x7x6x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, 
split_2d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x7x6x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_2d_uneven_3_split3) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - // id: "out2", offsets: { 0, 4, 7, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_2d_uneven_3_split3) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - // id: "out2", offsets: { 0, 4, 7, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_3d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x7x6x2 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_3d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x7x6x2 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_3d_uneven_3_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x6x4x1 - // Output3 : 2x1x2x1 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - // id: "out2", offsets: { 0, 7, 8, 2 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_3d_uneven_3_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x6x4x1 - // Output3 : 2x1x2x1 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - // id: "out2", offsets: { 0, 7, 8, 2 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, 
basic_in2x3x2x2_split_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0", { 0, 0, 0, 0 } }, - { "out1", { 0, 1, 0, 0 } }, - { "out2", { 0, 2, 0, 0 } } - } )); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "split:out" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, 1); - } -} - -TEST(split_gpu_i64, basic_in2x3x2x2_split_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::i64,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0", { 0, 0, 0, 0 } }, - { "out1", { 0, 1, 0, 0 } }, - { "out2", { 0, 2, 0, 0 } } - } )); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "split:out" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, 1); - } -} - -TEST(split_gpu_f32, basic_in2x3x2x2_split_scale_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - // Additional scale layer at the end - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - auto scale_input0 = engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - auto scale_input1 = engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - auto scale_input2 = 
engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(input_layout("scale_input0", scale_input0->get_layout())); - topology.add(input_layout("scale_input1", scale_input1->get_layout())); - topology.add(input_layout("scale_input2", scale_input2->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0",{ 0, 0, 0, 0 } }, - { "out1",{ 0, 1, 0, 0 } }, - { "out2",{ 0, 2, 0, 0 } } - })); - topology.add(eltwise("scale0", { input_info("split:out0"), input_info("scale_input0") }, eltwise_mode::prod)); - topology.add(eltwise("scale1", { input_info("split:out1"), input_info("scale_input1") }, eltwise_mode::prod)); - topology.add(eltwise("scale2", { input_info("split:out2"), input_info("scale_input2") }, eltwise_mode::prod)); - - std::vector scale_input_vec0 = { 1.f }; - set_values(scale_input0, scale_input_vec0); - std::vector scale_input_vec1 = { 2.f }; - set_values(scale_input1, scale_input_vec1); - std::vector scale_input_vec2 = { 3.f }; - set_values(scale_input2, scale_input_vec2); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - network.set_input_data("scale_input0", scale_input0); - network.set_input_data("scale_input1", scale_input1); - network.set_input_data("scale_input2", scale_input2); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "scale" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, i + 1); - } -} - -#ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(split_gpu_f32, split_1d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_1d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_1d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_1d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_2d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, 
split_2d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_2d_uneven_3_split3_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_2d_uneven_3_split3_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_3d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_3d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_3d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} -#endif -TEST(split_gpu_i64, split_3d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp index 827a649159d6ae..809f81d263d686 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp @@ -2779,7 +2779,7 @@ TEST_F(strided_slice_cpu_impl_constants, test_2x2x4x3_stride) { this->test_2x2x4x3_stride(false, impl_types::cpu); } -TEST_F(strided_slice_cpu_impl_constants, test_2x2x4x1_new_axis_mask) { +TEST_F(strided_slice_cpu_impl_constants, DISABLED_test_2x2x4x1_new_axis_mask) { // Issue 129991 this->test_2x2x4x1_new_axis_mask(false, impl_types::cpu); } From b0fe37f747dc8be63ecb09ad9f39a9bbab1fa35d Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Thu, 18 Jan 2024 21:35:54 +0800 Subject: [PATCH 071/122] [AUTO Plugin][Func Test] remove all 1.0 tests (#22225) * [AUTO Plugin][Func Test] remove all 1.0 tests Signed-off-by: Zhai, Xuejun * Clean skip_tests_config.cpp Signed-off-by: Zhai, Xuejun --------- Signed-off-by: Zhai, Xuejun --- .../executable_network/exec_network_base.cpp | 44 ---- .../executable_network/get_metric.cpp | 36 ---- .../behavior/infer_request/callback.cpp | 23 --- .../behavior/infer_request/io_blob.cpp | 29 --- .../behavior/infer_request/memory_states.cpp | 29 --- .../behavior/infer_request/multitheading.cpp | 27 --- .../behavior/infer_request/perf_counters.cpp | 24 --- 
.../behavior/infer_request/wait.cpp | 28 --- .../behavior/plugin/configuration_tests.cpp | 191 ------------------ .../behavior/plugin/core_integration.cpp | 46 ----- .../behavior/plugin/core_threading_tests.cpp | 37 ---- .../skip_tests_config.cpp | 38 ---- 12 files changed, 552 deletions(-) delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp delete mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp deleted file mode 100644 index 845f63ee4cee8c..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/exec_network_base.hpp" - -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> auto_configs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - ExecutableNetworkBaseTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_configs)), - ExecutableNetworkBaseTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - ExecutableNetworkBaseTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_configs)), - ExecutableNetworkBaseTest::getTestCaseName); - -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - ExecNetSetPrecision, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_configs)), - ExecNetSetPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - ExecNetSetPrecision, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_configs)), - 
ExecNetSetPrecision::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp deleted file mode 100644 index ca702dc66db4bc..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/get_metric.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -namespace { - -// -// Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp deleted file mode 100644 index c02a5c44c30e35..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/callback.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> multiConfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestCallbackTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestCallbackTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp deleted file mode 100644 index 483067a521c1b0..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/io_blob.hpp" - -#include - -#include "ie_plugin_config.hpp" - -using namespace 
BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp deleted file mode 100644 index 5cbda535d8b2df..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/memory_states.hpp" - -#include - -#include "functional_test_utils/plugin_cache.hpp" -#include "ov_models/builders.hpp" - -using namespace BehaviorTestsDefinitions; - -namespace { -std::vector memoryStateTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}), - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}})}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestVariableStateTest, - ::testing::ValuesIn(memoryStateTestCases), - InferRequestVariableStateTest::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp deleted file mode 100644 index 27a82693f28ff6..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/multithreading.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Multiconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestMultithreadingTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestMultithreadingTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp deleted file mode 100644 index baa0c4fe978c29..00000000000000 
--- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/perf_counters.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp deleted file mode 100644 index e1307f5092f6a5..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/wait.hpp" - -#include - -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp deleted file mode 100644 index 75ff757589ae08..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/configuration_tests.hpp" - -#include "ie_plugin_config.hpp" -#include "openvino/runtime/system_conf.hpp" - -using namespace BehaviorTestsDefinitions; - -namespace { -#if (defined(__APPLE__) || defined(_WIN32)) -auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = ov::get_available_numa_nodes(); - auto coreTypes = ov::get_available_cores_types(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else if (numaNodes.size() > 1) { - return std::string{CONFIG_VALUE(NUMA)}; - } else { - return std::string{CONFIG_VALUE(NO)}; - } -}()}; -#else -auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = ov::get_available_cores_types(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else { - return 
std::string{CONFIG_VALUE(YES)}; - } -}()}; -#endif - -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; - -const std::vector> conf = {{}}; - -const std::vector> MultiConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}}; - -const std::vector> AutoConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_WARNING}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, 
InferenceEngine::PluginConfigParams::LOG_TRACE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - CorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs)), - CorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - CorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - CorrectConfigTests::getTestCaseName); - -const std::vector> multiinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, -}; - -const std::vector> autoinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, 
ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}}; - -const std::vector> multiconf = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - IncorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - IncorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - IncorrectConfigAPITests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - IncorrectConfigAPITests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - -const std::vector> auto_multi_prop_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - -const std::vector> auto_multi_loadNetWork_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - 
SetPropLoadNetWorkGetPropTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp deleted file mode 100644 index 29097f845f876d..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/core_integration.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -// defined in plugin_name.cpp -extern const char* cpu_plugin_file_name; - -namespace { -// -// IE Class Common tests with -// -// -// IE Class GetMetric -// - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetConfigTest, - IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("MULTI", "AUTO")); -////////////////////////////////////////////////////////////////////////////////////////// -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp deleted file mode 100644 index 12553dbab98b03..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#ifdef __GLIBC__ -# include -# if __GLIBC_MINOR__ >= 34 -# define ENABLETESTMULTI -# endif -#else -# define ENABLETESTMULTI -#endif - -namespace { - -const Params params[] = { - std::tuple{ov::test::utils::DEVICE_TEMPLATE, {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}}, -#ifdef ENABLETESTMULTI - std::tuple{ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, - std::tuple{ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, -#endif -}; -} // namespace -/* -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTests, testing::ValuesIn(params), -CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(params), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), 
- CoreThreadingTestsWithIterations::getTestCaseName); -*/ diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp index 02548538f29862..f85ece7c65e192 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -11,31 +11,11 @@ std::vector disabledTestPatterns() { std::vector retVector{ - // TODO: Issue: 43793 - R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)", - R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)", - - // Not expected behavior - R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", - R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", - // Not implemented yet: - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", - // TODO: CVS-104942 - R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", - R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", - - // CPU does not support dynamic rank - // Issue: CVS-66778 - R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", // unsupported metrics R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", @@ -47,22 +27,8 @@ std::vector disabledTestPatterns() { // AUTO does not support import / export R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", - - // New plugin API doesn't support changes of pre-processing - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", - // New plugin work with tensors, so it means that blob in old API can have different pointers - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)", - // TODO Issue 100145 - R"(.*Behavior.*InferRequestIOBBlobTest.*canReallocateExternalBlobViaGet.*)", R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", - // Not expected behavior - R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)", - R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", // template plugin doesn't support this case R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"}; @@ -70,9 +36,5 @@ std::vector 
disabledTestPatterns() { // very time-consuming test retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); #endif - -#if defined(_WIN32) - retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); -#endif return retVector; } From 45f62850f3a6ee820ce4a869a29010e79c0f5caa Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 18 Jan 2024 14:43:22 +0100 Subject: [PATCH 072/122] String Tensors Basic Documentation (#22097) (#22240) port: #22097 Basic documentation of how to use string tensors authored-by: Sergey Lyalin --- .../running_inference_with_openvino.rst | 1 + .../integrate_with_your_application.rst | 15 +- .../string_tensors.rst | 208 ++++++++++++++++++ 3 files changed, 218 insertions(+), 6 deletions(-) create mode 100644 docs/articles_en/openvino_workflow/running_inference_with_openvino/string_tensors.rst diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst index 17595ffdae3692..227097201b4434 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino.rst @@ -14,6 +14,7 @@ Running Inference with OpenVINO™ openvino_docs_OV_UG_ShapeInference openvino_docs_OV_UG_DynamicShapes openvino_docs_OV_UG_model_state_intro + openvino_docs_OV_UG_string_tensors Optimize Inference .. meta:: diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst index 0b167d932d767a..3d60fa22e0c512 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst @@ -15,12 +15,12 @@ Integrate OpenVINO™ with Your Application .. meta:: - :description: Learn how to implement a typical inference pipeline of OpenVINO™ + :description: Learn how to implement a typical inference pipeline of OpenVINO™ Runtime in an application. -Following these steps, you can implement a typical OpenVINO™ Runtime inference -pipeline in your application. Before proceeding, make sure you have +Following these steps, you can implement a typical OpenVINO™ Runtime inference +pipeline in your application. Before proceeding, make sure you have :doc:`installed OpenVINO Runtime ` and set environment variables (run ``/setupvars.sh`` for Linux or ``setupvars.bat`` for Windows, otherwise, the ``OpenVINO_DIR`` variable won't be configured properly to pass ``find_package`` calls). @@ -243,8 +243,8 @@ To learn how to change the device configuration, read the :doc:`Query device pro Step 3. Create an Inference Request ################################### -``ov::InferRequest`` class provides methods for model inference in OpenVINO™ Runtime. -Create an infer request using the following code (see +``ov::InferRequest`` class provides methods for model inference in OpenVINO™ Runtime. +Create an infer request using the following code (see :doc:`InferRequest detailed documentation ` for more details): .. tab-set:: @@ -299,6 +299,7 @@ You can use external memory to create ``ov::Tensor`` and use the ``ov::InferRequ :language: cpp :fragment: [part4] +See :doc:`additional materials ` to learn how to handle textual data as a model input. Step 5. 
Start Inference ####################### @@ -329,7 +330,7 @@ OpenVINO™ Runtime supports inference in either synchronous or asynchronous mod :fragment: [part5] -This section demonstrates a simple pipeline. To get more information about other ways to perform inference, read the dedicated +This section demonstrates a simple pipeline. To get more information about other ways to perform inference, read the dedicated :doc:`"Run inference" section `. Step 6. Process the Inference Results @@ -360,6 +361,7 @@ Go over the output tensors and process the inference results. :language: cpp :fragment: [part6] +See :doc:`additional materials ` to learn how to handle textual data as a model output. Step 7. Release the allocated objects (only for C) ################################################## @@ -440,5 +442,6 @@ Additional Resources * See the :doc:`OpenVINO Samples ` page or the `Open Model Zoo Demos `__ page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. * :doc:`OpenVINO™ Runtime Preprocessing ` +* :doc:`String Tensors ` * :doc:`Using Encrypted Models with OpenVINO ` diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/string_tensors.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/string_tensors.rst new file mode 100644 index 00000000000000..a5a1d3dd9987aa --- /dev/null +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/string_tensors.rst @@ -0,0 +1,208 @@ +.. {#openvino_docs_OV_UG_string_tensors} + +String Tensors +============== + + +.. meta:: + :description: Learn how to pass and retrieve text to and from OpenVINO model. + +OpenVINO tensors can hold not only numerical data, like floating-point or integer numbers, +but also textual information, represented as one or multiple strings. +Such a tensor is called a string tensor and can be passed as input or retrieved as output of a text-processing model, such as +`tokenizers and detokenizers `__. + +While this section describes basic API to handle string tensors, more practical examples that leverage both +string tensors and OpenVINO tokenizer can be found in +`GenAI Samples `__. + + +Representation +############## + +String tensors are supported in C++ and Python APIs, represented as instances of the `ov::Tensor` +class with the `element_type` parameter equal to `ov::element::string`. Each element of a string tensor is a string +of arbitrary length, including an empty string, and can be set independently of other elements in the same tensor. + +Depending on the API used (C++ or Python), the underlying data type that represents the string when accessing the tensor elements is +different: + + - in C++, std::string is used + - in Python, `numpy.str_`/`numpy.bytes_` populated Numpy arrays are used, as a read-only copy of the underlying C++ content + +String tensor implementation doesn't imply any limitations on string encoding, as underlying `std::string` doesn't have such limitations. +It is capable of representing all valid UTF-8 characters but also any other byte sequence outside of the UTF-8 encoding standard. +Users should pay extra attention when handling arbitrary byte sequences when accessing tensor content as encoded UTF-8 symbols. 
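+ +For instance, a minimal sketch (it relies only on the constructors and fields described in this article; the byte values are arbitrary) showing that a byte sequence that is not valid UTF-8 can still be stored and read back losslessly: + +.. code-block:: python + + import numpy as np + import openvino as ov + + # b'\xff\xfe\xfd' is not valid UTF-8, but a string tensor stores it as-is + tensor = ov.Tensor(np.array([b'\xff\xfe\xfd', b'plain ascii'])) + # reading through bytes_data returns the raw byte sequences unchanged + print(tensor.bytes_data)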
+ +As the string representation is more sophisticated than that of plain numerical types such as `float` or `int`, +the underlying memory that is used for string tensor representation cannot be handled without properly constructing and destroying string objects. +Also, in contrast to numerical data, C++ and Python do not share the same memory layout, so there is no immediate +sharing of tensor content between the two APIs. Python provides only a numpy-compatible view of the data +allocated and held in the C++ core as an array of `std::string` objects. + +A developer must consider these restrictions when writing code using string tensors and +avoid treating the content as raw bytes or as a view of data in Python. + +Create a String Tensor +###################### + +The following is an example of how to create a small 1D tensor pre-populated with three elements: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: py + :force: + + import openvino as ov + + tensor = ov.Tensor(['text', 'more text', 'even more text']) + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: cpp + + #include <string> + #include <vector> + #include <openvino/runtime/tensor.hpp> + + std::vector<std::string> strings = {"text", "more text", "even more text"}; + ov::Tensor tensor(ov::element::string, ov::Shape{strings.size()}, &strings[0]); + +The example demonstrates that, similarly to tensors with numerical information, +a tensor object can be created on top of existing memory in C++ by providing a pointer to a pre-allocated array of elements. +Here, an instance of std::vector<std::string> is used to hold the memory and consists of three std::string objects. +So, the `tensor` object in the C++ example will share the same memory as the `strings` vector. + +Note that `ov::Tensor`, when initialized with a pointer, requires pre-initialized memory with valid `std::string` objects +created by calling one of the available `std::string` constructors, even for an empty string. Passing uninitialized +memory to this `ov::Tensor` constructor is undefined behavior. + +In the Python version of the example above, a regular list of strings is used as an initializer. +No memory sharing is available this time, in contrast to C++, +and the strings from the initialization list are copied to separately allocated storage underneath the `tensor` object. + +Besides a plain Python list of strings, an initializer can be one of the supported `numpy` arrays initialized +with Unicode or byte strings: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: python + :force: + + import numpy as np + + tensor = ov.Tensor(np.array(['text', 'more text', 'even more text'])) + tensor = ov.Tensor(np.array([b'text', b'more text', b'even more text'])) + +If `ov::Tensor` is created without providing initialization strings, +a tensor of a specified shape with empty strings as elements is created: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: python + :force: + + tensor = ov.Tensor(dtype=str, shape=[3]) + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: cpp + + ov::Tensor tensor(ov::element::string, ov::Shape{3}); + +`ov::Tensor` allocates and initializes the required number of `std::string` objects under the hood. + + +Accessing Elements +################## + +The following code prints all elements in the 1D string tensor constructed above. +In C++, the same `.data` template method is used as for other data types; +to access string data, it should be called with `std::string` as the type argument.
+In Python, dedicated `str_data` and `bytes_data` fields are used instead of the `data` field used for numerical data. + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: python + :force: + + data = tensor.str_data # use tensor.bytes_data instead to access encoded strings as `bytes` + for i in range(tensor.get_size()): + print(data[i]) + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: cpp + + #include <iostream> + + std::string* data = tensor.data<std::string>(); + for(size_t i = 0; i < tensor.get_size(); ++i) + std::cout << data[i] << '\n'; + +In the case of Python, an object retrieved with `tensor.str_data` (or `tensor.bytes_data`) is a numpy array +with `numpy.str_` elements (or `numpy.bytes_` correspondingly). It is a copy of the underlying data from +the `tensor` object and cannot be used for tensor content modification. +To set new values, the entire tensor content should be set as a list or as a `numpy` array, as demonstrated +below. + +In contrast to Python, when using `tensor.data<std::string>()` in C++, a pointer to the underlying data +storage is returned and it can be used for tensor element modification: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. code-block:: python + + # Unicode strings: + tensor.str_data = ['one', 'two', 'three'] + # Do NOT use tensor.str_data[i] to set a new value, it won't update the tensor content + + # Encoded strings: + tensor.bytes_data = [b'one', b'two', b'three'] + # Do NOT use tensor.bytes_data[i] to set a new value, it won't update the tensor content + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: cpp + + std::string new_content[] = {"one", "two", "three"}; + std::string* data = tensor.data<std::string>(); + for(size_t i = 0; i < tensor.get_size(); ++i) + data[i] = new_content[i]; + +When reading or setting string tensor elements in Python, it is recommended to use `str` objects (or `numpy.str_` in a numpy array) +when it is known that the underlying byte sequence forms a valid UTF-8 encoded string. +Otherwise, if arbitrary byte sequences are allowed, +not necessarily within the UTF-8 standard, use `bytes` strings (or `numpy.bytes_` correspondingly) instead. + +Accessing tensor content through `str_data` implicitly applies UTF-8 decoding. +If parts of the byte stream cannot be represented as valid Unicode symbols, +the � replacement symbol is used to signal errors in such invalid Unicode streams. + +Additional Resources +#################### + +* Learn about the :doc:`basic steps to integrate inference in your application <openvino_docs_OV_UG_Integrate_OV_with_your_application>`. + +* Use `OpenVINO tokenizers <https://github.com/openvinotoolkit/openvino_tokenizers>`__ to produce models that use string tensors to work with textual information as pre- and post-processing for large language models. + +* Check out `GenAI Samples <https://github.com/openvinotoolkit/openvino.genai>`__ to see how string tensors are used in real-life applications.
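+ +As a closing illustration, a hedged end-to-end sketch: the model file name is a placeholder and the assumption that the model consumes and produces string tensors (as a tokenizer/detokenizer pipeline would) is hypothetical, while the `Core`, `compile_model`, and infer-request calls are the standard OpenVINO Python API: + +.. code-block:: python + + import openvino as ov + + core = ov.Core() + # placeholder path: any compiled model with string input and output applies + compiled = core.compile_model("text_model.xml", "CPU") + request = compiled.create_infer_request() + + # build a string tensor from a plain Python list and run inference + request.set_input_tensor(ov.Tensor(["OpenVINO can consume text"])) + request.infer() + + # retrieve the textual result as a numpy array of strings + print(request.get_output_tensor().str_data)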
From 1a8f72f3426e02a2cdd3929b608659cd6003feca Mon Sep 17 00:00:00 2001 From: Rajat Krishna Date: Thu, 18 Jan 2024 09:46:20 -0500 Subject: [PATCH 073/122] Parallel execution of TensorFlow Layer 1 Python tests (#22173) * Update TF Layer 1 tests * Enable parallel execution of tests * Fix typo * Fix typo * Enable parallel tensorflow tests for windows --------- Co-authored-by: Roman Kazantsev --- .github/workflows/job_python_unit_tests.yml | 2 +- .github/workflows/windows.yml | 2 +- .../tensorflow_tests/test_tf_ArgMinMax.py | 21 +++-- .../tensorflow_tests/test_tf_CheckNumerics.py | 14 ++- .../tensorflow_tests/test_tf_ComplexFFT.py | 88 +++++++++++-------- .../test_tf_FakeQuantWithMinMaxVars.py | 34 +++---- .../tensorflow_tests/test_tf_Identity.py | 20 +++-- .../tensorflow_tests/test_tf_Resize.py | 44 ++++------ 8 files changed, 129 insertions(+), 96 deletions(-) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 274aaaacea3223..95edce67c2652d 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -249,7 +249,7 @@ jobs: run: | # requires 'unit_tests' from 'mo' export PYTHONPATH=${INSTALL_TEST_DIR}/mo - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe -n logical --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index ff7ae0310aaaec..bcc0db607eb733 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -473,7 +473,7 @@ jobs: run: | :: requires 'unit_tests' from 'tools/mo' set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH% - python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -n logical -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py index 39fb2c62fc63b4..cb2e2df2648266 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py @@ -13,6 +13,11 @@ # Documentation: https://www.tensorflow.org/api_docs/python/tf/raw_ops/ArgMin # https://www.tensorflow.org/api_docs/python/tf/raw_ops/ArgMax +OPS = { + 'tf.raw_ops.ArgMax': tf.raw_ops.ArgMax, + 'tf.raw_ops.ArgMin': tf.raw_ops.ArgMin +} + class TestArgMinMax(CommonTFLayerTest): def _prepare_input(self, inputs_info): assert 'input' in inputs_info @@ -41,24 +46,24 @@ def create_argmin_max_net(self, input_shape, dimension, input_type, output_type, return tf_net, ref_net test_data = [ - dict(input_shape=[20], dimension=0), - dict(input_shape=[20, 30], dimension=1), - dict(input_shape=[2, 30, 3, 4], dimension=2), + [[20], 0], + [[20, 30], 1], + [[2, 30, 3, 4], 2], ] - @pytest.mark.parametrize("params", test_data) + @pytest.mark.parametrize("input_shape, dimension", test_data) @pytest.mark.parametrize("input_type", [np.float32, np.int32]) @pytest.mark.parametrize("output_type", [tf.int32, tf.int64]) - @pytest.mark.parametrize("op_type", 
[tf.raw_ops.ArgMax, tf.raw_ops.ArgMin]) + @pytest.mark.parametrize("op_type", ['tf.raw_ops.ArgMax', 'tf.raw_ops.ArgMin']) @pytest.mark.precommit_tf_fe @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Linux' and platform.machine() in ['arm', 'armv7l', 'aarch64', 'arm64', 'ARM64'], reason='Ticket - 126314') - def test_argmin_max_net(self, params, input_type, output_type, op_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend): + def test_argmin_max_net(self, input_shape, dimension, input_type, output_type, op_type, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, dimension=dimension) self._test(*self.create_argmin_max_net(**params, input_type=input_type, - output_type=output_type, op_type=op_type), + output_type=output_type, op_type=OPS[op_type]), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py index a8f52841929f34..7382f780197924 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py @@ -7,6 +7,11 @@ from common.tf_layer_test_class import CommonTFLayerTest +OPS = { + "tf.raw_ops.CheckNumerics": tf.raw_ops.CheckNumerics, + "tf.raw_ops.CheckNumericsV2": tf.raw_ops.CheckNumericsV2 +} + class TestCheckNumerics(CommonTFLayerTest): def _prepare_input(self, inputs_info): assert 'x' in inputs_info @@ -33,15 +38,16 @@ def create_check_numerics_net(self, input_shape, input_type, op): return tf_net, None test_data_basic = [ - dict(input_shape=[2, 6], input_type=np.float32, op=tf.raw_ops.CheckNumerics), - dict(input_shape=[3, 4, 5], input_type=np.float32, op=tf.raw_ops.CheckNumericsV2), + [[2, 6], np.float32, 'tf.raw_ops.CheckNumerics'], + [[3, 4, 5], np.float32, 'tf.raw_ops.CheckNumericsV2'], ] - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape, input_type, op", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_check_numerics_basic(self, params, ie_device, precision, ir_version, temp_dir, + def test_check_numerics_basic(self, input_shape, input_type, op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, input_type=input_type, op=OPS[op]) self._test(*self.create_check_numerics_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py index 21dafffbf58719..0b63324cc91cef 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py @@ -9,6 +9,21 @@ from common.tf_layer_test_class import CommonTFLayerTest +OPS = { + 'tf.raw_ops.IRFFT': tf.raw_ops.IRFFT, + 'tf.raw_ops.IRFFT2D': tf.raw_ops.IRFFT2D, + 'tf.raw_ops.IRFFT3D': tf.raw_ops.IRFFT3D, + 'tf.raw_ops.FFT': tf.raw_ops.FFT, + 'tf.raw_ops.FFT2D': tf.raw_ops.FFT2D, + 'tf.raw_ops.FFT3D': tf.raw_ops.FFT3D, + 'tf.raw_ops.IFFT': tf.raw_ops.IFFT, + 'tf.raw_ops.IFFT2D': tf.raw_ops.IFFT2D, + 'tf.raw_ops.IFFT3D': tf.raw_ops.IFFT3D, + 'tf.raw_ops.RFFT': tf.raw_ops.RFFT, + 'tf.raw_ops.RFFT2D': tf.raw_ops.RFFT2D, + 'tf.raw_ops.RFFT3D': tf.raw_ops.RFFT3D +} + class TestComplexFFT(CommonTFLayerTest): def _prepare_input(self, inputs_info): rng = 
np.random.default_rng() @@ -41,30 +56,31 @@ def create_complex_fft_net(self, input_shape, shift_roll, axis_roll, fft_op): return tf_net, None test_data_basic = [ - dict(input_shape=[1, 50, 2], shift_roll=[10, 1], axis_roll=[-2, -1]), - dict(input_shape=[4, 20, 3], shift_roll=[2, 10], axis_roll=[0, 1]), - dict(input_shape=[1, 50, 50, 2], shift_roll=[10, 20], axis_roll=[-2, -1]), - dict(input_shape=[4, 20, 30, 3], shift_roll=[2, 10], axis_roll=[0, 1]), - dict(input_shape=[1, 50, 50, 30, 2], shift_roll=[10, 20, 4], axis_roll=[-3, -2, -1]), - dict(input_shape=[4, 20, 30, 10, 3], shift_roll=[2, 10], axis_roll=[1, 2]), + [[1, 50, 2], [10, 1], [-2, -1]], + [[4, 20, 3], [2, 10], [0, 1]], + [[1, 50, 50, 2], [10, 20], [-2, -1]], + [[4, 20, 30, 3], [2, 10], [0, 1]], + [[1, 50, 50, 30, 2], [10, 20, 4], [-3, -2, -1]], + [[4, 20, 30, 10, 3], [2, 10], [1, 2]], ] @pytest.mark.parametrize("fft_op", [ - tf.raw_ops.FFT, tf.raw_ops.FFT2D, tf.raw_ops.FFT3D, - tf.raw_ops.IFFT, tf.raw_ops.IFFT2D, tf.raw_ops.IFFT3D + "tf.raw_ops.FFT", "tf.raw_ops.FFT2D", "tf.raw_ops.FFT3D", + "tf.raw_ops.IFFT", "tf.raw_ops.IFFT2D", "tf.raw_ops.IFFT3D" ]) - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape, shift_roll, axis_roll", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Linux' and platform.machine() in ['arm', 'armv7l', 'aarch64', 'arm64', 'ARM64'], reason='Ticket - 126314') - def test_complex_fft_basic(self, params, fft_op, + def test_complex_fft_basic(self, input_shape, shift_roll, axis_roll, fft_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, shift_roll=shift_roll, axis_roll=axis_roll) self._test( - *self.create_complex_fft_net(**params, fft_op=fft_op), + *self.create_complex_fft_net(**params, fft_op=OPS[fft_op]), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, custom_eps=1e-2) @@ -95,20 +111,19 @@ def create_complex_abs_net(self, input_shape): return tf_net, None test_data_basic = [ - dict(input_shape=[]), - dict(input_shape=[2]), - dict(input_shape=[1, 3]), - dict(input_shape=[2, 3, 4]), - dict(input_shape=[3, 4, 5, 6]), + [], + [2], + [1, 3], + [2, 3, 4], + [3, 4, 5, 6], ] - - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_complex_abs_basic(self, params, ie_device, precision, ir_version, temp_dir, + def test_complex_abs_basic(self, input_shape, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test( - *self.create_complex_abs_net(**params), + *self.create_complex_abs_net(input_shape), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) @@ -138,18 +153,19 @@ def create_complex_rfft_net(self, input_shape, fft_length, rfft_op): return tf_net, None test_data_basic = [ - dict(input_shape=[1, 3, 20], fft_length=[10], rfft_op=tf.raw_ops.RFFT), - dict(input_shape=[1, 3, 20], fft_length=[20], rfft_op=tf.raw_ops.RFFT), - dict(input_shape=[1, 3, 20, 40], fft_length=[20, 10], rfft_op=tf.raw_ops.RFFT2D), - dict(input_shape=[1, 3, 20, 40], fft_length=[10, 40], rfft_op=tf.raw_ops.RFFT2D), - dict(input_shape=[1, 2, 10, 20, 5], fft_length=[2, 5, 3], rfft_op=tf.raw_ops.RFFT3D), + [[1, 3, 20], [10], 'tf.raw_ops.RFFT'], + [[1, 3, 20], [20], 'tf.raw_ops.RFFT'], + [[1, 3, 20, 40], [20, 10], 'tf.raw_ops.RFFT2D'], + [[1, 3, 20, 40], [10, 40], 
'tf.raw_ops.RFFT2D'], + [[1, 2, 10, 20, 5], [2, 5, 3], 'tf.raw_ops.RFFT3D'] ] - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape, fft_length, rfft_op", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_complex_rfft_basic(self, params, ie_device, precision, ir_version, temp_dir, + def test_complex_rfft_basic(self, input_shape, fft_length, rfft_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, fft_length=fft_length, rfft_op=OPS[rfft_op]) self._test( *self.create_complex_rfft_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, @@ -183,19 +199,19 @@ def create_complex_irfft_net(self, input_shape, fft_length, irfft_op): return tf_net, None test_data_basic = [ - dict(input_shape=[1, 3, 20], fft_length=[10], irfft_op=tf.raw_ops.IRFFT), - dict(input_shape=[1, 3, 20], fft_length=[20], irfft_op=tf.raw_ops.IRFFT), - dict(input_shape=[1, 3, 20, 40], fft_length=[20, 10], irfft_op=tf.raw_ops.IRFFT2D), - dict(input_shape=[1, 3, 20, 40], fft_length=[10, 40], irfft_op=tf.raw_ops.IRFFT2D), - pytest.param(dict(input_shape=[1, 10, 20, 30, 5], fft_length=[2, 3, 4], irfft_op=tf.raw_ops.IRFFT3D), - marks=pytest.mark.xfail(reason="accuracy-issue-124452")) + [[1, 3, 20], [10], 'tf.raw_ops.IRFFT'], + [[1, 3, 20], [20], 'tf.raw_ops.IRFFT'], + [[1, 3, 20, 40], [20, 10], 'tf.raw_ops.IRFFT2D'], + [[1, 3, 20, 40], [10, 40], 'tf.raw_ops.IRFFT2D'], + pytest.param([1, 10, 20, 30, 5], [2, 3, 4], 'tf.raw_ops.IRFFT3D', + marks=pytest.mark.xfail(reason="accuracy-issue-124452")) ] - - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape, fft_length, irfft_op", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_complex_irfft_basic(self, params, ie_device, precision, ir_version, temp_dir, + def test_complex_irfft_basic(self, input_shape, fft_length, irfft_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, fft_length=fft_length, irfft_op=OPS[irfft_op]) self._test( *self.create_complex_irfft_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py index 4937f34f292312..cd5129d7383b0f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py @@ -9,6 +9,12 @@ from common.tf_layer_test_class import CommonTFLayerTest +OPS = { + 'tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel': tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel, + 'tf.raw_ops.FakeQuantWithMinMaxVars': tf.raw_ops.FakeQuantWithMinMaxVars, + 'tf.raw_ops.FakeQuantWithMinMaxArgs': tf.raw_ops.FakeQuantWithMinMaxArgs +} + class TestFakeQuantWithMinMaxVars(CommonTFLayerTest): def _prepare_input(self, inputs_info): # generate elements so that the input tensor may contain repeating elements @@ -32,38 +38,36 @@ def create_fake_quant_with_min_max_vars_net(self, inputs_shape, min_value, max_v test_basic = [ # test FakeQuantWithMinMaxVars - dict(inputs_shape=[2, 6, 4], min_value=-3, max_value=4, num_bits=None, narrow_range=None), - dict(inputs_shape=[3, 2, 1, 5], min_value=-4, max_value=5, num_bits=14, narrow_range=True), - dict(inputs_shape=[3, 2, 4], min_value=2, max_value=4, num_bits=10, narrow_range=False), - dict(inputs_shape=[1, 2, 3], min_value=-6, 
max_value=-3, num_bits=8, narrow_range=True), + [[2, 6, 4], -3, 4, None, None], + [[3, 2, 1, 5], -4, 5, 14, True], + [[3, 2, 4], 2, 4, 10, False], + [[1, 2, 3], -6, -3, 8, True], ] - @pytest.mark.parametrize("params", test_basic) + @pytest.mark.parametrize("inputs_shape, min_value, max_value, num_bits, narrow_range", test_basic) @pytest.mark.parametrize("fake_quant_op", [ - tf.raw_ops.FakeQuantWithMinMaxVars, tf.raw_ops.FakeQuantWithMinMaxArgs + 'tf.raw_ops.FakeQuantWithMinMaxVars', 'tf.raw_ops.FakeQuantWithMinMaxArgs' ]) @pytest.mark.precommit_tf_fe @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') - def test_fake_quant_with_min_max_vars_basic(self, params, fake_quant_op, ie_device, precision, ir_version, temp_dir, - use_new_frontend): - self._test(*self.create_fake_quant_with_min_max_vars_net(**params, fake_quant_op=fake_quant_op), + def test_fake_quant_with_min_max_vars_basic(self, inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(inputs_shape=inputs_shape, min_value=min_value, max_value=max_value, num_bits=num_bits, narrow_range=narrow_range) + self._test(*self.create_fake_quant_with_min_max_vars_net(**params, fake_quant_op=OPS[fake_quant_op]), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_per_channel_basic = [ - dict(inputs_shape=[2, 6, 4], min_value=[-4, -3, -5, -8], max_value=[4, 7, 9, 5], num_bits=None, - narrow_range=None, - fake_quant_op=tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel), + [[2, 6, 4], [-4, -3, -5, -8], [4, 7, 9, 5], None, None, 'tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel'], ] - @pytest.mark.parametrize("params", test_per_channel_basic) + @pytest.mark.parametrize("inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op", test_per_channel_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly @pytest.mark.xfail("104822") - def test_fake_quant_with_min_max_vars_per_channel_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend): + def test_fake_quant_with_min_max_vars_per_channel_basic(self, inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(inputs_shape=inputs_shape, min_value=min_value, max_value=max_value, num_bits=num_bits, narrow_range=narrow_range, fake_quant_op=OPS[fake_quant_op]) self._test(*self.create_fake_quant_with_min_max_vars_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py index 7721e31631af8e..382879490e83e0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py @@ -6,6 +6,13 @@ from common.tf_layer_test_class import CommonTFLayerTest +OPS = { + 'tf.raw_ops.Identity': tf.raw_ops.Identity, + 'tf.raw_ops.PreventGradient': tf.raw_ops.PreventGradient, + 'tf.raw_ops.Snapshot': tf.raw_ops.Snapshot, + 'tf.raw_ops.StopGradient': tf.raw_ops.StopGradient, +} + class TestIdentity(CommonTFLayerTest): def create_identity_net(self, input_shape, identity_op): tf.compat.v1.reset_default_graph() @@ -22,17 +29,18 @@ def create_identity_net(self, input_shape, identity_op): return tf_net, None test_data_basic = [ - dict(input_shape=[2],
identity_op=tf.raw_ops.Identity), - dict(input_shape=[2, 3], identity_op=tf.raw_ops.PreventGradient), - dict(input_shape=[], identity_op=tf.raw_ops.Snapshot), - dict(input_shape=[1, 2, 3], identity_op=tf.raw_ops.StopGradient) + [[2], 'tf.raw_ops.Identity'], + [[2, 3], 'tf.raw_ops.PreventGradient'], + [[], 'tf.raw_ops.Snapshot'], + [[1, 2, 3], 'tf.raw_ops.StopGradient'] ] - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("input_shape, identity_op", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_identity_basic(self, params, ie_device, precision, ir_version, temp_dir, + def test_identity_basic(self, input_shape, identity_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(input_shape=input_shape, identity_op=OPS[identity_op]) self._test(*self.create_identity_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py index 8146226129db62..ac1e43651b7c95 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py @@ -9,6 +9,11 @@ from common.tf_layer_test_class import CommonTFLayerTest +OPS = { + 'tf.raw_ops.ResizeBilinear': tf.raw_ops.ResizeBilinear, + 'tf.raw_ops.ResizeNearestNeighbor': tf.raw_ops.ResizeNearestNeighbor, +} + class TestResize(CommonTFLayerTest): def _prepare_input(self, inputs_info): assert 'images' in inputs_info, "Test error: inputs_info must contain `x`" @@ -34,38 +39,27 @@ def create_resize_net(self, images_shape, images_type, size_value, align_corners test_data_basic = [ # ResizeBilinear testing - dict(images_shape=[1, 30, 30, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear), - dict(images_shape=[1, 30, 30, 3], images_type=tf.float64, size_value=[40, 40], align_corners=False, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear), - dict(images_shape=[2, 100, 100, 3], images_type=tf.float32, size_value=[40, 40], align_corners=True, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear), - dict(images_shape=[2, 10, 10, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False, - half_pixel_centers=True, resize_op=tf.raw_ops.ResizeBilinear), - dict(images_shape=[2, 40, 40, 3], images_type=tf.uint8, size_value=[10, 10], align_corners=False, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear), - dict(images_shape=[1, 40, 40, 3], images_type=tf.int32, size_value=[10, 10], align_corners=False, - half_pixel_centers=True, resize_op=tf.raw_ops.ResizeBilinear), + [[1, 30, 30, 3], tf.float32, [40, 40], False, False, 'tf.raw_ops.ResizeBilinear'], + [[1, 30, 30, 3], tf.float64, [40, 40], False, False, 'tf.raw_ops.ResizeBilinear'], + [[2, 100, 100, 3], tf.float32, [40, 40], True, False, 'tf.raw_ops.ResizeBilinear'], + [[2, 10, 10, 3], tf.float32, [40, 40], False, True, 'tf.raw_ops.ResizeBilinear'], + [[2, 40, 40, 3], tf.uint8, [10, 10], False, False, 'tf.raw_ops.ResizeBilinear'], + [[1, 40, 40, 3], tf.int32, [10, 10], False, True, 'tf.raw_ops.ResizeBilinear'], # ResizeNearestNeighbor testing - dict(images_shape=[1, 30, 30, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor), - dict(images_shape=[2, 100, 100, 3], images_type=tf.float32, size_value=[40, 
40], align_corners=True, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor), - dict(images_shape=[2, 10, 10, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False, - half_pixel_centers=True, resize_op=tf.raw_ops.ResizeNearestNeighbor), - dict(images_shape=[2, 40, 40, 3], images_type=tf.uint8, size_value=[10, 10], align_corners=False, - half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor), - dict(images_shape=[1, 40, 40, 3], images_type=tf.int32, size_value=[10, 10], align_corners=False, - half_pixel_centers=True, resize_op=tf.raw_ops.ResizeNearestNeighbor), + [[1, 30, 30, 3], tf.float32, [40, 40], False, False, 'tf.raw_ops.ResizeNearestNeighbor'], + [[2, 100, 100, 3], tf.float32, [40, 40], True, False, 'tf.raw_ops.ResizeNearestNeighbor'], + [[2, 10, 10, 3], tf.float32, [40, 40], False, True, 'tf.raw_ops.ResizeNearestNeighbor'], + [[2, 40, 40, 3], tf.uint8, [10, 10], False, False, 'tf.raw_ops.ResizeNearestNeighbor'], + [[1, 40, 40, 3], tf.int32, [10, 10], False, True, 'tf.raw_ops.ResizeNearestNeighbor'], ] - @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.parametrize("images_shape, images_type, size_value, align_corners, half_pixel_centers, resize_op", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') - def test_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend): + def test_resize_basic(self, images_shape, images_type, size_value, align_corners, half_pixel_centers, resize_op, ie_device, precision, ir_version, temp_dir, use_new_frontend): + params = dict(images_shape=images_shape, images_type=images_type, size_value=size_value, align_corners=align_corners, half_pixel_centers=half_pixel_centers, resize_op=OPS[resize_op]) self._test(*self.create_resize_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) From 6d4b2fe8300b0ad0592f722bcc51363fa2f74329 Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Thu, 18 Jan 2024 19:47:49 +0100 Subject: [PATCH 074/122] [OV JS] Add ov_add_clang_format_target (#22220) * Add ov_add_clang_format_target() * Run make clang_format_fix_all * Update includes --- src/bindings/js/node/CMakeLists.txt | 2 ++ .../include/preprocess/preprocess_steps.hpp | 2 ++ src/bindings/js/node/src/element_type.cpp | 30 +++++++++---------- src/bindings/js/node/src/helper.cpp | 4 +-- .../js/node/src/preprocess/preprocess.cpp | 30 +++++++++---------- .../node/src/preprocess/preprocess_steps.cpp | 3 -- 6 files changed, 36 insertions(+), 35 deletions(-) diff --git a/src/bindings/js/node/CMakeLists.txt b/src/bindings/js/node/CMakeLists.txt index fffceb56799a96..1bf6eca9ee8501 100644 --- a/src/bindings/js/node/CMakeLists.txt +++ b/src/bindings/js/node/CMakeLists.txt @@ -85,6 +85,8 @@ set_target_properties(${PROJECT_NAME} PROPERTIES ov_set_install_rpath(${PROJECT_NAME} ${OV_CPACK_RUNTIMEDIR} ${OV_CPACK_RUNTIMEDIR}) +ov_add_clang_format_target(${PROJECT_NAME}_clang FOR_TARGETS ${PROJECT_NAME}) + install(TARGETS ${PROJECT_NAME} LIBRARY DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL} RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL} diff --git a/src/bindings/js/node/include/preprocess/preprocess_steps.hpp b/src/bindings/js/node/include/preprocess/preprocess_steps.hpp index 8bc51cf2fccbed..e44885962549a7 
100644 --- a/src/bindings/js/node/include/preprocess/preprocess_steps.hpp +++ b/src/bindings/js/node/include/preprocess/preprocess_steps.hpp @@ -5,6 +5,8 @@ #include <napi.h> +#include "errors.hpp" +#include "helper.hpp" #include "openvino/core/preprocess/preprocess_steps.hpp" diff --git a/src/bindings/js/node/src/element_type.cpp b/src/bindings/js/node/src/element_type.cpp index b474daa908c706..54458dc028732f 100644 --- a/src/bindings/js/node/src/element_type.cpp +++ b/src/bindings/js/node/src/element_type.cpp @@ -7,23 +7,23 @@ #include namespace element { - Napi::Object init(Napi::Env env, Napi::Object exports) { - auto element = Napi::PropertyDescriptor::Accessor<add_element_namespace>("element"); +Napi::Object init(Napi::Env env, Napi::Object exports) { + auto element = Napi::PropertyDescriptor::Accessor<add_element_namespace>("element"); - exports.DefineProperty(element); + exports.DefineProperty(element); - return exports; - } + return exports; +} - Napi::Value add_element_namespace(const Napi::CallbackInfo& info) { - auto element = Napi::Object::New(info.Env()); - std::vector<Napi::PropertyDescriptor> pds; +Napi::Value add_element_namespace(const Napi::CallbackInfo& info) { + auto element = Napi::Object::New(info.Env()); + std::vector<Napi::PropertyDescriptor> pds; - for (const auto& et : get_supported_types()) - pds.push_back(Napi::PropertyDescriptor::Value(et, Napi::String::New(info.Env(), et), napi_default)); + for (const auto& et : get_supported_types()) + pds.push_back(Napi::PropertyDescriptor::Value(et, Napi::String::New(info.Env(), et), napi_default)); - element.DefineProperties(pds); - - return element; - } -}; + element.DefineProperties(pds); + + return element; +} +}; // namespace element diff --git a/src/bindings/js/node/src/helper.cpp b/src/bindings/js/node/src/helper.cpp index 1916b78a3df78e..3484c3b1b8b91b 100644 --- a/src/bindings/js/node/src/helper.cpp +++ b/src/bindings/js/node/src/helper.cpp @@ -252,12 +252,12 @@ template <> Napi::Array cpp_to_js(const Napi::CallbackInfo& info, const ov::Dimension dim) { Napi::Array interval = Napi::Array::New(info.Env(), 2); - // Indexes looks wierd, but clear assignment, + // Indexes look weird, but clear assignment, // like: interval[0] = value doesn't work here size_t indexes[] = {0, 1}; interval[indexes[0]] = dim.get_min_length(); interval[indexes[1]] = dim.get_max_length(); - + return interval; } diff --git a/src/bindings/js/node/src/preprocess/preprocess.cpp b/src/bindings/js/node/src/preprocess/preprocess.cpp index ef085ea6b0058c..983f6e706f34f6 100644 --- a/src/bindings/js/node/src/preprocess/preprocess.cpp +++ b/src/bindings/js/node/src/preprocess/preprocess.cpp @@ -6,23 +6,23 @@ #include "addon.hpp" namespace preprocess { - Napi::Object init(Napi::Env env, Napi::Object exports) { - auto preprocess = Napi::PropertyDescriptor::Accessor<add_preprocess_namespace>("preprocess"); +Napi::Object init(Napi::Env env, Napi::Object exports) { + auto preprocess = Napi::PropertyDescriptor::Accessor<add_preprocess_namespace>("preprocess"); - exports.DefineProperty(preprocess); + exports.DefineProperty(preprocess); - return exports; - } + return exports; +} - Napi::Value add_preprocess_namespace(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - auto preprocess = Napi::Object::New(env); - auto resizeAlgorithm = Napi::PropertyDescriptor::Accessor("resizeAlgorithm"); +Napi::Value add_preprocess_namespace(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + auto preprocess = Napi::Object::New(env); + auto resizeAlgorithm = Napi::PropertyDescriptor::Accessor("resizeAlgorithm"); - const auto data = env.GetInstanceData<AddonData>(); - init_class(env, preprocess, 
"PrePostProcessor", &PrePostProcessorWrap::get_class, data->ppp); - preprocess.DefineProperty(resizeAlgorithm); + const auto data = env.GetInstanceData(); + init_class(env, preprocess, "PrePostProcessor", &PrePostProcessorWrap::get_class, data->ppp); + preprocess.DefineProperty(resizeAlgorithm); - return preprocess; - } -}; + return preprocess; +} +}; // namespace preprocess diff --git a/src/bindings/js/node/src/preprocess/preprocess_steps.cpp b/src/bindings/js/node/src/preprocess/preprocess_steps.cpp index 0798e65453c3e4..29ccd067d209f2 100644 --- a/src/bindings/js/node/src/preprocess/preprocess_steps.cpp +++ b/src/bindings/js/node/src/preprocess/preprocess_steps.cpp @@ -1,9 +1,6 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 - -#include "errors.hpp" -#include "helper.hpp" #include "preprocess/preprocess_steps.hpp" PreProcessSteps::PreProcessSteps(const Napi::CallbackInfo& info) : Napi::ObjectWrap(info){}; From 103fb811e9f241f3d45f59b8b06995055fbc258b Mon Sep 17 00:00:00 2001 From: Karan Jakhar Date: Fri, 19 Jan 2024 00:25:25 +0530 Subject: [PATCH 075/122] [PT FE] Add aten::gcd (#21740) * Add gcd and test * constant with static * exec_condition set to true * boolean scalar, output condition, zero shape [] * removing unused headers * more tests added * code format * Update src/frontends/pytorch/src/op/gcd.cpp Co-authored-by: Maxim Vafin * Update src/frontends/pytorch/src/op/gcd.cpp Co-authored-by: Maxim Vafin * Update src/frontends/pytorch/src/op/gcd.cpp Co-authored-by: Maxim Vafin * Update src/frontends/pytorch/src/op/gcd.cpp Co-authored-by: Maxim Vafin * ngraph header removed * int64 * Add int64 support * Update tests/layer_tests/pytorch_tests/test_gcd.py Co-authored-by: Maxim Vafin --------- Co-authored-by: Maxim Vafin --- src/frontends/pytorch/src/op/gcd.cpp | 68 +++++++++++ src/frontends/pytorch/src/op_table.cpp | 2 + tests/layer_tests/pytorch_tests/test_gcd.py | 124 ++++++++++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 src/frontends/pytorch/src/op/gcd.cpp create mode 100644 tests/layer_tests/pytorch_tests/test_gcd.py diff --git a/src/frontends/pytorch/src/op/gcd.cpp b/src/frontends/pytorch/src/op/gcd.cpp new file mode 100644 index 00000000000000..70301185afcb7a --- /dev/null +++ b/src/frontends/pytorch/src/op/gcd.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/loop.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reduce_logical_or.hpp" +#include "openvino/op/select.hpp" +#include "openvino/openvino.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_gcd(const NodeContext& context) { + num_inputs_check(context, 2, 2); + auto x = context.get_input(0); + auto y = context.get_input(1); + align_eltwise_input_types(context, x, y, true); + auto zero_i32 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); + + auto trip_count = std::make_shared(element::i32, Shape{}, 1000); + auto exec_condition = std::make_shared(element::boolean, Shape{}, true); + + auto loop = std::make_shared(trip_count, exec_condition); + + auto x_input = std::make_shared(x.get_element_type(), x.get_partial_shape()); + auto y_input = 
std::make_shared<v0::Parameter>(y.get_element_type(), y.get_partial_shape()); + + x_input->set_element_type(x.get_element_type()); + y_input->set_element_type(y.get_element_type()); + auto zero = std::make_shared<v1::ConvertLike>(zero_i32, x_input); + auto condition = std::make_shared<v1::NotEqual>(y_input, zero); + auto mod = std::make_shared<v1::Mod>(x_input, y_input); + auto new_x = std::make_shared<v1::Select>(condition, y_input, x_input); + auto new_y = std::make_shared<v1::Select>(condition, mod, zero); + + auto reduced_condition = std::make_shared<v1::ReduceLogicalOr>(condition, zero); + + auto body = + std::make_shared<Model>(OutputVector{new_x, new_y, reduced_condition}, ParameterVector{x_input, y_input}); + loop->set_function(body); + + loop->set_special_body_ports({-1, 2}); + + loop->set_merged_input(x_input, x, new_x); + loop->set_merged_input(y_input, y, new_y); + + auto gcd_output = loop->get_iter_value(new_x, -1); + auto gcd_node = gcd_output.get_node_shared_ptr(); + + auto marked_gcd_node = context.mark_node(gcd_node); + return {marked_gcd_node}; +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index eb0338f52bbd60..82674ea59dbf71 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -84,6 +84,7 @@ OP_CONVERTER(translate_frobenius_norm); OP_CONVERTER(translate_full); OP_CONVERTER(translate_full_like); OP_CONVERTER(translate_gather); +OP_CONVERTER(translate_gcd); OP_CONVERTER(translate_gelu); OP_CONVERTER(translate_get_attr); OP_CONVERTER(translate_getitem); @@ -386,6 +387,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops_ts() { {"aten::full", op::translate_full}, {"aten::full_like", op::translate_full_like}, {"aten::gather", op::translate_gather}, + {"aten::gcd", op::translate_gcd}, {"aten::ge", op::translate_1to1_match_2_inputs_align_types}, {"aten::gelu", op::translate_gelu}, {"aten::glu", op::translate_glu}, diff --git a/tests/layer_tests/pytorch_tests/test_gcd.py b/tests/layer_tests/pytorch_tests/test_gcd.py new file mode 100644 index 00000000000000..a1b816b853ead4 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_gcd.py @@ -0,0 +1,124 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestGcd(PytorchLayerTest): + def _prepare_input(self): + return self.input_data + + def create_model_tensor_input(self): + class aten_gcd_tensor(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward(self, tensor_a, tensor_b): + return torch.gcd(tensor_a, tensor_b) + + ref_net = None + + return aten_gcd_tensor(), ref_net, "aten::gcd" + + def create_model_int_input(self): + class aten_gcd_int(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward(self, int_a: int, int_b: int): + return torch.tensor(torch.gcd(int_a, int_b)) + + ref_net = None + + return aten_gcd_int(), ref_net, "aten::gcd" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int32), np.array(17, dtype=np.int32)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int32), + np.array([121, 
2, 16, 0, 1, 8], dtype=np.int32), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int64(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int64), np.array(17, dtype=np.int64)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor64(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int64), + np.array([121, 2, 16, 0, 1, 8], dtype=np.int64), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int_diff_dtypes(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int64), np.array(17, dtype=np.int32)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor_diff_dtypes(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int64), + np.array([121, 2, 16, 0, 1, 8], dtype=np.int32), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) From 9ca5b749dd852466e50b9412fbc10076bbec3ed3 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 18 Jan 2024 21:15:38 +0100 Subject: [PATCH 076/122] [core] Remove ngraph/op, ngraph/opsets API (#22124) * Remove ngraph/op, ngraph/opsets API -except opsets3 and ops headers used by VPU openvino_contrib * Remove commented leftovers * Fix GPU test building * Correct opset include * Fix includes * Use operator instead opset --- src/core/include/ngraph/function.hpp | 6 +- src/core/include/ngraph/node.hpp | 5 +- src/core/include/ngraph/op/abs.hpp | 29 --- src/core/include/ngraph/op/acos.hpp | 29 --- src/core/include/ngraph/op/acosh.hpp | 29 --- .../include/ngraph/op/adaptive_avg_pool.hpp | 27 --- .../include/ngraph/op/adaptive_max_pool.hpp | 27 --- src/core/include/ngraph/op/add.hpp | 28 --- src/core/include/ngraph/op/and.hpp | 28 --- src/core/include/ngraph/op/asin.hpp | 29 --- src/core/include/ngraph/op/asinh.hpp | 29 --- src/core/include/ngraph/op/assign.hpp | 33 ---- src/core/include/ngraph/op/atan.hpp | 29 --- src/core/include/ngraph/op/atanh.hpp | 29 --- src/core/include/ngraph/op/avg_pool.hpp | 29 --- src/core/include/ngraph/op/batch_norm.hpp | 32 ---- src/core/include/ngraph/op/batch_to_space.hpp | 27 --- .../include/ngraph/op/binary_convolution.hpp | 28 --- src/core/include/ngraph/op/broadcast.hpp | 33 ---- src/core/include/ngraph/op/bucketize.hpp | 27 --- src/core/include/ngraph/op/ceiling.hpp | 27 --- src/core/include/ngraph/op/clamp.hpp | 28 --- src/core/include/ngraph/op/concat.hpp | 1 - src/core/include/ngraph/op/constant.hpp | 36 ---- src/core/include/ngraph/op/convert.hpp | 27 --- src/core/include/ngraph/op/convert_like.hpp | 26 --- src/core/include/ngraph/op/convolution.hpp | 29 --- src/core/include/ngraph/op/cos.hpp | 27 --- src/core/include/ngraph/op/cosh.hpp | 27 --- .../include/ngraph/op/ctc_greedy_decoder.hpp | 27 --- .../ngraph/op/ctc_greedy_decoder_seq_len.hpp | 26 --- 
src/core/include/ngraph/op/ctc_loss.hpp | 26 --- src/core/include/ngraph/op/cum_sum.hpp | 28 --- .../ngraph/op/deformable_convolution.hpp | 33 ---- .../ngraph/op/deformable_psroi_pooling.hpp | 26 --- src/core/include/ngraph/op/depth_to_space.hpp | 28 --- .../include/ngraph/op/detection_output.hpp | 33 ---- src/core/include/ngraph/op/dft.hpp | 44 ----- src/core/include/ngraph/op/divide.hpp | 1 - src/core/include/ngraph/op/einsum.hpp | 27 --- src/core/include/ngraph/op/elu.hpp | 28 --- .../ngraph/op/embedding_segments_sum.hpp | 28 --- .../ngraph/op/embeddingbag_offsets_sum.hpp | 29 --- .../ngraph/op/embeddingbag_packedsum.hpp | 29 --- src/core/include/ngraph/op/equal.hpp | 26 --- src/core/include/ngraph/op/erf.hpp | 27 --- src/core/include/ngraph/op/exp.hpp | 27 --- ...xperimental_detectron_detection_output.hpp | 31 ---- ...erimental_detectron_generate_proposals.hpp | 31 ---- ...imental_detectron_prior_grid_generator.hpp | 31 ---- .../op/experimental_detectron_roi_feature.hpp | 32 ---- .../op/experimental_detectron_topkrois.hpp | 31 ---- .../include/ngraph/op/extractimagepatches.hpp | 27 --- src/core/include/ngraph/op/eye.hpp | 28 --- src/core/include/ngraph/op/fake_quantize.hpp | 29 --- src/core/include/ngraph/op/floor.hpp | 27 --- src/core/include/ngraph/op/floor_mod.hpp | 29 --- src/core/include/ngraph/op/gather.hpp | 32 ---- .../include/ngraph/op/gather_elements.hpp | 26 --- src/core/include/ngraph/op/gather_nd.hpp | 29 --- src/core/include/ngraph/op/gather_tree.hpp | 26 --- src/core/include/ngraph/op/gelu.hpp | 35 ---- .../include/ngraph/op/generate_proposals.hpp | 25 --- src/core/include/ngraph/op/greater.hpp | 26 --- src/core/include/ngraph/op/greater_eq.hpp | 26 --- src/core/include/ngraph/op/grid_sample.hpp | 26 --- src/core/include/ngraph/op/grn.hpp | 29 --- src/core/include/ngraph/op/group_conv.hpp | 29 --- src/core/include/ngraph/op/gru_cell.hpp | 34 ---- src/core/include/ngraph/op/gru_sequence.hpp | 31 ---- src/core/include/ngraph/op/hard_sigmoid.hpp | 28 --- src/core/include/ngraph/op/hsigmoid.hpp | 28 --- src/core/include/ngraph/op/hswish.hpp | 28 --- src/core/include/ngraph/op/i420_to_bgr.hpp | 25 --- src/core/include/ngraph/op/i420_to_rgb.hpp | 25 --- src/core/include/ngraph/op/idft.hpp | 32 ---- src/core/include/ngraph/op/if.hpp | 30 --- src/core/include/ngraph/op/interpolate.hpp | 40 ---- src/core/include/ngraph/op/irdft.hpp | 25 --- src/core/include/ngraph/op/is_finite.hpp | 26 --- src/core/include/ngraph/op/is_inf.hpp | 26 --- src/core/include/ngraph/op/is_nan.hpp | 26 --- src/core/include/ngraph/op/less.hpp | 26 --- src/core/include/ngraph/op/less_eq.hpp | 26 --- src/core/include/ngraph/op/log.hpp | 27 --- src/core/include/ngraph/op/log_softmax.hpp | 26 --- src/core/include/ngraph/op/loop.hpp | 33 ---- src/core/include/ngraph/op/lrn.hpp | 27 --- src/core/include/ngraph/op/lstm_cell.hpp | 40 ---- src/core/include/ngraph/op/lstm_sequence.hpp | 40 ---- src/core/include/ngraph/op/matmul.hpp | 28 --- src/core/include/ngraph/op/matrix_nms.hpp | 28 --- src/core/include/ngraph/op/max.hpp | 27 --- src/core/include/ngraph/op/max_pool.hpp | 32 ---- src/core/include/ngraph/op/maximum.hpp | 26 --- src/core/include/ngraph/op/min.hpp | 27 --- src/core/include/ngraph/op/minimum.hpp | 26 --- src/core/include/ngraph/op/mish.hpp | 27 --- src/core/include/ngraph/op/mod.hpp | 26 --- src/core/include/ngraph/op/multiclass_nms.hpp | 29 --- src/core/include/ngraph/op/multiply.hpp | 26 --- src/core/include/ngraph/op/mvn.hpp | 34 ---- src/core/include/ngraph/op/negative.hpp | 29 --- 
.../include/ngraph/op/non_max_suppression.hpp | 43 ----- src/core/include/ngraph/op/non_zero.hpp | 27 --- src/core/include/ngraph/op/normalize_l2.hpp | 30 --- src/core/include/ngraph/op/not.hpp | 26 --- src/core/include/ngraph/op/not_equal.hpp | 26 --- src/core/include/ngraph/op/nv12_to_bgr.hpp | 25 --- src/core/include/ngraph/op/nv12_to_rgb.hpp | 25 --- src/core/include/ngraph/op/one_hot.hpp | 26 --- src/core/include/ngraph/op/op.hpp | 26 --- src/core/include/ngraph/op/or.hpp | 28 --- src/core/include/ngraph/op/pad.hpp | 31 ---- src/core/include/ngraph/op/parameter.hpp | 1 - src/core/include/ngraph/op/power.hpp | 26 --- src/core/include/ngraph/op/prelu.hpp | 28 --- src/core/include/ngraph/op/prior_box.hpp | 31 ---- .../include/ngraph/op/prior_box_clustered.hpp | 29 --- src/core/include/ngraph/op/proposal.hpp | 33 ---- src/core/include/ngraph/op/psroi_pooling.hpp | 27 --- src/core/include/ngraph/op/random_uniform.hpp | 27 --- src/core/include/ngraph/op/range.hpp | 31 ---- src/core/include/ngraph/op/rdft.hpp | 25 --- src/core/include/ngraph/op/read_value.hpp | 34 ---- src/core/include/ngraph/op/reduce_l1.hpp | 26 --- src/core/include/ngraph/op/reduce_l2.hpp | 26 --- .../include/ngraph/op/reduce_logical_and.hpp | 26 --- .../include/ngraph/op/reduce_logical_or.hpp | 26 --- src/core/include/ngraph/op/reduce_mean.hpp | 27 --- src/core/include/ngraph/op/reduce_prod.hpp | 26 --- src/core/include/ngraph/op/reduce_sum.hpp | 27 --- src/core/include/ngraph/op/region_yolo.hpp | 27 --- src/core/include/ngraph/op/relu.hpp | 32 ---- src/core/include/ngraph/op/reorg_yolo.hpp | 27 --- src/core/include/ngraph/op/reshape.hpp | 28 --- src/core/include/ngraph/op/result.hpp | 30 --- src/core/include/ngraph/op/reverse.hpp | 26 --- .../include/ngraph/op/reverse_sequence.hpp | 27 --- src/core/include/ngraph/op/rnn_cell.hpp | 34 ---- src/core/include/ngraph/op/rnn_sequence.hpp | 31 ---- src/core/include/ngraph/op/roi_align.hpp | 30 --- src/core/include/ngraph/op/roi_pooling.hpp | 27 --- src/core/include/ngraph/op/roll.hpp | 27 --- src/core/include/ngraph/op/round.hpp | 28 --- .../ngraph/op/scatter_elements_update.hpp | 30 --- .../include/ngraph/op/scatter_nd_update.hpp | 28 --- src/core/include/ngraph/op/scatter_update.hpp | 27 --- src/core/include/ngraph/op/select.hpp | 26 --- src/core/include/ngraph/op/selu.hpp | 28 --- src/core/include/ngraph/op/shape_of.hpp | 31 ---- .../include/ngraph/op/shuffle_channels.hpp | 30 --- src/core/include/ngraph/op/sigmoid.hpp | 30 --- src/core/include/ngraph/op/sign.hpp | 27 --- src/core/include/ngraph/op/sin.hpp | 27 --- src/core/include/ngraph/op/sinh.hpp | 27 --- src/core/include/ngraph/op/sink.hpp | 27 --- src/core/include/ngraph/op/slice.hpp | 26 --- src/core/include/ngraph/op/softmax.hpp | 30 --- src/core/include/ngraph/op/softplus.hpp | 27 --- src/core/include/ngraph/op/softsign.hpp | 25 --- src/core/include/ngraph/op/space_to_batch.hpp | 27 --- src/core/include/ngraph/op/space_to_depth.hpp | 27 --- src/core/include/ngraph/op/split.hpp | 1 - src/core/include/ngraph/op/sqrt.hpp | 27 --- .../include/ngraph/op/squared_difference.hpp | 27 --- src/core/include/ngraph/op/squeeze.hpp | 31 ---- src/core/include/ngraph/op/strided_slice.hpp | 31 ---- src/core/include/ngraph/op/subtract.hpp | 26 --- src/core/include/ngraph/op/swish.hpp | 27 --- src/core/include/ngraph/op/tan.hpp | 27 --- src/core/include/ngraph/op/tanh.hpp | 27 --- .../include/ngraph/op/tensor_iterator.hpp | 31 ---- src/core/include/ngraph/op/tile.hpp | 26 --- src/core/include/ngraph/op/topk.hpp | 38 ---- 
src/core/include/ngraph/op/transpose.hpp | 29 --- src/core/include/ngraph/op/unique.hpp | 26 --- src/core/include/ngraph/op/unsqueeze.hpp | 31 ---- .../ngraph/op/util/activation_functions.hpp | 43 ----- .../ngraph/op/util/arithmetic_reduction.hpp | 27 --- .../util/arithmetic_reductions_keep_dims.hpp | 27 --- .../op/util/binary_elementwise_arithmetic.hpp | 27 --- .../op/util/binary_elementwise_comparison.hpp | 27 --- .../op/util/binary_elementwise_logical.hpp | 26 --- .../include/ngraph/op/util/broadcast_base.hpp | 29 --- .../op/util/deformable_convolution_base.hpp | 28 --- .../ngraph/op/util/detection_output_base.hpp | 26 --- .../ngraph/op/util/elementwise_args.hpp | 26 --- .../op/util/embeddingbag_offsets_base.hpp | 27 --- .../op/util/embeddingbag_packed_base.hpp | 28 --- src/core/include/ngraph/op/util/eval_copy.hpp | 9 - src/core/include/ngraph/op/util/fft_base.hpp | 27 --- .../include/ngraph/op/util/framework_node.hpp | 25 --- .../include/ngraph/op/util/gather_base.hpp | 26 --- .../include/ngraph/op/util/gather_nd_base.hpp | 26 --- .../ngraph/op/util/index_reduction.hpp | 31 ---- .../ngraph/op/util/logical_reduction.hpp | 27 --- .../op/util/logical_reduction_keep_dims.hpp | 27 --- .../include/ngraph/op/util/max_pool_base.hpp | 27 --- .../ngraph/op/util/multi_subgraph_base.hpp | 33 ---- .../ngraph/op/util/multiclass_nms_base.hpp | 28 --- .../include/ngraph/op/util/op_annotations.hpp | 66 ------- .../ngraph/op/util/recurrent_sequence.hpp | 29 --- .../include/ngraph/op/util/reduction_base.hpp | 26 --- .../include/ngraph/op/util/rnn_cell_base.hpp | 35 ---- .../include/ngraph/op/util/scatter_base.hpp | 26 --- .../ngraph/op/util/scatter_nd_base.hpp | 26 --- .../include/ngraph/op/util/slice_plan.hpp | 71 -------- .../include/ngraph/op/util/sub_graph_base.hpp | 33 ---- .../op/util/unary_elementwise_arithmetic.hpp | 26 --- src/core/include/ngraph/op/util/variable.hpp | 29 --- .../ngraph/op/util/variable_context.hpp | 28 --- .../ngraph/op/util/variable_extension.hpp | 23 --- .../include/ngraph/op/util/variable_value.hpp | 24 --- src/core/include/ngraph/op/variadic_split.hpp | 27 --- src/core/include/ngraph/op/xor.hpp | 33 ---- src/core/include/ngraph/ops.hpp | 172 ------------------ src/core/include/ngraph/opsets/opset.hpp | 12 -- src/core/include/ngraph/opsets/opset2.hpp | 25 --- src/core/include/ngraph/opsets/opset2_tbl.hpp | 12 -- src/core/include/ngraph/pattern/matcher.hpp | 1 - src/core/include/ngraph/validation_util.hpp | 11 +- src/core/include/openvino/core/enum_names.hpp | 1 + src/core/src/graph_util.cpp | 4 +- src/core/src/op/reduce_prod.cpp | 10 +- src/core/src/op/util/slice_plan.cpp | 43 ----- src/core/src/opsets/opset.cpp | 74 +------- src/core/src/specialize_function.cpp | 17 +- src/core/src/validation_util.cpp | 12 +- src/core/tests/specialize_function.cpp | 113 ++++++------ .../onnx/frontend/src/op/constant.cpp | 1 - .../onnx/frontend/src/op/constant_fill.cpp | 2 - .../frontend/src/op/constant_of_shape.cpp | 1 - src/frontends/onnx/frontend/src/op/conv.cpp | 1 - .../frontend/src/op/dequantize_linear.cpp | 1 - .../src/op/dynamic_quantize_linear.cpp | 1 - src/frontends/onnx/frontend/src/op/expand.cpp | 3 - src/frontends/onnx/frontend/src/op/floor.hpp | 1 - src/frontends/onnx/frontend/src/op/gemm.cpp | 4 - .../onnx/frontend/src/op/greater.hpp | 1 - .../onnx/frontend/src/op/instance_norm.cpp | 4 - src/frontends/onnx/frontend/src/op/less.hpp | 1 - src/frontends/onnx/frontend/src/op/lstm.cpp | 27 ++- .../src/op/mean_variance_normalization.cpp | 5 +- 
src/frontends/onnx/frontend/src/op/mod.cpp | 5 +- src/frontends/onnx/frontend/src/op/mul.hpp | 2 - src/frontends/onnx/frontend/src/op/neg.hpp | 4 +- .../frontend/src/op/non_max_suppression.cpp | 3 +- .../org.openvinotoolkit/detection_output.cpp | 3 +- .../src/op/org.openvinotoolkit/prior_box.cpp | 3 +- src/frontends/onnx/frontend/src/op/pad.cpp | 5 +- .../onnx/frontend/src/op/random_uniform.cpp | 2 +- .../frontend/src/op/random_uniform_like.cpp | 2 +- .../onnx/frontend/src/op/reciprocal.cpp | 1 - .../onnx/frontend/src/op/roi_align.cpp | 2 + src/frontends/onnx/frontend/src/op/selu.cpp | 4 +- .../onnx/frontend/src/op/softsign.cpp | 1 + .../onnx/frontend/src/op/squeeze.cpp | 4 +- src/frontends/onnx/frontend/src/op/topk.cpp | 4 +- src/inference/src/cnn_network_ngraph_impl.cpp | 6 +- src/inference/src/dev/core_impl_ie.cpp | 1 - .../cpu_opset/common/op/rope.hpp | 5 +- .../single_layer_tests/depth_to_space.cpp | 3 +- .../single_layer_tests/gather_elements.cpp | 3 +- .../behavior/plugin/hetero_query_network.hpp | 3 + .../behavior/infer_request/memory_states.cpp | 14 +- .../depth_to_space_transformation.cpp | 24 +-- .../mat_mul_transformation.cpp | 7 +- .../mat_mul_with_constant_transformation.cpp | 8 +- .../squeeze_transformation.cpp | 5 +- .../unsqueeze_transformation.cpp | 5 +- .../single_layer/detection_output.hpp | 3 +- .../shared_test_classes/single_layer/grn.hpp | 17 +- .../single_layer/prior_box.hpp | 16 +- .../single_layer/prior_box_clustered.hpp | 16 +- .../src/single_layer/eye.cpp | 3 +- .../src/single_layer/memory.cpp | 15 +- .../src/single_layer/reverse.cpp | 1 + .../src/single_layer/roi_align.cpp | 3 + .../src/subgraph/memory_LSTMCell.cpp | 8 +- .../src/subgraph/mul_conv_fusion.cpp | 9 +- .../src/subgraph/parameter_shapeof_result.cpp | 4 +- .../src/assign_and_read_value.cpp | 3 + 283 files changed, 225 insertions(+), 6632 deletions(-) delete mode 100644 src/core/include/ngraph/op/abs.hpp delete mode 100644 src/core/include/ngraph/op/acos.hpp delete mode 100644 src/core/include/ngraph/op/acosh.hpp delete mode 100644 src/core/include/ngraph/op/adaptive_avg_pool.hpp delete mode 100644 src/core/include/ngraph/op/adaptive_max_pool.hpp delete mode 100644 src/core/include/ngraph/op/add.hpp delete mode 100644 src/core/include/ngraph/op/and.hpp delete mode 100644 src/core/include/ngraph/op/asin.hpp delete mode 100644 src/core/include/ngraph/op/asinh.hpp delete mode 100644 src/core/include/ngraph/op/assign.hpp delete mode 100644 src/core/include/ngraph/op/atan.hpp delete mode 100644 src/core/include/ngraph/op/atanh.hpp delete mode 100644 src/core/include/ngraph/op/avg_pool.hpp delete mode 100644 src/core/include/ngraph/op/batch_norm.hpp delete mode 100644 src/core/include/ngraph/op/batch_to_space.hpp delete mode 100644 src/core/include/ngraph/op/binary_convolution.hpp delete mode 100644 src/core/include/ngraph/op/broadcast.hpp delete mode 100644 src/core/include/ngraph/op/bucketize.hpp delete mode 100644 src/core/include/ngraph/op/ceiling.hpp delete mode 100644 src/core/include/ngraph/op/clamp.hpp delete mode 100644 src/core/include/ngraph/op/constant.hpp delete mode 100644 src/core/include/ngraph/op/convert.hpp delete mode 100644 src/core/include/ngraph/op/convert_like.hpp delete mode 100644 src/core/include/ngraph/op/convolution.hpp delete mode 100644 src/core/include/ngraph/op/cos.hpp delete mode 100644 src/core/include/ngraph/op/cosh.hpp delete mode 100644 src/core/include/ngraph/op/ctc_greedy_decoder.hpp delete mode 100644 src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp delete 
mode 100644 src/core/include/ngraph/op/ctc_loss.hpp delete mode 100644 src/core/include/ngraph/op/cum_sum.hpp delete mode 100644 src/core/include/ngraph/op/deformable_convolution.hpp delete mode 100644 src/core/include/ngraph/op/deformable_psroi_pooling.hpp delete mode 100644 src/core/include/ngraph/op/depth_to_space.hpp delete mode 100644 src/core/include/ngraph/op/detection_output.hpp delete mode 100644 src/core/include/ngraph/op/dft.hpp delete mode 100644 src/core/include/ngraph/op/einsum.hpp delete mode 100644 src/core/include/ngraph/op/elu.hpp delete mode 100644 src/core/include/ngraph/op/embedding_segments_sum.hpp delete mode 100644 src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp delete mode 100644 src/core/include/ngraph/op/embeddingbag_packedsum.hpp delete mode 100644 src/core/include/ngraph/op/equal.hpp delete mode 100644 src/core/include/ngraph/op/erf.hpp delete mode 100644 src/core/include/ngraph/op/exp.hpp delete mode 100644 src/core/include/ngraph/op/experimental_detectron_detection_output.hpp delete mode 100644 src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp delete mode 100644 src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp delete mode 100644 src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp delete mode 100644 src/core/include/ngraph/op/experimental_detectron_topkrois.hpp delete mode 100644 src/core/include/ngraph/op/extractimagepatches.hpp delete mode 100644 src/core/include/ngraph/op/eye.hpp delete mode 100644 src/core/include/ngraph/op/fake_quantize.hpp delete mode 100644 src/core/include/ngraph/op/floor.hpp delete mode 100644 src/core/include/ngraph/op/floor_mod.hpp delete mode 100644 src/core/include/ngraph/op/gather.hpp delete mode 100644 src/core/include/ngraph/op/gather_elements.hpp delete mode 100644 src/core/include/ngraph/op/gather_nd.hpp delete mode 100644 src/core/include/ngraph/op/gather_tree.hpp delete mode 100644 src/core/include/ngraph/op/gelu.hpp delete mode 100644 src/core/include/ngraph/op/generate_proposals.hpp delete mode 100644 src/core/include/ngraph/op/greater.hpp delete mode 100644 src/core/include/ngraph/op/greater_eq.hpp delete mode 100644 src/core/include/ngraph/op/grid_sample.hpp delete mode 100644 src/core/include/ngraph/op/grn.hpp delete mode 100644 src/core/include/ngraph/op/group_conv.hpp delete mode 100644 src/core/include/ngraph/op/gru_cell.hpp delete mode 100644 src/core/include/ngraph/op/gru_sequence.hpp delete mode 100644 src/core/include/ngraph/op/hard_sigmoid.hpp delete mode 100644 src/core/include/ngraph/op/hsigmoid.hpp delete mode 100644 src/core/include/ngraph/op/hswish.hpp delete mode 100644 src/core/include/ngraph/op/i420_to_bgr.hpp delete mode 100644 src/core/include/ngraph/op/i420_to_rgb.hpp delete mode 100644 src/core/include/ngraph/op/idft.hpp delete mode 100644 src/core/include/ngraph/op/if.hpp delete mode 100644 src/core/include/ngraph/op/interpolate.hpp delete mode 100644 src/core/include/ngraph/op/irdft.hpp delete mode 100644 src/core/include/ngraph/op/is_finite.hpp delete mode 100644 src/core/include/ngraph/op/is_inf.hpp delete mode 100644 src/core/include/ngraph/op/is_nan.hpp delete mode 100644 src/core/include/ngraph/op/less.hpp delete mode 100644 src/core/include/ngraph/op/less_eq.hpp delete mode 100644 src/core/include/ngraph/op/log.hpp delete mode 100644 src/core/include/ngraph/op/log_softmax.hpp delete mode 100644 src/core/include/ngraph/op/loop.hpp delete mode 100644 src/core/include/ngraph/op/lrn.hpp delete mode 100644 
src/core/include/ngraph/op/lstm_cell.hpp delete mode 100644 src/core/include/ngraph/op/lstm_sequence.hpp delete mode 100644 src/core/include/ngraph/op/matmul.hpp delete mode 100644 src/core/include/ngraph/op/matrix_nms.hpp delete mode 100644 src/core/include/ngraph/op/max.hpp delete mode 100644 src/core/include/ngraph/op/max_pool.hpp delete mode 100644 src/core/include/ngraph/op/maximum.hpp delete mode 100644 src/core/include/ngraph/op/min.hpp delete mode 100644 src/core/include/ngraph/op/minimum.hpp delete mode 100644 src/core/include/ngraph/op/mish.hpp delete mode 100644 src/core/include/ngraph/op/mod.hpp delete mode 100644 src/core/include/ngraph/op/multiclass_nms.hpp delete mode 100644 src/core/include/ngraph/op/multiply.hpp delete mode 100644 src/core/include/ngraph/op/mvn.hpp delete mode 100644 src/core/include/ngraph/op/negative.hpp delete mode 100644 src/core/include/ngraph/op/non_max_suppression.hpp delete mode 100644 src/core/include/ngraph/op/non_zero.hpp delete mode 100644 src/core/include/ngraph/op/normalize_l2.hpp delete mode 100644 src/core/include/ngraph/op/not.hpp delete mode 100644 src/core/include/ngraph/op/not_equal.hpp delete mode 100644 src/core/include/ngraph/op/nv12_to_bgr.hpp delete mode 100644 src/core/include/ngraph/op/nv12_to_rgb.hpp delete mode 100644 src/core/include/ngraph/op/one_hot.hpp delete mode 100644 src/core/include/ngraph/op/op.hpp delete mode 100644 src/core/include/ngraph/op/or.hpp delete mode 100644 src/core/include/ngraph/op/pad.hpp delete mode 100644 src/core/include/ngraph/op/power.hpp delete mode 100644 src/core/include/ngraph/op/prelu.hpp delete mode 100644 src/core/include/ngraph/op/prior_box.hpp delete mode 100644 src/core/include/ngraph/op/prior_box_clustered.hpp delete mode 100644 src/core/include/ngraph/op/proposal.hpp delete mode 100644 src/core/include/ngraph/op/psroi_pooling.hpp delete mode 100644 src/core/include/ngraph/op/random_uniform.hpp delete mode 100644 src/core/include/ngraph/op/range.hpp delete mode 100644 src/core/include/ngraph/op/rdft.hpp delete mode 100644 src/core/include/ngraph/op/read_value.hpp delete mode 100644 src/core/include/ngraph/op/reduce_l1.hpp delete mode 100644 src/core/include/ngraph/op/reduce_l2.hpp delete mode 100644 src/core/include/ngraph/op/reduce_logical_and.hpp delete mode 100644 src/core/include/ngraph/op/reduce_logical_or.hpp delete mode 100644 src/core/include/ngraph/op/reduce_mean.hpp delete mode 100644 src/core/include/ngraph/op/reduce_prod.hpp delete mode 100644 src/core/include/ngraph/op/reduce_sum.hpp delete mode 100644 src/core/include/ngraph/op/region_yolo.hpp delete mode 100644 src/core/include/ngraph/op/relu.hpp delete mode 100644 src/core/include/ngraph/op/reorg_yolo.hpp delete mode 100644 src/core/include/ngraph/op/reshape.hpp delete mode 100644 src/core/include/ngraph/op/result.hpp delete mode 100644 src/core/include/ngraph/op/reverse.hpp delete mode 100644 src/core/include/ngraph/op/reverse_sequence.hpp delete mode 100644 src/core/include/ngraph/op/rnn_cell.hpp delete mode 100644 src/core/include/ngraph/op/rnn_sequence.hpp delete mode 100644 src/core/include/ngraph/op/roi_align.hpp delete mode 100644 src/core/include/ngraph/op/roi_pooling.hpp delete mode 100644 src/core/include/ngraph/op/roll.hpp delete mode 100644 src/core/include/ngraph/op/round.hpp delete mode 100644 src/core/include/ngraph/op/scatter_elements_update.hpp delete mode 100644 src/core/include/ngraph/op/scatter_nd_update.hpp delete mode 100644 src/core/include/ngraph/op/scatter_update.hpp delete mode 100644 
src/core/include/ngraph/op/select.hpp delete mode 100644 src/core/include/ngraph/op/selu.hpp delete mode 100644 src/core/include/ngraph/op/shape_of.hpp delete mode 100644 src/core/include/ngraph/op/shuffle_channels.hpp delete mode 100644 src/core/include/ngraph/op/sigmoid.hpp delete mode 100644 src/core/include/ngraph/op/sign.hpp delete mode 100644 src/core/include/ngraph/op/sin.hpp delete mode 100644 src/core/include/ngraph/op/sinh.hpp delete mode 100644 src/core/include/ngraph/op/sink.hpp delete mode 100644 src/core/include/ngraph/op/slice.hpp delete mode 100644 src/core/include/ngraph/op/softmax.hpp delete mode 100644 src/core/include/ngraph/op/softplus.hpp delete mode 100644 src/core/include/ngraph/op/softsign.hpp delete mode 100644 src/core/include/ngraph/op/space_to_batch.hpp delete mode 100644 src/core/include/ngraph/op/space_to_depth.hpp delete mode 100644 src/core/include/ngraph/op/sqrt.hpp delete mode 100644 src/core/include/ngraph/op/squared_difference.hpp delete mode 100644 src/core/include/ngraph/op/squeeze.hpp delete mode 100644 src/core/include/ngraph/op/strided_slice.hpp delete mode 100644 src/core/include/ngraph/op/subtract.hpp delete mode 100644 src/core/include/ngraph/op/swish.hpp delete mode 100644 src/core/include/ngraph/op/tan.hpp delete mode 100644 src/core/include/ngraph/op/tanh.hpp delete mode 100644 src/core/include/ngraph/op/tensor_iterator.hpp delete mode 100644 src/core/include/ngraph/op/tile.hpp delete mode 100644 src/core/include/ngraph/op/topk.hpp delete mode 100644 src/core/include/ngraph/op/transpose.hpp delete mode 100644 src/core/include/ngraph/op/unique.hpp delete mode 100644 src/core/include/ngraph/op/unsqueeze.hpp delete mode 100644 src/core/include/ngraph/op/util/activation_functions.hpp delete mode 100644 src/core/include/ngraph/op/util/arithmetic_reduction.hpp delete mode 100644 src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp delete mode 100644 src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp delete mode 100644 src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp delete mode 100644 src/core/include/ngraph/op/util/binary_elementwise_logical.hpp delete mode 100644 src/core/include/ngraph/op/util/broadcast_base.hpp delete mode 100644 src/core/include/ngraph/op/util/deformable_convolution_base.hpp delete mode 100644 src/core/include/ngraph/op/util/detection_output_base.hpp delete mode 100644 src/core/include/ngraph/op/util/elementwise_args.hpp delete mode 100644 src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp delete mode 100644 src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp delete mode 100644 src/core/include/ngraph/op/util/eval_copy.hpp delete mode 100644 src/core/include/ngraph/op/util/fft_base.hpp delete mode 100644 src/core/include/ngraph/op/util/framework_node.hpp delete mode 100644 src/core/include/ngraph/op/util/gather_base.hpp delete mode 100644 src/core/include/ngraph/op/util/gather_nd_base.hpp delete mode 100644 src/core/include/ngraph/op/util/index_reduction.hpp delete mode 100644 src/core/include/ngraph/op/util/logical_reduction.hpp delete mode 100644 src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp delete mode 100644 src/core/include/ngraph/op/util/max_pool_base.hpp delete mode 100644 src/core/include/ngraph/op/util/multi_subgraph_base.hpp delete mode 100644 src/core/include/ngraph/op/util/multiclass_nms_base.hpp delete mode 100644 src/core/include/ngraph/op/util/op_annotations.hpp delete mode 100644 
src/core/include/ngraph/op/util/recurrent_sequence.hpp delete mode 100644 src/core/include/ngraph/op/util/reduction_base.hpp delete mode 100644 src/core/include/ngraph/op/util/rnn_cell_base.hpp delete mode 100644 src/core/include/ngraph/op/util/scatter_base.hpp delete mode 100644 src/core/include/ngraph/op/util/scatter_nd_base.hpp delete mode 100644 src/core/include/ngraph/op/util/slice_plan.hpp delete mode 100644 src/core/include/ngraph/op/util/sub_graph_base.hpp delete mode 100644 src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp delete mode 100644 src/core/include/ngraph/op/util/variable.hpp delete mode 100644 src/core/include/ngraph/op/util/variable_context.hpp delete mode 100644 src/core/include/ngraph/op/util/variable_extension.hpp delete mode 100644 src/core/include/ngraph/op/util/variable_value.hpp delete mode 100644 src/core/include/ngraph/op/variadic_split.hpp delete mode 100644 src/core/include/ngraph/op/xor.hpp delete mode 100644 src/core/include/ngraph/opsets/opset2.hpp delete mode 100644 src/core/include/ngraph/opsets/opset2_tbl.hpp diff --git a/src/core/include/ngraph/function.hpp b/src/core/include/ngraph/function.hpp index 227518349110f2..7b9a7448a97c53 100644 --- a/src/core/include/ngraph/function.hpp +++ b/src/core/include/ngraph/function.hpp @@ -16,13 +16,9 @@ #include "ngraph/ngraph_visibility.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/assign.hpp" #include "ngraph/op/parameter.hpp" -#include "ngraph/op/read_value.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/op/sink.hpp" -#include "ngraph/op/util/variable.hpp" #include "openvino/core/model.hpp" +#include "openvino/op/util/variable.hpp" namespace ngraph { using Function = ov::Model; diff --git a/src/core/include/ngraph/node.hpp b/src/core/include/ngraph/node.hpp index c1ce13be4c8a92..7983a214a04767 100644 --- a/src/core/include/ngraph/node.hpp +++ b/src/core/include/ngraph/node.hpp @@ -38,13 +38,12 @@ #include "ngraph/node_input.hpp" #include "ngraph/node_output.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/op_annotations.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_value.hpp" #include "ngraph/output_vector.hpp" #include "ngraph/strides.hpp" #include "openvino/core/any.hpp" #include "openvino/core/node.hpp" +#include "openvino/op/util/variable.hpp" +#include "openvino/op/util/variable_value.hpp" namespace ov { namespace op { diff --git a/src/core/include/ngraph/op/abs.hpp b/src/core/include/ngraph/op/abs.hpp deleted file mode 100644 index ab96e5d413b55e..00000000000000 --- a/src/core/include/ngraph/op/abs.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/abs.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Abs; -} // namespace v0 -using v0::Abs; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/acos.hpp b/src/core/include/ngraph/op/acos.hpp deleted file mode 100644 index f3173555f3a330..00000000000000 --- a/src/core/include/ngraph/op/acos.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/acos.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Acos; -} // namespace v0 -using v0::Acos; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/acosh.hpp b/src/core/include/ngraph/op/acosh.hpp deleted file mode 100644 index 677fab4f318343..00000000000000 --- a/src/core/include/ngraph/op/acosh.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/acosh.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Acosh; -} // namespace v3 -using v3::Acosh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/adaptive_avg_pool.hpp b/src/core/include/ngraph/op/adaptive_avg_pool.hpp deleted file mode 100644 index 25e4a9976eebd9..00000000000000 --- a/src/core/include/ngraph/op/adaptive_avg_pool.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/adaptive_avg_pool.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::AdaptiveAvgPool;
-} // namespace v8
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/adaptive_max_pool.hpp b/src/core/include/ngraph/op/adaptive_max_pool.hpp
deleted file mode 100644
index 1298dc97a2fc37..00000000000000
--- a/src/core/include/ngraph/op/adaptive_max_pool.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/adaptive_max_pool.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::AdaptiveMaxPool;
-} // namespace v8
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/add.hpp b/src/core/include/ngraph/op/add.hpp
deleted file mode 100644
index 61584b5138f10f..00000000000000
--- a/src/core/include/ngraph/op/add.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
-#include "openvino/op/add.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Add;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/and.hpp b/src/core/include/ngraph/op/and.hpp
deleted file mode 100644
index 3ce521f8c6019c..00000000000000
--- a/src/core/include/ngraph/op/and.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/binary_elementwise_logical.hpp"
-#include "openvino/op/logical_and.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::LogicalAnd;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/asin.hpp b/src/core/include/ngraph/op/asin.hpp
deleted file mode 100644
index 4ef7fb119d713d..00000000000000
--- a/src/core/include/ngraph/op/asin.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/asin.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Asin;
-} // namespace v0
-using v0::Asin;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/asinh.hpp b/src/core/include/ngraph/op/asinh.hpp
deleted file mode 100644
index 0f40a770a3f6b4..00000000000000
--- a/src/core/include/ngraph/op/asinh.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/asinh.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::Asinh;
-} // namespace v3
-using v3::Asinh;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/assign.hpp b/src/core/include/ngraph/op/assign.hpp
deleted file mode 100644
index af479f8bae8756..00000000000000
--- a/src/core/include/ngraph/op/assign.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/sink.hpp"
-#include "ngraph/op/util/variable.hpp"
-#include "ngraph/op/util/variable_extension.hpp"
-#include "openvino/op/assign.hpp"
-
-namespace ngraph {
-namespace op {
-using ov::op::util::AssignBase;
-
-namespace v3 {
-using ov::op::v3::Assign;
-} // namespace v3
-namespace v6 {
-using ov::op::v6::Assign;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/atan.hpp b/src/core/include/ngraph/op/atan.hpp
deleted file mode 100644
index 4cc94c1709db29..00000000000000
--- a/src/core/include/ngraph/op/atan.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/atan.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Atan;
-} // namespace v0
-using v0::Atan;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/atanh.hpp b/src/core/include/ngraph/op/atanh.hpp
deleted file mode 100644
index 03b4454b5cf500..00000000000000
--- a/src/core/include/ngraph/op/atanh.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/atanh.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::Atanh;
-} // namespace v3
-using v3::Atanh;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/avg_pool.hpp b/src/core/include/ngraph/op/avg_pool.hpp
deleted file mode 100644
index 180787b42d6a3b..00000000000000
--- a/src/core/include/ngraph/op/avg_pool.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/avg_pool.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::AvgPool;
-} // namespace v1
-
-using v1::AvgPool;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/batch_norm.hpp b/src/core/include/ngraph/op/batch_norm.hpp
deleted file mode 100644
index cac36e5b4e2578..00000000000000
--- a/src/core/include/ngraph/op/batch_norm.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/batch_norm.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::BatchNormInference;
-} // namespace v0
-namespace v5 {
-using ov::op::v5::BatchNormInference;
-} // namespace v5
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/batch_to_space.hpp b/src/core/include/ngraph/op/batch_to_space.hpp
deleted file mode 100644
index da115a4c0389f3..00000000000000
--- a/src/core/include/ngraph/op/batch_to_space.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/batch_to_space.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::BatchToSpace;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/binary_convolution.hpp b/src/core/include/ngraph/op/binary_convolution.hpp
deleted file mode 100644
index a51e91549b6780..00000000000000
--- a/src/core/include/ngraph/op/binary_convolution.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/binary_convolution.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::BinaryConvolution;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/broadcast.hpp b/src/core/include/ngraph/op/broadcast.hpp
deleted file mode 100644
index 98c0ac86da51cd..00000000000000
--- a/src/core/include/ngraph/op/broadcast.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/broadcast_base.hpp"
-#include "openvino/op/broadcast.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::Broadcast;
-} // namespace v3
-
-namespace v1 {
-using ov::op::v1::Broadcast;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/bucketize.hpp b/src/core/include/ngraph/op/bucketize.hpp
deleted file mode 100644
index 052d0d13ebc2a3..00000000000000
--- a/src/core/include/ngraph/op/bucketize.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/bucketize.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::Bucketize;
-} // namespace v3
-using v3::Bucketize;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/ceiling.hpp b/src/core/include/ngraph/op/ceiling.hpp
deleted file mode 100644
index e5c061a4c3e1d7..00000000000000
--- a/src/core/include/ngraph/op/ceiling.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/ceiling.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Ceiling;
-} // namespace v0
-using v0::Ceiling;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/clamp.hpp b/src/core/include/ngraph/op/clamp.hpp
deleted file mode 100644
index 64a3c2f39f17ac..00000000000000
--- a/src/core/include/ngraph/op/clamp.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/clamp.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Clamp;
-} // namespace v0
-using v0::Clamp;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/concat.hpp b/src/core/include/ngraph/op/concat.hpp
index 0c509cc057a07c..a7d67014756623 100644
--- a/src/core/include/ngraph/op/concat.hpp
+++ b/src/core/include/ngraph/op/concat.hpp
@@ -16,7 +16,6 @@
 #include
-#include "ngraph/op/op.hpp"
 #include "openvino/op/concat.hpp"
 namespace ngraph {
diff --git a/src/core/include/ngraph/op/constant.hpp b/src/core/include/ngraph/op/constant.hpp
deleted file mode 100644
index e4be563ff18d75..00000000000000
--- a/src/core/include/ngraph/op/constant.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/runtime/aligned_buffer.hpp"
-#include "ngraph/runtime/shared_buffer.hpp"
-#include "ngraph/type/element_type.hpp"
-#include "ngraph/type/element_type_traits.hpp"
-#include "ngraph/util.hpp"
-#include "openvino/op/constant.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Constant;
-} // namespace v0
-using v0::Constant;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/convert.hpp b/src/core/include/ngraph/op/convert.hpp
deleted file mode 100644
index da3f14489e44d9..00000000000000
--- a/src/core/include/ngraph/op/convert.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/convert.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Convert;
-} // namespace v0
-using v0::Convert;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/convert_like.hpp b/src/core/include/ngraph/op/convert_like.hpp
deleted file mode 100644
index 60e14bd4a9f2c2..00000000000000
--- a/src/core/include/ngraph/op/convert_like.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/convert_like.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::ConvertLike;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/convolution.hpp b/src/core/include/ngraph/op/convolution.hpp
deleted file mode 100644
index 3c5c9bab1f9116..00000000000000
--- a/src/core/include/ngraph/op/convolution.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/convolution.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Convolution;
-using ov::op::v1::ConvolutionBackpropData;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/cos.hpp b/src/core/include/ngraph/op/cos.hpp
deleted file mode 100644
index af59b80a9af6b4..00000000000000
--- a/src/core/include/ngraph/op/cos.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/cos.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Cos;
-} // namespace v0
-using v0::Cos;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/cosh.hpp b/src/core/include/ngraph/op/cosh.hpp
deleted file mode 100644
index d2c7b8f340a4ea..00000000000000
--- a/src/core/include/ngraph/op/cosh.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/cosh.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Cosh;
-} // namespace v0
-using v0::Cosh;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/ctc_greedy_decoder.hpp b/src/core/include/ngraph/op/ctc_greedy_decoder.hpp
deleted file mode 100644
index 365406cb2a5797..00000000000000
--- a/src/core/include/ngraph/op/ctc_greedy_decoder.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/ctc_greedy_decoder.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::CTCGreedyDecoder;
-} // namespace v0
-using v0::CTCGreedyDecoder;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp b/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp
deleted file mode 100644
index a2ec282688d451..00000000000000
--- a/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/ctc_greedy_decoder_seq_len.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v6 {
-using ov::op::v6::CTCGreedyDecoderSeqLen;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/ctc_loss.hpp b/src/core/include/ngraph/op/ctc_loss.hpp
deleted file mode 100644
index 36e25d5f7b113b..00000000000000
--- a/src/core/include/ngraph/op/ctc_loss.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/ctc_loss.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v4 {
-using ov::op::v4::CTCLoss;
-} // namespace v4
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/cum_sum.hpp b/src/core/include/ngraph/op/cum_sum.hpp
deleted file mode 100644
index 2ecd5bd4f13c47..00000000000000
--- a/src/core/include/ngraph/op/cum_sum.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/cum_sum.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::CumSum;
-} // namespace v0
-using v0::CumSum;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/deformable_convolution.hpp b/src/core/include/ngraph/op/deformable_convolution.hpp
deleted file mode 100644
index 2ef161637ddb88..00000000000000
--- a/src/core/include/ngraph/op/deformable_convolution.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/deformable_convolution_base.hpp"
-#include "openvino/op/deformable_convolution.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::DeformableConvolution;
-} // namespace v1
-
-namespace v8 {
-using ov::op::v8::DeformableConvolution;
-} // namespace v8
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/deformable_psroi_pooling.hpp b/src/core/include/ngraph/op/deformable_psroi_pooling.hpp
deleted file mode 100644
index 1a561fbb20e08e..00000000000000
--- a/src/core/include/ngraph/op/deformable_psroi_pooling.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/deformable_psroi_pooling.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::DeformablePSROIPooling;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/depth_to_space.hpp b/src/core/include/ngraph/op/depth_to_space.hpp
deleted file mode 100644
index 0f2e4341968f93..00000000000000
--- a/src/core/include/ngraph/op/depth_to_space.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/depth_to_space.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::DepthToSpace;
-} // namespace v0
-using v0::DepthToSpace;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/detection_output.hpp b/src/core/include/ngraph/op/detection_output.hpp
deleted file mode 100644
index 9c60638c8e3a52..00000000000000
--- a/src/core/include/ngraph/op/detection_output.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/detection_output.hpp"
-
-namespace ngraph {
-namespace op {
-using DetectionOutputAttrs = ov::op::v0::DetectionOutput::Attributes;
-
-namespace v0 {
-using ov::op::v0::DetectionOutput;
-} // namespace v0
-using v0::DetectionOutput;
-
-namespace v8 {
-using ov::op::v8::DetectionOutput;
-} // namespace v8
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/dft.hpp b/src/core/include/ngraph/op/dft.hpp
deleted file mode 100644
index fe94ae91fc4e03..00000000000000
--- a/src/core/include/ngraph/op/dft.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2022 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/fft_base.hpp"
-#include "openvino/op/dft.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v7 {
-using ov::op::v7::DFT;
-} // namespace v7
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/divide.hpp b/src/core/include/ngraph/op/divide.hpp
index 8ad23dc0b3ce46..7e23179987e62c 100644
--- a/src/core/include/ngraph/op/divide.hpp
+++ b/src/core/include/ngraph/op/divide.hpp
@@ -14,7 +14,6 @@
 # endif
 #endif
-#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
 #include "openvino/op/divide.hpp"
 namespace ngraph {
diff --git a/src/core/include/ngraph/op/einsum.hpp b/src/core/include/ngraph/op/einsum.hpp
deleted file mode 100644
index 8dd7eaa14a1bac..00000000000000
--- a/src/core/include/ngraph/op/einsum.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/einsum.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v7 {
-using ov::op::v7::Einsum;
-} // namespace v7
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/elu.hpp b/src/core/include/ngraph/op/elu.hpp
deleted file mode 100644
index 501f6689f9c581..00000000000000
--- a/src/core/include/ngraph/op/elu.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/elu.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Elu;
-} // namespace v0
-using v0::Elu;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/embedding_segments_sum.hpp b/src/core/include/ngraph/op/embedding_segments_sum.hpp
deleted file mode 100644
index 1eb8282815c0da..00000000000000
--- a/src/core/include/ngraph/op/embedding_segments_sum.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/util/index_reduction.hpp"
-#include "openvino/op/embedding_segments_sum.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::EmbeddingSegmentsSum;
-} // namespace v3
-using v3::EmbeddingSegmentsSum;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp b/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp
deleted file mode 100644
index 48d4a3c298dd58..00000000000000
--- a/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/util/embeddingbag_offsets_base.hpp"
-#include "ngraph/op/util/index_reduction.hpp"
-#include "openvino/op/embeddingbag_offsets_sum.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::EmbeddingBagOffsetsSum;
-} // namespace v3
-using v3::EmbeddingBagOffsetsSum;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/embeddingbag_packedsum.hpp b/src/core/include/ngraph/op/embeddingbag_packedsum.hpp
deleted file mode 100644
index 2bcbe580129dd3..00000000000000
--- a/src/core/include/ngraph/op/embeddingbag_packedsum.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/util/embeddingbag_packed_base.hpp"
-#include "ngraph/op/util/index_reduction.hpp"
-#include "openvino/op/embeddingbag_packedsum.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::EmbeddingBagPackedSum;
-} // namespace v3
-using v3::EmbeddingBagPackedSum;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/equal.hpp b/src/core/include/ngraph/op/equal.hpp
deleted file mode 100644
index 69ca75b677b265..00000000000000
--- a/src/core/include/ngraph/op/equal.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_comparison.hpp"
-#include "openvino/op/equal.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Equal;
-} // namespace v1
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/erf.hpp b/src/core/include/ngraph/op/erf.hpp
deleted file mode 100644
index 04669af7b7bcb3..00000000000000
--- a/src/core/include/ngraph/op/erf.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/erf.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Erf;
-} // namespace v0
-using v0::Erf;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/exp.hpp b/src/core/include/ngraph/op/exp.hpp
deleted file mode 100644
index 582fb434af23d5..00000000000000
--- a/src/core/include/ngraph/op/exp.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/exp.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Exp;
-} // namespace v0
-using v0::Exp;
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp b/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp
deleted file mode 100644
index 32396876379a65..00000000000000
--- a/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/experimental_detectron_detection_output.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v6 {
-using ov::op::v6::ExperimentalDetectronDetectionOutput;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp b/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp
deleted file mode 100644
index e063f6f9380cb1..00000000000000
--- a/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/experimental_detectron_generate_proposals.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v6 {
-using ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp b/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp
deleted file mode 100644
index 3a7e86241828cd..00000000000000
--- a/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/experimental_detectron_prior_grid_generator.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v6 {
-using ov::op::v6::ExperimentalDetectronPriorGridGenerator;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp b/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp
deleted file mode 100644
index 98eeef604d0845..00000000000000
--- a/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/experimental_detectron_roi_feature.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v6 {
-using ov::op::v6::ExperimentalDetectronROIFeatureExtractor;
-} // namespace v6
-} // namespace op
-} // namespace ngraph
diff --git a/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp b/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp
deleted file mode 100644
index 6b41bbece97cec..00000000000000
--- a/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
- "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_topkrois.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronTopKROIs; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/extractimagepatches.hpp b/src/core/include/ngraph/op/extractimagepatches.hpp deleted file mode 100644 index f4c33abd174cbf..00000000000000 --- a/src/core/include/ngraph/op/extractimagepatches.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/extractimagepatches.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ExtractImagePatches; -} // namespace v3 -using v3::ExtractImagePatches; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/eye.hpp b/src/core/include/ngraph/op/eye.hpp deleted file mode 100644 index 8bb108edb6401d..00000000000000 --- a/src/core/include/ngraph/op/eye.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/eye.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::Eye; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/fake_quantize.hpp b/src/core/include/ngraph/op/fake_quantize.hpp deleted file mode 100644 index 6a2c34531ea3fc..00000000000000 --- a/src/core/include/ngraph/op/fake_quantize.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/fake_quantize.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::FakeQuantize; -} // namespace v0 -using v0::FakeQuantize; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/floor.hpp b/src/core/include/ngraph/op/floor.hpp deleted file mode 100644 index c0c7d8f3c1651d..00000000000000 --- a/src/core/include/ngraph/op/floor.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/floor.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Floor; -} // namespace v0 -using v0::Floor; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/floor_mod.hpp b/src/core/include/ngraph/op/floor_mod.hpp deleted file mode 100644 index 03d5d4dd70fbf1..00000000000000 --- a/src/core/include/ngraph/op/floor_mod.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/floor_mod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::FloorMod; -} // namespace v1 -using v1::FloorMod; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather.hpp b/src/core/include/ngraph/op/gather.hpp deleted file mode 100644 index 124e77f03ef7d7..00000000000000 --- a/src/core/include/ngraph/op/gather.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/gather_base.hpp" -#include "openvino/op/gather.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Gather; -} // namespace v1 -namespace v7 { -using ov::op::v7::Gather; -} // namespace v7 -namespace v8 { -using ov::op::v8::Gather; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather_elements.hpp b/src/core/include/ngraph/op/gather_elements.hpp deleted file mode 100644 index 77180e143af859..00000000000000 --- a/src/core/include/ngraph/op/gather_elements.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/gather_elements.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::GatherElements; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather_nd.hpp b/src/core/include/ngraph/op/gather_nd.hpp deleted file mode 100644 index fe587ff39e565c..00000000000000 --- a/src/core/include/ngraph/op/gather_nd.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
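The gather headers removed here were pure forwarding shims: each re-included the matching openvino/op/ header and aliased the ov::op classes into ngraph::op. Migrating caller code is therefore a mechanical include-path and namespace swap. A minimal sketch of the post-migration spelling (illustrative only, not part of this patch; the shapes and constants below are arbitrary assumptions):

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/parameter.hpp"

// Legacy spelling (relied on the deleted shim):
//   #include "ngraph/op/gather.hpp"
//   auto g = std::make_shared<ngraph::op::v8::Gather>(data, indices, axis);
// OpenVINO 2.0 spelling: same operator, same v8 opset version; only the
// header path and namespace change.
std::shared_ptr<ov::op::v8::Gather> make_gather() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto indices = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {0});
    auto axis = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {0});
    return std::make_shared<ov::op::v8::Gather>(data, indices, axis);
}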
diff --git a/src/core/include/ngraph/op/gather_nd.hpp b/src/core/include/ngraph/op/gather_nd.hpp
deleted file mode 100644
index fe587ff39e565c..00000000000000
--- a/src/core/include/ngraph/op/gather_nd.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/gather_nd.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::GatherND;
-}  // namespace v5
-namespace v8 {
-using ov::op::v8::GatherND;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/gather_tree.hpp b/src/core/include/ngraph/op/gather_tree.hpp
deleted file mode 100644
index deead92748b9a3..00000000000000
--- a/src/core/include/ngraph/op/gather_tree.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/gather_tree.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::GatherTree;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/gelu.hpp b/src/core/include/ngraph/op/gelu.hpp
deleted file mode 100644
index d3f084c88b4d58..00000000000000
--- a/src/core/include/ngraph/op/gelu.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/gelu.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Gelu;
-}  // namespace v0
-using v0::Gelu;
-
-using ov::op::GeluApproximationMode;
-
-namespace v7 {
-using ov::op::v7::Gelu;
-}  // namespace v7
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/generate_proposals.hpp b/src/core/include/ngraph/op/generate_proposals.hpp
deleted file mode 100644
index 3dc2525b198755..00000000000000
--- a/src/core/include/ngraph/op/generate_proposals.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "openvino/op/generate_proposals.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v9 {
-using ov::op::v9::GenerateProposals;
-}  // namespace v9
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/greater.hpp b/src/core/include/ngraph/op/greater.hpp
deleted file mode 100644
index 4869b67748e83c..00000000000000
--- a/src/core/include/ngraph/op/greater.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_comparison.hpp"
-#include "openvino/op/greater.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Greater;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/greater_eq.hpp b/src/core/include/ngraph/op/greater_eq.hpp
deleted file mode 100644
index 130ea97bc87764..00000000000000
--- a/src/core/include/ngraph/op/greater_eq.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_comparison.hpp"
-#include "openvino/op/greater_eq.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::GreaterEqual;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/grid_sample.hpp b/src/core/include/ngraph/op/grid_sample.hpp
deleted file mode 100644
index fe161bb9519008..00000000000000
--- a/src/core/include/ngraph/op/grid_sample.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/grid_sample.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v9 {
-using ov::op::v9::GridSample;
-}  // namespace v9
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/grn.hpp b/src/core/include/ngraph/op/grn.hpp
deleted file mode 100644
index 5073d8219ffa73..00000000000000
--- a/src/core/include/ngraph/op/grn.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/grn.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::GRN;
-}  // namespace v0
-using v0::GRN;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/group_conv.hpp b/src/core/include/ngraph/op/group_conv.hpp
deleted file mode 100644
index bde389017e937e..00000000000000
--- a/src/core/include/ngraph/op/group_conv.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/convolution.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/group_conv.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::GroupConvolution;
-using ov::op::v1::GroupConvolutionBackpropData;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/gru_cell.hpp b/src/core/include/ngraph/op/gru_cell.hpp
deleted file mode 100644
index 7eec4f1b285697..00000000000000
--- a/src/core/include/ngraph/op/gru_cell.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/activation_functions.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/gru_cell.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::GRUCell;
-}  // namespace v3
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/gru_sequence.hpp b/src/core/include/ngraph/op/gru_sequence.hpp
deleted file mode 100644
index 9713ddf7e641ef..00000000000000
--- a/src/core/include/ngraph/op/gru_sequence.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-#include
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/gru_sequence.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::GRUSequence;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/hard_sigmoid.hpp b/src/core/include/ngraph/op/hard_sigmoid.hpp
deleted file mode 100644
index 170a1072461c24..00000000000000
--- a/src/core/include/ngraph/op/hard_sigmoid.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/hard_sigmoid.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::HardSigmoid;
-}  // namespace v0
-using v0::HardSigmoid;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/hsigmoid.hpp b/src/core/include/ngraph/op/hsigmoid.hpp
deleted file mode 100644
index 31ec84fd265df4..00000000000000
--- a/src/core/include/ngraph/op/hsigmoid.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/hsigmoid.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::HSigmoid;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/hswish.hpp b/src/core/include/ngraph/op/hswish.hpp
deleted file mode 100644
index 4a75cb32f3637d..00000000000000
--- a/src/core/include/ngraph/op/hswish.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/hswish.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v4 {
-using ov::op::v4::HSwish;
-}  // namespace v4
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/i420_to_bgr.hpp b/src/core/include/ngraph/op/i420_to_bgr.hpp
deleted file mode 100644
index 7a0e25e615afe9..00000000000000
--- a/src/core/include/ngraph/op/i420_to_bgr.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "openvino/op/i420_to_bgr.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::I420toBGR;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/i420_to_rgb.hpp b/src/core/include/ngraph/op/i420_to_rgb.hpp
deleted file mode 100644
index 3f092d63739a28..00000000000000
--- a/src/core/include/ngraph/op/i420_to_rgb.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "openvino/op/i420_to_rgb.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::I420toRGB;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/idft.hpp b/src/core/include/ngraph/op/idft.hpp
deleted file mode 100644
index e85d127227de75..00000000000000
--- a/src/core/include/ngraph/op/idft.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/fft_base.hpp"
-#include "openvino/op/idft.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v7 {
-using ov::op::v7::IDFT;
-}  // namespace v7
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/if.hpp b/src/core/include/ngraph/op/if.hpp
deleted file mode 100644
index f5ceab23b72a60..00000000000000
--- a/src/core/include/ngraph/op/if.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-
-#include "ngraph/function.hpp"
-#include "ngraph/op/parameter.hpp"
-#include "ngraph/op/util/multi_subgraph_base.hpp"
-#include "openvino/op/if.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::If;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
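Because each shim consisted only of using-declarations, the legacy and new spellings named the same class; deleting the headers changes spelling, not behavior. A sketch that compiled before this commit removed the shims (illustrative only):

#include <type_traits>

#include "ngraph/op/if.hpp"  // deleted by this commit; it pulled in openvino/op/if.hpp

// The shim was a pure alias, so both names referred to one type:
static_assert(std::is_same_v<ngraph::op::v8::If, ov::op::v8::If>,
              "ngraph::op::v8::If was an alias of ov::op::v8::If, not a separate class");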
diff --git a/src/core/include/ngraph/op/interpolate.hpp b/src/core/include/ngraph/op/interpolate.hpp
deleted file mode 100644
index f0628802e244d0..00000000000000
--- a/src/core/include/ngraph/op/interpolate.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-
-#include "ngraph/attribute_adapter.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/interpolate.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using InterpolateAttrs = ov::op::v0::Interpolate::Attributes;
-using ov::op::v0::Interpolate;
-}  // namespace v0
-namespace v4 {
-using ov::op::v4::Interpolate;
-}  // namespace v4
-namespace v11 {
-using ov::op::v11::Interpolate;
-}  // namespace v11
-using v0::Interpolate;
-using v0::InterpolateAttrs;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/irdft.hpp b/src/core/include/ngraph/op/irdft.hpp
deleted file mode 100644
index c9b69562824af7..00000000000000
--- a/src/core/include/ngraph/op/irdft.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "openvino/op/irdft.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v9 {
-using ov::op::v9::IRDFT;
-}  // namespace v9
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/is_finite.hpp b/src/core/include/ngraph/op/is_finite.hpp
deleted file mode 100644
index af3773d6e92364..00000000000000
--- a/src/core/include/ngraph/op/is_finite.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/is_finite.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v10 {
-using ov::op::v10::IsFinite;
-}  // namespace v10
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/is_inf.hpp b/src/core/include/ngraph/op/is_inf.hpp
deleted file mode 100644
index 8e7b41d2191ac0..00000000000000
--- a/src/core/include/ngraph/op/is_inf.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/is_inf.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v10 {
-using ov::op::v10::IsInf;
-}  // namespace v10
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/is_nan.hpp b/src/core/include/ngraph/op/is_nan.hpp
deleted file mode 100644
index 61426c448b1229..00000000000000
--- a/src/core/include/ngraph/op/is_nan.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/is_nan.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v10 {
-using ov::op::v10::IsNaN;
-}  // namespace v10
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/less.hpp b/src/core/include/ngraph/op/less.hpp
deleted file mode 100644
index 9c9772f4533c77..00000000000000
--- a/src/core/include/ngraph/op/less.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_comparison.hpp"
-#include "openvino/op/less.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Less;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/less_eq.hpp b/src/core/include/ngraph/op/less_eq.hpp
deleted file mode 100644
index 6e2bd02da347c9..00000000000000
--- a/src/core/include/ngraph/op/less_eq.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_comparison.hpp"
-#include "openvino/op/less_eq.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::LessEqual;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/log.hpp b/src/core/include/ngraph/op/log.hpp
deleted file mode 100644
index f590366b208064..00000000000000
--- a/src/core/include/ngraph/op/log.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/log.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Log;
-}  // namespace v0
-using v0::Log;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/log_softmax.hpp b/src/core/include/ngraph/op/log_softmax.hpp
deleted file mode 100644
index 09a4ab0fecce75..00000000000000
--- a/src/core/include/ngraph/op/log_softmax.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/log_softmax.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::LogSoftmax;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/loop.hpp b/src/core/include/ngraph/op/loop.hpp
deleted file mode 100644
index 9c14626965883d..00000000000000
--- a/src/core/include/ngraph/op/loop.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-
-#include "ngraph/factory_adapter.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/parameter.hpp"
-#include "ngraph/op/tensor_iterator.hpp"
-#include "ngraph/op/util/sub_graph_base.hpp"
-#include "openvino/op/loop.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::Loop;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/lrn.hpp b/src/core/include/ngraph/op/lrn.hpp
deleted file mode 100644
index 533c3fdd8ac6e9..00000000000000
--- a/src/core/include/ngraph/op/lrn.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/lrn.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::LRN;
-}  // namespace v0
-using v0::LRN;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/lstm_cell.hpp b/src/core/include/ngraph/op/lstm_cell.hpp
deleted file mode 100644
index 9714e9bb362ac9..00000000000000
--- a/src/core/include/ngraph/op/lstm_cell.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/activation_functions.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/lstm_cell.hpp"
-
-namespace ngraph {
-namespace op {
-using ov::op::LSTMWeightsFormat;
-
-namespace v0 {
-using ov::op::v0::LSTMCell;
-}  // namespace v0
-
-namespace v4 {
-using ov::op::v4::LSTMCell;
-}  // namespace v4
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/lstm_sequence.hpp b/src/core/include/ngraph/op/lstm_sequence.hpp
deleted file mode 100644
index 72a2bfd1aec9e0..00000000000000
--- a/src/core/include/ngraph/op/lstm_sequence.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/lstm_cell.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/lstm_sequence.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::LSTMSequence;
-}  // namespace v0
-
-namespace v5 {
-using ov::op::v5::LSTMSequence;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/matmul.hpp b/src/core/include/ngraph/op/matmul.hpp
deleted file mode 100644
index e72c43dc0eb5c9..00000000000000
--- a/src/core/include/ngraph/op/matmul.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/matmul.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::MatMul;
-}  // namespace v0
-using v0::MatMul;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/matrix_nms.hpp b/src/core/include/ngraph/op/matrix_nms.hpp
deleted file mode 100644
index eab69f6cb591f7..00000000000000
--- a/src/core/include/ngraph/op/matrix_nms.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/matrix_nms.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::MatrixNms;
-}  // namespace v8
-}  // namespace op
-using ov::operator<<;
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/max.hpp b/src/core/include/ngraph/op/max.hpp
deleted file mode 100644
index 39dc365f118f5c..00000000000000
--- a/src/core/include/ngraph/op/max.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/arithmetic_reduction.hpp"
-#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
-#include "openvino/op/reduce_max.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::ReduceMax;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/max_pool.hpp b/src/core/include/ngraph/op/max_pool.hpp
deleted file mode 100644
index 43d9ee2569350f..00000000000000
--- a/src/core/include/ngraph/op/max_pool.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-
-#include "ngraph/op/util/max_pool_base.hpp"
-#include "openvino/op/max_pool.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::MaxPool;
-}  // namespace v1
-
-namespace v8 {
-using ov::op::v8::MaxPool;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
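Note that max.hpp (and min.hpp further on) did not map to a same-named OpenVINO header: the legacy file forwarded to openvino/op/reduce_max.hpp and exposed ov::op::v1::ReduceMax, so the replacement include path differs from the legacy file name. A minimal post-migration sketch (illustrative only; the shapes and axis below are arbitrary assumptions):

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_max.hpp"  // legacy code included "ngraph/op/max.hpp"

std::shared_ptr<ov::op::v1::ReduceMax> make_reduce_max() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
    // keep_dims = false drops the reduced axis from the output shape.
    return std::make_shared<ov::op::v1::ReduceMax>(data, axes, false);
}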
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/maximum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Maximum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/min.hpp b/src/core/include/ngraph/op/min.hpp deleted file mode 100644 index 46e813f8ce910d..00000000000000 --- a/src/core/include/ngraph/op/min.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reduction.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_min.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceMin; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/minimum.hpp b/src/core/include/ngraph/op/minimum.hpp deleted file mode 100644 index 4d515d017e1432..00000000000000 --- a/src/core/include/ngraph/op/minimum.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/minimum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Minimum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mish.hpp b/src/core/include/ngraph/op/mish.hpp deleted file mode 100644 index d310c5370a7415..00000000000000 --- a/src/core/include/ngraph/op/mish.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/mish.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::Mish; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mod.hpp b/src/core/include/ngraph/op/mod.hpp deleted file mode 100644 index 92081e8fe8d267..00000000000000 --- a/src/core/include/ngraph/op/mod.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/mod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Mod; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/multiclass_nms.hpp b/src/core/include/ngraph/op/multiclass_nms.hpp deleted file mode 100644 index 19ae119a2214e6..00000000000000 --- a/src/core/include/ngraph/op/multiclass_nms.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/multiclass_nms_base.hpp" -#include "openvino/op/multiclass_nms.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::MulticlassNms; -} // namespace v8 -namespace v9 { -using ov::op::v9::MulticlassNms; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/multiply.hpp b/src/core/include/ngraph/op/multiply.hpp deleted file mode 100644 index 9e935312462785..00000000000000 --- a/src/core/include/ngraph/op/multiply.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/multiply.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Multiply; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mvn.hpp b/src/core/include/ngraph/op/mvn.hpp deleted file mode 100644 index 3c7a4f2a4867ab..00000000000000 --- a/src/core/include/ngraph/op/mvn.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/mvn.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::MVN; -} // namespace v0 -using v0::MVN; - -using ov::op::MVNEpsMode; - -namespace v6 { -using ov::op::v6::MVN; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/negative.hpp b/src/core/include/ngraph/op/negative.hpp deleted file mode 100644 index a872049551816d..00000000000000 --- a/src/core/include/ngraph/op/negative.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/negative.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Negative; -} // namespace v0 -using v0::Negative; -} // namespace op -NGRAPH_API -std::shared_ptr<Node> operator-(const Output<Node>& arg0); -} // namespace ngraph diff --git a/src/core/include/ngraph/op/non_max_suppression.hpp b/src/core/include/ngraph/op/non_max_suppression.hpp deleted file mode 100644 index 0797e792fdd7e6..00000000000000 --- a/src/core/include/ngraph/op/non_max_suppression.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/non_max_suppression.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::NonMaxSuppression; -} // namespace v1 - -namespace v3 { -using ov::op::v3::NonMaxSuppression; -} // namespace v3 - -namespace v4 { -using ov::op::v4::NonMaxSuppression; -} // namespace v4 - -namespace v5 { -using ov::op::v5::NonMaxSuppression; -} // namespace v5 - -namespace v9 { -using ov::op::v9::NonMaxSuppression; -} // namespace v9 -} // namespace op -using ov::operator<<; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/non_zero.hpp b/src/core/include/ngraph/op/non_zero.hpp deleted file mode 100644 index 75a48ba7ebd57a..00000000000000 --- a/src/core/include/ngraph/op/non_zero.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/non_zero.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::NonZero; -} // namespace v3 -using v3::NonZero; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/normalize_l2.hpp b/src/core/include/ngraph/op/normalize_l2.hpp deleted file mode 100644 index 9e9ab00a9a62b8..00000000000000 --- a/src/core/include/ngraph/op/normalize_l2.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/normalize_l2.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::NormalizeL2; -} // namespace v0 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/not.hpp b/src/core/include/ngraph/op/not.hpp deleted file mode 100644 index 64f5e492c240ea..00000000000000 --- a/src/core/include/ngraph/op/not.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/logical_not.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalNot; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/not_equal.hpp b/src/core/include/ngraph/op/not_equal.hpp deleted file mode 100644 index 883d48c63a94ad..00000000000000 --- a/src/core/include/ngraph/op/not_equal.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/not_equal.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::NotEqual; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/nv12_to_bgr.hpp b/src/core/include/ngraph/op/nv12_to_bgr.hpp deleted file mode 100644 index 0044d2049b0d09..00000000000000 --- a/src/core/include/ngraph/op/nv12_to_bgr.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/nv12_to_bgr.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::NV12toBGR; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/nv12_to_rgb.hpp b/src/core/include/ngraph/op/nv12_to_rgb.hpp deleted file mode 100644 index bd3bdb710ae333..00000000000000 --- a/src/core/include/ngraph/op/nv12_to_rgb.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/nv12_to_rgb.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::NV12toRGB; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/one_hot.hpp b/src/core/include/ngraph/op/one_hot.hpp deleted file mode 100644 index 52618026eaf4d4..00000000000000 --- a/src/core/include/ngraph/op/one_hot.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/one_hot.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::OneHot; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/op.hpp b/src/core/include/ngraph/op/op.hpp deleted file mode 100644 index 0fe6936ebf25a6..00000000000000 --- a/src/core/include/ngraph/op/op.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "openvino/op/op.hpp" - -namespace ngraph { -namespace op { -using ov::op::Op; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/or.hpp b/src/core/include/ngraph/op/or.hpp deleted file mode 100644 index cefc9f23e593da..00000000000000 --- a/src/core/include/ngraph/op/or.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "openvino/op/logical_or.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalOr; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/pad.hpp b/src/core/include/ngraph/op/pad.hpp deleted file mode 100644 index 502d6a2c1fcebc..00000000000000 --- a/src/core/include/ngraph/op/pad.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/pad.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Pad; -} // namespace v1 -namespace v12 { -using ov::op::v12::Pad; -} // namespace v12 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/parameter.hpp b/src/core/include/ngraph/op/parameter.hpp index 50b432f9662304..4442673f078e92 100644 --- a/src/core/include/ngraph/op/parameter.hpp +++ b/src/core/include/ngraph/op/parameter.hpp @@ -14,7 +14,6 @@ # endif #endif -#include "ngraph/op/op.hpp" #include "openvino/op/parameter.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/power.hpp b/src/core/include/ngraph/op/power.hpp deleted file mode 100644 index 75bd94576875b0..00000000000000 --- a/src/core/include/ngraph/op/power.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/power.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Power; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prelu.hpp b/src/core/include/ngraph/op/prelu.hpp deleted file mode 100644 index a88fec42c7ba5f..00000000000000 --- a/src/core/include/ngraph/op/prelu.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/prelu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::PRelu; -} // namespace v0 -using v0::PRelu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prior_box.hpp b/src/core/include/ngraph/op/prior_box.hpp deleted file mode 100644 index e8ed48bbe74bb3..00000000000000 --- a/src/core/include/ngraph/op/prior_box.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/prior_box.hpp" - -namespace ngraph { -namespace op { -using PriorBoxAttrs = ov::op::v0::PriorBox::Attributes; -namespace v0 { -using ov::op::v0::PriorBox; -} // namespace v0 -namespace v8 { -using ov::op::v8::PriorBox; -} // namespace v8 -using v0::PriorBox; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prior_box_clustered.hpp b/src/core/include/ngraph/op/prior_box_clustered.hpp deleted file mode 100644 index 17dceb8b453172..00000000000000 --- a/src/core/include/ngraph/op/prior_box_clustered.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/prior_box_clustered.hpp" - -namespace ngraph { -namespace op { -using PriorBoxClusteredAttrs = ov::op::v0::PriorBoxClustered::Attributes; - -namespace v0 { -using ov::op::v0::PriorBoxClustered; -} // namespace v0 -using v0::PriorBoxClustered; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/proposal.hpp b/src/core/include/ngraph/op/proposal.hpp deleted file mode 100644 index 65c5481fa125d5..00000000000000 --- a/src/core/include/ngraph/op/proposal.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/proposal.hpp" - -namespace ngraph { -namespace op { -using ProposalAttrs = ov::op::v0::Proposal::Attributes; - -namespace v0 { -using ov::op::v0::Proposal; -} // namespace v0 - -namespace v4 { -using ov::op::v4::Proposal; -} // namespace v4 -using v0::Proposal; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/psroi_pooling.hpp b/src/core/include/ngraph/op/psroi_pooling.hpp deleted file mode 100644 index a7d4e11f0442e1..00000000000000 --- a/src/core/include/ngraph/op/psroi_pooling.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/psroi_pooling.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::PSROIPooling; -} // namespace v0 -using v0::PSROIPooling; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/random_uniform.hpp b/src/core/include/ngraph/op/random_uniform.hpp deleted file mode 100644 index 1c55a2a3b82aa3..00000000000000 --- a/src/core/include/ngraph/op/random_uniform.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/random_uniform.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::RandomUniform; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/range.hpp b/src/core/include/ngraph/op/range.hpp deleted file mode 100644 index 274a8ca5bbce06..00000000000000 --- a/src/core/include/ngraph/op/range.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/range.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::Range; -} // namespace v4 -namespace v0 { -using ov::op::v0::Range; -} // namespace v0 -using v0::Range; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/rdft.hpp b/src/core/include/ngraph/op/rdft.hpp deleted file mode 100644 index 3f4cc966415aa1..00000000000000 --- a/src/core/include/ngraph/op/rdft.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/rdft.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::RDFT; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/read_value.hpp b/src/core/include/ngraph/op/read_value.hpp deleted file mode 100644 index 82eefe8e6633ba..00000000000000 --- a/src/core/include/ngraph/op/read_value.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_extension.hpp" -#include "openvino/op/read_value.hpp" - -namespace ngraph { -namespace op { -using ov::op::util::ReadValueBase; - -namespace v3 { -using ov::op::v3::ReadValue; -} // namespace v3 - -namespace v6 { -using ov::op::v6::ReadValue; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_l1.hpp b/src/core/include/ngraph/op/reduce_l1.hpp deleted file mode 100644 index da3964cef7494b..00000000000000 --- a/src/core/include/ngraph/op/reduce_l1.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_l1.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::ReduceL1; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_l2.hpp b/src/core/include/ngraph/op/reduce_l2.hpp deleted file mode 100644 index 0a4667c03abaaa..00000000000000 --- a/src/core/include/ngraph/op/reduce_l2.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_l2.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::ReduceL2; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_logical_and.hpp b/src/core/include/ngraph/op/reduce_logical_and.hpp deleted file mode 100644 index cb6e06f8e426e7..00000000000000 --- a/src/core/include/ngraph/op/reduce_logical_and.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/logical_reduction_keep_dims.hpp" -#include "openvino/op/reduce_logical_and.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceLogicalAnd; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_logical_or.hpp b/src/core/include/ngraph/op/reduce_logical_or.hpp deleted file mode 100644 index e2a1b8782a08cc..00000000000000 --- a/src/core/include/ngraph/op/reduce_logical_or.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/logical_reduction_keep_dims.hpp" -#include "openvino/op/reduce_logical_or.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceLogicalOr; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_mean.hpp b/src/core/include/ngraph/op/reduce_mean.hpp deleted file mode 100644 index 4092f908367750..00000000000000 --- a/src/core/include/ngraph/op/reduce_mean.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_mean.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceMean; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_prod.hpp b/src/core/include/ngraph/op/reduce_prod.hpp deleted file mode 100644 index 4f657399f3edfb..00000000000000 --- a/src/core/include/ngraph/op/reduce_prod.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_prod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceProd; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_sum.hpp b/src/core/include/ngraph/op/reduce_sum.hpp deleted file mode 100644 index b2022c59d0f6c7..00000000000000 --- a/src/core/include/ngraph/op/reduce_sum.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_sum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceSum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/region_yolo.hpp b/src/core/include/ngraph/op/region_yolo.hpp deleted file mode 100644 index ea93351b0371a4..00000000000000 --- a/src/core/include/ngraph/op/region_yolo.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/region_yolo.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::RegionYolo; -} // namespace v0 -using v0::RegionYolo; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/relu.hpp b/src/core/include/ngraph/op/relu.hpp deleted file mode 100644 index 237bdc113c1f14..00000000000000 --- a/src/core/include/ngraph/op/relu.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/relu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Relu; -} // namespace v0 -using v0::Relu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reorg_yolo.hpp b/src/core/include/ngraph/op/reorg_yolo.hpp deleted file mode 100644 index 1359e97a5182dc..00000000000000 --- a/src/core/include/ngraph/op/reorg_yolo.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/reorg_yolo.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::ReorgYolo; -} // namespace v0 -using v0::ReorgYolo; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reshape.hpp b/src/core/include/ngraph/op/reshape.hpp deleted file mode 100644 index ad0247443d9175..00000000000000 --- a/src/core/include/ngraph/op/reshape.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/reshape.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Reshape; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/result.hpp b/src/core/include/ngraph/op/result.hpp deleted file mode 100644 index a3429b0b696ded..00000000000000 --- a/src/core/include/ngraph/op/result.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/op.hpp" -#include "openvino/op/result.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Result; -} // namespace v0 -using v0::Result; -} // namespace op -using ResultVector = std::vector<std::shared_ptr<op::Result>>; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reverse.hpp b/src/core/include/ngraph/op/reverse.hpp deleted file mode 100644 index f7b5569812e01a..00000000000000 --- a/src/core/include/ngraph/op/reverse.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/reverse.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Reverse; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reverse_sequence.hpp b/src/core/include/ngraph/op/reverse_sequence.hpp deleted file mode 100644 index 4ce7389f7b2c78..00000000000000 --- a/src/core/include/ngraph/op/reverse_sequence.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/reverse_sequence.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::ReverseSequence; -} // namespace v0 -using v0::ReverseSequence; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/rnn_cell.hpp b/src/core/include/ngraph/op/rnn_cell.hpp deleted file mode 100644 index 5111833cd82882..00000000000000 --- a/src/core/include/ngraph/op/rnn_cell.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/rnn_cell.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::RNNCell; -} // namespace v0 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/rnn_sequence.hpp b/src/core/include/ngraph/op/rnn_sequence.hpp deleted file mode 100644 index 08397b75215e06..00000000000000 --- a/src/core/include/ngraph/op/rnn_sequence.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/rnn_sequence.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::RNNSequence; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/roi_align.hpp b/src/core/include/ngraph/op/roi_align.hpp deleted file mode 100644 index e8b645a827d985..00000000000000 --- a/src/core/include/ngraph/op/roi_align.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/roi_align.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ROIAlign; -} // namespace v3 -namespace v9 { -using ov::op::v9::ROIAlign; -} // namespace v9 -using v3::ROIAlign; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/roi_pooling.hpp b/src/core/include/ngraph/op/roi_pooling.hpp deleted file mode 100644 index b6d2ee15f40813..00000000000000 --- a/src/core/include/ngraph/op/roi_pooling.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/roi_pooling.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::ROIPooling; -} // namespace v0 -using v0::ROIPooling; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/roll.hpp b/src/core/include/ngraph/op/roll.hpp deleted file mode 100644 index 2f3939bb9a4923..00000000000000 --- a/src/core/include/ngraph/op/roll.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/roll.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v7 {
-using ov::op::v7::Roll;
-}  // namespace v7
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/round.hpp b/src/core/include/ngraph/op/round.hpp
deleted file mode 100644
index ee3e492b5a670b..00000000000000
--- a/src/core/include/ngraph/op/round.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/round.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::Round;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/scatter_elements_update.hpp b/src/core/include/ngraph/op/scatter_elements_update.hpp
deleted file mode 100644
index e65bb444ecb16b..00000000000000
--- a/src/core/include/ngraph/op/scatter_elements_update.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/runtime/aligned_buffer.hpp"
-#include "openvino/op/scatter_elements_update.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::ScatterElementsUpdate;
-}  // namespace v3
-using v3::ScatterElementsUpdate;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/scatter_nd_update.hpp b/src/core/include/ngraph/op/scatter_nd_update.hpp
deleted file mode 100644
index 844f2b4744bf9a..00000000000000
--- a/src/core/include/ngraph/op/scatter_nd_update.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/scatter_nd_base.hpp"
-#include "openvino/op/scatter_nd_update.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::ScatterNDUpdate;
-}  // namespace v3
-using v3::ScatterNDUpdate;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/scatter_update.hpp b/src/core/include/ngraph/op/scatter_update.hpp
deleted file mode 100644
index 0896521a364ff8..00000000000000
--- a/src/core/include/ngraph/op/scatter_update.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/scatter_base.hpp"
-#include "openvino/op/scatter_update.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::ScatterUpdate;
-}  // namespace v3
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/select.hpp b/src/core/include/ngraph/op/select.hpp
deleted file mode 100644
index b3f06e3d8995c5..00000000000000
--- a/src/core/include/ngraph/op/select.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/select.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Select;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/selu.hpp b/src/core/include/ngraph/op/selu.hpp
deleted file mode 100644
index 26e61c6eb743e2..00000000000000
--- a/src/core/include/ngraph/op/selu.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/selu.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Selu;
-}  // namespace v0
-using v0::Selu;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/shape_of.hpp b/src/core/include/ngraph/op/shape_of.hpp
deleted file mode 100644
index 79968883133c5c..00000000000000
--- a/src/core/include/ngraph/op/shape_of.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/shape_of.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::ShapeOf;
-}  // namespace v3
-
-namespace v0 {
-using ov::op::v0::ShapeOf;
-}  // namespace v0
-using v0::ShapeOf;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/shuffle_channels.hpp b/src/core/include/ngraph/op/shuffle_channels.hpp
deleted file mode 100644
index adc901855e7325..00000000000000
--- a/src/core/include/ngraph/op/shuffle_channels.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/shuffle_channels.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::ShuffleChannels;
-}  // namespace v0
-using v0::ShuffleChannels;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/sigmoid.hpp b/src/core/include/ngraph/op/sigmoid.hpp
deleted file mode 100644
index 89a2e4a25a0444..00000000000000
--- a/src/core/include/ngraph/op/sigmoid.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "ngraph/util.hpp"
-#include "openvino/op/sigmoid.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Sigmoid;
-}  // namespace v0
-using v0::Sigmoid;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/sign.hpp b/src/core/include/ngraph/op/sign.hpp
deleted file mode 100644
index 5cc50db67fec3b..00000000000000
--- a/src/core/include/ngraph/op/sign.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/sign.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Sign;
-}  // namespace v0
-using v0::Sign;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/sin.hpp b/src/core/include/ngraph/op/sin.hpp
deleted file mode 100644
index 45ae7dfd448876..00000000000000
--- a/src/core/include/ngraph/op/sin.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/sin.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Sin;
-}  // namespace v0
-using v0::Sin;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/sinh.hpp b/src/core/include/ngraph/op/sinh.hpp
deleted file mode 100644
index f6b8c99332edf6..00000000000000
--- a/src/core/include/ngraph/op/sinh.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/sinh.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Sinh;
-}  // namespace v0
-using v0::Sinh;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/sink.hpp b/src/core/include/ngraph/op/sink.hpp
deleted file mode 100644
index f5fd2e57af6cc7..00000000000000
--- a/src/core/include/ngraph/op/sink.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <vector>
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/sink.hpp"
-
-namespace ngraph {
-namespace op {
-using ov::op::Sink;
-}  // namespace op
-using SinkVector = std::vector<std::shared_ptr<op::Sink>>;
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/slice.hpp b/src/core/include/ngraph/op/slice.hpp
deleted file mode 100644
index f9dc771b2bc724..00000000000000
--- a/src/core/include/ngraph/op/slice.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/slice.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v8 {
-using ov::op::v8::Slice;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/softmax.hpp b/src/core/include/ngraph/op/softmax.hpp
deleted file mode 100644
index af1b7d617b4b6e..00000000000000
--- a/src/core/include/ngraph/op/softmax.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/softmax.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Softmax;
-}  // namespace v1
-
-namespace v8 {
-using ov::op::v8::Softmax;
-}  // namespace v8
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/softplus.hpp b/src/core/include/ngraph/op/softplus.hpp
deleted file mode 100644
index 3bed43efa1aa16..00000000000000
--- a/src/core/include/ngraph/op/softplus.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/softplus.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v4 {
-using ov::op::v4::SoftPlus;
-}  // namespace v4
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/softsign.hpp b/src/core/include/ngraph/op/softsign.hpp
deleted file mode 100644
index 289e258d3e229a..00000000000000
--- a/src/core/include/ngraph/op/softsign.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "openvino/op/softsign.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v9 {
-using ov::op::v9::SoftSign;
-}  // namespace v9
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/space_to_batch.hpp b/src/core/include/ngraph/op/space_to_batch.hpp
deleted file mode 100644
index b1d433f0d5ec4b..00000000000000
--- a/src/core/include/ngraph/op/space_to_batch.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/space_to_batch.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::SpaceToBatch;
-}  // namespace v1
-using v1::SpaceToBatch;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/space_to_depth.hpp b/src/core/include/ngraph/op/space_to_depth.hpp
deleted file mode 100644
index d6e0eb4024aa7c..00000000000000
--- a/src/core/include/ngraph/op/space_to_depth.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/space_to_depth.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::SpaceToDepth;
-}  // namespace v0
-using v0::SpaceToDepth;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/split.hpp b/src/core/include/ngraph/op/split.hpp
index 00d0b80a82237b..1ab39e2ace354d 100644
--- a/src/core/include/ngraph/op/split.hpp
+++ b/src/core/include/ngraph/op/split.hpp
@@ -18,7 +18,6 @@
 #include <vector>
 
 #include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
 #include "openvino/op/split.hpp"
 
 namespace ngraph {
diff --git a/src/core/include/ngraph/op/sqrt.hpp b/src/core/include/ngraph/op/sqrt.hpp
deleted file mode 100644
index 9db2668b2d81a1..00000000000000
--- a/src/core/include/ngraph/op/sqrt.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/sqrt.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Sqrt;
-}  // namespace v0
-using v0::Sqrt;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/squared_difference.hpp b/src/core/include/ngraph/op/squared_difference.hpp
deleted file mode 100644
index 44ebfb23b1f296..00000000000000
--- a/src/core/include/ngraph/op/squared_difference.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
-#include "openvino/op/squared_difference.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::SquaredDifference;
-}  // namespace v0
-using v0::SquaredDifference;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/squeeze.hpp b/src/core/include/ngraph/op/squeeze.hpp
deleted file mode 100644
index 194ae1b9b60ba6..00000000000000
--- a/src/core/include/ngraph/op/squeeze.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/squeeze.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Squeeze;
-}  // namespace v0
-using v0::Squeeze;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/strided_slice.hpp b/src/core/include/ngraph/op/strided_slice.hpp
deleted file mode 100644
index 1e604ca8b9c2ef..00000000000000
--- a/src/core/include/ngraph/op/strided_slice.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-#include <vector>
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/strided_slice.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::StridedSlice;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/subtract.hpp b/src/core/include/ngraph/op/subtract.hpp
deleted file mode 100644
index 6bc02261c18979..00000000000000
--- a/src/core/include/ngraph/op/subtract.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
-#include "openvino/op/subtract.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Subtract;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/swish.hpp b/src/core/include/ngraph/op/swish.hpp
deleted file mode 100644
index d001693e8e2a1c..00000000000000
--- a/src/core/include/ngraph/op/swish.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/swish.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v4 {
-using ov::op::v4::Swish;
-}  // namespace v4
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/tan.hpp b/src/core/include/ngraph/op/tan.hpp
deleted file mode 100644
index 86fe0ba4a806c3..00000000000000
--- a/src/core/include/ngraph/op/tan.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/tan.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Tan;
-}  // namespace v0
-using v0::Tan;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/tanh.hpp b/src/core/include/ngraph/op/tanh.hpp
deleted file mode 100644
index 4b22ea39cba929..00000000000000
--- a/src/core/include/ngraph/op/tanh.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/tanh.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Tanh;
-}  // namespace v0
-using v0::Tanh;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/tensor_iterator.hpp b/src/core/include/ngraph/op/tensor_iterator.hpp
deleted file mode 100644
index e2a2e8dcd51520..00000000000000
--- a/src/core/include/ngraph/op/tensor_iterator.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <vector>
-
-#include "ngraph/function.hpp"
-#include "ngraph/op/parameter.hpp"
-#include "ngraph/op/util/sub_graph_base.hpp"
-#include "openvino/op/tensor_iterator.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::TensorIterator;
-}  // namespace v0
-using v0::TensorIterator;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/tile.hpp b/src/core/include/ngraph/op/tile.hpp
deleted file mode 100644
index f575960bbd4c9c..00000000000000
--- a/src/core/include/ngraph/op/tile.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/tile.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Tile;
-}  // namespace v0
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/topk.hpp b/src/core/include/ngraph/op/topk.hpp
deleted file mode 100644
index c10368fe2778ab..00000000000000
--- a/src/core/include/ngraph/op/topk.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/topk.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::TopK;
-}  // namespace v1
-
-namespace v3 {
-using ov::op::v3::TopK;
-}  // namespace v3
-
-namespace v11 {
-using ov::op::v11::TopK;
-}  // namespace v11
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/transpose.hpp b/src/core/include/ngraph/op/transpose.hpp
deleted file mode 100644
index a042ae4eaef2cc..00000000000000
--- a/src/core/include/ngraph/op/transpose.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/transpose.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Transpose;
-}  // namespace v1
-using v1::Transpose;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/unique.hpp b/src/core/include/ngraph/op/unique.hpp
deleted file mode 100644
index c1439fbf4207ae..00000000000000
--- a/src/core/include/ngraph/op/unique.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/unique.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v10 {
-using ov::op::v10::Unique;
-}  // namespace v10
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/unsqueeze.hpp b/src/core/include/ngraph/op/unsqueeze.hpp
deleted file mode 100644
index 2f278db8f53144..00000000000000
--- a/src/core/include/ngraph/op/unsqueeze.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "openvino/op/unsqueeze.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Unsqueeze;
-}  // namespace v0
-using v0::Unsqueeze;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/activation_functions.hpp b/src/core/include/ngraph/op/util/activation_functions.hpp
deleted file mode 100644
index 0d10003aac57a1..00000000000000
--- a/src/core/include/ngraph/op/util/activation_functions.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-#include <string>
-
-#include "ngraph/except.hpp"
-#include "ngraph/node.hpp"
-#include "openvino/op/util/activation_functions.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-namespace error {
-using ov::op::util::error::UnknownActivationFunction;
-}  // namespace error
-
-namespace detail {
-using ov::op::util::detail::hardsigmoid;
-using ov::op::util::detail::relu;
-using ov::op::util::detail::sigmoid;
-using ov::op::util::detail::tanh;
-}  // namespace detail
-
-using ov::op::util::ActivationFunction;
-using ov::op::util::ActivationFunctionType;
-using ov::op::util::get_activation_func_by_name;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/arithmetic_reduction.hpp b/src/core/include/ngraph/op/util/arithmetic_reduction.hpp
deleted file mode 100644
index 03ab9058491b5e..00000000000000
--- a/src/core/include/ngraph/op/util/arithmetic_reduction.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/reduction_base.hpp"
-#include "openvino/op/util/arithmetic_reduction.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::ArithmeticReduction;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp b/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp
deleted file mode 100644
index 829bcf5d085138..00000000000000
--- a/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/arithmetic_reduction.hpp"
-#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::ArithmeticReductionKeepDims;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp b/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp
deleted file mode 100644
index 2e8acdc7462305..00000000000000
--- a/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/binary_elementwise_arithmetic.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::BinaryElementwiseArithmetic;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp b/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp
deleted file mode 100644
index 6f507e40da67db..00000000000000
--- a/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/binary_elementwise_comparison.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::BinaryElementwiseComparison;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp b/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp
deleted file mode 100644
index 46e733b149b871..00000000000000
--- a/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/binary_elementwise_logical.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::BinaryElementwiseLogical;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/broadcast_base.hpp b/src/core/include/ngraph/op/util/broadcast_base.hpp
deleted file mode 100644
index 297de17107c778..00000000000000
--- a/src/core/include/ngraph/op/util/broadcast_base.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/broadcast_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::BroadcastBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/deformable_convolution_base.hpp b/src/core/include/ngraph/op/util/deformable_convolution_base.hpp
deleted file mode 100644
index c670f9e7fc0fcc..00000000000000
--- a/src/core/include/ngraph/op/util/deformable_convolution_base.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/deformable_convolution_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::DeformableConvolutionBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/detection_output_base.hpp b/src/core/include/ngraph/op/util/detection_output_base.hpp
deleted file mode 100644
index 806ba06a0b7e25..00000000000000
--- a/src/core/include/ngraph/op/util/detection_output_base.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/detection_output_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::DetectionOutputBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/elementwise_args.hpp b/src/core/include/ngraph/op/util/elementwise_args.hpp
deleted file mode 100644
index 6049761ef111b2..00000000000000
--- a/src/core/include/ngraph/op/util/elementwise_args.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/node.hpp"
-#include "openvino/op/util/elementwise_args.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::validate_and_infer_elementwise_args;
-}
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp b/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp
deleted file mode 100644
index 943737251e178e..00000000000000
--- a/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/util/index_reduction.hpp"
-#include "openvino/op/util/embeddingbag_offsets_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::EmbeddingBagOffsetsBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp b/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp
deleted file mode 100644
index a66c0b52315bc5..00000000000000
--- a/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/util/index_reduction.hpp"
-#include "openvino/op/util/embeddingbag_packed_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::EmbeddingBagPackedBase;
-}  // namespace util
-using util::EmbeddingBagPackedBase;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/eval_copy.hpp b/src/core/include/ngraph/op/util/eval_copy.hpp
deleted file mode 100644
index b8f66859b2c9ec..00000000000000
--- a/src/core/include/ngraph/op/util/eval_copy.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#define COPY_TENSOR(a) \
-    case element::Type_t::a: \
-        rc = copy_tensor
diff --git a/src/core/include/ngraph/op/util/fft_base.hpp b/src/core/include/ngraph/op/util/fft_base.hpp
deleted file mode 100644
index d30134d1a53243..00000000000000
--- a/src/core/include/ngraph/op/util/fft_base.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/fft_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::FFTBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/framework_node.hpp b/src/core/include/ngraph/op/util/framework_node.hpp
deleted file mode 100644
index f65b23451b8bbe..00000000000000
--- a/src/core/include/ngraph/op/util/framework_node.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/framework_node.hpp"
-
-namespace ngraph {
-namespace op {
-using ov::op::util::FrameworkNode;
-using ov::op::util::FrameworkNodeAttrs;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/gather_base.hpp b/src/core/include/ngraph/op/util/gather_base.hpp
deleted file mode 100644
index d098fdc272b46d..00000000000000
--- a/src/core/include/ngraph/op/util/gather_base.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/gather_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::GatherBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/gather_nd_base.hpp b/src/core/include/ngraph/op/util/gather_nd_base.hpp
deleted file mode 100644
index be066f26528ffc..00000000000000
--- a/src/core/include/ngraph/op/util/gather_nd_base.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/gather_nd_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::GatherNDBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/index_reduction.hpp b/src/core/include/ngraph/op/util/index_reduction.hpp
deleted file mode 100644
index 1b934cce3f2fbd..00000000000000
--- a/src/core/include/ngraph/op/util/index_reduction.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/index_reduction.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::IndexReduction;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/logical_reduction.hpp b/src/core/include/ngraph/op/util/logical_reduction.hpp
deleted file mode 100644
index d20cf28a8b3b15..00000000000000
--- a/src/core/include/ngraph/op/util/logical_reduction.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/reduction_base.hpp"
-#include "openvino/op/util/logical_reduction.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::LogicalReduction;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp b/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp
deleted file mode 100644
index 12f440bd3e9b41..00000000000000
--- a/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/logical_reduction.hpp"
-#include "openvino/op/util/logical_reduction_keep_dims.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::LogicalReductionKeepDims;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/max_pool_base.hpp b/src/core/include/ngraph/op/util/max_pool_base.hpp
deleted file mode 100644
index 72564c888a92bb..00000000000000
--- a/src/core/include/ngraph/op/util/max_pool_base.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "openvino/op/util/max_pool_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::MaxPoolBase;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/multi_subgraph_base.hpp b/src/core/include/ngraph/op/util/multi_subgraph_base.hpp
deleted file mode 100644
index 069dfeea568aba..00000000000000
--- a/src/core/include/ngraph/op/util/multi_subgraph_base.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-#include <vector>
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/util/multi_subgraph_base.hpp"
-
-namespace ngraph {
-namespace op {
-namespace util {
-using ov::op::util::MultiSubGraphOp;
-using MultiSubgraphInputDescriptionPtr = ov::op::util::MultiSubGraphOp::InputDescription::Ptr;
-using MultiSubgraphOutputDescriptionPtr = ov::op::util::MultiSubGraphOp::OutputDescription::Ptr;
-using MultiSubgraphInputDescriptionVector = util::MultiSubGraphOp::MultiSubgraphInputDescriptionVector;
-using MultiSubgraphOutputDescriptionVector = util::MultiSubGraphOp::MultiSubgraphOutputDescriptionVector;
-}  // namespace util
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/util/multiclass_nms_base.hpp b/src/core/include/ngraph/op/util/multiclass_nms_base.hpp
deleted file mode 100644
index e599f5e0cb0013..00000000000000
--- a/src/core/include/ngraph/op/util/multiclass_nms_base.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/multiclass_nms_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::MulticlassNmsBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/op_annotations.hpp b/src/core/include/ngraph/op/util/op_annotations.hpp deleted file mode 100644 index dec2879f9c837f..00000000000000 --- a/src/core/include/ngraph/op/util/op_annotations.hpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/deprecated.hpp" -#include "ngraph/except.hpp" -#include "ngraph/ngraph_visibility.hpp" - -namespace ngraph { -namespace op { -namespace util { -struct NGRAPH_API_DEPRECATED oi_pair { - size_t output; - size_t input; - bool destructive; -}; - -/// \brief Base class for annotations added to graph ops -class NGRAPH_API_DEPRECATED NGRAPH_API OpAnnotations { - NGRAPH_SUPPRESS_DEPRECATED_START -public: - virtual ~OpAnnotations() = default; - - void add_in_place_oi_pair(const struct oi_pair& oi) { - for (const auto& e : m_in_place_oi_pairs) { - if (e.input == oi.input || e.output == oi.output) { - OPENVINO_THROW("In_place hint conflicts with an existing entry"); - } - } - m_in_place_oi_pairs.emplace_back(oi); - } - - const std::vector& get_in_place_oi_pairs() const { - return m_in_place_oi_pairs; - } - bool is_cacheable() const { - return m_cacheable; - } - void set_cacheable(bool val) { - m_cacheable = val; - } - -private: - // map of output-input pairs for which in-place computation is valid - std::vector m_in_place_oi_pairs; - - bool m_cacheable = false; - NGRAPH_SUPPRESS_DEPRECATED_END -}; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/recurrent_sequence.hpp b/src/core/include/ngraph/op/util/recurrent_sequence.hpp deleted file mode 100644 index f283947fa154b6..00000000000000 --- a/src/core/include/ngraph/op/util/recurrent_sequence.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/deprecated.hpp" -#include "ngraph/node.hpp" -#include "openvino/op/util/recurrent_sequence.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::validate_seq_input_rank_dimension; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/reduction_base.hpp b/src/core/include/ngraph/op/util/reduction_base.hpp deleted file mode 100644 index 94d653781fc894..00000000000000 --- a/src/core/include/ngraph/op/util/reduction_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/reduction_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ReductionBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/rnn_cell_base.hpp b/src/core/include/ngraph/op/util/rnn_cell_base.hpp deleted file mode 100644 index 1c865cb4eb086f..00000000000000 --- a/src/core/include/ngraph/op/util/rnn_cell_base.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "openvino/op/util/rnn_cell_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::convert_lstm_node_format; -using ov::op::util::LSTMWeightsFormat; -using ov::op::util::RNNCellBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/scatter_base.hpp b/src/core/include/ngraph/op/util/scatter_base.hpp deleted file mode 100644 index 73c457b60ba7d4..00000000000000 --- a/src/core/include/ngraph/op/util/scatter_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/scatter_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ScatterBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/scatter_nd_base.hpp b/src/core/include/ngraph/op/util/scatter_nd_base.hpp deleted file mode 100644 index 9a92acb00917b6..00000000000000 --- a/src/core/include/ngraph/op/util/scatter_nd_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/scatter_nd_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ScatterNDBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/slice_plan.hpp b/src/core/include/ngraph/op/util/slice_plan.hpp deleted file mode 100644 index e47e4ecd80e4f0..00000000000000 --- a/src/core/include/ngraph/op/util/slice_plan.hpp +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ngraph { -// -// In various places, like ConstantFolding, it is -// useful to transform DynSlice by converting it to a sequence of ops: -// -// Slice (to do the basic slicing) -// | -// v -// Reshape (non-transposing, to handle shrinks) -// | -// v -// Reverse (to emulate backwards stride) -// -// (The Reshape, Reverse, or both may be omitted if they would just be -// identities.) -// -// A SlicePlan is used to collect parameters for these ops. -// -// This class is moved to dev API -struct NGRAPH_API_DEPRECATED NGRAPH_API SlicePlan { - // Parameters for the Slice - std::vector begins; - std::vector ends; - std::vector strides; - - // Shapes coming into, and going out of, the Reshape. - Shape reshape_in_shape; - Shape reshape_out_shape; - - // Parameters for the Reverse - AxisSet reverse_axes; - - bool operator==(const SlicePlan& other) const; - bool operator!=(const SlicePlan& other) const; -}; - -NGRAPH_API_DEPRECATED SlicePlan NGRAPH_API make_slice_plan(const Shape& input_shape, - const std::vector& begins, - const std::vector& ends, - const std::vector& strides, - const AxisSet& lower_bounds_mask, - const AxisSet& upper_bounds_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask); -} // namespace ngraph - -using ngraph::make_slice_plan; -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/src/core/include/ngraph/op/util/sub_graph_base.hpp b/src/core/include/ngraph/op/util/sub_graph_base.hpp deleted file mode 100644 index 48a3098e16d2b9..00000000000000 --- a/src/core/include/ngraph/op/util/sub_graph_base.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/multi_subgraph_base.hpp" -#include "openvino/op/util/sub_graph_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::SubGraphOp; -using InputDescriptionPtr = util::SubGraphOp::InputDescription::Ptr; -using OutputDescriptionPtr = util::SubGraphOp::OutputDescription::Ptr; -using InputDescriptionVector = std::vector; -using OutputDescriptionVector = std::vector; -} // namespace util -} // namespace op - -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp b/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp deleted file mode 100644 index 152467da57fe22..00000000000000 --- a/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/unary_elementwise_arithmetic.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::UnaryElementwiseArithmetic; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable.hpp b/src/core/include/ngraph/op/util/variable.hpp deleted file mode 100644 index 6093d69d247783..00000000000000 --- a/src/core/include/ngraph/op/util/variable.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/partial_shape.hpp" -#include "ngraph/type/element_type.hpp" -#include "openvino/op/util/variable.hpp" - -namespace ngraph { -using ov::op::util::Variable; -using ov::op::util::VariableInfo; -using VariablePtr = std::shared_ptr; -using VariableVector = std::vector; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_context.hpp b/src/core/include/ngraph/op/util/variable_context.hpp deleted file mode 100644 index 680f871f0c9366..00000000000000 --- a/src/core/include/ngraph/op/util/variable_context.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_value.hpp" -#include "ngraph/output_vector.hpp" -#include "openvino/op/util/variable_context.hpp" - -namespace ngraph { -using VariableMap = std::unordered_map; -using ov::op::util::VariableContext; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_extension.hpp b/src/core/include/ngraph/op/util/variable_extension.hpp deleted file mode 100644 index c11b71b9bf449d..00000000000000 --- a/src/core/include/ngraph/op/util/variable_extension.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "openvino/op/util/variable_extension.hpp" - -namespace ngraph { -using ov::op::util::VariableExtension; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_value.hpp b/src/core/include/ngraph/op/util/variable_value.hpp deleted file mode 100644 index 3185427acc7ab8..00000000000000 --- a/src/core/include/ngraph/op/util/variable_value.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "openvino/op/util/variable_value.hpp" - -namespace ngraph { -using ov::op::util::VariableValue; -using VariableValuePtr = std::shared_ptr; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/variadic_split.hpp b/src/core/include/ngraph/op/variadic_split.hpp deleted file mode 100644 index aea880731dc1c2..00000000000000 --- a/src/core/include/ngraph/op/variadic_split.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/variadic_split.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::VariadicSplit; -} // namespace v1 -using v1::VariadicSplit; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/xor.hpp b/src/core/include/ngraph/op/xor.hpp deleted file mode 100644 index e25e59d061c7be..00000000000000 --- a/src/core/include/ngraph/op/xor.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "openvino/op/logical_xor.hpp" -#include "openvino/op/xor.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalXor; -} // namespace v1 -namespace v0 { -using ov::op::v0::Xor; -} // namespace v0 -using v0::Xor; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/ops.hpp b/src/core/include/ngraph/ops.hpp index 79a4d26adaeecd..b3cecc8e036e58 100644 --- a/src/core/include/ngraph/ops.hpp +++ b/src/core/include/ngraph/ops.hpp @@ -16,181 +16,9 @@ # endif #endif -#include "ngraph/op/abs.hpp" -#include "ngraph/op/acos.hpp" -#include "ngraph/op/acosh.hpp" -#include "ngraph/op/adaptive_avg_pool.hpp" -#include "ngraph/op/adaptive_max_pool.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/and.hpp" -#include "ngraph/op/asin.hpp" -#include "ngraph/op/asinh.hpp" -#include "ngraph/op/assign.hpp" -#include "ngraph/op/atan.hpp" -#include "ngraph/op/atanh.hpp" -#include "ngraph/op/avg_pool.hpp" -#include "ngraph/op/batch_norm.hpp" -#include "ngraph/op/batch_to_space.hpp" -#include "ngraph/op/binary_convolution.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/bucketize.hpp" -#include "ngraph/op/ceiling.hpp" -#include "ngraph/op/clamp.hpp" #include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/convert_like.hpp" -#include "ngraph/op/convolution.hpp" -#include "ngraph/op/cos.hpp" -#include "ngraph/op/cosh.hpp" -#include "ngraph/op/ctc_greedy_decoder.hpp" -#include "ngraph/op/ctc_greedy_decoder_seq_len.hpp" -#include "ngraph/op/ctc_loss.hpp" -#include "ngraph/op/cum_sum.hpp" -#include "ngraph/op/deformable_convolution.hpp" -#include "ngraph/op/deformable_psroi_pooling.hpp" -#include "ngraph/op/depth_to_space.hpp" -#include "ngraph/op/detection_output.hpp" -#include "ngraph/op/dft.hpp" #include "ngraph/op/divide.hpp" -#include "ngraph/op/einsum.hpp" -#include "ngraph/op/elu.hpp" -#include "ngraph/op/embedding_segments_sum.hpp" -#include "ngraph/op/embeddingbag_offsets_sum.hpp" -#include "ngraph/op/embeddingbag_packedsum.hpp" -#include "ngraph/op/equal.hpp" -#include "ngraph/op/erf.hpp" -#include "ngraph/op/exp.hpp" -#include "ngraph/op/experimental_detectron_detection_output.hpp" -#include "ngraph/op/experimental_detectron_generate_proposals.hpp" -#include "ngraph/op/experimental_detectron_prior_grid_generator.hpp" -#include "ngraph/op/experimental_detectron_roi_feature.hpp" -#include "ngraph/op/experimental_detectron_topkrois.hpp" -#include "ngraph/op/extractimagepatches.hpp" -#include "ngraph/op/eye.hpp" -#include "ngraph/op/fake_quantize.hpp" -#include "ngraph/op/floor.hpp" -#include "ngraph/op/floor_mod.hpp" -#include "ngraph/op/gather.hpp" -#include "ngraph/op/gather_elements.hpp" -#include "ngraph/op/gather_nd.hpp" -#include "ngraph/op/gather_tree.hpp" -#include "ngraph/op/gelu.hpp" -#include "ngraph/op/generate_proposals.hpp" -#include "ngraph/op/greater.hpp" -#include "ngraph/op/greater_eq.hpp" -#include "ngraph/op/grid_sample.hpp" -#include "ngraph/op/grn.hpp" -#include "ngraph/op/group_conv.hpp" -#include "ngraph/op/gru_cell.hpp" -#include "ngraph/op/gru_sequence.hpp" -#include "ngraph/op/hard_sigmoid.hpp" -#include "ngraph/op/hsigmoid.hpp" -#include "ngraph/op/hswish.hpp" -#include "ngraph/op/i420_to_bgr.hpp" -#include 
"ngraph/op/i420_to_rgb.hpp" -#include "ngraph/op/idft.hpp" -#include "ngraph/op/if.hpp" -#include "ngraph/op/interpolate.hpp" -#include "ngraph/op/irdft.hpp" -#include "ngraph/op/is_finite.hpp" -#include "ngraph/op/is_inf.hpp" -#include "ngraph/op/is_nan.hpp" -#include "ngraph/op/less.hpp" -#include "ngraph/op/less_eq.hpp" -#include "ngraph/op/log.hpp" -#include "ngraph/op/log_softmax.hpp" -#include "ngraph/op/loop.hpp" -#include "ngraph/op/lrn.hpp" -#include "ngraph/op/lstm_cell.hpp" -#include "ngraph/op/lstm_sequence.hpp" -#include "ngraph/op/matmul.hpp" -#include "ngraph/op/matrix_nms.hpp" -#include "ngraph/op/max.hpp" -#include "ngraph/op/max_pool.hpp" -#include "ngraph/op/maximum.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/mish.hpp" -#include "ngraph/op/mod.hpp" -#include "ngraph/op/multiclass_nms.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/mvn.hpp" -#include "ngraph/op/negative.hpp" -#include "ngraph/op/non_max_suppression.hpp" -#include "ngraph/op/non_zero.hpp" -#include "ngraph/op/normalize_l2.hpp" -#include "ngraph/op/not.hpp" -#include "ngraph/op/not_equal.hpp" -#include "ngraph/op/nv12_to_bgr.hpp" -#include "ngraph/op/nv12_to_rgb.hpp" -#include "ngraph/op/one_hot.hpp" -#include "ngraph/op/or.hpp" -#include "ngraph/op/pad.hpp" #include "ngraph/op/parameter.hpp" -#include "ngraph/op/power.hpp" -#include "ngraph/op/prelu.hpp" -#include "ngraph/op/prior_box.hpp" -#include "ngraph/op/prior_box_clustered.hpp" -#include "ngraph/op/proposal.hpp" -#include "ngraph/op/psroi_pooling.hpp" -#include "ngraph/op/random_uniform.hpp" -#include "ngraph/op/range.hpp" -#include "ngraph/op/rdft.hpp" -#include "ngraph/op/read_value.hpp" -#include "ngraph/op/reduce_l1.hpp" -#include "ngraph/op/reduce_l2.hpp" -#include "ngraph/op/reduce_logical_and.hpp" -#include "ngraph/op/reduce_logical_or.hpp" -#include "ngraph/op/reduce_mean.hpp" -#include "ngraph/op/reduce_prod.hpp" -#include "ngraph/op/reduce_sum.hpp" -#include "ngraph/op/region_yolo.hpp" -#include "ngraph/op/relu.hpp" -#include "ngraph/op/reorg_yolo.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/op/reverse.hpp" -#include "ngraph/op/reverse_sequence.hpp" -#include "ngraph/op/rnn_cell.hpp" -#include "ngraph/op/rnn_sequence.hpp" -#include "ngraph/op/roi_align.hpp" -#include "ngraph/op/roi_pooling.hpp" -#include "ngraph/op/roll.hpp" -#include "ngraph/op/round.hpp" -#include "ngraph/op/scatter_elements_update.hpp" -#include "ngraph/op/scatter_nd_update.hpp" -#include "ngraph/op/scatter_update.hpp" -#include "ngraph/op/select.hpp" -#include "ngraph/op/selu.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/shuffle_channels.hpp" -#include "ngraph/op/sigmoid.hpp" -#include "ngraph/op/sign.hpp" -#include "ngraph/op/sin.hpp" -#include "ngraph/op/sinh.hpp" -#include "ngraph/op/slice.hpp" -#include "ngraph/op/softmax.hpp" -#include "ngraph/op/softplus.hpp" -#include "ngraph/op/softsign.hpp" -#include "ngraph/op/space_to_batch.hpp" -#include "ngraph/op/space_to_depth.hpp" #include "ngraph/op/split.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/squared_difference.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/strided_slice.hpp" -#include "ngraph/op/subtract.hpp" -#include "ngraph/op/swish.hpp" -#include "ngraph/op/tan.hpp" -#include "ngraph/op/tanh.hpp" -#include "ngraph/op/tensor_iterator.hpp" -#include "ngraph/op/tile.hpp" -#include "ngraph/op/topk.hpp" -#include "ngraph/op/transpose.hpp" -#include "ngraph/op/unique.hpp" 
-#include "ngraph/op/unsqueeze.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/op_types.hpp" -#include "ngraph/op/variadic_split.hpp" -#include "ngraph/op/xor.hpp" diff --git a/src/core/include/ngraph/opsets/opset.hpp b/src/core/include/ngraph/opsets/opset.hpp index 3f65437c6d3801..de79416b7c2a2d 100644 --- a/src/core/include/ngraph/opsets/opset.hpp +++ b/src/core/include/ngraph/opsets/opset.hpp @@ -54,19 +54,7 @@ class NGRAPH_API OpSet : public ov::OpSet { } }; -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset1(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset2(); NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset3(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset4(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset5(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset6(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset7(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset8(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset9(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset10(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset11(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset12(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset13(); NGRAPH_API_DEPRECATED const NGRAPH_API std::map>& get_available_opsets(); } // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset2.hpp b/src/core/include/ngraph/opsets/opset2.hpp deleted file mode 100644 index d2f09479f941a2..00000000000000 --- a/src/core/include/ngraph/opsets/opset2.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset2 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset2_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset2 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset2_tbl.hpp b/src/core/include/ngraph/opsets/opset2_tbl.hpp deleted file mode 100644 index 2438ff7b341e9d..00000000000000 --- a/src/core/include/ngraph/opsets/opset2_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset2_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/pattern/matcher.hpp b/src/core/include/ngraph/pattern/matcher.hpp index 947dcfb4d58a9c..c37244bed60834 100644 --- a/src/core/include/ngraph/pattern/matcher.hpp +++ b/src/core/include/ngraph/pattern/matcher.hpp @@ -20,7 +20,6 @@ #include #include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/pattern/op/any.hpp" #include "ngraph/pattern/op/any_of.hpp" #include "ngraph/pattern/op/any_output.hpp" diff --git a/src/core/include/ngraph/validation_util.hpp b/src/core/include/ngraph/validation_util.hpp index d7f83a1d8947f6..3535911b1eb9bb 100644 --- a/src/core/include/ngraph/validation_util.hpp +++ b/src/core/include/ngraph/validation_util.hpp @@ -17,11 +17,9 @@ #include #include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/variable_context.hpp" #include "openvino/core/validation_util.hpp" +#include "openvino/op/util/variable_context.hpp" namespace ngraph { using ov::evaluate_as_partial_shape; @@ -31,6 +29,7 @@ using ov::infer_auto_padding; using ov::infer_convolution_forward; using ov::normalize_axes; using ov::normalize_axis; +using ov::op::v0::Constant; NGRAPH_API_DEPRECATED NGRAPH_API @@ -169,15 +168,15 @@ NGRAPH_API std::pair maximum_value(const Output& value); /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::max() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_max_of_type(element::Type_t t); +NGRAPH_API std::shared_ptr get_constant_max_of_type(element::Type_t t); /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::min() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_min_of_type(element::Type_t t); +NGRAPH_API std::shared_ptr get_constant_min_of_type(element::Type_t t); /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::lowest() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_lowest_of_type(element::Type_t t); +NGRAPH_API std::shared_ptr get_constant_lowest_of_type(element::Type_t t); namespace opset1 { /// diff --git a/src/core/include/openvino/core/enum_names.hpp b/src/core/include/openvino/core/enum_names.hpp index 60e7f3297b5b4f..7885200645f0ae 100644 --- a/src/core/include/openvino/core/enum_names.hpp +++ b/src/core/include/openvino/core/enum_names.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "openvino/core/except.hpp" diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp index 3f3ede1b7c1f62..761293f194fd16 100644 --- a/src/core/src/graph_util.cpp +++ 
b/src/core/src/graph_util.cpp @@ -221,7 +221,7 @@ std::shared_ptr clone_ov_model(const Model& func, std::unordered_map node : func.get_results()) { auto result = ov::as_type_ptr(node_map.at(node.get())); if (!result) { - OPENVINO_THROW("Results should be of type op::Result"); + OPENVINO_THROW("Results should be of type ov::op::v0::Result"); } cloned_results.push_back(result); } @@ -556,7 +556,7 @@ std::pair, std::shared_ptr res_node = std::make_shared(src_node); + std::shared_ptr res_node = std::make_shared(src_node); return make_pair(res_node, par_node); } diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index d80f040e5ef7ad..f8b1adeb2a77d0 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -7,10 +7,10 @@ #include "bound_evaluate.hpp" #include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/core/shape_util.hpp" #include "openvino/op/util/axes_util.hpp" #include "openvino/reference/reduce_prod.hpp" +#include "validation_util.hpp" namespace ov { namespace op { @@ -99,11 +99,9 @@ bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { // In case dimensions has a zero dimension - it should return 0 in any case if (tensor_has_max_value(get_input_tensor(0).get_upper_value()) && !tensor_has_zero_value(get_input_tensor(0).get_upper_value())) { - OPENVINO_SUPPRESS_DEPRECATED_START - auto max_constant = ngraph::get_constant_max_of_type(get_output_element_type(0)); - OPENVINO_SUPPRESS_DEPRECATED_END - OPENVINO_ASSERT(max_constant->get_byte_size() <= output_values[0].get_byte_size()); - memcpy(output_values[0].data(), max_constant->get_data_ptr(), max_constant->get_byte_size()); + const auto max_constant = ov::util::make_tensor_of_max_value(get_output_element_type(0)); + OPENVINO_ASSERT(max_constant.get_byte_size() <= output_values[0].get_byte_size()); + std::memcpy(output_values[0].data(), max_constant.data(), max_constant.get_byte_size()); return true; } diff --git a/src/core/src/op/util/slice_plan.cpp b/src/core/src/op/util/slice_plan.cpp index 2025900745ec95..434f855490615e 100644 --- a/src/core/src/op/util/slice_plan.cpp +++ b/src/core/src/op/util/slice_plan.cpp @@ -6,7 +6,6 @@ #include -#include "ngraph/op/util/slice_plan.hpp" #include "openvino/core/except.hpp" namespace ov { @@ -217,45 +216,3 @@ bool SlicePlan::operator!=(const SlicePlan& other) const { } // namespace util } // namespace op } // namespace ov - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ngraph { - -SlicePlan make_slice_plan(const Shape& input_shape, - const std::vector& begins, - const std::vector& ends, - const std::vector& strides, - const AxisSet& lower_bounds_mask, - const AxisSet& upper_bounds_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask) { - const auto sp = ov::op::util::make_slice_plan(input_shape, - begins, - ends, - strides, - lower_bounds_mask, - upper_bounds_mask, - new_axis_mask, - shrink_axis_mask, - ellipsis_mask); - return SlicePlan{sp.begins, sp.ends, sp.strides, sp.reshape_in_shape, sp.reshape_out_shape, sp.reverse_axes}; -} - -bool SlicePlan::operator==(const SlicePlan& other) const { - bool equal = true; - equal &= begins == other.begins; - equal &= ends == other.ends; - equal &= strides == other.strides; - equal &= reshape_in_shape == other.reshape_in_shape; - equal &= reshape_out_shape == other.reshape_out_shape; - equal &= reverse_axes == other.reverse_axes; - - return equal; -} - -bool SlicePlan::operator!=(const 
SlicePlan& other) const { - return !(*this == other); -} -} // namespace ngraph -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index 3f9104a1011fcb..a92f0d972e49d4 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -217,86 +217,14 @@ OpSet::OpSet(const OpSet& opset) : ov::OpSet(opset) {} const std::map>& get_available_opsets() { #define _REG_OPSET(OPSET) \ { #OPSET, get_##OPSET } - const static std::map> opset_map = {_REG_OPSET(opset1), - _REG_OPSET(opset2), - _REG_OPSET(opset3), - _REG_OPSET(opset4), - _REG_OPSET(opset5), - _REG_OPSET(opset6), - _REG_OPSET(opset7), - _REG_OPSET(opset8), - _REG_OPSET(opset9), - _REG_OPSET(opset10), - _REG_OPSET(opset11), - _REG_OPSET(opset12), - _REG_OPSET(opset13)}; + const static std::map> opset_map = {_REG_OPSET(opset3)}; #undef _REG_OPSET return opset_map; } -const OpSet& get_opset1() { - static OpSet opset(ov::get_opset1()); - return opset; -} - -const OpSet& get_opset2() { - static OpSet opset(ov::get_opset2()); - return opset; -} - const OpSet& get_opset3() { static OpSet opset(ov::get_opset3()); return opset; } -const OpSet& get_opset4() { - static OpSet opset(ov::get_opset4()); - return opset; -} - -const OpSet& get_opset5() { - static OpSet opset(ov::get_opset5()); - return opset; -} - -const OpSet& get_opset6() { - static OpSet opset(ov::get_opset6()); - return opset; -} - -const OpSet& get_opset7() { - static OpSet opset(ov::get_opset7()); - return opset; -} - -const OpSet& get_opset8() { - static OpSet opset(ov::get_opset8()); - return opset; -} - -const OpSet& get_opset9() { - static OpSet opset(ov::get_opset9()); - return opset; -} - -const OpSet& get_opset10() { - static OpSet opset(ov::get_opset10()); - return opset; -} - -const OpSet& get_opset11() { - static OpSet opset(ov::get_opset11()); - return opset; -} - -const OpSet& get_opset12() { - static OpSet opset(ov::get_opset12()); - return opset; -} - -const OpSet& get_opset13() { - static OpSet opset(ov::get_opset13()); - return opset; -} - } // namespace ngraph diff --git a/src/core/src/specialize_function.cpp b/src/core/src/specialize_function.cpp index 2b1bf5f8346380..08401fde6fc389 100644 --- a/src/core/src/specialize_function.cpp +++ b/src/core/src/specialize_function.cpp @@ -5,13 +5,14 @@ #include "ngraph/specialize_function.hpp" #include "itt.hpp" -#include "ngraph/op/assign.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/util/op_types.hpp" +#include "openvino/op/constant.hpp" using namespace ngraph; NGRAPH_SUPPRESS_DEPRECATED_START; +using ov::op::v0::Constant; + std::shared_ptr ngraph::specialize_function(std::shared_ptr f, const std::vector& parameter_element_types, const std::vector& parameter_shapes, @@ -32,9 +33,9 @@ std::shared_ptr ngraph::specialize_function(std::shared_ptr if (parameter_values[i] != nullptr && parameter_shapes[i].is_static() && parameter_element_types[i].is_static()) { - m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], - parameter_shapes[i].to_shape(), - parameter_values[i]); + m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], + parameter_shapes[i].to_shape(), + parameter_values[i]); } else { m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], parameter_shapes[i]); @@ -86,12 +87,12 @@ std::shared_ptr ngraph::specialize_function(std::shared_ptr ResultVector new_results = f->get_results(); for (size_t i = 0; i < new_results.size(); i++) { auto name = 
new_results[i]->get_friendly_name(); - new_results[i] = std::static_pointer_cast(m[new_results[i].get()]); + new_results[i] = std::static_pointer_cast(m[new_results[i].get()]); new_results[i]->set_friendly_name(name); } - SinkVector new_sinks = f->get_sinks(); + auto new_sinks = f->get_sinks(); for (size_t i = 0; i < new_sinks.size(); i++) { - new_sinks[i] = std::static_pointer_cast(m[new_sinks[i].get()]); + new_sinks[i] = std::static_pointer_cast(m[new_sinks[i].get()]); } return std::make_shared(new_results, new_sinks, new_parameters); diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 67e2d1e269b92f..a4597f7aac6a76 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -10,10 +10,10 @@ #include "bound_evaluate.hpp" #include "compare.hpp" #include "ngraph/evaluator.hpp" -#include "ngraph/op/negative.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/gather.hpp" +#include "openvino/op/negative.hpp" #include "openvino/op/ops.hpp" #include "sequnce_generator.hpp" #include "validation_util.hpp" @@ -22,6 +22,12 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { using ov::Dimension; +namespace op { +namespace v0 { +using ov::op::v0::Constant; +using ov::op::v0::Negative; +} // namespace v0 +} // namespace op Strides conv_default_strides(const Node* /* node */, const PartialShape& data_batch_shape, @@ -922,10 +928,6 @@ std::shared_ptr get_constant_lowest_of_type(element::Type_t t) return nullptr; } } - -std::shared_ptr operator-(const Output& arg0) { - return std::make_shared(arg0); -} } // namespace ngraph void ov::infer_auto_padding(const Shape& image_shape, diff --git a/src/core/tests/specialize_function.cpp b/src/core/tests/specialize_function.cpp index 771f2aa9ec5124..9cd4b3bbc27376 100644 --- a/src/core/tests/specialize_function.cpp +++ b/src/core/tests/specialize_function.cpp @@ -5,21 +5,26 @@ #include "ngraph/specialize_function.hpp" #include "gtest/gtest.h" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" using namespace ngraph; NGRAPH_SUPPRESS_DEPRECATED_START; +using ov::op::v0::Constant; +using ov::op::v0::Convert; +using ov::op::v0::Parameter; +using ov::op::v1::Add; + // Simple case: create a function with static parameter shapes and "specialize" them to the same // shapes. TEST(specialize_function, et_shape_static) { - auto p0 = std::make_shared(element::f32, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::f32, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::i32, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -36,11 +41,11 @@ TEST(specialize_function, et_shape_static) { // Test specialization of dynamic element types. 
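// [editorial note] A minimal sketch, not part of this patch, of how these tests
// drive the deprecated ngraph::specialize_function API, following the signature
// shown earlier in this diff; `param_vals` is a hypothetical name, and a null
// entry means "no substituted value" for that parameter:
std::vector<void*> param_vals{nullptr, nullptr};
auto g = ngraph::specialize_function(f,
                                     {element::f32, element::i32},
                                     {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                     param_vals);
ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3}));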
 TEST(specialize_function, et_dynamic_shape_static) {
-    auto p0 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -57,11 +62,11 @@ TEST(specialize_function, et_dynamic_shape_static) {
 
 // Test specialization of rank-dynamic shapes.
 TEST(specialize_function, et_static_shape_rank_dynamic) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -78,11 +83,11 @@ TEST(specialize_function, et_static_shape_rank_dynamic) {
 
 // Test specialization of rank-static dynamic shapes.
 TEST(specialize_function, et_static_shape_rank_static_dynamic) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic(3));
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(3));
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic(3));
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -99,11 +104,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic) {
 
 // Test specialization of values for shape-dynamic parameters.
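// [editorial note] Sketch of the value-substitution path the next test exercises
// (buffer names are hypothetical): per the specialize_function change earlier in
// this patch, a non-null entry in the values vector turns the matching Parameter
// into a Constant built from that buffer.
std::vector<int32_t> p1_data(1 * 2 * 3, 42);  // hypothetical contents
std::vector<void*> param_vals{nullptr, p1_data.data()};
auto g = ngraph::specialize_function(f,
                                     {element::f32, element::i32},
                                     {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                     param_vals);
// p1 is now an i32 Constant feeding the Convert, as the as_type_ptr chain verifies.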
 TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic(3));
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(3));
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic(3));
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -119,11 +124,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
     ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3}));
     ASSERT_EQ(g->get_output_element_type(0), element::f32);
 
-    auto plus_node = ov::as_type_ptr<op::v1::Add>(g->get_results().at(0)->input_value(0).get_node_shared_ptr());
+    auto plus_node = ov::as_type_ptr<Add>(g->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(plus_node);
-    auto convert_node = ov::as_type_ptr<op::Convert>(plus_node->input_value(1).get_node_shared_ptr());
+    auto convert_node = ov::as_type_ptr<Convert>(plus_node->input_value(1).get_node_shared_ptr());
     ASSERT_TRUE(convert_node);
-    auto const_node = ov::as_type_ptr<op::Constant>(convert_node->input_value(0).get_node_shared_ptr());
+    auto const_node = ov::as_type_ptr<Constant>(convert_node->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(const_node);
 
     ASSERT_EQ(const_node->get_output_element_type(0), element::i32);
@@ -135,11 +140,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
 //
 // (The input shapes we provide at specialization time are inconsistent.)
 TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -159,11 +164,11 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) {
 //
 // (The input element types we provide at specialization time are inconsistent.)
 TEST(specialize_function, et_dynamic_shape_static_validation_fails) {
-    auto p0 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -186,11 +191,11 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) {
 // specialize_shape's pre-checks, which use OPENVINO_ASSERT, rather than inside validation as we
 // reconstruct the graph.)
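// [editorial note] Illustrative inputs for the failure below (a sketch, not taken
// verbatim from the test): p0/p1 are declared rank-3 dynamic, but one replacement
// shape has rank 2, so the OPENVINO_ASSERT-based pre-check fires before any graph
// reconstruction happens.
std::vector<void*> param_vals{nullptr, nullptr};
auto g = ngraph::specialize_function(f,
                                     {element::f32, element::i32},
                                     {PartialShape{1, 2}, PartialShape{1, 2, 3}},  // rank 2 vs. declared rank 3
                                     param_vals);  // expected to throw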
TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -213,11 +218,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) { // specialize_shape's pre-checks, which use OPENVINO_ASSERT, rather than inside validation as we // reconstruct the graph.) TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, ov::Dimension::dynamic(), 3}); + auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::i32, PartialShape{1, ov::Dimension::dynamic(), 3}); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -235,11 +240,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) { // Test for failure when we supply the wrong number of replacement element types. TEST(specialize_function, et_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -257,11 +262,11 @@ TEST(specialize_function, et_count_wrong) { // Test for failure when we supply the wrong number of replacement shapes. TEST(specialize_function, shape_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -279,11 +284,11 @@ TEST(specialize_function, shape_count_wrong) { // Test for failure when we supply the wrong number of replacement parameter values. 
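// [editorial note] Sketch of the count mismatch the next test targets (names are
// illustrative): the function has two parameters, but three replacement values
// are supplied, so specialize_function is expected to throw; per the comments
// above, these pre-checks are implemented with OPENVINO_ASSERT.
std::vector<void*> too_many_vals{nullptr, nullptr, nullptr};  // 3 values for 2 parameters
ASSERT_ANY_THROW(ngraph::specialize_function(f,
                                             {element::f32, element::i32},
                                             {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                             too_many_vals));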
TEST(specialize_function, value_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); - auto a = std::make_shared(p0, k); + auto k = std::make_shared(p1, element::f32); + auto a = std::make_shared(p0, k); auto f = std::make_shared(a, ParameterVector{p0, p1}); diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp index 1893cfa63c4d39..66a7f978b2d5a7 100644 --- a/src/frontends/onnx/frontend/src/op/constant.cpp +++ b/src/frontends/onnx/frontend/src/op/constant.cpp @@ -10,7 +10,6 @@ #include "core/sparse_tensor.hpp" #include "core/tensor.hpp" #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/validation_util.hpp" #include "openvino/frontend/exception.hpp" diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.cpp b/src/frontends/onnx/frontend/src/op/constant_fill.cpp index 7dbbb02d8567a5..beb08bfd3e536b 100644 --- a/src/frontends/onnx/frontend/src/op/constant_fill.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_fill.cpp @@ -8,9 +8,7 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/broadcast.hpp" #include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" #include "onnx_common/utils.hpp" using namespace ov::frontend::onnx::common; diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp index 559b497ac80e6f..1d117f341ce5ad 100644 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp @@ -6,7 +6,6 @@ #include "core/tensor.hpp" #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" #include "onnx_import/core/null_node.hpp" #include "op/constant.hpp" #include "utils/common.hpp" diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp index e105b51e0f7e5a..2f4b69dd338740 100644 --- a/src/frontends/onnx/frontend/src/op/conv.cpp +++ b/src/frontends/onnx/frontend/src/op/conv.cpp @@ -10,7 +10,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/group_conv.hpp" #include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index ac3b334f4f47d2..efe5b9f8bd9078 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -9,7 +9,6 @@ #include "default_opset.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/op/convert.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp index 74a33816db2419..c64b87b28d1ef5 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp @@ -9,7 +9,6 @@ #include "default_opset.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/op/convert.hpp" #include "ngraph/shape.hpp" #include 
"ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/expand.cpp b/src/frontends/onnx/frontend/src/op/expand.cpp index 12abc218ecf1db..e6736a9b436298 100644 --- a/src/frontends/onnx/frontend/src/op/expand.cpp +++ b/src/frontends/onnx/frontend/src/op/expand.cpp @@ -7,9 +7,6 @@ #include #include "default_opset.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/multiply.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/floor.hpp b/src/frontends/onnx/frontend/src/op/floor.hpp index 535dc4771207b0..2925a6deb293b3 100644 --- a/src/frontends/onnx/frontend/src/op/floor.hpp +++ b/src/frontends/onnx/frontend/src/op/floor.hpp @@ -11,7 +11,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/floor.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp index 7807539f72f4cf..c0c858b6db3799 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.cpp +++ b/src/frontends/onnx/frontend/src/op/gemm.cpp @@ -7,10 +7,6 @@ #include #include "default_opset.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/matmul.hpp" -#include "ngraph/op/multiply.hpp" #include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/greater.hpp b/src/frontends/onnx/frontend/src/op/greater.hpp index f39f1744ca34c1..73642577f41838 100644 --- a/src/frontends/onnx/frontend/src/op/greater.hpp +++ b/src/frontends/onnx/frontend/src/op/greater.hpp @@ -11,7 +11,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/greater.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.cpp b/src/frontends/onnx/frontend/src/op/instance_norm.cpp index a082aed15cd869..73addcfa76a55a 100644 --- a/src/frontends/onnx/frontend/src/op/instance_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/instance_norm.cpp @@ -10,11 +10,7 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/op/add.hpp" #include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/subtract.hpp" #include "ngraph/partial_shape.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" diff --git a/src/frontends/onnx/frontend/src/op/less.hpp b/src/frontends/onnx/frontend/src/op/less.hpp index 9903323219033e..8f5350476645c3 100644 --- a/src/frontends/onnx/frontend/src/op/less.hpp +++ b/src/frontends/onnx/frontend/src/op/less.hpp @@ -11,7 +11,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/less.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp index 495d34f119dc6f..8281f19fe33be1 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.cpp +++ b/src/frontends/onnx/frontend/src/op/lstm.cpp @@ -14,9 +14,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/enum_names.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/lstm_sequence.hpp" #include 
"ngraph/op/util/attr_types.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" @@ -58,18 +55,18 @@ struct LSTMNgInputMap { // Weight tensor for the gates. // Shape: [num_directions, 4*hidden_size, input_size] m_input_map[LSTMInput::LSTM_INPUT_W] = - ngraph::op::util::convert_lstm_node_format(ng_inputs.at(1), - ngraph::op::util::LSTMWeightsFormat::IOFC, - ngraph::op::util::LSTMWeightsFormat::FICO, - 1); + ov::op::util::convert_lstm_node_format(ng_inputs.at(1), + ov::op::util::LSTMWeightsFormat::IOFC, + ov::op::util::LSTMWeightsFormat::FICO, + 1); // The recurrence weight tensor. // Shape: [num_directions, 4*hidden_size, hidden_size] m_input_map[LSTMInput::LSTM_INPUT_R] = - ngraph::op::util::convert_lstm_node_format(ng_inputs.at(2), - ngraph::op::util::LSTMWeightsFormat::IOFC, - ngraph::op::util::LSTMWeightsFormat::FICO, - 1); + ov::op::util::convert_lstm_node_format(ng_inputs.at(2), + ov::op::util::LSTMWeightsFormat::IOFC, + ov::op::util::LSTMWeightsFormat::FICO, + 1); // Get dimensions needed for default inputs creation auto shape_of_x = std::make_shared(m_input_map[LSTMInput::LSTM_INPUT_X]); @@ -103,10 +100,10 @@ struct LSTMNgInputMap { m_input_map[LSTMInput::LSTM_INPUT_B] = std::make_shared(split_bias.at(0), split_bias.at(1)); m_input_map[LSTMInput::LSTM_INPUT_B] = - ngraph::op::util::convert_lstm_node_format(m_input_map[LSTMInput::LSTM_INPUT_B], - ngraph::op::util::LSTMWeightsFormat::IOFC, - ngraph::op::util::LSTMWeightsFormat::FICO, - 1); + ov::op::util::convert_lstm_node_format(m_input_map[LSTMInput::LSTM_INPUT_B], + ov::op::util::LSTMWeightsFormat::IOFC, + ov::op::util::LSTMWeightsFormat::FICO, + 1); } else { auto b_shape = std::make_shared( OutputVector{num_directions_node, diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp index 6c94dabce3be4d..1c30edb27bac07 100644 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp @@ -9,8 +9,9 @@ #include "default_opset.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/op/mvn.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/op/mvn.hpp" +#include "openvino/opsets/opset5.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -36,7 +37,7 @@ OutputVector mean_variance_normalization(const Node& node) { ngraph::normalize_axes(node.get_description(), axes, data.get_partial_shape().rank()); OPENVINO_SUPPRESS_DEPRECATED_END auto const_axes = default_opset::Constant::create(element::i64, Shape{normalized_axes.size()}, normalized_axes); - return {std::make_shared(data, const_axes, true, 1e-09f, ngraph::op::MVNEpsMode::OUTSIDE_SQRT)}; + return {std::make_shared(data, const_axes, true, 1e-09f, ov::op::MVNEpsMode::OUTSIDE_SQRT)}; } } // namespace set_9 diff --git a/src/frontends/onnx/frontend/src/op/mod.cpp b/src/frontends/onnx/frontend/src/op/mod.cpp index 9f52d7251f7aed..755bd28ff08547 100644 --- a/src/frontends/onnx/frontend/src/op/mod.cpp +++ b/src/frontends/onnx/frontend/src/op/mod.cpp @@ -2,16 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/mod.hpp" +#include "op/mod.hpp" #include #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/abs.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "op/mod.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/abs.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git 
a/src/frontends/onnx/frontend/src/op/mul.hpp b/src/frontends/onnx/frontend/src/op/mul.hpp index f9a1d01a236472..6ad8680e19eed8 100644 --- a/src/frontends/onnx/frontend/src/op/mul.hpp +++ b/src/frontends/onnx/frontend/src/op/mul.hpp @@ -11,8 +11,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/multiply.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/neg.hpp b/src/frontends/onnx/frontend/src/op/neg.hpp index 19f7cbdcbb82e0..0ae61f23e64a1d 100644 --- a/src/frontends/onnx/frontend/src/op/neg.hpp +++ b/src/frontends/onnx/frontend/src/op/neg.hpp @@ -8,15 +8,15 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "ngraph/node.hpp" -#include "ngraph/op/negative.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/negative.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector neg(const Node& node) { - return {-node.get_ng_inputs().at(0)}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp index 27ac97eb60218a..cd6044489de204 100644 --- a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp +++ b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/non_max_suppression.hpp" +#include "op/non_max_suppression.hpp" #include @@ -10,7 +10,6 @@ #include "exceptions.hpp" #include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" -#include "op/non_max_suppression.hpp" #include "utils/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp index 1eef678c6ec2b0..c17db4f66676b0 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp @@ -2,10 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/detection_output.hpp" +#include "op/org.openvinotoolkit/detection_output.hpp" #include "onnx_import/core/node.hpp" -#include "op/org.openvinotoolkit/detection_output.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/detection_output.hpp" diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp index 63c26e61c9f4fd..e686bc345b4344 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/prior_box.hpp" +#include "op/org.openvinotoolkit/prior_box.hpp" #include "exceptions.hpp" #include "onnx_import/core/node.hpp" -#include "op/org.openvinotoolkit/prior_box.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/prior_box.hpp" diff --git a/src/frontends/onnx/frontend/src/op/pad.cpp b/src/frontends/onnx/frontend/src/op/pad.cpp index 1f7368e70c9340..e02b526a1c702e 100644 --- a/src/frontends/onnx/frontend/src/op/pad.cpp +++ b/src/frontends/onnx/frontend/src/op/pad.cpp @@ -2,19 +2,16 @@ // SPDX-License-Identifier: 
Apache-2.0 // -#include "ngraph/op/pad.hpp" +#include "op/pad.hpp" #include #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/shape.hpp" #include "onnx_import/core/null_node.hpp" -#include "op/pad.hpp" #include "ov_models/ov_builders/split.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index 95ab25b8f79470..408c2e8ab4fe60 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -6,8 +6,8 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" +#include "openvino/opsets/opset8.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index 6fbaba619cf5dc..0537d141ea3520 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -6,8 +6,8 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" +#include "openvino/op/random_uniform.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.cpp b/src/frontends/onnx/frontend/src/op/reciprocal.cpp index cb698716c9ad48..ab530c0cdb6dc1 100644 --- a/src/frontends/onnx/frontend/src/op/reciprocal.cpp +++ b/src/frontends/onnx/frontend/src/op/reciprocal.cpp @@ -8,7 +8,6 @@ #include #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/roi_align.cpp b/src/frontends/onnx/frontend/src/op/roi_align.cpp index fbdb77e0246e3a..4efd14a6a727a8 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.cpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.cpp @@ -13,7 +13,9 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { + namespace set_1 { + OutputVector roi_align(const Node& node) { const auto inputs = node.get_ng_inputs(); diff --git a/src/frontends/onnx/frontend/src/op/selu.cpp b/src/frontends/onnx/frontend/src/op/selu.cpp index 16f8bca9149cfd..9eba44e5fd6fd3 100644 --- a/src/frontends/onnx/frontend/src/op/selu.cpp +++ b/src/frontends/onnx/frontend/src/op/selu.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/selu.hpp" +#include "op/selu.hpp" #include #include #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "op/selu.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/softsign.cpp b/src/frontends/onnx/frontend/src/op/softsign.cpp index c6fd91a190b111..2228f59c39cd3c 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.cpp +++ b/src/frontends/onnx/frontend/src/op/softsign.cpp @@ -9,6 +9,7 @@ #include "default_opset.hpp" #include "ngraph/shape.hpp" +#include "openvino/opsets/opset9.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/squeeze.cpp b/src/frontends/onnx/frontend/src/op/squeeze.cpp index 8bfc2035cb2315..3c01b1ffe13e53 100644 --- 
a/src/frontends/onnx/frontend/src/op/squeeze.cpp +++ b/src/frontends/onnx/frontend/src/op/squeeze.cpp @@ -2,11 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/squeeze.hpp" +#include "op/squeeze.hpp" #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "op/squeeze.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/topk.cpp b/src/frontends/onnx/frontend/src/op/topk.cpp index 1e7a67b71d3395..b19eb8f53ccd33 100644 --- a/src/frontends/onnx/frontend/src/op/topk.cpp +++ b/src/frontends/onnx/frontend/src/op/topk.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/topk.hpp" +#include "op/topk.hpp" #include #include #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" -#include "op/topk.hpp" #include "openvino/frontend/exception.hpp" #include "utils/reshape.hpp" diff --git a/src/inference/src/cnn_network_ngraph_impl.cpp b/src/inference/src/cnn_network_ngraph_impl.cpp index e742ad888dccbf..5a972bd7a347a2 100644 --- a/src/inference/src/cnn_network_ngraph_impl.cpp +++ b/src/inference/src/cnn_network_ngraph_impl.cpp @@ -261,7 +261,7 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, try { for (const auto& layer : _ngraph_function->get_ops()) { // Result can have the same name as previous operation - if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { + if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { // Check that output port exists if (layer->outputs().size() <= outputIndex) { return DescriptionBuffer(OUT_OF_BOUNDS, resp) @@ -275,10 +275,10 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, // Check that we don't have a result for the output port for (const auto& port : layer->output(outputIndex).get_target_inputs()) { - if (dynamic_cast(port.get_node())) + if (dynamic_cast(port.get_node())) return OK; } - auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); + auto result = make_shared<::ov::op::v0::Result>(layer->output(outputIndex)); result->set_friendly_name(outputName); _ngraph_function->add_results({result}); // Check that we cannot add Result to layer with non unique friendly name diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index 89445449b17de6..456ea871b6c2e0 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -14,7 +14,6 @@ #include "ie_network_reader.hpp" #include "iplugin_wrapper.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/pass/constant_folding.hpp" #include "openvino/itt.hpp" #include "openvino/runtime/device_id_parser.hpp" diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp index aeccde65a25513..2d9fdc6228ac8f 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp @@ -5,7 +5,8 @@ #pragma once #include -#include + +#include "openvino/op/op.hpp" namespace ov { namespace intel_cpu { @@ -58,7 +59,7 @@ namespace intel_cpu { * T2 - FP32 * T3 - I32 */ -class RoPENode : public ngraph::op::Op { +class RoPENode : public ov::op::Op { public: 
OPENVINO_OP("RoPE", "cpu_plugin_opset"); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index dfa44b5758a4ea..ca982f2d8ceeaa 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -4,8 +4,9 @@ #include -#include "single_op_tests/depth_to_space.hpp" #include "common_test_utils/test_constants.hpp" +#include "openvino/opsets/opset3.hpp" +#include "single_op_tests/depth_to_space.hpp" namespace { using ov::test::DepthToSpaceLayerTest; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp index a27aa05c4f9f69..74203f59f38c3b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp @@ -4,8 +4,9 @@ #include -#include "single_op_tests/gather_elements.hpp" #include "common_test_utils/test_constants.hpp" +#include "openvino/opsets/opset6.hpp" +#include "single_op_tests/gather_elements.hpp" namespace { using ov::test::GatherElementsLayerTest; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp index bbe9239e439d93..0ee4e3af0c6673 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp @@ -8,6 +8,9 @@ #include +#include "common_test_utils/test_common.hpp" +#include "openvino/opsets/opset8.hpp" + using namespace InferenceEngine; namespace HeteroTests { diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp index ad1bac678f81de..76da288eb2cab0 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/infer_request/memory_states.hpp" - #include +#include "behavior/infer_request/memory_states.hpp" #include "blob_factory.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/sigmoid.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/sigmoid.hpp" namespace BehaviorTestsDefinitions { std::string InferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo& obj) { @@ -36,8 +35,8 @@ void InferRequestVariableStateTest::SetUp() { } InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() { - ngraph::Shape shape = {1, 200}; - ngraph::element::Type type = ngraph::element::f32; + ov::Shape shape = {1, 200}; + ov::element::Type type = ov::element::f32; auto input = std::make_shared(type, shape); auto mem_i1 = std::make_shared(type, shape, 0); @@ -60,8 +59,7 @@ InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() { mem_w2->add_control_dependency(mem_r2); 
sigm->add_control_dependency(mem_w2); - auto function = - std::make_shared(ngraph::NodeVector{sigm}, ngraph::ParameterVector{input}, "addOutput"); + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "addOutput"); return InferenceEngine::CNNNetwork{function}; } diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index 6c4c4d7a80ed1f..af65c153c13e4c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -2,29 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "low_precision_transformations/depth_to_space_transformation.hpp" - +#include #include +#include #include #include -#include #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" - -#include "ov_models/pass/convert_prc.hpp" -#include "ov_models/builders.hpp" - -#include "transformations/utils/utils.hpp" +#include "low_precision_transformations/depth_to_space_transformation.hpp" #include "openvino/core/model.hpp" -#include "openvino/pass/constant_folding.hpp" -#include "transformations/init_node_info.hpp" -#include "transformations/common_optimizations/depth_to_space_fusion.hpp" #include "openvino/op/depth_to_space.hpp" - +#include "openvino/pass/constant_folding.hpp" #include "ov_lpt_models/depth_to_space.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "transformations/common_optimizations/depth_to_space_fusion.hpp" +#include "transformations/init_node_info.hpp" +#include "transformations/utils/utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index a8e6b09d0bc40e..e7c8a06bf41fa1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "low_precision_transformations/mat_mul_transformation.hpp" - +#include #include +#include +#include #include #include #include @@ -12,8 +13,8 @@ #include "transformations/init_node_info.hpp" #include "low_precision_transformations/mat_mul_transformation.hpp" -#include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/mat_mul.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 05db8b42ed8e4f..9ab4c619a9ae7a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -2,9 +2,10 @@ // SPDX-License-Identifier: 
Apache-2.0 // -#include "low_precision_transformations/mat_mul_with_constant_transformation.hpp" - +#include #include +#include +#include #include #include #include @@ -12,8 +13,9 @@ #include "transformations/init_node_info.hpp" #include "low_precision_transformations/mat_mul_transformation.hpp" -#include "ov_models/subgraph_builders.hpp" +#include "low_precision_transformations/mat_mul_with_constant_transformation.hpp" #include "ov_lpt_models/mat_mul.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index 8e90915913bd3a..ab3ddef713e929 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -2,7 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include +#include +#include #include #include #include @@ -10,8 +13,8 @@ #include "transformations/init_node_info.hpp" #include "low_precision_transformations/squeeze_transformation.hpp" -#include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/squeeze.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index ee23959645ea1e..df8280db1a2b02 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -2,7 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include +#include +#include #include #include #include @@ -10,8 +13,8 @@ #include "transformations/init_node_info.hpp" #include "low_precision_transformations/unsqueeze_transformation.hpp" -#include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/unsqueeze.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp index e5d8a854ffa087..4e92c0d8187502 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp @@ -5,11 +5,10 @@ #pragma once #include -#include #include #include +#include -#include "ngraph/op/detection_output.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp index 2574eef28c6f7e..0e7cf8de26d7a2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + +#include #include #include #include -#include -#include - 
+#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,7 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" - +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp index 218fb3028f67e0..08761d7110d809 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + +#include #include #include #include -#include -#include - +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,6 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { using priorBoxSpecificParams = std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp index 60642609388e4a..1f35f829f5d61a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + +#include #include #include #include -#include -#include - +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,6 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp index 95105c34b9a91a..95f9beabbed829 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp @@ -1,13 +1,12 @@ // Copyright (C) 2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/eye.hpp" - #include #include #include #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/eye.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp index 841029e6249879..d8ed69769fc839 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"shared_test_classes/single_layer/memory.hpp" - #include #include #include #include "ngraph/pass/low_latency.hpp" +#include "openvino/op/util/variable.hpp" #include "openvino/op/util/variable_context.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/memory.hpp" using namespace ngraph; using ov::op::v1::Add; @@ -51,7 +52,7 @@ void MemoryTest::SetUp() { auto tensor = ov::Tensor(ngPrc, inputShape); auto variable_context = ov::op::util::VariableContext(); - auto variable_value = std::make_shared(tensor); + auto variable_value = std::make_shared(tensor); variable_context.set_variable_value(function->get_variable_by_id("v0"), variable_value); eval_context["VariableContext"] = variable_context; } @@ -180,14 +181,14 @@ void MemoryTest::CreateTIFunc() { void MemoryTest::CreateCommonFunc() { ov::ParameterVector param{std::make_shared(ngPrc, ov::Shape(inputShape))}; const auto variable_info = targetDevice == ov::test::utils::DEVICE_GPU - ? VariableInfo{Shape{inputShape}, ngPrc, "v0"} - : VariableInfo{inputShape, ngPrc, "v0"}; - auto variable = std::make_shared(variable_info); + ? ov::op::util::VariableInfo{Shape{inputShape}, ngPrc, "v0"} + : ov::op::util::VariableInfo{inputShape, ngPrc, "v0"}; + auto variable = std::make_shared(variable_info); auto read_value = CreateReadValueOp(param.at(0), variable); auto add = std::make_shared(read_value, param.at(0)); auto assign = CreateAssignOp(add, variable); auto res = std::make_shared(add); - function = std::make_shared(ResultVector{res}, SinkVector{assign}, param, "TestMemory"); + function = std::make_shared(ResultVector{res}, ov::SinkVector{assign}, param, "TestMemory"); } void MemoryTest::ApplyLowLatency() { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index eedb35fe746ac2..acc382398bb344 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -5,6 +5,7 @@ #include "shared_test_classes/single_layer/reverse.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/reverse.hpp" using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 87e02e82dd7f70..9f23a9719455ec 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -6,6 +6,9 @@ #include "ov_models/builders.hpp" #include "openvino/core/enum_names.hpp" +#include "openvino/opsets/opset3.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/roi_align.hpp" using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp index 2e184d9289c254..4f65fd88357d77 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp @@ -76,10 +76,10 @@ void MemoryLSTMCellTest::SetUp() { auto permute_in = std::make_shared(unsqueeze_input, permute_in_params); auto cell_memory_constant = 
ov::test::utils::deprecated::make_constant(element_type, cell_memory_dims, cell_memory_init); - auto var_cell = - std::make_shared(VariableInfo{PartialShape(cell_memory_dims), element_type, "cell_state_1"}); - auto var_hidden = - std::make_shared(VariableInfo{PartialShape(cell_memory_dims), element_type, "hidden_state_1"}); + auto var_cell = std::make_shared( + ov::op::util::VariableInfo{PartialShape(cell_memory_dims), element_type, "cell_state_1"}); + auto var_hidden = std::make_shared( + ov::op::util::VariableInfo{PartialShape(cell_memory_dims), element_type, "hidden_state_1"}); auto cell_memory_read = std::make_shared(cell_memory_constant, var_cell); auto hidden_memory_constant = diff --git a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp index 901a00ef841c8b..07f942798669c9 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp @@ -2,13 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/subgraph/mul_conv_fusion.hpp" - #include "common_test_utils/graph_comparator.hpp" -#include "openvino/core/validation_util.hpp" -#include "openvino/pass/manager.hpp" #include "common_test_utils/node_builders/constant.hpp" +#include "openvino/pass/manager.hpp" +#include "shared_test_classes/subgraph/mul_conv_fusion.hpp" #include "transformations/common_optimizations/mul_conv_fusion.hpp" +#include "validation_util.hpp" namespace ov { namespace test { @@ -82,7 +81,7 @@ void MulConvFusion::SetUp() { std::shared_ptr conv; if (conv_type == ov::op::v1::Convolution::get_type_info_static()) { weights = std::make_shared(weights, mul_const); - weights = ov::get_constant_from_source(weights); + weights = ov::util::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp index c852fefdb401b7..05ed5e6c744128 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/subgraph/parameter_shapeof_result.hpp" - #include +#include "shared_test_classes/subgraph/parameter_shapeof_result.hpp" + namespace SubgraphTestsDefinitions { std::string ParameterShapeOfResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp index 21edaeb9c81522..2f68cf4de340cf 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp @@ -22,6 +22,9 @@ namespace ngraph { namespace builder { namespace subgraph { +using ov::op::util::Variable; +using ov::op::util::VariableInfo; + std::shared_ptr AssignAndReadValueFunction::getOriginal( const ov::PartialShape& inputShape, const element::Type& inputPrecision, From 8e251b82839285ae2254d6b31292e2ca5d16f724 Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Thu, 18 
Jan 2024 21:02:18 +0000 Subject: [PATCH 077/122] Decrease number of workers for ONNX Model tests to prevent OOM kills (#22243) * Decrease number of workers for ONNX Model tests to prevent OOM kills * Try to use "-n auto" also --- .github/workflows/job_onnx_models_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index 1fbc0c11fe960c..07d8d12d48a386 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -103,4 +103,4 @@ jobs: python3 -m pip install pytest-xdist[psutil] pytest-forked - name: ONNX Models Tests - run: python3 -m pytest --backend="CPU" --model_zoo_dir="${MODELS_SHARE_PATH}" ${INSTALL_TEST_DIR}/onnx/tests/tests_python/test_zoo_models.py -v -n 12 --forked -k 'not _cuda' --model_zoo_xfail + run: python3 -m pytest --backend="CPU" --model_zoo_dir="${MODELS_SHARE_PATH}" ${INSTALL_TEST_DIR}/onnx/tests/tests_python/test_zoo_models.py -v -n auto --forked -k 'not _cuda' --model_zoo_xfail From 3fc1feb66c2cf5eb393ebc77a6f1861a45dffd3c Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Fri, 19 Jan 2024 06:18:47 +0100 Subject: [PATCH 078/122] [CPU][DEBUG CAPS] Build fix (#22246) --- src/plugins/intel_cpu/src/utils/debug_capabilities.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 2018f57daa6615..8c1b6c194aa653 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -314,7 +314,7 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { if (shape_size(shape) <= 8) { auto type = pmem->getDesc().getPrecision(); - auto tensor = std::make_shared(type, shape, data); + auto tensor = ov::Tensor(type, shape, data); auto constop = std::make_shared(tensor); comma = ""; for (auto & v : constop->get_value_strings()) { From a8331cac613d99a2ead487b32c9ba01fc8cf8df2 Mon Sep 17 00:00:00 2001 From: Chenhu Wang Date: Fri, 19 Jan 2024 13:34:06 +0800 Subject: [PATCH 079/122] [SnippetS][CPU] JIT segmentation fault detector (#21905) --- .../docs/debug_capabilities/README.md | 3 +- .../snippets_segfault_detector.md | 11 + .../include/snippets/lowered/linear_ir.hpp | 2 - src/common/snippets/src/generator.cpp | 11 +- src/common/snippets/src/op/subgraph.cpp | 4 - .../src/emitters/plugin/x64/jit_emitter.hpp | 17 ++ .../snippets/utils/debug_caps_config.cpp | 26 ++ .../snippets/utils/debug_caps_config.hpp | 29 +++ .../emitters/snippets/x64/cpu_generator.cpp | 56 ++++- .../emitters/snippets/x64/cpu_generator.hpp | 7 + .../x64/jit_brgemm_copy_b_emitter.hpp | 4 + .../snippets/x64/jit_brgemm_emitter.hpp | 4 + .../snippets/x64/jit_debug_emitter.cpp | 73 ++++++ .../snippets/x64/jit_debug_emitter.hpp | 60 +++++ .../snippets/x64/jit_kernel_emitter.hpp | 4 + .../snippets/x64/jit_memory_emitters.hpp | 3 + .../x64/jit_segfault_detector_emitter.cpp | 88 +++++++ .../x64/jit_segfault_detector_emitter.hpp | 53 +++++ .../src/emitters/snippets/x64/verbose.cpp | 222 ++++++++++++++++++ .../src/emitters/snippets/x64/verbose.hpp | 39 +++ src/plugins/intel_cpu/src/nodes/subgraph.cpp | 42 +++- src/plugins/intel_cpu/src/nodes/subgraph.h | 9 +- 22 files changed, 744 insertions(+), 23 deletions(-) create mode 100644 src/common/snippets/docs/debug_capabilities/snippets_segfault_detector.md create mode 100644 
src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp
 create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp
diff --git a/src/common/snippets/docs/debug_capabilities/README.md b/src/common/snippets/docs/debug_capabilities/README.md
index 1f9947a8787832..f03a0ac5a45c38 100644
--- a/src/common/snippets/docs/debug_capabilities/README.md
+++ b/src/common/snippets/docs/debug_capabilities/README.md
@@ -7,4 +7,5 @@ Use the following cmake option to enable snippets debug capabilities:
 `-DENABLE_DEBUG_CAPS=ON`
-* [Performance counters](perf_count.md)
\ No newline at end of file
+* [Performance counters](perf_count.md)
+* [Snippets segfault detector](snippets_segfault_detector.md)
\ No newline at end of file
diff --git a/src/common/snippets/docs/debug_capabilities/snippets_segfault_detector.md b/src/common/snippets/docs/debug_capabilities/snippets_segfault_detector.md
new file mode 100644
index 00000000000000..0c0747660e1c95
--- /dev/null
+++ b/src/common/snippets/docs/debug_capabilities/snippets_segfault_detector.md
@@ -0,0 +1,11 @@
+# Snippets segfault detector
+
+A subgraph in snippets is decomposed into many simple operations. These operations are converted to the corresponding emitters to generate execution instructions. If a segfault happens during subgraph execution, it often takes significant effort to debug and investigate the problem. This capability is introduced to identify the faulty emitter within the large kernel and to print some useful emitter information.
+
+To turn on the snippets segfault detector, set the following environment variable:
+```sh
+ OV_CPU_SNIPPETS_SEGFAULT_DETECTOR=<level> binary ...
+```
+
+Currently the snippets segfault detector has only one level; any digit can be used for activation.
+Currently the snippets segfault detector is only effective for the x86 and x86-64 CPU backends.
\ No newline at end of file
diff --git a/src/common/snippets/include/snippets/lowered/linear_ir.hpp b/src/common/snippets/include/snippets/lowered/linear_ir.hpp
index 12cd5c0ea5c253..5034de4e481540 100644
--- a/src/common/snippets/include/snippets/lowered/linear_ir.hpp
+++ b/src/common/snippets/include/snippets/lowered/linear_ir.hpp
@@ -30,8 +30,6 @@ enum PerfCountMode {
 class Config {
 public:
-    // True if the lowered Emitters need to be accessed during runtime. Normally they're destroyed after code emission.
-    bool m_save_expressions = false;
     // True if we should check runtime info for nodes to call specific needed transformations
     bool m_need_fill_tail_register = false;
     size_t m_loop_depth = 1;
diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp
index b1b179ab4fdfa1..5c4848c2535358 100644
--- a/src/common/snippets/src/generator.cpp
+++ b/src/common/snippets/src/generator.cpp
@@ -57,12 +57,11 @@ void Generator::generate(lowered::LinearIR& linear_ir, LoweringResult& result, c
     // 1. some emitters use precompiled kernels. They need to be saved, so the kernels are accessible at runtime.
    // 2. perf count node as field of emitter should be alive at runtime.
-    if (linear_ir.get_config().m_save_expressions) {
-        for (const auto& expr : linear_ir) {
-            const auto& emitter = expr->get_emitter();
-            if (uses_precompiled_kernel(emitter))
-                result.m_saved_emitters.emplace_back(emitter);
-        }
+    // 3. Emitters with segfault detector debug capability also need to be accessible at runtime.
+    for (const auto& expr : linear_ir) {
+        const auto& emitter = expr->get_emitter();
+        if (uses_precompiled_kernel(emitter))
+            result.m_saved_emitters.emplace_back(emitter);
+    }
     result.compiled_snippet = target->get_snippet();
 }
diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp
index 7bbbd8af8230b8..d0bf43006d73a2 100644
--- a/src/common/snippets/src/op/subgraph.cpp
+++ b/src/common/snippets/src/op/subgraph.cpp
@@ -343,10 +343,6 @@ std::shared_ptr Subgraph::convert_body_to_linear_ir(size_t min_parallel_work_amount, size_t min_kernel_work_amount, const std::shared_ptr& shape_infer_factory) {
     lowered::Config lowering_config;
-    lowering_config.m_save_expressions = config.m_has_domain_sensitive_ops;
-#ifdef SNIPPETS_DEBUG_CAPS
-    lowering_config.m_save_expressions = lowering_config.m_save_expressions || (lowering_config.perf_count_mode != lowered::PerfCountMode::Disabled);
-#endif
     lowering_config.m_need_fill_tail_register = config.m_has_domain_sensitive_ops;
     lowering_config.m_loop_depth = tileRank;
     lowering_config.m_enable_domain_optimization = !config.m_has_domain_sensitive_ops;
diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp
index 4cc31959224581..243ce573811d3d 100644
--- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp
+++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp
@@ -12,6 +12,10 @@
 #include
+#ifdef SNIPPETS_DEBUG_CAPS
+#include "emitters/snippets/x64/verbose.hpp"
+#endif
+
 namespace ov {
 namespace intel_cpu {
@@ -50,6 +54,14 @@ class jit_emitter : public ov::snippets::Emitter {
     */
     static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr);
+#ifdef SNIPPETS_DEBUG_CAPS
+    const char *info() const {
+        if (!info_.is_initialized())
+            info_.init(this);
+        return info_.c_str();
+    }
+#endif
+
 protected:
     virtual size_t aux_gprs_count() const;
@@ -138,6 +150,11 @@ class jit_emitter : public ov::snippets::Emitter {
     void internal_call_rsp_align() const;
     void internal_call_rsp_restore() const;
+#ifdef SNIPPETS_DEBUG_CAPS
+    mutable jit_emitter_info_t info_;
+    friend class jit_debug_emitter;
+#endif
+
 private:
     mutable std::vector preserved_vec_idxs;
     mutable std::vector preserved_gpr_idxs;
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp
new file mode 100644
index 00000000000000..b7c51539861ff8
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#include "debug_caps_config.hpp"
+
+namespace ov {
+namespace intel_cpu {
+
+void SnippetsDebugCapsConfig::readProperties() {
+    auto readEnv = [](const char* envVar) {
+        const char* env = std::getenv(envVar);
+        if (env && *env)
+            return env;
+
+        return (const char*)nullptr;
+    };
+
+    enable_segfault_detector = readEnv("OV_CPU_SNIPPETS_SEGFAULT_DETECTOR") ?
true : false;
+}
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif // SNIPPETS_DEBUG_CAPS
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp
new file mode 100644
index 00000000000000..14dcae0ddf0c69
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp
@@ -0,0 +1,29 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#pragma once
+
+#include
+#include
+
+namespace ov {
+namespace intel_cpu {
+
+class SnippetsDebugCapsConfig {
+public:
+    SnippetsDebugCapsConfig() {
+        readProperties();
+    }
+
+    bool enable_segfault_detector;
+
+private:
+    void readProperties();
+};
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif // SNIPPETS_DEBUG_CAPS
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp
index 85061d906c05af..dfba703338f1a1 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp
@@ -34,10 +34,62 @@
 #include "emitters/snippets/x64/jit_perf_count_chrono_emitters.hpp"
 #include "emitters/snippets/x64/jit_perf_count_rdtsc_emitters.hpp"
 #include "transformations/snippets/x64/op/perf_count_rdtsc.hpp"
+#include "emitters/snippets/x64/jit_debug_emitter.hpp"
+#include "emitters/snippets/x64/jit_segfault_detector_emitter.hpp"
+#include "emitters/snippets/x64/verbose.hpp"
 #endif
 namespace ov {
+#ifdef SNIPPETS_DEBUG_CAPS
+static bool is_load_emitter(const intel_cpu::jit_emitter *emitter) {
+    bool ret = false;
+    if (dynamic_cast(emitter) ||
+        dynamic_cast(emitter) ||
+        dynamic_cast(emitter)) {
+        return true;
+    }
+    return ret;
+}
+
+static bool is_store_emitter(const intel_cpu::jit_emitter *emitter) {
+    bool ret = false;
+    if (dynamic_cast(emitter) ||
+        dynamic_cast(emitter)) {
+        return true;
+    }
+    return ret;
+}
+
+static bool is_segfault_detector_emitter(const intel_cpu::jit_emitter *emitter) {
+    // enabled by default for typical tensor memory access emitters
+    bool ret = false;
+    ret = is_load_emitter(emitter) ||
+        is_store_emitter(emitter) ||
+        dynamic_cast(emitter) ||
+        dynamic_cast(emitter) ||
+        dynamic_cast(emitter);
+    return ret;
+    // use the code below to activate all emitters for extended usage
+    // return !dynamic_cast(emitter);
+}
+
+#define CREATE_SNIPPETS_EMITTER(e_type) { \
+    [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr { \
+        auto emitter = std::make_shared(h.get(), isa, expr); \
+        if (debug_config.enable_segfault_detector && is_segfault_detector_emitter(emitter.get())) { \
+            auto segfault_emitter = std::make_shared(h.get(), isa, emitter.get(), \
+                is_load_emitter(emitter.get()), is_store_emitter(emitter.get()), expr->get_node()->get_friendly_name()); \
+            return std::make_shared(emitter, segfault_emitter, jit_debug_emitter::EmissionLocation::preamble); \
+        } else { \
+            return emitter; \
+        } \
+    }, \
+    [](const std::shared_ptr& n) -> std::set> { \
+        return e_type::get_supported_precisions(n); \
+    } \
+}
+#else
 #define CREATE_SNIPPETS_EMITTER(e_type) { \
     [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr { \
         return std::make_shared(h.get(), isa, expr); \
@@ -46,6 +98,7 @@ namespace ov {
         return e_type::get_supported_precisions(n); \
     } \
 }
+#endif
 #define CREATE_CPU_EMITTER(e_type) { \
     [this](const snippets::lowered::ExpressionPtr& expr) ->
std::shared_ptr { \ @@ -227,7 +280,8 @@ bool intel_cpu::CPUGenerator::uses_precompiled_kernel(const std::shared_ptr(e) || std::dynamic_pointer_cast(e); #ifdef SNIPPETS_DEBUG_CAPS - need = need || + const auto cpu_target_machine = std::dynamic_pointer_cast(target); + need = need || (cpu_target_machine && cpu_target_machine->debug_config.enable_segfault_detector) || std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e) || diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp index 41131f1d4eb640..ed5da62771d12f 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp @@ -9,6 +9,10 @@ #include "snippets/target_machine.hpp" #include "snippets/generator.hpp" +#ifdef SNIPPETS_DEBUG_CAPS +#include "emitters/snippets/utils/debug_caps_config.hpp" +#endif + namespace ov { namespace intel_cpu { @@ -29,6 +33,9 @@ class CPUTargetMachine : public snippets::TargetMachine { snippets::CompiledSnippetPtr get_snippet() override; size_t get_lanes() const override; dnnl::impl::cpu::x64::cpu_isa_t get_isa() const; +#ifdef SNIPPETS_DEBUG_CAPS + SnippetsDebugCapsConfig debug_config; +#endif private: std::unique_ptr h; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp index 28e13f6fc33ee3..f11c1e84c29733 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp @@ -47,6 +47,10 @@ class jit_brgemm_copy_b_emitter : public jit_emitter { size_t m_in_offset = 0lu; size_t m_out_offset = 0lu; size_t m_comp_offset = 0lu; + +#ifdef SNIPPETS_DEBUG_CAPS + friend std::string init_info_jit_brgemm_copy_b_emitter(const jit_brgemm_copy_b_emitter *emitter); +#endif }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp index 34214e31ad4ca8..7e05ffa43a8cc4 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp @@ -55,6 +55,10 @@ class jit_brgemm_emitter : public jit_emitter { size_t m_store_offset_c = 0lu; std::vector io_data_size {}; + +#ifdef SNIPPETS_DEBUG_CAPS + friend std::string init_info_jit_brgemm_emitter(const jit_brgemm_emitter *emitter); +#endif }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp new file mode 100644 index 00000000000000..0125ac69b0b525 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef SNIPPETS_DEBUG_CAPS + +#include "jit_debug_emitter.hpp" +#include +#include "utils/general_utils.h" + +using namespace dnnl::impl::cpu; +using namespace dnnl::impl; +using namespace Xbyak; + +namespace ov { +namespace intel_cpu { + +size_t jit_debug_emitter::get_inputs_num() const { + return m_target_emitter->get_inputs_num(); +} + +size_t jit_debug_emitter::aux_vecs_count() const { + return m_target_emitter->aux_vecs_count(); +} + +size_t 
jit_debug_emitter::aux_gprs_count() const { + return m_target_emitter->aux_gprs_count(); +} + +void jit_debug_emitter::emitter_preamble(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs, const std::vector &pool_gpr_idxs) const { + m_target_emitter->emitter_preamble(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); +} + +void jit_debug_emitter::emitter_postamble() const { + m_target_emitter->emitter_postamble(); +} + +void jit_debug_emitter::validate_arguments(const std::vector& arg0, const std::vector& arg1) const { + m_target_emitter->validate_arguments(arg0, arg1); +} + +void jit_debug_emitter::emit_data() const { + m_target_emitter->emit_data(); +} + +void jit_debug_emitter::prepare_table() { + m_target_emitter->prepare_table(); +} + +void jit_debug_emitter::register_table_entries() { + m_target_emitter->register_table_entries(); +} + +void jit_debug_emitter::emit_impl(const std::vector &in_idxs, const std::vector &out_idxs) const { + m_target_emitter->emit_impl(in_idxs, out_idxs); +} + +void jit_debug_emitter::emit_code(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs, const std::vector &pool_gpr_idxs) const { + if (m_decorator_emit_loc == EmissionLocation::preamble || m_decorator_emit_loc == EmissionLocation::both) + m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); + + m_target_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); + + if (m_decorator_emit_loc == EmissionLocation::postamble || m_decorator_emit_loc == EmissionLocation::both) + m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); +} + +} // namespace intel_cpu +} // namespace ov + +#endif \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp new file mode 100644 index 00000000000000..116e17dcf4c1df --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef SNIPPETS_DEBUG_CAPS + +#pragma once + +#include "emitters/plugin/x64/jit_emitter.hpp" + + +namespace ov { +namespace intel_cpu { + +class jit_debug_emitter : public jit_emitter { +public: + enum class EmissionLocation { + preamble, + postamble, + both + }; + jit_debug_emitter(const std::shared_ptr& target_emitter, const std::shared_ptr& decorator_emitter, const EmissionLocation& loc) + : jit_emitter(target_emitter->h, target_emitter->host_isa_, target_emitter->exec_prc_, target_emitter->in_out_type_), + m_target_emitter(target_emitter), m_decorator_emitter(decorator_emitter), m_decorator_emit_loc(loc) { + prepare_table(); + } + + void emit_code(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs = {}, const std::vector &pool_gpr_idxs = {}) const override; + void emit_data() const override; + + size_t get_inputs_num() const override; + size_t aux_vecs_count() const override; + +protected: + size_t aux_gprs_count() const override; + + void prepare_table() override; + void register_table_entries() override; + + void emit_impl(const std::vector &in_idxs, const std::vector &out_idxs) const override; + + void emitter_preamble(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs, const std::vector &pool_gpr_idxs) const override; + void emitter_postamble() const 
override;
+
+private:
+    void validate_arguments(const std::vector<size_t>& arg0, const std::vector<size_t>& arg1) const override;
+    // the wrapped target emitter that provides the production functionality
+    const std::shared_ptr<jit_emitter> m_target_emitter;
+    // the decorator emitter that adds the debug capability
+    const std::shared_ptr<jit_emitter> m_decorator_emitter;
+
+    EmissionLocation m_decorator_emit_loc;
+};
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp
index 230bee0152f225..43d0d3dc9ca901 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp
@@ -77,6 +77,10 @@ class jit_kernel_emitter : public jit_container_emitter {

     const size_t reg_indexes_idx;
     const size_t reg_const_params_idx;
+
+#ifdef SNIPPETS_DEBUG_CAPS
+    friend std::string init_info_jit_kernel_emitter(const jit_kernel_emitter *emitter);
+#endif
 };

 } // namespace intel_cpu
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
index 5a49af108561d4..50276d9d9e2f1b 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
@@ -22,6 +22,9 @@ class jit_memory_emitter : public jit_emitter {

     size_t count = 0;
     size_t byte_offset = 0;
+#ifdef SNIPPETS_DEBUG_CAPS
+    friend std::string init_info_jit_memory_emitter(const jit_memory_emitter *emitter);
+#endif
 };

 class jit_load_memory_emitter : public jit_memory_emitter {
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp
new file mode 100644
index 00000000000000..109950dd3a668e
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp
@@ -0,0 +1,88 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#include "jit_segfault_detector_emitter.hpp"
+
+using namespace dnnl::impl::utils;
+using namespace dnnl::impl;
+using namespace dnnl::impl::cpu::x64;
+using namespace Xbyak;
+
+namespace ov {
+namespace intel_cpu {
+
+std::shared_ptr<ThreadLocal<jit_uni_segfault_detector_emitter*>> g_custom_segfault_handler =
+    std::make_shared<ThreadLocal<jit_uni_segfault_detector_emitter*>>();
+
+jit_uni_segfault_detector_emitter::jit_uni_segfault_detector_emitter(dnnl::impl::cpu::x64::jit_generator* host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
+    jit_emitter* target_emitter, bool is_load, bool is_store, std::string target_node_name) :
+    jit_emitter(host, host_isa),
+    m_target_emitter(target_emitter),
+    is_target_use_load_emitter(is_load),
+    is_target_use_store_emitter(is_store),
+    m_target_node_name(target_node_name) {
+}
+
+size_t jit_uni_segfault_detector_emitter::get_inputs_num() const { return 1; }
+
+const jit_emitter* jit_uni_segfault_detector_emitter::get_target_emitter() const {
+    return m_target_emitter;
+}
+
+void jit_uni_segfault_detector_emitter::emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const {
+    save_target_emitter();
+    if (is_target_use_load_emitter) {
+        memory_track(in_vec_idxs[0]);
+    } else if (is_target_use_store_emitter) {
+        memory_track(out_vec_idxs[0]);
+    }
+}
+
+void jit_uni_segfault_detector_emitter::save_target_emitter() const {
+    // use an internal call so that "->local" refers to the execution thread; otherwise it would always be the compilation thread
+    internal_call_preamble();
+
+    const auto &set_local_handler_overload = static_cast<void (*)(jit_uni_segfault_detector_emitter*)>(set_local_handler);
+    h->mov(h->rax, reinterpret_cast<size_t>(set_local_handler_overload));
+    h->mov(abi_param1, reinterpret_cast<size_t>(this));
+    internal_call_rsp_align();
+    h->call(h->rax);
+    internal_call_rsp_restore();
+
+    internal_call_postamble();
+}
+
+void jit_uni_segfault_detector_emitter::set_local_handler(jit_uni_segfault_detector_emitter* emitter_address) {
+    g_custom_segfault_handler->local() = emitter_address;
+}
+
+void jit_uni_segfault_detector_emitter::memory_track(size_t gpr_idx_for_mem_address) const {
+    h->push(h->r15);
+    Xbyak::Label label_set_address_current;
+    Xbyak::Label label_set_address_end;
+    h->mov(h->r15, reinterpret_cast<size_t>(&start_address));
+    h->cmp(h->qword[h->r15], 0);
+    h->jne(label_set_address_current);
+    h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    h->mov(h->r15, reinterpret_cast<size_t>(&current_address));
+    h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    h->jmp(label_set_address_end);
+    h->L(label_set_address_current);
+    {
+        h->mov(h->r15, reinterpret_cast<size_t>(&current_address));
+        h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    }
+    h->L(label_set_address_end);
+    // iteration++; a value of 1 means the first access
+    h->mov(h->r15, reinterpret_cast<size_t>(&iteration));
+    h->add(h->qword[h->r15], 0x01);
+    h->pop(h->r15);
+}
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp
new file mode 100644
index 00000000000000..68849e5a21563e
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#pragma once
+
+#include
+#include "emitters/plugin/x64/jit_emitter.hpp"
+#include "openvino/runtime/threading/thread_local.hpp"
+
+using namespace ov::threading;
+
+namespace ov {
+namespace intel_cpu {
+
+class jit_uni_segfault_detector_emitter;
+extern std::shared_ptr<ThreadLocal<jit_uni_segfault_detector_emitter*>> g_custom_segfault_handler;
+
+class jit_uni_segfault_detector_emitter : public jit_emitter {
+public:
+    jit_uni_segfault_detector_emitter(dnnl::impl::cpu::x64::jit_generator* host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
+                                      jit_emitter* target_emitter, bool is_load, bool is_store, std::string target_node_name);
+
+    size_t get_inputs_num() const override;
+
+    const jit_emitter* get_target_emitter() const;
+
+private:
+    // the emitted code is to save the "this" pointer (jit_uni_segfault_detector_emitter) to the global handler, then print info via its target_emitter,
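+    // (the handler is installed per execution thread through ThreadLocal, so each thread reports its own faulting emitter),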
+ // and to save tracked memory address, iteration, etc to print + void emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const override; + jit_emitter *m_target_emitter = nullptr; + bool is_target_use_load_emitter = false; + bool is_target_use_store_emitter = false; + std::string m_target_node_name = ""; + + void save_target_emitter() const; + static void set_local_handler(jit_uni_segfault_detector_emitter* emitter_address); + void memory_track(size_t gpr_idx_for_mem_address) const; + + mutable size_t start_address = 0; + mutable size_t current_address = 0; + mutable size_t iteration = 0; + + friend std::string init_info_jit_uni_segfault_detector_emitter(const jit_uni_segfault_detector_emitter *emitter); +}; + +} // namespace intel_cpu +} // namespace ov + +#endif \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp new file mode 100644 index 00000000000000..d73502825050ca --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp @@ -0,0 +1,222 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef SNIPPETS_DEBUG_CAPS + +#include "verbose.hpp" +#include "jit_segfault_detector_emitter.hpp" +#include "jit_memory_emitters.hpp" +#include "jit_brgemm_emitter.hpp" +#include "jit_brgemm_copy_b_emitter.hpp" +#include "jit_kernel_emitter.hpp" +#include "jit_snippets_emitters.hpp" + +#ifndef _WIN32 +#include +#endif + +namespace ov { +namespace intel_cpu { + +template +std::string join(const T& v, const std::string& sep = ", ") { + std::ostringstream ss; + size_t count = 0; + for (const auto& x : v) { + if (count++ > 0) { + ss << sep; + } + ss << x; + } + return ss.str(); +} + +template +std::string vector_to_string(const T& v) { + std::ostringstream os; + os << "[ " << ov::util::join(v) << " ]"; + return os.str(); +} + +std::string get_emitter_type_name(const jit_emitter* emitter) { + std::string name = typeid(*emitter).name(); +#ifndef _WIN32 + int status; + std::unique_ptr demangled_name( + abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), + std::free); + name = demangled_name.get(); +#endif + return name; +} + +std::string init_info_jit_memory_emitter(const jit_memory_emitter *emitter) { + std::stringstream ss; + ss << " src_precision:" << emitter->src_prc + << " dst_precision:" << emitter->dst_prc + << " load/store_element_number:" << emitter->count + << " byte_offset:" << emitter->byte_offset; + return ss.str(); +} + +static std::string init_info_jit_load_memory_emitter(const jit_load_memory_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_load_memory_emitter" + << memory_emitter_info; + return ss.str(); +} + +static std::string init_info_jit_load_broadcast_emitter(const jit_load_broadcast_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_load_broadcast_emitter" + << memory_emitter_info; + return ss.str(); +} + +static std::string init_info_jit_load_convert_emitter(const jit_load_convert_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_load_convert_emitter" + << memory_emitter_info; + return ss.str(); +} + +static std::string init_info_jit_store_memory_emitter(const 
jit_store_memory_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_store_memory_emitter" + << memory_emitter_info; + return ss.str(); +} + +static std::string init_info_jit_store_convert_emitter(const jit_store_convert_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_store_convert_emitter" + << memory_emitter_info; + return ss.str(); +} + +std::string init_info_jit_brgemm_emitter(const jit_brgemm_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_brgemm_emitter" + << " m_ctx.M:" << emitter->m_ctx.M + << " m_ctx.K:" << emitter->m_ctx.K + << " m_ctx.N:" << emitter->m_ctx.N + << " m_ctx.LDA:" << emitter->m_ctx.LDA + << " m_ctx.LDB:" << emitter->m_ctx.LDB + << " m_ctx.LDC:" << emitter->m_ctx.LDC + << " m_ctx.dt_in0:" << emitter->m_ctx.dt_in0 + << " m_ctx.dt_in1:" << emitter->m_ctx.dt_in1 + << " m_ctx.palette:" << emitter->m_ctx.palette + << " m_ctx.is_with_amx:" << emitter->m_ctx.is_with_amx + << " m_ctx.is_with_comp:" << emitter->m_ctx.is_with_comp + << " m_ctx.beta:" << emitter->m_ctx.beta + << " m_load_offset_a:" << emitter->m_load_offset_a + << " m_load_offset_b:" << emitter->m_load_offset_b + << " m_load_offset_scratch:" << emitter->m_load_offset_scratch + << " m_store_offset_c:" << emitter->m_store_offset_c + << " m_with_scratch:" << emitter->m_with_scratch + << " m_with_comp:" << emitter->m_with_comp; + + return ss.str(); +} + +std::string init_info_jit_brgemm_copy_b_emitter(const jit_brgemm_copy_b_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_brgemm_copy_b_emitter" + << " m_LDB:" << emitter->m_LDB + << " m_K:" << emitter->m_K + << " m_K_blk:" << emitter->m_K_blk + << " m_K_tail:" << emitter->m_K_tail + << " m_N:" << emitter->m_N + << " m_N_blk:" << emitter->m_N_blk + << " m_N_tail:" << emitter->m_N_tail + << " m_brgemm_prc_in0:" << emitter->m_brgemm_prc_in0 + << " m_brgemm_prc_in1:" << emitter->m_brgemm_prc_in1 + << " m_brgemmVNNIFactor:" << emitter->m_brgemmVNNIFactor + << " m_with_comp:" << emitter->m_with_comp + << " m_in_offset:" << emitter->m_in_offset + << " m_out_offset:" << emitter->m_out_offset + << ",m_comp_offset:" << emitter->m_comp_offset; + + return ss.str(); +} + +std::string init_info_jit_kernel_emitter(const jit_kernel_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_kernel_emitter" + << " jcp.parallel_executor_ndims:" << emitter->jcp.parallel_executor_ndims + << " gp_regs_pool:"<< vector_to_string(emitter->gp_regs_pool) + << " master_shape:" << vector_to_string(emitter->master_shape) + << " num_inputs:" << emitter->num_inputs + << " num_outputs:" << emitter->num_outputs + << " num_unique_buffers:" << emitter->num_unique_buffers + << " io_data_sizes:" << vector_to_string(emitter->io_data_sizes) + << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx) + << " vec_regs_pool:" << vector_to_string(emitter->vec_regs_pool) + << " reg_indexes_idx:" << emitter->reg_indexes_idx + << " reg_const_params_idx:" << emitter->reg_const_params_idx; + for (size_t i = 0; i < emitter->io_data_layouts.size(); ++i) + ss << " io_data_layouts for " << i << " is:" << vector_to_string(emitter->io_data_layouts[i]); + for (size_t i = 0; i < emitter->io_shapes.size(); ++i) + ss << " io_shapes for " << i << " is: "<< vector_to_string(emitter->io_shapes[i]); + return ss.str(); +} + +std::string 
init_info_jit_uni_segfault_detector_emitter(const jit_uni_segfault_detector_emitter *emitter) {
+    std::stringstream ss;
+    ss << "Node_name:" << emitter->m_target_node_name
+       << " use_load_emitter:" << emitter->is_target_use_load_emitter
+       << " use_store_emitter:" << emitter->is_target_use_store_emitter;
+    if (emitter->is_target_use_load_emitter || emitter->is_target_use_store_emitter) {
+        ss << " start_address:" << emitter->start_address
+           << " current_address:" << emitter->current_address
+           << " iteration:" << emitter->iteration << " ";
+    }
+    // target emitter info
+    if (auto target_e = emitter->get_target_emitter()) {
+        ss << target_e->info();
+    }
+    return ss.str();
+}
+
+static std::string init_info_jit_emitter_general(const jit_emitter *emitter) {
+    std::stringstream ss;
+    ss << "Emitter_type_name:" << get_emitter_type_name(emitter);
+    return ss.str();
+}
+
+void jit_emitter_info_t::init(const jit_emitter *emitter) {
+    if (is_initialized_) return;
+    if (auto e_type = dynamic_cast<const jit_load_memory_emitter*>(emitter)) {
+        str_ = init_info_jit_load_memory_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_load_broadcast_emitter*>(emitter)) {
+        str_ = init_info_jit_load_broadcast_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_load_convert_emitter*>(emitter)) {
+        str_ = init_info_jit_load_convert_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_store_memory_emitter*>(emitter)) {
+        str_ = init_info_jit_store_memory_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_store_convert_emitter*>(emitter)) {
+        str_ = init_info_jit_store_convert_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_brgemm_emitter*>(emitter)) {
+        str_ = init_info_jit_brgemm_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_brgemm_copy_b_emitter*>(emitter)) {
+        str_ = init_info_jit_brgemm_copy_b_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_kernel_emitter*>(emitter)) {
+        str_ = init_info_jit_kernel_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_uni_segfault_detector_emitter*>(emitter)) {
+        str_ = init_info_jit_uni_segfault_detector_emitter(e_type);
+    } else {
+        str_ = init_info_jit_emitter_general(emitter);
+    }
+    is_initialized_ = true;
+}
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp
new file mode 100644
index 00000000000000..a81364039b98a7
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#pragma once
+
+#include
+
+namespace ov {
+namespace intel_cpu {
+class jit_emitter;
+struct jit_emitter_info_t {
+    jit_emitter_info_t() = default;
+    jit_emitter_info_t(const jit_emitter_info_t &rhs)
+        : str_(rhs.str_), is_initialized_(rhs.is_initialized_) {}
+    jit_emitter_info_t &operator=(const jit_emitter_info_t &rhs) {
+        is_initialized_ = rhs.is_initialized_;
+        str_ = rhs.str_;
+        return *this;
+    }
+
+    const char *c_str() const { return str_.c_str(); }
+    bool is_initialized() const { return is_initialized_; }
+
+    void init(const jit_emitter *emitter);
+
+private:
+    std::string str_;
+    bool is_initialized_ = false;
+};
+
+std::string get_emitter_type_name(const jit_emitter* emitter);
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
index 4e98c9ef2029a2..47889638b70247 100644
--- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp
+++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
@@ -36,6 +36,12 @@
 #include
#include +#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS) +#include "emitters/snippets/x64/jit_segfault_detector_emitter.hpp" +#include +std::mutex err_print_lock; +#endif + using namespace dnnl::impl::utils; using namespace dnnl::impl::cpu; using namespace dnnl::impl::cpu::x64; @@ -395,7 +401,7 @@ void Snippet::prepareParams() { auto builder = [this](const SnippetKey& key) -> std::shared_ptr { std::shared_ptr executor = - std::make_shared(key.attrs, is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16); + std::make_shared(key.attrs, is_dynamic); return executor; }; @@ -416,7 +422,7 @@ void Snippet::prepareParams() { getOrCreateExecutor(); } else { // in case perf count is enabled, disable executor cache by default to not mix up perf counters for different subgraphs. - execPtr = std::make_shared(key.attrs, is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16); + execPtr = std::make_shared(key.attrs, is_dynamic); } #endif } @@ -510,10 +516,31 @@ void Snippet::SnippetJitExecutor::update_ptrs(jit_snippets_call_args& call_args, } } +#ifdef SNIPPETS_DEBUG_CAPS +void Snippet::SnippetJitExecutor::segfault_detector() { + const auto target = std::dynamic_pointer_cast(snippetAttrs.snippet->get_generator()->get_target_machine()); + if (target && target->debug_config.enable_segfault_detector) { + __sighandler_t signal_handler = [](int signal) { + std::lock_guard guard(err_print_lock); + if (auto segfault_detector_emitter = ov::intel_cpu::g_custom_segfault_handler->local()) + std::cout << segfault_detector_emitter->info() << std::endl; + auto tid = parallel_get_thread_num(); + OPENVINO_THROW("Segfault was caught by the signal handler in subgraph node execution on thread " + std::to_string(tid)); + }; + struct sigaction new_handler{}; + new_handler.sa_handler = signal_handler; + sigaction(SIGSEGV, &new_handler, nullptr); + } +} +#endif + void Snippet::SnippetJitExecutor::schedule_6d(const std::vector& inMemPtrs, const std::vector& outMemPtrs) { const auto& dom = parallel_exec_domain; // < N, C, H, W > < 1, 1, N, C*H*W> const auto& callable = schedule.get_callable(); +#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS) + segfault_detector(); +#endif parallel_for5d(dom[0], dom[1], dom[2], dom[3], dom[4], [&](int64_t d0, int64_t d1, int64_t d2, int64_t d3, int64_t d4) { int64_t indexes[] = {d0, d1, d2, d3, d4}; @@ -525,6 +552,9 @@ void Snippet::SnippetJitExecutor::schedule_6d(const std::vector& inMe void Snippet::SnippetJitExecutor::schedule_nt(const std::vector& inMemPtrs, const std::vector& outMemPtrs) { const auto& work_size = parallel_exec_domain; +#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS) + segfault_detector(); +#endif parallel_nt(0, [&](const int ithr, const int nthr) { jit_snippets_call_args call_args; update_ptrs(call_args, inMemPtrs, outMemPtrs); @@ -545,11 +575,11 @@ void Snippet::SnippetJitExecutor::schedule_nt(const std::vector& inMe }); } -Snippet::SnippetExecutor::SnippetExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16) - : snippetAttrs(std::move(attrs)), is_dynamic(is_dynamic), enforceBF16(enforceBF16) {} +Snippet::SnippetExecutor::SnippetExecutor(SnippetAttrs attrs, bool is_dynamic) + : snippetAttrs(std::move(attrs)), is_dynamic(is_dynamic) {} -Snippet::SnippetJitExecutor::SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16) : - SnippetExecutor(std::move(attrs), is_dynamic, enforceBF16) { +Snippet::SnippetJitExecutor::SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic) : + 
SnippetExecutor(std::move(attrs), is_dynamic) { numInput = snippetAttrs.inMemBlockedDims.size(); numOutput = snippetAttrs.outMemBlockedDims.size(); start_offset_in.resize(numInput); diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.h b/src/plugins/intel_cpu/src/nodes/subgraph.h index 8abc00bbc16f02..9ce3a3b71b760b 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.h +++ b/src/plugins/intel_cpu/src/nodes/subgraph.h @@ -73,7 +73,7 @@ class Snippet : public Node { class SnippetExecutor { public: - SnippetExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16); + SnippetExecutor(SnippetAttrs attrs, bool is_dynamic); virtual void exec(const std::vector& inMemPtrs, const std::vector& outMemPtrs) = 0; virtual ~SnippetExecutor() = default; std::shared_ptr shapeInference = nullptr; @@ -81,14 +81,13 @@ class Snippet : public Node { protected: SnippetAttrs snippetAttrs; bool is_dynamic = false; - bool enforceBF16 = false; }; std::shared_ptr execPtr = nullptr; class SnippetJitExecutor : public SnippetExecutor { public: - SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16); + SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic); void exec(const std::vector& inMemPtrs, const std::vector& outMemPtrs) override; bool schedule_created(); @@ -126,6 +125,10 @@ class Snippet : public Node { // Buffer scratchpad std::vector buffer_scratchpad = {}; size_t buffer_scratchpad_size = 0; + +#ifdef SNIPPETS_DEBUG_CAPS + inline void segfault_detector(); +#endif }; }; From f05e89ac8a20511084491b2aacd6eb727b280e50 Mon Sep 17 00:00:00 2001 From: yanlan song Date: Fri, 19 Jan 2024 14:16:00 +0800 Subject: [PATCH 080/122] fix race condition in layout attribute visiting (#21852) * fix rc Signed-off-by: fishbell * enable tests Signed-off-by: fishbell --------- Signed-off-by: fishbell Co-authored-by: Chen Peter --- src/core/src/layout.cpp | 6 +++- .../behavior/ov_plugin/core_threading.hpp | 30 ++++++++++++------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/src/core/src/layout.cpp b/src/core/src/layout.cpp index eb90ec191d79e1..14e19ff21f0ed7 100644 --- a/src/core/src/layout.cpp +++ b/src/core/src/layout.cpp @@ -640,7 +640,11 @@ void AttributeAdapter::set(const std::string& value) { bool LayoutAttribute::visit_attributes(AttributeVisitor& visitor) { std::string layout_str = value.to_string(); visitor.on_attribute("layout", layout_str); - value = Layout(layout_str); + // some attribute visitor will not change the value + // for example, rt info serializer + // in this case, parallelization can be supported in hash pass + if (layout_str != value.to_string()) + value = Layout(layout_str); return true; } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp index 56c4e0f9e6230d..b7aebac307a351 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp @@ -106,8 +106,8 @@ class CoreThreadingTestsWithCacheEnabled : public testing::WithParamInterface model; - void SetupModel() { + std::vector> models; + void SetupModels() { ov::Core core; std::string ir_with_meta = R"V0G0N( @@ -278,23 +278,33 @@ class CoreThreadingTestsWithCacheEnabled : public testing::WithParamInterface )V0G0N"; ov::Tensor weights = {}; - model = core.read_model(ir_with_meta, weights); + auto model = core.read_model(ir_with_meta, weights); OPENVINO_ASSERT(model); + 
models.emplace_back(model); // model with cli_parameter + // test model with runtime attributes -- layout + model = ov::test::utils::make_split_multi_conv_concat(); + for (auto& iter : model->get_parameters()) + iter->set_layout("NCHW"); + for (auto& iter : model->get_results()) + iter->set_layout("NHCW"); + models.emplace_back(model); } }; // tested function: set_property, compile_model TEST_P(CoreThreadingTestsWithCacheEnabled, smoke_compilemodel_cache_enabled) { ov::Core core; - SetupModel(); + SetupModels(); core.set_property(target_device, config); core.set_property(ov::cache_dir(cache_path)); - runParallel( - [&]() { - (void)core.compile_model(model, target_device); - }, - numIterations, - numThreads); + for (auto& model : models) { + runParallel( + [&]() { + (void)core.compile_model(model, target_device); + }, + numIterations, + numThreads); + } core.set_property(ov::cache_dir("")); } From 06a6a9a6d0056c2fd9cd5598f6392451ee672a28 Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Fri, 19 Jan 2024 15:03:40 +0800 Subject: [PATCH 081/122] [AUTO BATCH Plugin][Unite Tests] remove 1.0 properties & using 2.0 property naming (#22244) Signed-off-by: Zhai, Xuejun --- .../tests/unit/async_infer_request_test.cpp | 27 +--- ...ompile_model_create_infer_request_test.cpp | 22 +-- .../unit/compile_model_get_property_test.cpp | 37 +---- .../compile_model_get_runtime_model_test.cpp | 21 +-- .../unit/compile_model_set_property_test.cpp | 23 +-- .../auto_batch/tests/unit/mock_common.hpp | 9 +- .../tests/unit/parse_batch_device_test.cpp | 16 -- .../tests/unit/parse_meta_device_test.cpp | 36 ++--- .../tests/unit/plugin_compile_model_test.cpp | 152 ++++++++---------- .../tests/unit/plugin_get_property_test.cpp | 38 +---- .../tests/unit/plugin_query_model_test.cpp | 22 +-- .../tests/unit/plugin_set_property_test.cpp | 26 +-- .../tests/unit/sync_infer_request_test.cpp | 23 +-- 13 files changed, 117 insertions(+), 335 deletions(-) diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index 646d7403df47e7..412a5abc2c1d96 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -2,31 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "async_infer_request.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" +#include "ov_models/subgraph_builders.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; using AutoBatchRequestTestParams = std::tuple>(m_model, m_hardware_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout.name(), "200"}}; m_device_info = {"CPU", {}, m_batch_size}; @@ -211,13 +196,13 @@ class 
AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam(workerRequestPtr->_tasks.size()); if (sz == workerRequestPtr->_batch_size) { - std::pair t; + std::pair t; for (int n = 0; n < sz; n++) { OPENVINO_ASSERT(workerRequestPtr->_tasks.try_pop(t)); workerRequestPtr->_completion_tasks[n] = std::move(t.second); t.first->m_sync_request->copy_inputs_if_needed(); t.first->m_sync_request->m_batched_request_status = - ov::autobatch_plugin::SyncInferRequest::eExecutionFlavor::BATCH_EXECUTED; + SyncInferRequest::eExecutionFlavor::BATCH_EXECUTED; } workerRequestPtr->_infer_request_batched->start_async(); } else if ((status == std::cv_status::timeout) && sz) { diff --git a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 39094d161393ca..71db54be4fe8ed 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -2,31 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using CreateInferRequestTestParams = std::tuple; // inferReq number @@ -94,7 +74,7 @@ class CompileModelCreateInferRequestTest : public ::testing::TestWithParam>(m_model, m_auto_batch_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout(static_cast(200))}}; m_device_info = {"CPU", {}, m_batch_size}; m_batched_inputs = {"Parameter_0"}; diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index b3fc8497c9f052..883cca1dcd5d5d 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -2,30 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using get_property_param = std::tuple; // Throw exception @@ -106,7 +86,7 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(auto_batch_compile_model = 
m_plugin->compile_model(m_model, configs)); std::string network_name = m_model.get()->get_name(); @@ -132,8 +112,8 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam res_config; - res_config.emplace_back("CACHE_DIR"); - res_config.emplace_back("OPTIMAL_BATCH_SIZE"); + res_config.emplace_back(ov::cache_dir.name()); + res_config.emplace_back(ov::optimal_batch_size.name()); return res_config; }); @@ -151,16 +131,15 @@ TEST_P(CompileModelGetPropertyTest, CompileModelGetPropertyTestCase) { } const std::vector compile_model_get_property_param_test = { - get_property_param{METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS), false}, - get_property_param{METRIC_KEY(NETWORK_NAME), false}, - get_property_param{METRIC_KEY(SUPPORTED_METRICS), false}, - get_property_param{METRIC_KEY(SUPPORTED_CONFIG_KEYS), false}, + get_property_param{ov::optimal_number_of_infer_requests.name(), false}, + get_property_param{ov::model_name.name(), false}, + get_property_param{ov::supported_properties.name(), false}, get_property_param{ov::execution_devices.name(), false}, - get_property_param{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), false}, + get_property_param{ov::device::priorities.name(), false}, get_property_param{ov::auto_batch_timeout.name(), false}, get_property_param{ov::cache_dir.name(), false}, // Config in dependent m_plugin - get_property_param{"OPTIMAL_BATCH_SIZE", false}, + get_property_param{ov::optimal_batch_size.name(), false}, // Incorrect Property get_property_param{"INCORRECT_METRIC", true}, get_property_param{"INCORRECT_CONFIG", true}, diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index f338e6dd3e610a..a8e83a4f5bd5ca 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - class CompileModelGetRuntimeModelTest : public ::testing::Test { public: std::shared_ptr> m_core; @@ -89,7 +70,7 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { ON_CALL(*m_mock_i_compile_model.get(), get_runtime_model()).WillByDefault(Return(m_model)); - const ov::AnyMap configs = {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(16)"}}; + const ov::AnyMap configs = {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index ee03043a162c93..29801f05924ae1 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ 
b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using set_property_param = std::tuple; // Throw exception @@ -109,7 +90,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } @@ -123,7 +104,7 @@ TEST_P(CompileModelSetPropertyTest, CompileModelSetPropertyTestCase) { } const std::vector compile_model_set_property_param_test = { - set_property_param{{{CONFIG_KEY(AUTO_BATCH_TIMEOUT), std::uint32_t(100)}}, false}, + set_property_param{{{ov::auto_batch_timeout(static_cast(100))}}, false}, set_property_param{{{"INCORRECT_CONFIG", 2}}, true}, }; diff --git a/src/plugins/auto_batch/tests/unit/mock_common.hpp b/src/plugins/auto_batch/tests/unit/mock_common.hpp index 7ab113c06544d5..2a9f0230fb43e7 100644 --- a/src/plugins/auto_batch/tests/unit/mock_common.hpp +++ b/src/plugins/auto_batch/tests/unit/mock_common.hpp @@ -7,12 +7,15 @@ #include -#include "async_infer_request.hpp" #include "compiled_model.hpp" -#include "ie_icore.hpp" #include "openvino/runtime/make_tensor.hpp" #include "plugin.hpp" -#include "sync_infer_request.hpp" + +using ::testing::_; +using ::testing::MatcherCast; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::StrEq; using namespace ov::mock_autobatch_plugin; diff --git a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp index 565ac80a13643b..b6696f8d08c4f8 100644 --- a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp +++ b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp @@ -2,24 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using batch_device_config_params = std::tuple -#include - #include "mock_common.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using meta_device_params = std::tuple; // Throw exception const std::vector cpu_supported_properties = { - "CACHE_DIR", + ov::cache_dir.name(), }; const std::vector gpu_supported_properties = { - "CACHE_DIR", - "OPTIMAL_BATCH_SIZE", + ov::cache_dir.name(), + 
ov::optimal_batch_size.name(), }; class ParseMetaDeviceTest : public ::testing::TestWithParam { @@ -124,16 +108,16 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDeviceTestCase) { const std::vector meta_device_test_configs = { meta_device_params{"CPU(4)", {}, DeviceInformation{"CPU", {}, 4}, false}, meta_device_params{"CPU(4)", {{}}, DeviceInformation{"CPU", {{}}, 4}, true}, - meta_device_params{"CPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4}, false}, - meta_device_params{"GPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"GPU", {{"CACHE_DIR", "./"}}, 4}, false}, + meta_device_params{"CPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, false}, + meta_device_params{"GPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"GPU", {{ov::cache_dir("./")}}, 4}, false}, meta_device_params{"GPU(8)", - {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, - DeviceInformation{"GPU", {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, 8}, + {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, + DeviceInformation{"GPU", {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, 8}, false}, - meta_device_params{"CPU(4)", {{"OPTIMAL_BATCH_SIZE", "16"}}, DeviceInformation{"CPU", {{}}, 4}, true}, + meta_device_params{"CPU(4)", {{ov::optimal_batch_size.name(), "16"}}, DeviceInformation{"CPU", {{}}, 4}, true}, meta_device_params{"CPU(4)", - {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, - DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4}, + {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, + DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, true}, }; diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index ebc52426bfe504..9235bd62f73114 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -2,30 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - +#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; using plugin_compile_model_param = std::tuple&>(_), @@ -149,82 +131,82 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTe const std::vector plugin_compile_model_param_test = { // Case 1: explict apply batch size by config of AUTO_BATCH_DEVICE_CONFIG - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, 
{"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, 32}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU(32)")}}, 32}, // Case 2: CPU batch size is figured out by min of opt_batch_size and infReq_num // If config contains "PERFORMANCE_HINT_NUM_REQUESTS" - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 12}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(8)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(16)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(8)}, + {ov::hint::num_requests(16)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 8}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(8)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(2)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(8)}, + {ov::hint::num_requests(2)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), 
static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 1}, // Case 3: GPU batch size is figured out by // 1) min of opt_batch_size and infReq_num // 2) available_mem/one_graph_mem_footprint with power 2 // Final m_batch_size is the min of 1) and 2) - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "5000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(5000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 4}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "40960000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(40960000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 12}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(32)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(24)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "18000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(32)}, + {ov::hint::num_requests(24)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(18000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 16}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(32)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(48)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "180000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(32)}, + {ov::hint::num_requests(48)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(180000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 32}, // Case 4: - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::LATENCY}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - 
{"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::LATENCY}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, 32}, }; diff --git a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp index 5d259789333310..850bb4ee11f8a2 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using get_property_params = std::tuple; // Throw exception -const char supported_metric[] = "SUPPORTED_METRICS FULL_DEVICE_NAME SUPPORTED_CONFIG_KEYS"; -const char supported_config_keys[] = "AUTO_BATCH_DEVICE_CONFIG MULTI_DEVICE_PRIORITIES AUTO_BATCH_TIMEOUT CACHE_DIR"; - class GetPropertyTest : public ::testing::TestWithParam { public: std::string m_property_name; @@ -70,29 +51,18 @@ TEST_P(GetPropertyTest, GetPropertyTestCase) { } else { ov::Any value; ASSERT_NO_THROW(value = m_plugin->get_property(m_property_name, options)); - if (m_property_name == METRIC_KEY(SUPPORTED_METRICS)) { - EXPECT_EQ(value.as(), supported_metric); - return; - } if (m_property_name == ov::device::full_name.name()) { EXPECT_EQ(value.as(), "BATCH"); return; } - if (m_property_name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - EXPECT_EQ(value.as(), supported_config_keys); - return; - } } } const std::vector get_property_params_test = { - get_property_params{"AUTO_BATCH_TIMEOUT", false}, - get_property_params{"AUTO_BATCH_DEVICE_CONFIG", true}, - get_property_params{"CACHE_DIR", true}, - get_property_params{METRIC_KEY(SUPPORTED_METRICS), false}, - get_property_params{METRIC_KEY(SUPPORTED_CONFIG_KEYS), false}, - get_property_params{"CPU_THREADS_NUM", true}, - get_property_params{"PERFORMANCE_HINT", true}, + get_property_params{ov::auto_batch_timeout.name(), false}, + get_property_params{ov::device::priorities.name(), true}, + get_property_params{ov::cache_dir.name(), true}, + get_property_params{ov::hint::performance_mode.name(), true}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index d36945693bd51c..619c8ba7e8f65f 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -2,27 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; 
-using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using query_model_params = std::tuple; @@ -81,9 +65,9 @@ TEST_P(QueryModelTest, QueryModelTestCase) { const std::vector query_model_params_test = { query_model_params{{{}}, true}, - query_model_params{{{"AUTO_BATCH_TIMEOUT", "200"}}, true}, - query_model_params{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, - query_model_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, + query_model_params{{{ov::auto_batch_timeout(static_cast(200))}}, true}, + query_model_params{{{ov::device::priorities("CPU(4)")}}, false}, + query_model_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}}, false}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp index 28cc8e4dcf9e99..ef67def84bf216 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp @@ -2,24 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using set_property_params = std::tuple; @@ -72,14 +56,14 @@ TEST_P(SetPropertyTest, SetPropertyTestCase) { } const std::vector plugin_set_property_params_test = { - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}}, false}, - set_property_params{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}}, false}, + set_property_params{{{ov::device::priorities("CPU(4)")}}, false}, set_property_params{{{"CACHE_DIR", "./xyz"}}, false}, - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}}, false}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}, {"CACHE_DIR", "./xyz"}}, false}, set_property_params{{{"XYZ", "200"}}, true}, - set_property_params{{{"XYZ", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}}, true}, + set_property_params{{{"XYZ", "200"}, {ov::device::priorities("CPU(4)")}, {"CACHE_DIR", "./xyz"}}, true}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index 6d2b0a32a2b5ac..3a7148459a78b9 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -2,31 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "sync_infer_request.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include 
"ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" +#include "ov_models/subgraph_builders.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; using AutoBatchRequestTestParams = std::tuple; // data type @@ -108,7 +93,7 @@ class AutoBatchRequestTest : public ::testing::TestWithParam>(m_model, m_auto_batch_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout(static_cast(200))}}; m_device_info = {"CPU", {}, m_batch_size}; m_batched_inputs = {"Parameter_0"}; From e5ef8947cf7ae070bfe89b9db1a69442d143ae19 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Fri, 19 Jan 2024 08:48:53 +0100 Subject: [PATCH 082/122] Docs graph.js bf16 prec (#22239) * Main changes without valid data * fix --- docs/sphinx_setup/_static/js/graphs.js | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/sphinx_setup/_static/js/graphs.js b/docs/sphinx_setup/_static/js/graphs.js index 9c4badd7a9101a..c3b86fca1b157a 100644 --- a/docs/sphinx_setup/_static/js/graphs.js +++ b/docs/sphinx_setup/_static/js/graphs.js @@ -948,14 +948,15 @@ $(document).ready(function () { var graphConfigs = kpis.map((str) => { var kpi = str.toLowerCase(); var groupUnit = model[0]; - var indexes = []; if (kpi === 'throughput') { var throughputData = Graph.getDatabyKPI(model, kpi); var config = Graph.getGraphConfig(kpi, groupUnit, precisions); precisions.forEach((prec, index) => { config.datasets[index].data = throughputData.map(tData => tData[prec]); }); - return removeEmptyLabel(config, indexes); + return config; + //to fix + // return removeEmptyLabel(config); } else if(kpi === 'latency'){ var latencyData = Graph.getDatabyKPI(model, kpi); @@ -963,7 +964,8 @@ $(document).ready(function () { precisions.forEach((prec, index) => { config.datasets[index].data = latencyData.map(tData => tData[prec]); }); - return removeEmptyLabel(config, indexes); + return config; + // return removeEmptyLabel(config); } var config = Graph.getGraphConfig(kpi, groupUnit); config.datasets[0].data = Graph.getDatabyKPI(model, kpi); @@ -1027,6 +1029,7 @@ $(document).ready(function () { adjustHeaderIcons(display.mode); } function removeEmptyLabel(config, indexes) { + var indexes = []; config.datasets.forEach((item, index) =>{ if(item.data[0] == '') { indexes.push(index); @@ -1037,6 +1040,7 @@ $(document).ready(function () { sorted.forEach((index)=>{ config.datasets.splice(index,1); }) + console.log(config); return config; } From eb873cfc9f818748e1a22d7d78bcc573d274da21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jan 2024 12:11:27 +0400 Subject: [PATCH 083/122] Bump actions/dependency-review-action from 3 to 4 (#22252) Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 3 to 4. 
- [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/dependency_review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency_review.yml b/.github/workflows/dependency_review.yml index cadf46ab3b9d1c..777198358c78f1 100644 --- a/.github/workflows/dependency_review.yml +++ b/.github/workflows/dependency_review.yml @@ -12,6 +12,6 @@ jobs: uses: actions/checkout@v4 - name: Dependency Review - uses: actions/dependency-review-action@v3 + uses: actions/dependency-review-action@v4 with: config-file: './.github/dependency_review.yml' From 810dcc45e8986a814acb5433f0051e60370eb871 Mon Sep 17 00:00:00 2001 From: Fang Xu Date: Fri, 19 Jan 2024 16:12:13 +0800 Subject: [PATCH 084/122] use different cpus for multiple sync infer (#21418) Co-authored-by: Wanglei Shen --- src/inference/src/dev/threading/cpu_streams_executor.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index eb706a37192143..e3eeafe85930ad 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -186,11 +186,7 @@ struct CPUStreamsExecutor::Impl { int max_threads_per_core; StreamCreateType stream_type; const auto org_proc_type_table = get_org_proc_type_table(); - const auto stream_id = - _impl->_config._streams == 0 - ? 0 - : (_streamId >= _impl->_config._streams ? _impl->_config._streams - 1 : _streamId); - + const auto stream_id = _impl->_config._streams == 0 ? 
0 : _streamId % _impl->_config._streams; get_cur_stream_info(stream_id, _impl->_config._cpu_reservation, org_proc_type_table, From a78e9e8cf84454d7bbcef6d42ced40eb6b525475 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Fri, 19 Jan 2024 12:48:39 +0400 Subject: [PATCH 085/122] [GPU] Move clamp fp16 output pass to ngraph (#22245) --- .../openvino/pass/pattern/op/pattern.hpp | 11 ++ src/core/src/pattern/op/pattern.cpp | 10 ++ .../graph_optimizer/clamp_fp16_output.cpp | 45 ------- .../src/graph/include/pass_manager.h | 8 -- src/plugins/intel_gpu/src/graph/program.cpp | 4 - .../transformations/clamp_fp16_output.cpp | 62 ++++++++++ .../transformations/clamp_fp16_output.hpp | 27 +++++ .../src/plugin/transformations_pipeline.cpp | 2 + .../clamp_fp16_output_test.cpp | 110 ++++++++++++++++++ 9 files changed, 222 insertions(+), 57 deletions(-) delete mode 100644 src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp create mode 100644 src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp diff --git a/src/core/include/openvino/pass/pattern/op/pattern.hpp b/src/core/include/openvino/pass/pattern/op/pattern.hpp index c44a6bf0bd376d..643a1c935b8927 100644 --- a/src/core/include/openvino/pass/pattern/op/pattern.hpp +++ b/src/core/include/openvino/pass/pattern/op/pattern.hpp @@ -35,6 +35,14 @@ std::function)> has_class() { return pred; } +template +std::function)> class_other_than() { + auto pred = [](std::shared_ptr node) -> bool { + return !ov::is_type(node); + }; + + return pred; +} OPENVINO_API std::function)> consumers_count(size_t n); @@ -63,6 +71,9 @@ std::function)> type_matches(const element::Type& type); OPENVINO_API std::function)> type_matches_any(const std::vector& types); +OPENVINO_API +std::function)> all_of(const std::vector)>>& predicates); + namespace op { using NodePredicate = std::function)>; using ValuePredicate = std::function& value)>; diff --git a/src/core/src/pattern/op/pattern.cpp b/src/core/src/pattern/op/pattern.cpp index 22156d08c39a2d..f3c95fea3c1291 100644 --- a/src/core/src/pattern/op/pattern.cpp +++ b/src/core/src/pattern/op/pattern.cpp @@ -107,6 +107,16 @@ std::function)> type_matches_any(const std::vector)> all_of(const std::vector)>>& predicates) { + return [=](Output output) -> bool { + for (auto& p : predicates) { + if (!p(output)) + return false; + } + return true; + }; +} } // namespace pattern } // namespace pass } // namespace ov diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp deleted file mode 100644 index 02f68d76df954e..00000000000000 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pass_manager.h" -#include "program_node.h" - -#include "gemm_inst.h" -#include "reshape_inst.h" -#include "softmax_inst.h" - -using namespace cldnn; - -void clamp_fp16_output::run(program& p) { - for (auto& node : p.get_processing_order()) { - // Add clamp activation to avoid inf result which causes Nan output - if (node->is_type() && !node->is_output() && node->get_output_layout().data_type == data_types::f16) { - auto user = node->get_users().front(); - // Reshape could be added in CreateMatMulOp : 
check a user node of the Reshape - if (user->is_type()) - user = user->get_users().front(); - - if (user->is_type()) { - float out_lo = data_type_traits::min(data_types::f16); - float out_hi = data_type_traits::max(data_types::f16); - auto activ_id = node->id() + "_overflow_clip"; - auto activ = std::make_shared(activ_id, input_info(node->id()), - activation_func::clamp, activation_additional_params{out_lo, out_hi}); - program_node& act_node = p.get_or_create(activ); - - fused_primitive_desc local_desc(activ); - local_desc.input_layout = node->get_output_layout(); - local_desc.f_param = act_node.get_fuse_params(); - local_desc.outer_dep_start_idx = -1; // No external dep - local_desc.total_num_deps = 1; - local_desc.output_layout = node->get_output_layout(); - if (node->get_fused_primitives().size() > 0) { - local_desc.fused_deps.emplace(node->get_fused_primitives().back().desc->id, 0); - } - - node->add_fused_primitive(local_desc); - } - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/include/pass_manager.h b/src/plugins/intel_gpu/src/graph/include/pass_manager.h index ba393760a07962..93bc4338072094 100644 --- a/src/plugins/intel_gpu/src/graph/include/pass_manager.h +++ b/src/plugins/intel_gpu/src/graph/include/pass_manager.h @@ -98,14 +98,6 @@ class mark_nodes : public base_pass { void run(program& p) override; }; -class clamp_fp16_output : public base_pass { -public: - clamp_fp16_output() : base_pass("clamp_fp16_output") {} - -private: - void run(program& p) override; -}; - class mark_shape_of_subgraphs : public base_pass { // This optimization pass aggregates nodes into shape_of subgraphs for further optimizations. // There are few key requirements to decide if node belongs to shape_of subgraph or not: diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index aad680e3bd1a0e..28edf4774e1833 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -589,10 +589,6 @@ void program::pre_optimize_graph(bool is_internal) { // check if there exists some layout incompatibilities and add an reorder node if required apply_opt_pass(); - // Modify fused post operation to resolve overflow of fp16 output by adding clamp activation - // Currently, 'gemm-softmax' case is applied for clamping - apply_opt_pass(); - // add optimization attributes for onednn primitives apply_opt_pass(); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp new file mode 100644 index 00000000000000..941b5c51ec3a67 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "clamp_fp16_output.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/pass/pattern/op/pattern.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/pass/pattern/op/or.hpp" + +#include + +namespace ov { +namespace intel_gpu { + +ClampFP16Output::ClampFP16Output() { + using namespace ov::op; + using namespace ov::pass::pattern; + using namespace ov::pass::pattern::op; + + auto in0 = any_input(as_value_predicate(class_other_than())); + auto in1 = any_input(as_value_predicate(class_other_than())); + 
auto matmul_m = wrap_type({in0, in1}, all_of({type_matches(ov::element::f16), consumers_count(1)}));
+    auto reshape_m = wrap_type({matmul_m, any_input()}, all_of({type_matches(ov::element::f16), consumers_count(1)}));
+    auto softmax_input_m = std::make_shared(ov::OutputVector{reshape_m, matmul_m});
+    auto softmax_m = wrap_type({softmax_input_m}, type_matches(ov::element::f16));
+
+    ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
+        const auto& pattern_map = m.get_pattern_value_map();
+        auto softmax = std::dynamic_pointer_cast(pattern_map.at(softmax_m).get_node_shared_ptr());
+        if (!softmax || transformation_callback(softmax)) {
+            return false;
+        }
+
+        auto matmul = pattern_map.at(matmul_m).get_node_shared_ptr();
+        auto target_inputs = matmul->get_output_target_inputs(0);
+
+        auto min = static_cast(std::numeric_limits::lowest());
+        auto max = static_cast(std::numeric_limits::max());
+        auto clamp = std::make_shared(matmul, min, max);
+        clamp->set_friendly_name(matmul->get_friendly_name() + "/ClampFP16Output");
+        ov::copy_runtime_info({matmul, softmax}, clamp);
+
+        for (auto& in : target_inputs) {
+            in.replace_source_output(clamp);
+        }
+
+        return true;
+    };
+
+    auto m = std::make_shared(softmax_m, "ClampFP16Output");
+    this->register_matcher(m, callback);
+}
+
+} // namespace intel_gpu
+} // namespace ov
diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp
new file mode 100644
index 0000000000000000..ac93d446ee749d
--- /dev/null
+++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp
@@ -0,0 +1,27 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/pass/graph_rewrite.hpp"
+#include "openvino/pass/pass.hpp"
+
+namespace ov {
+namespace intel_gpu {
+
+/**
+ * @brief This transformation adds a Clamp primitive between the MatMul and Softmax operations.
+ * It targets some transformer-based models (mainly Stable Diffusion) that may overflow fp16
+ * on the MatMul output tensor, which could lead to Inf/NaN values on the model output.
+ * We assume that Clamp operation handling costs almost nothing from the performance perspective as it's supposed to be fused to MatMul later + */ +class ClampFP16Output: public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::intel_gpu::ClampFP16Output"); + + ClampFP16Output(); +}; + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 8bd282d655d564..0c57b56671349c 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -123,6 +123,7 @@ #include "plugin/transformations/move_convert_after_gather.hpp" #include "plugin/transformations/kv_cache_fusion.hpp" #include "plugin/transformations/fc_convert_fusion.hpp" +#include "plugin/transformations/clamp_fp16_output.hpp" #include "transformations/low_precision/mark_dequantization_subgraph.hpp" #include "low_precision/pull_reshape_through_dequantization.hpp" @@ -693,6 +694,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { { ov::pass::Manager manager; + manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp new file mode 100644 index 00000000000000..3973b7701108f5 --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp @@ -0,0 +1,110 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include + +#include +#include +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/type/element_type.hpp" +#include +#include "openvino/op/clamp.hpp" +#include "openvino/op/reshape.hpp" +#include +#include +#include + +#include "common_test_utils/ov_test_utils.hpp" + +using namespace testing; +using namespace ov::intel_gpu; + +TEST_F(TransformationTestsF, ClampFp16OutputTest1) { + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{ 3, 2, 2 }); + auto input2 = std::make_shared(ov::element::f16, ov::Shape{ 1, 2, 2 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto softmax = std::make_shared(matmul, 1); + + model = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 }); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{ 3, 2, 2 }); + auto input2 = std::make_shared(ov::element::f16, ov::Shape{ 1, 2, 2 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto min = static_cast(std::numeric_limits::lowest()); + auto max = static_cast(std::numeric_limits::max()); + auto clamp = std::make_shared(matmul, min, max); + auto softmax = std::make_shared(clamp, 1); + + model_ref = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 }); + } + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, ClampFp16OutputTest2) { + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{ 3, 2, 2 }); + auto input2 = std::make_shared(ov::element::f16, ov::Shape{ 1, 2, 2 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto target_shape = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ 2 }, { 3, 4 }); + auto reshape = std::make_shared(matmul, target_shape, false); + auto softmax = 
std::make_shared(reshape, 1); + + model = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 }); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{ 3, 2, 2 }); + auto input2 = std::make_shared(ov::element::f16, ov::Shape{ 1, 2, 2 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto min = static_cast(std::numeric_limits::lowest()); + auto max = static_cast(std::numeric_limits::max()); + auto clamp = std::make_shared(matmul, min, max); + auto target_shape = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ 2 }, { 3, 4 }); + auto reshape = std::make_shared(clamp, target_shape, false); + auto softmax = std::make_shared(reshape, 1); + + model_ref = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 }); + } + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + +TEST_F(TransformationTestsF, ClampFp16OutputTest3) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{ 3, 2, 2 }); + auto input2 = std::make_shared(ov::element::f32, ov::Shape{ 1, 2, 2 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto softmax = std::make_shared(matmul, 1); + + model = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 }); + manager.register_pass(); + } + { + model_ref = model->clone(); // not changed due to f32 precision + } + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} + + +TEST_F(TransformationTestsF, ClampFp16OutputTest4) { + { + auto input1 = std::make_shared(ov::element::f16, ov::Shape{ 3, 2, 2 }); + auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 1, 2, 2 }, { 1 }); + auto matmul = std::make_shared(input1, input2, true, false); + auto softmax = std::make_shared(matmul, 1); + + model = std::make_shared(ov::NodeVector{ softmax }, ov::ParameterVector{ input1 }); + manager.register_pass(); + } + { + model_ref = model->clone(); // Not changed due to const input2 + } + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); +} From 84d7d8632bc30b705da6c54660716482c21d0685 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Fri, 19 Jan 2024 12:58:03 +0400 Subject: [PATCH 086/122] [GPU] Refactor primitives creation with multiple outputs (#22253) --- .../include/intel_gpu/plugin/common_utils.hpp | 15 +++++++++++ .../intel_gpu/src/plugin/common_utils.cpp | 19 ++++++++++++++ .../src/plugin/ops/ctc_greedy_decoder.cpp | 22 ++-------------- .../src/plugin/ops/non_max_suppression.cpp | 26 +++---------------- .../intel_gpu/src/plugin/ops/proposal.cpp | 18 ++----------- src/plugins/intel_gpu/src/plugin/ops/rms.cpp | 8 +----- src/plugins/intel_gpu/src/plugin/ops/topk.cpp | 18 ++----------- .../intel_gpu/src/plugin/ops/transpose.cpp | 2 +- 8 files changed, 45 insertions(+), 83 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp index 94f56e8b926d39..e3ec998df78890 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp @@ -8,8 +8,10 @@ #include #include "intel_gpu/runtime/layout.hpp" #include "intel_gpu/runtime/memory.hpp" +#include "intel_gpu/runtime/optionals.hpp" #include "intel_gpu/runtime/shape_predictor.hpp" #include "openvino/core/layout.hpp" +#include "openvino/core/node.hpp" #include "openvino/core/type/element_type.hpp" namespace ov { @@ -71,6 +73,19 @@ inline 
ov::element::Type convert_to_supported_device_type(ov::element::Type et) } } +using PrecisionMap = std::map; + +std::vector get_output_data_types(const ov::Node* op, PrecisionMap precision_map = {}); +std::vector get_output_paddings(const ov::Node* op); + +inline std::vector get_output_data_types(const std::shared_ptr& op, PrecisionMap precision_map = {}) { + return get_output_data_types(op.get(), precision_map); +} + +inline std::vector get_output_paddings(const std::shared_ptr& op) { + return get_output_paddings(op.get()); +} + inline ov::Shape get_tensor_shape(const ov::PartialShape& pshape) { ov::Shape res(pshape.size()); for (size_t i = 0; i < pshape.size(); i++) { diff --git a/src/plugins/intel_gpu/src/plugin/common_utils.cpp b/src/plugins/intel_gpu/src/plugin/common_utils.cpp index bf6bb5d79cd01a..7dfa876570b013 100644 --- a/src/plugins/intel_gpu/src/plugin/common_utils.cpp +++ b/src/plugins/intel_gpu/src/plugin/common_utils.cpp @@ -169,5 +169,24 @@ void convert_and_copy(const ov::ITensor* src, ov::ITensor const* dst, const cldn return ::convert_and_copy(src_ptr, src_et, dst_ptr, dst_et, size, cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding())); } +std::vector get_output_data_types(const ov::Node* op, PrecisionMap precision_map) { + std::vector output_data_types; + for (size_t i = 0; i < op->get_output_size(); i++) { + auto type = op->get_output_element_type(i); + if (precision_map.find(type) != precision_map.end()) + type = precision_map.at(type); + output_data_types.push_back(cldnn::element_type_to_data_type(type)); + } + return output_data_types; +} + +std::vector get_output_paddings(const ov::Node* op) { + std::vector output_paddings; + for (size_t i = 0; i < op->get_output_size(); i++) { + output_paddings.push_back(cldnn::padding()); + } + return output_paddings; +} + } // namespace intel_gpu } // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp index 1535170b64c5fe..c05ca4aec5595e 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp @@ -44,24 +44,6 @@ static void CreateCommonCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ } if (p.use_new_shape_infer()) { - size_t num_outputs = op->get_output_size(); - - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; - uint32_t blank_index = UINT32_MAX; if (reordered_inputs.size() == 3) { auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); @@ -84,8 +66,8 @@ static void CreateCommonCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ cldnn::padding({0, 0, 0, 0}, 0), cldnn::element_type_to_data_type(op->get_output_element_type(0)), op->get_output_size()); - primitive.output_paddings = get_output_paddings(); - primitive.output_data_types = get_output_data_types(); + primitive.output_paddings = get_output_paddings(op); + primitive.output_data_types = get_output_data_types(op); p.add_primitive(*op, primitive); } else { uint32_t blank_index = static_cast(op->get_input_shape(0).back() - 1); 
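
Note on the refactor in this commit: each op-creation call site above and below drops its
locally defined get_output_* lambdas in favor of the two shared helpers declared in
common_utils.hpp. The sketch below is a minimal, hypothetical usage example, not part of
the patch: the set_output_metadata wrapper and its PrimitiveT parameter are illustrative
stand-ins for the concrete cldnn primitives touched by these hunks, while the i64-to-i32
remap mirrors the non_max_suppression call sites that follow.

    // Hypothetical sketch: filling a primitive's output metadata with the shared
    // helpers instead of per-op lambdas. Assumes the declarations added to
    // intel_gpu/plugin/common_utils.hpp in this commit.
    #include "intel_gpu/plugin/common_utils.hpp"

    template <typename PrimitiveT>
    void set_output_metadata(PrimitiveT& prim, const std::shared_ptr<ov::Node>& op) {
        // One default cldnn::padding per declared output of the node.
        prim.output_paddings = ov::intel_gpu::get_output_paddings(op);
        // One data type per output; i64 is remapped to i32 for primitives that
        // only support i32 outputs (as in the non_max_suppression hunk below).
        prim.output_data_types =
            ov::intel_gpu::get_output_data_types(op, {{ov::element::i64, ov::element::i32}});
    }
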
diff --git a/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp b/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp index 6e91cc7db9fe2f..843e4706764b8c 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp @@ -53,26 +53,6 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh auto boxesShape = op->get_input_partial_shape(0); size_t num_outputs = op->get_output_size(); - - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - // GPU primitive supports only i32 as output data type - if (type == ov::element::i64) { - type = ov::element::i32; - } - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; - if (p.use_new_shape_infer()) { auto nonMaxSuppressionLayerName = layer_type_name_ID(op); auto prim = cldnn::non_max_suppression( @@ -84,8 +64,8 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh op->m_sort_result_descending, "", "", "", "", "", "", num_outputs); - prim.output_paddings = get_output_paddings(); - prim.output_data_types = get_output_data_types(); + prim.output_paddings = get_output_paddings(op); + prim.output_data_types = get_output_data_types(op, {{ov::element::i64, ov::element::i32}}); prim.rotation = rotation; switch (reordered_inputs.size()) { @@ -153,7 +133,7 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh op->m_sort_result_descending, "", "", "", "", "", ""); - prim.output_data_types = get_output_data_types(); + prim.output_data_types = get_output_data_types(op, {{ov::element::i64, ov::element::i32}}); prim.rotation = rotation; switch (reordered_inputs.size()) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp index 6247c701079cf7..84ac443fe7f98d 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp @@ -56,20 +56,6 @@ static void CreateProposalOp(ProgramBuilder& p, const std::shared_ptrget_output_size(); - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; auto proposalPrim = cldnn::proposal(layerName, inputs[0], // cls_score @@ -98,8 +84,8 @@ static void CreateProposalOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)), num_outputs); - proposalPrim.output_paddings = get_output_paddings(); - proposalPrim.output_data_types = get_output_data_types(); + proposalPrim.output_paddings = get_output_paddings(op); + proposalPrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, proposalPrim); } else { if (op->get_output_size() == 2) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/rms.cpp b/src/plugins/intel_gpu/src/plugin/ops/rms.cpp index 01289bd5022d6d..bf36aab7f32128 
100644 --- a/src/plugins/intel_gpu/src/plugin/ops/rms.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/rms.cpp @@ -23,17 +23,11 @@ static void CreateRMSOp(ProgramBuilder& p, const std::shared_ptr& op) { auto inputs = p.GetInputInfo(op); std::string primitive_name = layer_type_name_ID(op); - auto get_output_data_types = [&]() { - std::vector output_data_types; - auto type = op->get_output_element_type(0); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - return output_data_types; - }; auto rms = cldnn::rms(primitive_name, inputs[0], inputs[1], op->get_epsilon()); - rms.output_data_types = get_output_data_types(); + rms.output_data_types = get_output_data_types(op); p.add_primitive(*op, rms); } diff --git a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp index 2c38259f540c38..79bc1508a8bcd2 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp @@ -27,20 +27,6 @@ static void TopKImpl(ProgramBuilder& p, if (p.use_new_shape_infer()) { size_t num_outputs = op->get_output_size(); - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; auto topk_constant = std::dynamic_pointer_cast(op->input_value(1).get_node_shared_ptr()); auto argmaxPrim = cldnn::arg_max_min(layerName, @@ -55,8 +41,8 @@ static void TopKImpl(ProgramBuilder& p, cldnn::padding({0, 0, 0, 0}, 0), cldnn::element_type_to_data_type(op->get_output_element_type(0)), num_outputs); - argmaxPrim.output_paddings = get_output_paddings(); - argmaxPrim.output_data_types = get_output_data_types(); + argmaxPrim.output_paddings = get_output_paddings(op); + argmaxPrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, argmaxPrim); } else { if (op->get_output_size() == 2) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp b/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp index ae7daf2b3992ce..4ae9362bc5d6c6 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp @@ -34,7 +34,7 @@ static void CreateTransposeOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)); + permutePrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, permutePrim); } From 25a55455a6731639e8ba2db1c58d4e3be6cb8df3 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 19 Jan 2024 01:03:00 -0800 Subject: [PATCH 087/122] [ONNX] Frontend refactoring: ONNXEditor replacing by InputModel (#22231) * Updating tests for InputModel instead of ONNXEditor * Update onnx_editor.cpp: int -> size_t * Update onnx_utils.cpp * Update onnx_transformations.cpp: commented unused func * Update onnx_utils.cpp: code style * Update onnx_editor.cpp: removed debug code --- .../onnx/frontend/src/input_model.cpp | 34 +- src/frontends/onnx/tests/onnx_editor.cpp | 1932 +++++------------ .../tests/onnx_editor_topological_sort.cpp | 46 +- .../onnx/tests/onnx_import_with_editor.in.cpp | 3 +- .../onnx/tests/onnx_ops_registration.cpp | 3 +- .../onnx/tests/onnx_transformations.cpp | 38 +- src/frontends/onnx/tests/onnx_utils.cpp | 72 +- 
src/frontends/onnx/tests/onnx_utils.hpp | 10 +- 8 files changed, 697 insertions(+), 1441 deletions(-) diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index e43b1b0bb301a4..29ffcccbd63603 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -96,6 +96,8 @@ ov::frontend::Place::Ptr InputModel::get_place_by_operation_name_and_output_port } void InputModel::set_name_for_tensor(const ov::frontend::Place::Ptr& tensor, const std::string& new_name) { + FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place."); + const auto onnx_tensor = std::dynamic_pointer_cast(tensor); FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type."); const auto original_name = onnx_tensor->get_names().at(0); @@ -113,6 +115,8 @@ void InputModel::set_name_for_tensor(const ov::frontend::Place::Ptr& tensor, con } void InputModel::set_name_for_operation(const ov::frontend::Place::Ptr& operation, const std::string& new_name) { + FRONT_END_GENERAL_CHECK(operation, __FUNCTION__, " expects a pointer to place."); + const auto onnx_operation = std::dynamic_pointer_cast(operation); FRONT_END_GENERAL_CHECK(onnx_operation, __FUNCTION__, " expects a pointer to place of ONNX operation type."); onnx_operation->set_name(new_name); @@ -125,12 +129,15 @@ void InputModel::free_name_for_operation(const std::string& name) { void InputModel::set_name_for_dimension(const ov::frontend::Place::Ptr& tensor, size_t shape_dim_index, const std::string& dim_name) { + FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place."); + const auto onnx_tensor = std::dynamic_pointer_cast(tensor); FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type."); onnx_tensor->set_name_for_dimension(shape_dim_index, dim_name); } void InputModel::add_name_for_tensor(const ov::frontend::Place::Ptr& tensor, const std::string& new_name) { + FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place."); FRONT_END_GENERAL_CHECK(!new_name.empty(), "The additional tensor name cannot be empty."); ov::frontend::Place::Ptr tensor_place = tensor; @@ -153,6 +160,8 @@ void InputModel::free_name_for_tensor(const std::string&) { } void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& shape) { + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::string input_name; // name of the model input which should be reshaped const auto input_edge = std::dynamic_pointer_cast(place); if (input_edge) { @@ -173,6 +182,8 @@ void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const } ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const { + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::string tensor_name; // name of the model input which should be reshaped const auto input_edge = std::dynamic_pointer_cast(place); const auto output_edge = std::dynamic_pointer_cast(place); @@ -194,13 +205,16 @@ ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& p } void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) { + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::map m; m[place->get_names().at(0)] = type; m_editor->set_input_types(m); } ov::element::Type 
InputModel::get_element_type(const ov::frontend::Place::Ptr& place) const { - OPENVINO_ASSERT(place, "Cannot return a type for nullptr Place."); + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::string tensor_name; const auto input_edge = std::dynamic_pointer_cast(place); const auto output_edge = std::dynamic_pointer_cast(place); @@ -333,6 +347,8 @@ void InputModel::extract_subgraph(const std::vector& i } ov::frontend::Place::Ptr InputModel::add_output(const ov::frontend::Place::Ptr& place) { + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::string name = place->get_names().at(0); const auto& outputs = m_editor->model_outputs(); @@ -364,6 +380,8 @@ ov::frontend::Place::Ptr InputModel::add_output(const ov::frontend::Place::Ptr& } void InputModel::remove_output(const ov::frontend::Place::Ptr& place) { + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); + std::string name = place->get_names().at(0); std::vector outputs = get_outputs(); const auto& output_names = m_editor->model_outputs(); @@ -383,12 +401,14 @@ void InputModel::remove_output(const ov::frontend::Place::Ptr& place) { } void InputModel::cut_and_add_new_input(const ov::frontend::Place::Ptr& place, const std::string& new_name_optional) { - std::vector inputs = get_inputs(); - std::vector outputs = get_outputs(); + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); if (place->is_input()) return; + std::vector inputs = get_inputs(); + std::vector outputs = get_outputs(); + const auto edge_place = convert_place_to_input_edge({place}); const auto edge_outputs = convert_place_to_output_edge(outputs); @@ -404,9 +424,11 @@ void InputModel::cut_and_add_new_input(const ov::frontend::Place::Ptr& place, co } void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) { - std::map> map; + FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place."); if (const auto var_place = std::dynamic_pointer_cast(place)) { + std::map> map; + auto name = place->get_names().at(0); auto p_shape = m_editor->get_tensor_shape(name); auto el_type = m_editor->get_input_type(name); @@ -488,6 +510,8 @@ std::vector InputModel::convert_place_to_output_edge( } void InputModel::add_tensor_names(std::shared_ptr& model) { + FRONT_END_GENERAL_CHECK(model, __FUNCTION__, " expects a pointer to model."); + auto model_inputs = model->inputs(); const auto find_input_by_tensor_name = [&model_inputs](const std::string& name) { return std::find_if(std::begin(model_inputs), @@ -508,6 +532,8 @@ void InputModel::add_tensor_names(std::shared_ptr& model) { } void InputModel::reshape_model_inputs(std::shared_ptr& model) { + FRONT_END_GENERAL_CHECK(model, __FUNCTION__, " expects a pointer to model."); + const auto& inputs = model->inputs(); const auto is_input_name = [&inputs](const std::string& name) { return std::find_if(std::begin(inputs), std::end(inputs), [&name](const OutputVector::value_type& input) { diff --git a/src/frontends/onnx/tests/onnx_editor.cpp b/src/frontends/onnx/tests/onnx_editor.cpp index b3c8038165c4d8..d8dc11bac5e0fe 100644 --- a/src/frontends/onnx/tests/onnx_editor.cpp +++ b/src/frontends/onnx/tests/onnx_editor.cpp @@ -6,6 +6,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" #include "editor.hpp" @@ -15,7 +16,7 @@ #include 
"openvino/op/constant.hpp" using namespace ov; -using namespace ov::onnx_editor; +using namespace ov::frontend; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); @@ -42,13 +43,13 @@ std::shared_ptr find_input(const ParameterVector& inputs, con OPENVINO_TEST(onnx_editor, types__single_input_type_substitution) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_abc.onnx", &front_end); - editor.set_input_types({{"A", element::i64}}); + input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i64); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); const auto float_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::f32)); @@ -64,14 +65,16 @@ OPENVINO_TEST(onnx_editor, types__single_input_type_substitution) { OPENVINO_TEST(onnx_editor, types__all_inputs_type_substitution) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_abc.onnx", &front_end); - editor.set_input_types({{"A", element::i8}, {"B", element::i8}, {"C", element::i8}}); + input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i8); + input_model->set_element_type(input_model->get_place_by_tensor_name("B"), element::i8); + input_model->set_element_type(input_model->get_place_by_tensor_name("C"), element::i8); - const auto function = editor.get_function(); + const auto model = front_end->convert(input_model); - const auto graph_inputs = function->get_parameters(); + const auto graph_inputs = model->get_parameters(); const auto float_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::f32)); @@ -84,85 +87,80 @@ OPENVINO_TEST(onnx_editor, types__all_inputs_type_substitution) { } OPENVINO_TEST(onnx_editor, types__missing_type_in_input_descriptor) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/invalid_input_no_type.onnx"})}; + auto input_model = load_model("model_editor/invalid_input_no_type.onnx"); // input A doesn't have the "type" field in the model and so the data type cannot be modified - EXPECT_THROW(editor.set_input_types({{"A", element::f32}}), ov::Exception); + EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::f32), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__missing_tensor_type_in_input_descriptor) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/invalid_input_no_tensor_type.onnx"})}; + auto input_model = load_model("model_editor/invalid_input_no_tensor_type.onnx"); // input A doesn't have the "tensor_type" field in the model - EXPECT_THROW(editor.set_input_types({{"A", element::f32}}), ov::Exception); + 
EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::f32), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__unsupported_data_type_passed) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + auto input_model = load_model("model_editor/add_abc.onnx"); - EXPECT_THROW(editor.set_input_types({{"A", element::dynamic}}), ov::Exception); + EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::dynamic), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__incorrect_input_name_passed) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + auto input_model = load_model("model_editor/add_abc.onnx"); - EXPECT_THROW(editor.set_input_types({{"ShiaLaBeouf", element::i64}}), ov::Exception); + EXPECT_EQ(input_model->get_place_by_tensor_name("ShiaLaBeouf"), nullptr); } OPENVINO_TEST(onnx_editor, types__elem_type_missing_in_input) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/elem_type_missing_in_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/elem_type_missing_in_input.onnx", &front_end); // the "elem_type" is missing in the model but it should be possible to set the type anyway - EXPECT_NO_THROW(editor.set_input_types({{"A", element::i64}})); + EXPECT_NO_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i64)); - const auto function = editor.get_function(); + const auto model = front_end->convert(input_model); - const auto graph_inputs = function->get_parameters(); + const auto graph_inputs = model->get_parameters(); const auto integer_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::i64)); EXPECT_EQ(integer_inputs_count, 2); - const auto function_result = function->get_result(); + const auto function_result = model->get_result(); EXPECT_EQ(function_result->get_element_type(), element::i64); } OPENVINO_TEST(onnx_editor, shapes__modify_single_input) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{1}; - editor.set_input_shapes({{"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "B")->get_partial_shape().same_scheme(new_shape)); } OPENVINO_TEST(onnx_editor, shapes__modify_all_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{1, 2, 3, 5, 8, 13}; - editor.set_input_shapes({{"A", new_shape}, 
{"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); for (const auto& input : graph_inputs) { EXPECT_TRUE(input->get_partial_shape().same_scheme(new_shape)); @@ -170,48 +168,46 @@ OPENVINO_TEST(onnx_editor, shapes__modify_all_inputs) { } OPENVINO_TEST(onnx_editor, shapes__dynamic_rank_in_model) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__dynamic_rank_in_model.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__dynamic_rank_in_model.onnx", &front_end); // input A in the model doesn't have the "shape" field meaning it has dynamic rank // it should still be possible to set such input's shape to some custom value const auto expected_shape_of_A = PartialShape{1, 2}; - EXPECT_NO_THROW(editor.set_input_shapes({{"A", expected_shape_of_A}})); + EXPECT_NO_THROW(input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), expected_shape_of_A)); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "A")->get_partial_shape().same_scheme(expected_shape_of_A)); } OPENVINO_TEST(onnx_editor, shapes__set_dynamic_dimension) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{Dimension::dynamic()}; - editor.set_input_shapes({{"A", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "A")->get_partial_shape().same_scheme(new_shape)); } OPENVINO_TEST(onnx_editor, shapes__set_mixed_dimensions) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape_A = PartialShape{21, Dimension::dynamic()}; const auto new_shape_B = PartialShape{Dimension::dynamic(), 37}; - editor.set_input_shapes({{"A", new_shape_A}, {"B", new_shape_B}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape_A); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape_B); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); const auto input_A = find_input(graph_inputs, "A"); EXPECT_TRUE(input_A->get_partial_shape().same_scheme(new_shape_A)); @@ 
-221,16 +217,16 @@ OPENVINO_TEST(onnx_editor, shapes__set_mixed_dimensions) { } OPENVINO_TEST(onnx_editor, shapes__set_scalar_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{}; - editor.set_input_shapes({{"A", new_shape}, {"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); const auto input_A = find_input(graph_inputs, "A"); EXPECT_TRUE(input_A->get_partial_shape().same_scheme(new_shape)); @@ -240,16 +236,16 @@ OPENVINO_TEST(onnx_editor, shapes__set_scalar_inputs) { } OPENVINO_TEST(onnx_editor, shapes__static_to_dynamic_rank_substitution) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape::dynamic(); - editor.set_input_shapes({{"A", new_shape}, {"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); for (const auto& input : graph_inputs) { EXPECT_TRUE(input->get_partial_shape().same_scheme(new_shape)); @@ -257,1614 +253,802 @@ OPENVINO_TEST(onnx_editor, shapes__static_to_dynamic_rank_substitution) { } OPENVINO_TEST(onnx_editor, subgraph__linear_model_head_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(1, 0)}}, {}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("relu1")}, {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_head_cut_ins_and_outs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - 
"model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(1, 0)}}, {{OutputEdge(2, 0)}}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("relu1")}, {input_model->get_outputs()[0]}); - // expected to behave the same way as subgraph__linear_model_head_cut - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_deeper_head_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(2, 0)}}, {}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("maxpool1")}, {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_deeper_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_deeper_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_operation_name("relu1")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_tail_cut_ins_and_outs) { - ONNXModelEditor 
editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 0}}}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({input_model->get_inputs()[0]}, {input_model->get_place_by_operation_name("relu1")}); - // expected to behave the same way as subgraph__linear_model_tail_cut - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_with_initializer_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head_with_initializer.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head_with_initializer.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_2")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__initializer_without_matching_input_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__initializer_without_matching_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__initializer_without_matching_input.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__initializer_without_matching_input_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << 
result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_deeper_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_deeper_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_deeper_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__no_input_params) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - ONNXModelEditor editor{model_path}; + input_model->extract_subgraph({}, {}); - editor.extract_subgraph({}, {}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/subgraph__inception_head.onnx"); - const auto result = compare_onnx_models(editor.model_string(), model_path); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__initializer_to_input_replacement) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head_with_initializer.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head_with_initializer.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 2}}}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_b_0")}, + {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__initializer_to_input_replacement.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__initializer_to_input_replacement.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, 
subgraph__initializer_to_input_replacement_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__initializer_without_matching_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__initializer_without_matching_input.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 2}}}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_b_0")}, + {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__initializer_to_input_replacement.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__initializer_to_input_replacement.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__multiout_op_output_edge) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{5, 1}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("split2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__multiout_op_output_edge.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__multiout_op_output_edge.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__existing_inputs_and_outputs_based_extraction) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - editor.extract_subgraph({{InputEdge{1, 1}, InputEdge{2, 0}}}, {{OutputEdge{4, 0}}}); + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("in1"), input_model->get_place_by_tensor_name("in3")}, + {input_model->get_place_by_tensor_name("mul2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__existing_inputs_and_outputs_based_extraction.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = + convert_model("model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator 
func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__twice_input_edge_from_tensor_with_single_consumer) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab.onnx", &front_end); - editor.extract_subgraph({InputEdge{1, 1}}, {}); + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("X")->get_consuming_operations()[0]->get_input_port(1)}, + {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = + convert_model("model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - editor.extract_subgraph({{InputEdge{1, 0}, InputEdge{6, 0}}}, {{OutputEdge{6, 0}, OutputEdge{4, 0}}}); + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers.onnx"}); + input_model->extract_subgraph( + {relu_consumers[0]->get_input_port(0), relu_consumers[2]->get_input_port(0)}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("mul2")}); + auto model = front_end->convert(input_model); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.onnx"); - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - editor.extract_subgraph({{InputEdge{3, 0}, InputEdge{3, 1}}}, {{OutputEdge{3, 0}, OutputEdge{4, 0}}}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers_2.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - 
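// The load_model()/convert_model() helpers used throughout this file are
// defined in the shared test utilities rather than in this diff; below is a
// minimal sketch of what they are assumed to do, built only on the public
// ov::frontend API (the signatures here are illustrative, not the actual
// declarations from the test utilities):

#include "openvino/frontend/manager.hpp"

ov::frontend::InputModel::Ptr load_model(const std::string& model_rel_path,
                                         ov::frontend::FrontEnd::Ptr* return_front_end) {
    // Locate the ONNX frontend and load a test model relative to the same
    // models directory the remaining tests in this file resolve against.
    static ov::frontend::FrontEndManager manager;
    auto front_end = manager.load_by_framework("onnx");
    auto input_model = front_end->load(ov::util::path_join(
        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, model_rel_path}));
    if (return_front_end != nullptr) {
        *return_front_end = front_end;
    }
    return input_model;
}

std::shared_ptr<ov::Model> convert_model(const std::string& model_rel_path) {
    // Load and immediately convert, for tests that only need the ov::Model.
    ov::frontend::FrontEnd::Ptr front_end;
    auto input_model = load_model(model_rel_path, &front_end);
    return front_end->convert(input_model);
}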
- EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_3) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - editor.extract_subgraph({{InputEdge{3, 0}, InputEdge{6, 0}}}, {{OutputEdge{6, 0}, OutputEdge{5, 1}}}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers_3.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_4) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - editor.extract_subgraph({{InputEdge{1, 0}, InputEdge{3, 0}}}, {}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers_4.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_5) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - editor.extract_subgraph({InputEdge{3, 0}}, {{OutputEdge{6, 0}, OutputEdge{5, 1}}}); - - // expected to behave the same way as the test above - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers_5.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - editor.extract_subgraph({{InputEdge{1, 0, "new_name_1"}, InputEdge{6, 0, "new_name_2"}}}, - {{OutputEdge{6, 0}, OutputEdge{4, 0}}}); - - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_input_relu2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{4, 
0}}}, {}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_input_relu2.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{2, 0}}}, {}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{2, 0}, InputEdge{3, 0}}}, {}); - - // same as above - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_relu2_and_init) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{5, 0}, InputEdge{3, 0}}}, {}); - - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__invalid_edge_idx) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{15, 0}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The specified node index is out of range of nodes in the original model") != - std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, subgraph__invalid_port_idx) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{0, 3}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The specified node with index: 0 has not input port with index: 3") != std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, subgraph__inputs_getter) { - ONNXModelEditor 
editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - EXPECT_EQ(editor.model_inputs(), (std::vector{"data_0", "conv1/7x7_s2_w_0", "conv1/7x7_s2_b_0"})); - - editor.extract_subgraph({{InputEdge{1, 0}}}, {}); - - EXPECT_EQ(editor.model_inputs(), (std::vector{"conv1/7x7_s2_1"})); -} - -OPENVINO_TEST(onnx_editor, subgraph__custom_input_name_already_exist) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{1, 0, "conv1/7x7_s2_b_0"}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("New custom input name: conv1/7x7_s2_b_0 already exist in the graph") != - std::string::npos); - } -} - -// HIGHT LEVEL API TESTS -// INPUT EDGES TEST -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_output_name_and_input_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = - editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_2"}}, EditorInput{"conv1/7x7_s2_1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{"data_0"}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 0); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_output_name_and_input_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_2"}}, EditorInput{0}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{1}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 1); - - const InputEdge edge3 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{2}); - EXPECT_EQ(edge3.m_node_idx, 0); - EXPECT_EQ(edge3.m_port_idx, 2); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1"}, EditorInput{"conv1/7x7_s2_1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"conv1"}, EditorInput{"conv1/7x7_s2_w_0"}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 1); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1_name"}, EditorInput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"split_name"}, EditorInput{0}); - 
EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_index_custom_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1_name"}, EditorInput{0, "custom_input_name_1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - EXPECT_EQ(edge.m_new_input_name, "custom_input_name_1"); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"split_name"}, EditorInput{0, "custom_input_name_2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); - EXPECT_EQ(edge2.m_new_input_name, "custom_input_name_2"); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{0}, EditorInput{0, "custom_input_name_1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - EXPECT_EQ(edge.m_new_input_name, "custom_input_name_1"); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{5}, EditorInput{0}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); - - try { - editor.find_input_edge(EditorNode{99}, EditorInput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_empty_node_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - try { - editor.find_input_edge(EditorNode{""}, EditorInput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with name: not_given and output_name: not_given was not found") != - std::string::npos); - } -} - -// OUTPUT EDGES TEST -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const OutputEdge edge = editor.find_output_edge(EditorNode{EditorOutput{"mul2"}}, EditorOutput{"mul2"}); - EXPECT_EQ(edge.m_node_idx, 4); - EXPECT_EQ(edge.m_port_idx, 0); - - const OutputEdge edge2 = editor.find_output_edge(EditorNode{EditorOutput{"split1"}}, EditorOutput{"split2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); - - // simplified overload - const OutputEdge edge3 = editor.find_output_edge("mul2"); - EXPECT_EQ(edge3.m_node_idx, 4); - EXPECT_EQ(edge3.m_port_idx, 0); +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_2) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - const OutputEdge edge4 = editor.find_output_edge("split2"); - EXPECT_EQ(edge4.m_node_idx, 5); - EXPECT_EQ(edge4.m_port_idx, 1); -} + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); -OPENVINO_TEST(onnx_editor, 
editor_api_select_output_edge_by_output_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + input_model->extract_subgraph( + {relu_consumers[1]->get_input_port(0), relu_consumers[1]->get_input_port(1)}, + {input_model->get_place_by_tensor_name("mul2"), relu_consumers[1]->get_output_port()}); + auto model = front_end->convert(input_model); - const OutputEdge edge = editor.find_output_edge(EditorNode{EditorOutput{"add2"}}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 3); - EXPECT_EQ(edge.m_port_idx, 0); + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.onnx"); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{EditorOutput{"split1"}}, EditorOutput{1}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - const OutputEdge edge3 = editor.find_output_edge(EditorNode{EditorOutput{"split2"}}, EditorOutput{0}); - EXPECT_EQ(edge3.m_node_idx, 5); - EXPECT_EQ(edge3.m_port_idx, 0); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_name_and_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const OutputEdge edge = editor.find_output_edge(EditorNode{"relu1_name"}, EditorOutput{"relu1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{"split2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_3) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - const OutputEdge edge = editor.find_output_edge(EditorNode{"relu1_name"}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{1}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); -} + input_model->extract_subgraph( + {relu_consumers[1]->get_input_port(0), relu_consumers[2]->get_input_port(0)}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("split2")}); + auto model = front_end->convert(input_model); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.onnx"); 
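// Note on the migration pattern used in these consumer tests: the numeric
// InputEdge{node_idx, port_idx} coordinates have no direct Place API
// equivalent, so an input edge is reached by navigating from a named place
// instead. A sketch of the correspondence, assuming the names from
// subgraph_extraction_tests.onnx (local variable names are illustrative):
auto sample_consumers =
    input_model->get_place_by_operation_name("relu1_name")->get_consuming_operations();
// Old InputEdge{6, 0} meant input port 0 of the node with index 6 -- here,
// the third consumer of the relu1 tensor in topological order.
auto sample_edge = sample_consumers[2]->get_input_port(0);
// CONSUMERS_COUNT is enabled on the comparator in these multi-consumer
// tests, presumably so that an extraction which drops or duplicates a
// consumer of a shared tensor is not reported as equivalent to the
// reference model.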
- const OutputEdge edge = editor.find_output_edge(EditorNode{5}, EditorOutput{1}); - EXPECT_EQ(edge.m_node_idx, 5); - EXPECT_EQ(edge.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - try { - editor.find_output_edge(EditorNode{99}, EditorOutput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_edge_const_network) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{EditorOutput{"relu4"}}, EditorInput{0}); - EXPECT_EQ(edge.m_node_idx, 3); - EXPECT_EQ(edge.m_port_idx, 0); - - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{0}); - EXPECT_EQ(edge2.m_node_idx, 3); - EXPECT_EQ(edge2.m_port_idx, 0); - - const OutputEdge edge3 = editor.find_output_edge(EditorNode{"add1_name"}, EditorOutput{0}); - EXPECT_EQ(edge3.m_node_idx, 4); - EXPECT_EQ(edge3.m_port_idx, 0); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_edge_error_handling) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - // node with given output name not found - try { - editor.find_input_edge(EditorNode{EditorOutput{"not_existed"}}, EditorInput{0}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with name: not_given and output_name: not_existed was not found") != - std::string::npos); - } +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_4) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - // node with given name not found - try { - editor.find_input_edge(EditorNode{"not_existed"}, EditorInput{0}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with name: not_existed and output_name: not_given was not found") != - std::string::npos); - } + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - // input index out of scope - try { - editor.find_input_edge(EditorNode{"relu4_name"}, EditorInput{1}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with index: 3 has not input with index: 1") != std::string::npos); - } + input_model->extract_subgraph({relu_consumers[0]->get_input_port(0), relu_consumers[1]->get_input_port(0)}, {}); + auto model = front_end->convert(input_model); - // output index out of scope - try { - editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{1}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with index: 3 has not output with index: 1") != std::string::npos); - } + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.onnx"); - // input name not found - try { - editor.find_input_edge(EditorNode{"relu4_name"}, 
EditorInput{"not_existed"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with index: 3 has not input with name: not_existed") != std::string::npos); - } + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - // output name not found - try { - editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{"not_existed"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with index: 3 has not output with name: not_existed") != std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -// Nodes with ambiguous node names tests -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_but_matched_input) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - InputEdge edge = editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"in2"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 1); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"add1"}); - EXPECT_EQ(edge2.m_node_idx, 3); - EXPECT_EQ(edge2.m_port_idx, 1); -} +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_5) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_and_not_matched_input) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - try { - editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"in3"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Input edge described by: add_ambiguous_name and input name: in3 was not found") != - std::string::npos); - } + input_model->extract_subgraph( + {relu_consumers[1]->get_input_port(0)}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("split2")}); + auto model = front_end->convert(input_model); - try { - editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"relu1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "Given node name: add_ambiguous_name and input name: relu1 are ambiguous to determine input edge") != - std::string::npos); - } -} + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.onnx"); -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_and_input_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - try { - editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{0}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - 
EXPECT_TRUE( - msg.find("Given node name: add_ambiguous_name and input index: 0 are ambiguous to determine input edge") != - std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_ambiguous_node_name_but_matched_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - const OutputEdge edge = editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"add1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"add2"}); - EXPECT_EQ(edge2.m_node_idx, 3); - EXPECT_EQ(edge2.m_port_idx, 0); -} + input_model->cut_and_add_new_input(relu_consumers[0]->get_input_port(0), "new_name_1"); + input_model->cut_and_add_new_input(relu_consumers[2]->get_input_port(0), "new_name_2"); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_the_same_node_name_and_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; + input_model->extract_subgraph( + {}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("mul2")}); - const OutputEdge edge = editor.find_output_edge(EditorNode{"add1"}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); + auto model = front_end->convert(input_model); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{EditorOutput{"add1"}}, EditorOutput{0}); - EXPECT_EQ(edge2.m_node_idx, 4); - EXPECT_EQ(edge2.m_port_idx, 0); -} + auto model_ref = convert_model( + "model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.onnx"); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_ambiguous_node_name_and_not_matched_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - try { - editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"split2"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Output edge described by: add_ambiguous_name and output name: split2 was not found") != - std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_ambiguous_node_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - try { - editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{0}); - } 
catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "Given node name: add_ambiguous_name and output index: 0 are ambiguous to determine output edge") != - std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, editor_api_use_edge_mapper_with_graph_cutter) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{1, "in2"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(1)); - // InputEdge{2, "in3"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("conv1")), EditorInput(0)); - - const auto output_edge = editor.find_output_edge(EditorNode(EditorOutput("mul2")), EditorOutput(0)); - // OutputEdge{4, "mul2"} - editor.extract_subgraph({input_edge_1, input_edge_2}, {output_edge}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__existing_inputs_and_outputs_based_extraction.onnx"}); +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_input_relu2) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("relu1")}, {}); - EXPECT_TRUE(result.is_ok) << result.error_message; + auto model = front_end->convert(input_model); - // check if mapper was updated after the model changed - const auto input_edge_4 = editor.find_input_edge(EditorNode(EditorOutput("relu1")), EditorInput(0)); - EXPECT_EQ(input_edge_4.m_node_idx, 0); - EXPECT_EQ(input_edge_4.m_port_idx, 0); + auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.onnx"); - const auto input_edge_5 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(1)); - EXPECT_EQ(input_edge_5.m_node_idx, 1); - EXPECT_EQ(input_edge_5.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - const auto output_edge_3 = editor.find_output_edge("mul2"); - EXPECT_EQ(output_edge_3.m_node_idx, 3); - EXPECT_EQ(output_edge_3.m_port_idx, 0); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_use_edge_mapper_with_graph_cutter_custom_names) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); - const auto input_edge_1 = editor.find_input_edge(EditorNode{EditorOutput{"mul2"}}, EditorInput{1, "new_name_1"}); - const auto input_edge_2 = - editor.find_input_edge(EditorNode{EditorOutput{"split2"}}, EditorInput{"add2", "new_name_2"}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("in2")}, {}); - editor.extract_subgraph({input_edge_1, input_edge_2}, {}); + auto model = front_end->convert(input_model); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - 
TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__use_edge_mapper_with_graph_cutter_custom_names.onnx"}); + auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - std::vector output_consumers = editor.find_output_consumers("relu1"); - EXPECT_EQ(output_consumers.size(), 3); - EXPECT_EQ(output_consumers[0].m_node_idx, 1); - EXPECT_EQ(output_consumers[0].m_port_idx, 0); - EXPECT_EQ(output_consumers[1].m_node_idx, 3); - EXPECT_EQ(output_consumers[1].m_port_idx, 0); - EXPECT_EQ(output_consumers[2].m_node_idx, 6); - EXPECT_EQ(output_consumers[2].m_port_idx, 0); - - output_consumers = editor.find_output_consumers("add1"); - EXPECT_EQ(output_consumers.size(), 2); - EXPECT_EQ(output_consumers[0].m_node_idx, 3); - EXPECT_EQ(output_consumers[0].m_port_idx, 1); - EXPECT_EQ(output_consumers[1].m_node_idx, 4); - EXPECT_EQ(output_consumers[1].m_port_idx, 0); - - output_consumers = editor.find_output_consumers("in3"); - EXPECT_EQ(output_consumers.size(), 1); - EXPECT_EQ(output_consumers[0].m_node_idx, 2); - EXPECT_EQ(output_consumers[0].m_port_idx, 0); -} +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_2) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); -OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers_empty_result) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("in2"), input_model->get_place_by_tensor_name("in1")}, + {}); - const std::vector output_consumers = editor.find_output_consumers("not_existed"); - EXPECT_EQ(output_consumers.size(), 0); -} + auto model = front_end->convert(input_model); -OPENVINO_TEST(onnx_editor, editor_api_inputs_with_the_same_name) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})}; + auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.onnx"); - std::vector output_consumers = editor.find_output_consumers("X"); - EXPECT_EQ(output_consumers[0].m_node_idx, 1); - EXPECT_EQ(output_consumers[0].m_port_idx, 0); - EXPECT_EQ(output_consumers[1].m_node_idx, 1); - EXPECT_EQ(output_consumers[1].m_port_idx, 1); -} + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); -OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_3.onnx"})}; - const std::string output_name{"2891"}; - - std::vector 
output_consumers = editor.find_output_consumers(output_name); - EXPECT_EQ(output_consumers[0].m_node_idx, 3); - EXPECT_EQ(output_consumers[0].m_port_idx, 0); - EXPECT_EQ(output_consumers[0].m_new_input_name, output_name); - EXPECT_EQ(output_consumers[1].m_node_idx, 4); - EXPECT_EQ(output_consumers[1].m_port_idx, 0); - EXPECT_EQ(output_consumers[1].m_new_input_name, output_name); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_is_correct_and_unambiguous_node) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - bool is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"relu1"}}); - EXPECT_EQ(is_correct_node, true); - - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"mul2"}}); - EXPECT_EQ(is_correct_node, true); - - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"split2"}}); - EXPECT_EQ(is_correct_node, true); - - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"relu1_name"}); - EXPECT_EQ(is_correct_node, true); +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_relu2_and_init) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{2}); - EXPECT_EQ(is_correct_node, true); + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("in2"), + input_model->get_place_by_tensor_name("relu3")->get_consuming_operations()[0]->get_input_port(0)}, + {}); - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{99}); - EXPECT_EQ(is_correct_node, false); + auto model = front_end->convert(input_model); - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"in3"}}); - EXPECT_EQ(is_correct_node, false); + auto model_ref = + convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.onnx"); - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"add_ambiguous_name"}); - EXPECT_EQ(is_correct_node, false); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"not_exist"}); - EXPECT_EQ(is_correct_node, false); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_get_node_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__inputs_getter) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - EXPECT_EQ(editor.get_node_index(EditorNode{2}), 2); - EXPECT_EQ(editor.get_node_index(EditorNode{EditorOutput{"relu1"}}), 0); - EXPECT_EQ(editor.get_node_index(EditorNode{EditorOutput{"split2"}}), 5); - EXPECT_EQ(editor.get_node_index(EditorNode{"relu1_name"}), 0); + auto inputs = input_model->get_inputs(); + auto inputs_ref = std::vector{"data_0", "conv1/7x7_s2_w_0", "conv1/7x7_s2_b_0"}; - try { - 
editor.get_node_index(EditorNode{99}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos); + EXPECT_EQ(inputs.size(), inputs_ref.size()); + for (size_t idx = 0; idx < inputs_ref.size(); ++idx) { + EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]); } - try { - editor.get_node_index(EditorNode{"add_ambiguous_name"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, editor_api_input_edge_from_tensor_with_single_consumer) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})}; - - const auto edge = editor.find_input_edge(EditorNode{EditorOutput{"Y"}}, EditorInput{1}); - editor.extract_subgraph({edge}, {}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}, {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; + inputs = input_model->get_inputs(); + EXPECT_EQ(inputs.size(), 1); + EXPECT_EQ(inputs[0]->get_names()[0], "conv1/7x7_s2_1"); } -OPENVINO_TEST(onnx_editor, editor_api_input_edge_from_tensor_with_single_consumer_ambiguous) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})}; - +OPENVINO_TEST(onnx_editor, subgraph__custom_input_name_already_exist) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); try { - editor.find_input_edge(EditorNode{EditorOutput{"Y"}}, EditorInput{"X"}); + input_model->cut_and_add_new_input(input_model->get_place_by_operation_name("relu1"), "conv1/7x7_s2_b_0"); } catch (const std::exception& e) { std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with index: 1 has more than one inputs with name: X") != std::string::npos); + EXPECT_TRUE(msg.find("The name 'conv1/7x7_s2_b_0' is already used by another tensor.") != std::string::npos); } } OPENVINO_TEST(onnx_editor, values__append_one_initializer) { - onnx_editor::ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D.onnx"})}; - std::map> in_vals; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_1D.onnx", &front_end); - in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 2})); - editor.set_input_values(in_vals); + auto place = input_model->get_place_by_tensor_name("A"); + input_model->set_tensor_value(place, std::vector{1, 2}.data()); - const auto function = editor.get_function(); - auto test_case = ov::test::TestCase(function); + const auto model = front_end->convert(input_model); + auto test_case = ov::test::TestCase(model); test_case.add_input(Shape{2}, {5, 6}); test_case.add_expected_output(Shape{2}, {6, 8}); test_case.run(); } +/* +// Not applicable for InputModel OPENVINO_TEST(onnx_editor, values__append_two_initializers_to_invalid) { - onnx_editor::ONNXModelEditor 
editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D_invalid.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_1D_invalid.onnx", &front_end); std::map> in_vals; + // in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 2})); + // in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 3})); + // editor.set_input_values(in_vals); - in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 2})); - in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 3})); - editor.set_input_values(in_vals); + auto place = input_model->get_place_by_operation_name_and_input_port("add_node", 0); + input_model->set_tensor_value(place, std::vector{3, 2}.data()); - const auto function = editor.get_function(); - auto test_case = ov::test::TestCase(function); + place = input_model->get_place_by_operation_name_and_input_port("add_node", 1); + input_model->set_tensor_value(place, std::vector{1, 3}.data()); + + const auto model = front_end->convert(input_model); + auto test_case = ov::test::TestCase(model); test_case.add_expected_output(Shape{2}, {5, 5}); test_case.run(); } +*/ OPENVINO_TEST(onnx_editor, values__modify_one_initializer) { - onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_1D_with_initializers.onnx"})}; - std::map> in_vals; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_1D_with_initializers.onnx", &front_end); - in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 4})); - editor.set_input_values(in_vals); + auto place = input_model->get_place_by_tensor_name("B"); + input_model->set_tensor_value(place, std::vector{3, 4}.data()); - const auto function = editor.get_function(); - auto test_case = ov::test::TestCase(function); + const auto model = front_end->convert(input_model); + auto test_case = ov::test::TestCase(model); test_case.add_expected_output(Shape{2}, {4, 6}); test_case.run(); } OPENVINO_TEST(onnx_editor, values__modify_two_initializers) { - onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_1D_with_initializers.onnx"})}; - std::map> in_vals; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_1D_with_initializers.onnx", &front_end); - in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 6})); - in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 1})); - editor.set_input_values(in_vals); + auto place = input_model->get_place_by_tensor_name("A"); + input_model->set_tensor_value(place, std::vector{3, 6}.data()); - const auto function = editor.get_function(); - auto test_case = ov::test::TestCase(function); + place = input_model->get_place_by_tensor_name("B"); + input_model->set_tensor_value(place, std::vector{2, 1}.data()); + + const auto model = front_end->convert(input_model); + auto test_case = ov::test::TestCase(model); test_case.add_expected_output(Shape{2}, {5, 7}); test_case.run(); } +/* +// Not applicable for InputModel OPENVINO_TEST(onnx_editor, values__no_inputs_modify_two_initializers) { - onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_1D_with_initializers_only.onnx"})}; + 
 
 OPENVINO_TEST(onnx_editor, values__append_two_initializers_change_shape_type) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D.onnx", &front_end);
+
+    auto place = input_model->get_place_by_tensor_name("A");
+    input_model->set_element_type(place, element::i8);
+    input_model->set_partial_shape(place, Shape{2, 1});
+    input_model->set_tensor_value(place, std::vector<int8_t>{-1, 1}.data());
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i8, Shape{2, 1}, {-1, 1}));
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i8, Shape{2, 1}, {-2, 2}));
-    editor.set_input_values(in_vals);
+    place = input_model->get_place_by_tensor_name("B");
+    input_model->set_element_type(place, element::i8);
+    input_model->set_partial_shape(place, Shape{2, 1});
+    input_model->set_tensor_value(place, std::vector<int8_t>{-2, 2}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int8_t>(Shape{2, 1}, {-3, 3});
     test_case.run();
 }
 
 OPENVINO_TEST(onnx_editor, values__append_two_initializers_mixed_types) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "gather_elements_float_3D_axis_2.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
-
-    in_vals.emplace("data", ov::op::v0::Constant::create(element::i16, Shape{2, 2, 2}, {1, 2, 3, 4, 5, 6, 7, 8}));
-    in_vals.emplace("indices", ov::op::v0::Constant::create(element::i32, Shape{2, 2, 1}, {0, 1, 0, 1}));
-    editor.set_input_values(in_vals);
-
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("gather_elements_float_3D_axis_2.onnx", &front_end);
+    auto place = input_model->get_place_by_tensor_name("data");
+    input_model->set_element_type(place, element::i16);
+    input_model->set_partial_shape(place, Shape{2, 2, 2});
+    input_model->set_tensor_value(place, std::vector<int16_t>{1, 2, 3, 4, 5, 6, 7, 8}.data());
+
+    place = input_model->get_place_by_tensor_name("indices");
+    input_model->set_element_type(place, element::i32);
+    input_model->set_partial_shape(place, Shape{2, 2, 1});
+    input_model->set_tensor_value(place, std::vector<int32_t>{0, 1, 0, 1}.data());
+
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int16_t>(Shape{2,
2, 1}, {1, 4, 5, 8}); test_case.run(); } -OPENVINO_TEST(onnx_editor, read_model_from_stream) { - std::string path = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "external_data/external_data.onnx"}); - std::ifstream stream{path, std::ios::in | std::ios::binary}; - ASSERT_TRUE(stream.is_open()); - ONNXModelEditor editor{stream, path}; - - auto test_case = ov::test::TestCase(editor.get_function()); - test_case.add_input({1.f, 2.f, 3.f, 4.f}); - test_case.add_expected_output(Shape{2, 2}, {3.f, 6.f, 9.f, 12.f}); - - test_case.run(); - - stream.close(); -} - OPENVINO_TEST(onnx_editor, combined__cut_and_replace_shape) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); const auto new_shape = PartialShape({1, 64, 112, 112}); - editor.extract_subgraph({{InputEdge(1, 0)}}, {}); - editor.set_input_shapes({{"conv1/7x7_s2_1", new_shape}}); + auto place = input_model->get_place_by_tensor_name("conv1/7x7_s2_1"); + input_model->extract_subgraph({place}, {}); + input_model->set_partial_shape(place, new_shape); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_head_cut.onnx"}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; - const auto graph_inputs = editor.get_function()->get_parameters(); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "conv1/7x7_s2_1")->get_partial_shape().same_scheme(new_shape)); } OPENVINO_TEST(onnx_editor, cut_operator_with_no_schema) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/unknown_input_value_info.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/unknown_input_value_info.onnx", &front_end); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("X")}, {}); - editor.extract_subgraph({{InputEdge{1, 0}}}, {}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/reference/unknown_input_value_info.onnx"); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/unknown_input_value_info.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, get_source_tensor_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_source_tensor_name(InputEdge{0, 0}), "in1"); - EXPECT_EQ(editor.get_source_tensor_name(InputEdge{1, 0}), "relu1"); 
- EXPECT_EQ(editor.get_source_tensor_name(InputEdge{1, 1}), "in2"); - const auto edge1 = editor.find_input_edge(EditorOutput{"conv1"}, 1); - EXPECT_EQ(editor.get_source_tensor_name(edge1), "in4"); - const auto edge2 = editor.find_input_edge(EditorOutput{"split2"}, 0); - EXPECT_EQ(editor.get_source_tensor_name(edge2), "add2"); - EXPECT_EQ(editor.get_source_tensor_name(InputEdge{999, 999}), ""); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, is_model_input) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_TRUE(editor.is_input(InputEdge{0, 0})); - const auto edge1 = editor.find_input_edge(EditorOutput{"add1"}, 1); - EXPECT_TRUE(editor.is_input(edge1)); + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); - EXPECT_FALSE(editor.is_input(InputEdge{1, 2})); - EXPECT_FALSE(editor.is_input(InputEdge{3, 0})); - EXPECT_FALSE(editor.is_input(InputEdge{11, 0})); - const auto edge2 = editor.find_input_edge(EditorOutput{"conv1"}, 2); - EXPECT_FALSE(editor.is_input(edge2)); - EXPECT_FALSE(editor.is_input(InputEdge{2, 1})); // initializer is not treated as input - const auto edge3 = editor.find_input_edge(EditorOutput{"conv1"}, EditorInput{"in4"}); - EXPECT_FALSE(editor.is_input(edge3)); -} - -OPENVINO_TEST(onnx_editor, get_target_tensor_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{0, 0}), "relu1"); - EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{1, 0}), "add1"); - EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{4, 0}), "mul2"); - const auto edge1 = editor.find_output_edge("split1"); - EXPECT_EQ(editor.get_target_tensor_name(edge1), "split1"); - EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{999, 999}), ""); + EXPECT_TRUE(input_model->get_place_by_tensor_name("in2")->is_input()); + EXPECT_FALSE(input_model->get_place_by_tensor_name("conv1")->is_input()); } OPENVINO_TEST(onnx_editor, is_model_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); - EXPECT_TRUE(editor.is_output(OutputEdge{4, 0})); - EXPECT_TRUE(editor.is_output(OutputEdge{5, 1})); - const auto edge1 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{"split2"}); - EXPECT_TRUE(editor.is_output(edge1)); - - EXPECT_FALSE(editor.is_output(OutputEdge{4, 1})); - EXPECT_FALSE(editor.is_output(OutputEdge{0, 0})); - EXPECT_FALSE(editor.is_output(OutputEdge{11, 0})); - const auto edge2 = editor.find_output_edge("add2"); - EXPECT_FALSE(editor.is_output(edge2)); + EXPECT_TRUE(input_model->get_place_by_tensor_name("split2")->is_output()); + EXPECT_FALSE(input_model->get_place_by_tensor_name("add2")->is_output()); } OPENVINO_TEST(onnx_editor, model_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + + auto inputs = input_model->get_inputs(); + auto inputs_ref = std::vector{"in1", "in2", 
"in3"}; - const auto inputs = editor.model_inputs(); - EXPECT_TRUE(inputs == (std::vector{"in1", "in2", "in3"})); // in4 is initializer + EXPECT_EQ(inputs.size(), inputs_ref.size()); + for (size_t idx = 0; idx < inputs_ref.size(); ++idx) { + EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]); + } } OPENVINO_TEST(onnx_editor, model_inputs_with_non_input_initializers) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "instance_norm_dynamic.onnx"})}; + auto input_model = load_model("instance_norm_dynamic.onnx"); + auto inputs = input_model->get_inputs(); + auto inputs_ref = std::vector{"input"}; - const auto inputs = editor.model_inputs(); - EXPECT_TRUE(inputs == (std::vector{"input"})); + EXPECT_EQ(inputs.size(), inputs_ref.size()); + for (size_t idx = 0; idx < inputs_ref.size(); ++idx) { + EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]); + } } OPENVINO_TEST(onnx_editor, model_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + auto outputs = input_model->get_outputs(); + auto outputs_ref = std::vector{"mul1", "split2", "mul2"}; - const auto outputs = editor.model_outputs(); - EXPECT_TRUE(outputs == (std::vector{"mul1", "split2", "mul2"})); + EXPECT_EQ(outputs.size(), outputs_ref.size()); + for (size_t idx = 0; idx < outputs_ref.size(); ++idx) { + EXPECT_EQ(outputs[idx]->get_names()[0], outputs_ref[idx]); + } } OPENVINO_TEST(onnx_editor, get_tensor_shape) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_tensor_shape("mul2"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("in1"), (PartialShape{2, 2})); - EXPECT_EQ(editor.get_tensor_shape("in2"), (PartialShape{})); - EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("relu1"), (PartialShape{2, 2})); - EXPECT_EQ(editor.get_tensor_shape("add1"), (PartialShape{2, 2})); + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("mul2")), + (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in1")), (PartialShape{2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in2")), (PartialShape{})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("relu1")), (PartialShape{2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("add1")), (PartialShape{2, 2})); try { - editor.get_tensor_shape("not_existed"); + input_model->get_partial_shape(input_model->get_place_by_tensor_name("not_existed")); } catch (const std::exception& e) { std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The tensor: not_existed was not found in the graph") != std::string::npos); + EXPECT_TRUE(msg.find("expects a pointer") != std::string::npos); } + EXPECT_THROW(input_model->get_partial_shape(nullptr), ov::Exception); } OPENVINO_TEST(onnx_editor, get_tensor_shape_after_modification) { - ONNXModelEditor 
editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("conv1"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("mul2"), (PartialShape{1, 1, 2, 2})); - editor.set_input_shapes({{"in3", (PartialShape{1, 1, 4, 4})}}); - EXPECT_EQ(editor.get_tensor_shape("conv1"), (PartialShape{1, 1, 4, 4})); - EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 4, 4})); + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("conv1")), + (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("mul2")), + (PartialShape{1, 1, 2, 2})); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("in3"), PartialShape{1, 1, 4, 4}); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("conv1")), + (PartialShape{1, 1, 4, 4})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 4, 4})); } OPENVINO_TEST(onnx_editor, is_correct_tensor_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_TRUE(editor.is_correct_tensor_name("in1")); - EXPECT_TRUE(editor.is_correct_tensor_name("relu1")); - EXPECT_TRUE(editor.is_correct_tensor_name("split2")); - EXPECT_TRUE(editor.is_correct_tensor_name("mul2")); - EXPECT_TRUE(editor.is_correct_tensor_name("in4")); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); + EXPECT_TRUE(input_model->get_place_by_tensor_name("in1")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("relu1")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("split2")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("mul2")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("in4")); + EXPECT_FALSE(input_model->get_place_by_operation_name("add_ambiguous_name")); + EXPECT_FALSE(input_model->get_place_by_operation_name("")); - EXPECT_FALSE(editor.is_correct_tensor_name("relu1_name")); - EXPECT_FALSE(editor.is_correct_tensor_name("not_existed")); - EXPECT_FALSE(editor.is_correct_tensor_name("")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("relu1_name")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("not_existed")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("")); } OPENVINO_TEST(onnx_editor, get_input_ports) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const auto ports_1 = editor.get_input_ports(EditorNode{"relu1_name"}); - EXPECT_EQ(ports_1.size(), 1); - EXPECT_EQ(ports_1[0], "in1"); - const auto ports_2 = editor.get_input_ports(EditorNode{"split_name"}); - EXPECT_EQ(ports_2.size(), 1); - EXPECT_EQ(ports_2[0], "add2"); - const auto ports_3 = editor.get_input_ports(EditorNode{EditorOutput{"add2"}}); - EXPECT_EQ(ports_3.size(), 2); - EXPECT_EQ(ports_3[0], "relu1"); - EXPECT_EQ(ports_3[1], "add1"); - try { - 
editor.get_input_ports(EditorNode{"add_ambiguous_name"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } - try { - editor.get_input_ports(EditorNode{""}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find("The node with name: not_given, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + const auto ports_1 = input_model->get_place_by_operation_name("relu1_name"); + EXPECT_EQ(ports_1->get_input_port()->get_source_tensor()->get_names()[0], "in1"); + EXPECT_FALSE(ports_1->get_input_port(1)); + const auto ports_2 = input_model->get_place_by_operation_name("split_name"); + EXPECT_EQ(ports_2->get_input_port(0)->get_source_tensor()->get_names()[0], "add2"); + EXPECT_FALSE(ports_2->get_input_port(1)); + const auto ports_3 = input_model->get_place_by_tensor_name("add2")->get_producing_operation(); + EXPECT_EQ(ports_3->get_input_port(0)->get_source_tensor()->get_names()[0], "relu1"); + EXPECT_EQ(ports_3->get_input_port(1)->get_source_tensor()->get_names()[0], "add1"); + EXPECT_FALSE(ports_3->get_input_port(2)); } + OPENVINO_TEST(onnx_editor, get_output_ports) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const auto ports_1 = editor.get_output_ports(EditorNode{"relu1_name"}); - EXPECT_EQ(ports_1.size(), 1); - EXPECT_EQ(ports_1[0], "relu1"); - const auto ports_2 = editor.get_output_ports(EditorNode{"split_name"}); - EXPECT_EQ(ports_2.size(), 2); - EXPECT_EQ(ports_2[0], "split1"); - EXPECT_EQ(ports_2[1], "split2"); - const auto ports_3 = editor.get_output_ports(EditorNode{EditorOutput{"add2"}}); - EXPECT_EQ(ports_3.size(), 1); - EXPECT_EQ(ports_3[0], "add2"); - try { - editor.get_output_ports(EditorNode{"add_ambiguous_name"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } - try { - editor.get_output_ports(EditorNode{""}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find("The node with name: not_given, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + const auto ports_1 = input_model->get_place_by_operation_name("relu1_name"); + EXPECT_EQ(ports_1->get_output_port(0)->get_target_tensor()->get_names()[0], "relu1"); + EXPECT_FALSE(ports_1->get_output_port(1)); + const auto ports_2 = input_model->get_place_by_operation_name("split_name"); + EXPECT_EQ(ports_2->get_output_port(0)->get_target_tensor()->get_names()[0], "split1"); + EXPECT_EQ(ports_2->get_output_port(1)->get_target_tensor()->get_names()[0], "split2"); + EXPECT_FALSE(ports_2->get_output_port(2)); + const auto ports_3 = input_model->get_place_by_tensor_name("add2")->get_producing_operation(); + EXPECT_EQ(ports_3->get_output_port()->get_target_tensor()->get_names()[0], "add2"); + EXPECT_FALSE(ports_3->get_output_port(1)); } OPENVINO_TEST(onnx_editor, add_output) { - ONNXModelEditor editor{ov::util::path_join( - 
{ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; - - editor.add_output({OutputEdge{0, 0}}); - - const auto edge1 = editor.find_output_edge(EditorNode{"add_node1"}, EditorOutput{"X"}); - EXPECT_TRUE(editor.is_output(edge1)); -} + auto input_model = load_model("model_editor/add_abc.onnx"); -OPENVINO_TEST(onnx_editor, get_tensor_element_type) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_input_type("in1"), (element::f32)); - EXPECT_EQ(editor.get_input_type("in2"), (element::f32)); - editor.set_input_types({{"in3", (element::f16)}}); - EXPECT_EQ(editor.get_input_type("in3"), (element::f16)); -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_one_edge_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{1, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1}}, {}, true); - - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.onnx"}); - - auto result = compare_onnx_models(editor.model_string(), ref_model); - - // InputEdge{5, "add2"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("split1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_2}}, {}, true); - - const auto ref_model1 = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.onnx"}); + input_model->add_output(input_model->get_place_by_operation_name("add_node1")->get_target_tensor()); - result = compare_onnx_models(editor.model_string(), ref_model1); + EXPECT_EQ(input_model->get_outputs().size(), 2); - EXPECT_TRUE(result.is_ok) << result.error_message; + EXPECT_THROW(input_model->add_output(nullptr), ov::Exception); } -OPENVINO_TEST(onnx_editor, subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "add1"} - const auto input_edge_1 = 
editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(1)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - // InputEdge{1, "relu1"} - const auto input_edge_3 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2, input_edge_3}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{4, "add1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul2")), EditorInput(0)); - // InputEdge{3, "add1"} - const auto input_edge_3 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(1)); - - editor.extract_subgraph({{input_edge_2, input_edge_1, input_edge_3}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; +OPENVINO_TEST(onnx_editor, get_tensor_element_type) { + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in1")), element::f32); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in2")), element::f32); + input_model->set_element_type(input_model->get_place_by_tensor_name("in3"), element::f16); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in3")), element::f16); + EXPECT_THROW(input_model->get_element_type(nullptr), ov::Exception); } OPENVINO_TEST(onnx_editor, subgraph__duplicated_output) { - ONNXModelEditor 
editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"})}; - - const auto y_out_edge = editor.find_output_edge("Y"); - editor.extract_subgraph({}, {{y_out_edge}}); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab_duplicated_output.onnx", &front_end); + const auto y_out = input_model->get_place_by_tensor_name("Y"); + EXPECT_TRUE(y_out); + input_model->extract_subgraph({}, {y_out}); - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/add_ab.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__duplicated_output_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab_duplicated_output.onnx", &front_end); + const auto y_out_1 = input_model->get_place_by_tensor_name("Y"); + const auto y_out_2 = input_model->get_place_by_tensor_name("Y"); + EXPECT_TRUE(y_out_1); + EXPECT_TRUE(y_out_2); + input_model->extract_subgraph({}, {y_out_1, y_out_2}); - const auto y_out_edge_1 = editor.find_output_edge("Y"); - const auto y_out_edge_2 = editor.find_output_edge("Y"); - editor.extract_subgraph({}, {{y_out_edge_1, y_out_edge_2}}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/add_ab_duplicated_output.onnx"); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - // Model not changed - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, onnx_shape_infer_exception) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/onnx_shape_infer_exception.onnx"})}; - - const auto input_edge = editor.find_input_edge(EditorNode(EditorOutput("input_ReduceMin")), EditorInput(0)); + auto input_model = load_model("model_editor/onnx_shape_infer_exception.onnx"); - EXPECT_NO_THROW(editor.extract_subgraph({{input_edge}}, {})); + EXPECT_NO_THROW(input_model->extract_subgraph({input_model->get_place_by_operation_name("input_ReduceMin")}, {})); } diff --git a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp index 7b8a36ad209f1c..e9ea726415622a 100644 --- a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp +++ b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp @@ 
-13,58 +13,55 @@
 #include "onnx_utils.hpp"
 
 using namespace ov;
-using namespace ov::onnx_editor;
+using namespace ov::frontend;
 using namespace ov::frontend::onnx::tests;
 
 static std::string s_manifest = onnx_backend_manifest("${MANIFEST}");
 
 OPENVINO_TEST(onnx_editor, topological_sort_two_nodes_swap) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/topological_sort/two_nodes_swap.onnx"})};
-    ASSERT_NO_THROW(editor.get_function());
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/topological_sort/two_nodes_swap.onnx", &front_end);
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 
 OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/topological_sort/completely_unsorted.onnx"})};
-    ASSERT_NO_THROW(editor.get_function());
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/topological_sort/completely_unsorted.onnx", &front_end);
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 
 OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/topological_sort/completely_unsorted_2.onnx"})};
-    ASSERT_NO_THROW(editor.get_function());
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/topological_sort/completely_unsorted_2.onnx", &front_end);
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 
 #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
 OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2_wstring) {
-    ONNXModelEditor editor{
-        ov::util::string_to_wstring(ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                         TEST_ONNX_MODELS_DIRNAME,
-                                                         "model_editor/topological_sort/completely_unsorted_2.onnx"}))};
-    ASSERT_NO_THROW(editor.get_function());
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model(L"model_editor/topological_sort/completely_unsorted_2.onnx", &front_end);
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 #endif
 
 OPENVINO_TEST(onnx_editor, topological_sort_constant_node_in_the_graph) {
     const std::string rel_path_to_model = "model_editor/topological_sort/add_abc_const_node_unsorted.onnx";
-    ONNXModelEditor editor{
-        ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, rel_path_to_model})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model(rel_path_to_model, &front_end);
 
-    ASSERT_NO_THROW(editor.get_function());
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 
 OPENVINO_TEST(onnx_editor, topological_sort_multioutput_node) {
     const std::string rel_path_to_model = "model_editor/topological_sort/multioutput_split_unsorted.onnx";
-    ONNXModelEditor editor{
-        ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, rel_path_to_model})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model(rel_path_to_model, &front_end);
 
-    ASSERT_NO_THROW(editor.get_function());
+    ASSERT_NO_THROW(front_end->convert(input_model));
 }
 
+/*
+// No suitable functionality yet
 OPENVINO_TEST(onnx_editor, topological_sort_graph_not_changed_if_the_same_name_of_unsorted_node_and_initializer) {
     const std::string rel_path_to_model =
"model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.onnx"; @@ -91,3 +88,4 @@ OPENVINO_TEST(onnx_editor, topological_sort_graph_not_changed_if_empty_input_nam const auto result = compare_onnx_models(editor.model_string(), ref_model); EXPECT_TRUE(result.is_ok) << result.error_message; } +*/ \ No newline at end of file diff --git a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp index d0b9cb86391d43..a7e204414253d2 100644 --- a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp @@ -24,7 +24,7 @@ using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); static std::string s_device = backend_name_to_device("${BACKEND_NAME}"); - +/* // ############################################################################ CORE TESTS OPENVINO_TEST(${BACKEND_NAME}, onnx_compress_axis_0) { ov::onnx_editor::ONNXModelEditor editor{ @@ -140,3 +140,4 @@ REGISTER_TYPED_TEST_SUITE_P(ElemTypesTests, onnx_test_split_multioutput_set_precission); typedef ::testing::Types ElemTypes; INSTANTIATE_TYPED_TEST_SUITE_P(${BACKEND_NAME}, ElemTypesTests, ElemTypes); +*/ diff --git a/src/frontends/onnx/tests/onnx_ops_registration.cpp b/src/frontends/onnx/tests/onnx_ops_registration.cpp index 9424fead9882a4..d4bca0639ecb73 100644 --- a/src/frontends/onnx/tests/onnx_ops_registration.cpp +++ b/src/frontends/onnx/tests/onnx_ops_registration.cpp @@ -20,7 +20,7 @@ using namespace ov::onnx_editor; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); - +/* OPENVINO_TEST(ops_registration, check_importing_abs_in_all_opset_versions) { ONNXModelEditor editor{ ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "abs.onnx"})}; @@ -51,3 +51,4 @@ OPENVINO_TEST(ops_registration, check_importing_add_in_different_opsets) { } } } +*/ diff --git a/src/frontends/onnx/tests/onnx_transformations.cpp b/src/frontends/onnx/tests/onnx_transformations.cpp index 8f3d2117b5aaa9..c3edd053d5d836 100644 --- a/src/frontends/onnx/tests/onnx_transformations.cpp +++ b/src/frontends/onnx/tests/onnx_transformations.cpp @@ -3,6 +3,7 @@ // #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_control.hpp" #include "editor.hpp" #include "gtest/gtest.h" @@ -20,6 +21,8 @@ namespace { // As a result, the names are different during each tests execution. // It requires custom way of input/output names comparison. 
// https://github.com/onnx/onnx/blob/767f752829f83dbc9bd0a364d6138890f667fc38/onnx/defs/function.cc#L23 +/* +// Could be used later bool after_func_expand_name_comp(std::string lhs, std::string rhs) { // it is equivalent (simplified) to (0x)?[0-9A-Fa-f]{8,} regex, but GCC 4.8 has limited support auto cut_hex_address = [](std::string& name) { @@ -58,37 +61,32 @@ bool after_func_expand_name_comp(std::string lhs, std::string rhs) { }; return cut_hex_address(lhs) == cut_hex_address(rhs); } +*/ } // namespace OPENVINO_TEST(onnx_transformations, expand_function_greater_or_equal) { - ONNXModelEditor editor{util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/greater_or_equal.onnx"})}; - editor.decode(); // onnx transformations are applied + auto model = convert_model("transformations/greater_or_equal.onnx"); + auto model_ref = convert_model("transformations/reference/greater_or_equal_expanded.onnx"); - const auto ref_model = util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/reference/" - "greater_or_equal_expanded.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.disable(FunctionsComparator::TENSOR_NAMES); - const auto result = compare_onnx_models(editor.model_string(), ref_model, after_func_expand_name_comp); + const FunctionsComparator::Result res = func_comparator(model, model_ref); // After operation translation was implemented - check it doesn't apply - EXPECT_FALSE(result.is_ok) << result.error_message; + ASSERT_TRUE(!res.valid) << res.message; } // Disabled, ticket: #81976 +/* OPENVINO_TEST(onnx_transformations, DISABLED_expand_function_softmax_crossentropy) { - ONNXModelEditor editor{util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/softmax_crossentropy_consumed.onnx"})}; - editor.decode(); // onnx transformations are applied + auto model = convert_model("transformations/softmax_crossentropy_consumed.onnx"); + auto model_ref = convert_model("transformations/reference/softmax_crossentropy_consumed_expanded.onnx"); - const auto ref_model = util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/reference/" - "softmax_crossentropy_consumed_expanded.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.disable(FunctionsComparator::TENSOR_NAMES); - const auto result = compare_onnx_models(editor.model_string(), ref_model, after_func_expand_name_comp); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } +*/ diff --git a/src/frontends/onnx/tests/onnx_utils.cpp b/src/frontends/onnx/tests/onnx_utils.cpp index 9795e1f4e09cc9..3b401578ab0793 100644 --- a/src/frontends/onnx/tests/onnx_utils.cpp +++ b/src/frontends/onnx/tests/onnx_utils.cpp @@ -23,13 +23,32 @@ namespace tests { const std::string ONNX_FE = ::ONNX_FE; -shared_ptr convert_model(const string& model_path, const ov::frontend::ConversionExtensionBase::Ptr& conv_ext) { - auto fem = FrontEndManager(); - FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); +static FrontEnd::Ptr get_onnx_frontend(bool default_front_end = true) { + static FrontEnd::Ptr _front_end = nullptr; + + FrontEnd::Ptr front_end = nullptr; + + if (default_front_end) { + if (_front_end == nullptr) { + auto fem = FrontEndManager(); + 
_front_end = fem.load_by_framework(ONNX_FE);
+        }
+        front_end = _front_end;
+    } else {
+        auto fem = FrontEndManager();
+        front_end = fem.load_by_framework(ONNX_FE);
+    }
+
+    if (!front_end) {
         throw "ONNX FrontEnd is not initialized";
     }
+    return front_end;
+}
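+
+// Descriptive note (added): all helpers below obtain their FrontEnd through
+// get_onnx_frontend(); the cached default instance above assumes the test binary
+// loads models from a single thread.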
+
+shared_ptr<Model> convert_model(const string& model_path, const ov::frontend::ConversionExtensionBase::Ptr& conv_ext) {
+    auto front_end = get_onnx_frontend(conv_ext == nullptr);
+
     if (conv_ext) {
         front_end->add_extension(conv_ext);
     }
@@ -49,11 +68,7 @@ shared_ptr<Model> convert_model(const string& model_path, const ov::frontend::Co
 }
 
 shared_ptr<Model> convert_model(ifstream& model_stream) {
-    auto fem = FrontEndManager();
-    FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE);
-    if (!front_end) {
-        throw "ONNX FrontEnd is not initialized";
-    }
+    auto front_end = get_onnx_frontend();
 
     InputModel::Ptr input_model = front_end->load(dynamic_cast<istream*>(&model_stream));
     if (!input_model) {
@@ -69,11 +84,7 @@ shared_ptr<Model> convert_model(ifstream& model_stream) {
 }
 
 shared_ptr<Model> convert_partially(const string& model_path) {
-    auto fem = FrontEndManager();
-    FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE);
-    if (!front_end) {
-        throw "ONNX FrontEnd is not initialized";
-    }
+    auto front_end = get_onnx_frontend();
 
     auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path);
     InputModel::Ptr input_model = front_end->load(full_path);
@@ -89,6 +100,39 @@ shared_ptr<Model> convert_partially(const string& model_path) {
     return model;
 }
 
+InputModel::Ptr load_model(const string& model_path, FrontEnd::Ptr* return_front_end) {
+    auto front_end = get_onnx_frontend();
+
+    auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path);
+    InputModel::Ptr input_model = front_end->load(full_path);
+    if (!input_model) {
+        throw "Input Model is not loaded";
+    }
+
+    if (return_front_end != nullptr) {
+        *return_front_end = front_end;
+    }
+
+    return input_model;
+}
+
+InputModel::Ptr load_model(const wstring& model_path, FrontEnd::Ptr* return_front_end) {
+    auto front_end = get_onnx_frontend();
+
+    auto full_path =
+        FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + ov::util::wstring_to_string(model_path));
+    InputModel::Ptr input_model = front_end->load(ov::util::string_to_wstring(full_path));
+    if (!input_model) {
+        throw "Input Model is not loaded";
+    }
+
+    if (return_front_end != nullptr) {
+        *return_front_end = front_end;
+    }
+
+    return input_model;
+}
+
 std::string onnx_backend_manifest(const std::string& manifest) {
     return ov::util::path_join({ov::test::utils::getExecutableDirectory(), manifest});
 }
@@ -96,4 +140,4 @@
 } // namespace tests
 } // namespace onnx
 } // namespace frontend
-} // namespace ov
\ No newline at end of file
+} // namespace ov
diff --git a/src/frontends/onnx/tests/onnx_utils.hpp b/src/frontends/onnx/tests/onnx_utils.hpp
index eea1e10475a764..6a263c72546d02 100644
--- a/src/frontends/onnx/tests/onnx_utils.hpp
+++ b/src/frontends/onnx/tests/onnx_utils.hpp
@@ -6,11 +6,12 @@
 
 #include 
 
-#include 
-#include 
 #include 
 
 #include "common_test_utils/test_constants.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/frontend/extension.hpp"
+#include "openvino/frontend/manager.hpp"
 
 // Resolves different backend names to an internal device enumeration
 inline std::string backend_name_to_device(const std::string& backend_name) {
@@ -38,7 +39,10 @@
 std::shared_ptr<ov::Model> convert_model(std::ifstream& model_stream);
 // A wrapper to create ONNX Frontend and configure the conversion pipeline to get
 // a model with possible Framework Nodes
 std::shared_ptr<ov::Model> convert_partially(const std::string& model_path);
-
+// Returns a loaded InputModel that can be customized before conversion.
+// If return_front_end is not null, it receives the FrontEnd used to load the model.
+InputModel::Ptr load_model(const std::string& model_path, ov::frontend::FrontEnd::Ptr* return_front_end = nullptr);
+InputModel::Ptr load_model(const std::wstring& model_path, ov::frontend::FrontEnd::Ptr* return_front_end = nullptr);
 // Returns path to a manifest file
 std::string onnx_backend_manifest(const std::string& manifest);
 } // namespace tests

From eac1b29ef1f80742171027bfbb647e63f8e844df Mon Sep 17 00:00:00 2001
From: Zhang Yi
Date: Fri, 19 Jan 2024 17:32:10 +0800
Subject: [PATCH 088/122] [CPU][MLAS]Fix mlas threading with odd threads
 (#22205)

---
 .../intel_cpu/tests/unit/gemm_api_test.cpp | 27 ++++++++++++++++++-
 src/plugins/intel_cpu/thirdparty/mlas      |  2 +-
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp
index a70d37a80370f9..510b6745bb09eb 100644
--- a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp
@@ -5,10 +5,35 @@
 #include 
 #include 
 #include "mlas/sgemm.hpp"
+#include "onednn/dnnl.h"
+#include "cpu_memory.h"
+#include "openvino/core/parallel.hpp"
+#include "openvino/runtime/aligned_buffer.hpp"
 
 // This test is used to test whether mlas gemm lib compiles successfully
-TEST(GemmTests, getPackedSize) {
+TEST(MLASGemmTests, getPackedSize) {
     int N = 51864;
     int K = 384;
     ASSERT_NO_THROW(ov::intel_cpu::mlas_sgemm_pack_get_size(N, K));
+}
+// Test mlas thread partition with even/odd threads
+TEST(MLASGemmTests, simpleGemm) {
+    const auto L2cacheSize = dnnl::utils::get_cache_size(2, true);
+    size_t M = 128;
+    size_t K = 512;
+    size_t N = L2cacheSize / sizeof(float) / (M);
+    std::vector<float> aData(M * K, (1.0f/33));
+    size_t bSize = ov::intel_cpu::mlas_sgemm_pack_get_size(N, K);
+    size_t nthr = parallel_get_max_threads();
+    auto alignedB = ov::AlignedBuffer(bSize, 64);
+    float* bData = reinterpret_cast<float*>(alignedB.get_ptr());
+    std::vector<float> cData(M * N, 0.0f);
+
+    ASSERT_NO_THROW(
+        ov::intel_cpu::
+            mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr));
+
+    ASSERT_NO_THROW(
+        ov::intel_cpu::
+            mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr - 1));
 }
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/thirdparty/mlas b/src/plugins/intel_cpu/thirdparty/mlas
index 7a35e48a723944..d1bc25ec4660cd 160000
--- a/src/plugins/intel_cpu/thirdparty/mlas
+++ b/src/plugins/intel_cpu/thirdparty/mlas
@@ -1 +1 @@
-Subproject commit 7a35e48a723944972088627be1a8b60841e8f6a5
+Subproject commit d1bc25ec4660cddd87804fcf03b2411b5dfb2e94

From fa8e2c8111bbace8f57b9f66446735011be5831e Mon Sep 17 00:00:00 2001
From: Maksim Kutakov
Date: Fri, 19 Jan 2024 11:12:31 +0100
Subject: [PATCH 089/122] [CPU] Fix the shape processing of nodes fused to
 MatMul (#21823)

* Consider fused nodes shapes in the matmul dummy shapes

* SL test

* Modify test

* Minor changes in error handling
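
Illustrative example of the mergeShapes() semantics introduced below (a sketch,
not part of the original message): merging a MatMul output shape {?, ?, 768} with
a fused op's shape {1..8, ?, 768} tightens the result to {1..8, ?, 768}, while
non-overlapping intervals such as {2..3} vs {5..7} make the merge throw.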
---
 src/plugins/intel_cpu/src/cpu_shape.cpp    | 23 +++++++++++
 src/plugins/intel_cpu/src/cpu_shape.h      | 12 ++++++
 src/plugins/intel_cpu/src/nodes/matmul.cpp | 39 +++++++++++++-----
 src/plugins/intel_cpu/src/nodes/matmul.h   |  3 +-
 .../instances/x64/matmul.cpp               | 40 +++++++++++++++++++
 5 files changed, 106 insertions(+), 11 deletions(-)

diff --git a/src/plugins/intel_cpu/src/cpu_shape.cpp b/src/plugins/intel_cpu/src/cpu_shape.cpp
index 3c87d6668d84d2..b130902db00c87 100644
--- a/src/plugins/intel_cpu/src/cpu_shape.cpp
+++ b/src/plugins/intel_cpu/src/cpu_shape.cpp
@@ -49,5 +49,28 @@ std::string Shape::toString() const {
     return output.str();
 }
 
+Shape mergeShapes(const Shape& lhs, const Shape& rhs) {
+    OPENVINO_ASSERT(lhs.getRank() == rhs.getRank(),
+                    "Couldn't merge shapes of different ranks: shape 1:",
+                    lhs.toString(),
+                    " shape 2: ",
+                    rhs.toString());
+
+    const auto& lhsMinDims = lhs.getMinDims();
+    const auto& lhsMaxDims = lhs.getMaxDims();
+    const auto& rhsMinDims = rhs.getMinDims();
+    const auto& rhsMaxDims = rhs.getMaxDims();
+
+    VectorDims resultMinDims(lhsMinDims.size());
+    VectorDims resultMaxDims(lhsMaxDims.size());
+
+    for (size_t i = 0; i < resultMinDims.size(); ++i) {
+        resultMinDims[i] = std::max(lhsMinDims[i], rhsMinDims[i]);
+        resultMaxDims[i] = std::min(lhsMaxDims[i], rhsMaxDims[i]);
+        OPENVINO_ASSERT(resultMinDims[i] <= resultMaxDims[i], "Couldn't merge shapes as the dims intervals are not overlapping.");
+    }
+    return Shape{resultMinDims, resultMaxDims};
+}
+
 } // namespace intel_cpu
 } // namespace ov
diff --git a/src/plugins/intel_cpu/src/cpu_shape.h b/src/plugins/intel_cpu/src/cpu_shape.h
index 1c5b48f7c458c6..7623aa12757663 100644
--- a/src/plugins/intel_cpu/src/cpu_shape.h
+++ b/src/plugins/intel_cpu/src/cpu_shape.h
@@ -218,5 +218,17 @@ class Shape {
     VectorDims dims;
 };
 
+/**
+ * @brief Merges two shapes by intersecting their dims intervals.
+ * @note If the dims intervals do not overlap, an exception is thrown.
+ * @param lhs
+ * first shape
+ * @param rhs
+ * second shape
+ * @return resulting shape
+ */
+
+Shape mergeShapes(const Shape& lhs, const Shape& rhs);
+
 } // namespace intel_cpu
 } // namespace ov
diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp
index 7061bf60cb8218..9de62ae2ada01c 100644
--- a/src/plugins/intel_cpu/src/nodes/matmul.cpp
+++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp
@@ -207,7 +207,11 @@ Node::AttrPtr MatMul::initPrimitiveAttr(const VectorDims &dims) {
 }
 
 Node::AttrPtr MatMul::initPrimitiveAttr() {
-    auto dummyShape = MemoryDescUtils::makeDummyShape(getOutputShapeAtPort(0));
+    auto outputShape = getOutputShapeAtPort(0);
+    for (auto&& node : fusedWith) {
+        outputShape = mergeShapes(outputShape, node->getOutputShapeAtPort(0));
+    }
+    auto dummyShape = MemoryDescUtils::makeDummyShape(outputShape);
     return initPrimitiveAttr(dummyShape.getStaticDims());
 }
 
@@ -293,7 +297,7 @@ void MatMul::getSupportedDescriptors() {
 
     const auto& inputShape0 = getInputShapeAtPort(0);
     const auto& inputShape1 = getInputShapeAtPort(1);
-    const auto& outputShape = getOutputShapeAtPort(0);
+    auto outputShape = getOutputShapeAtPort(0);
 
     if (inputShape0.getRank() != inputShape1.getRank() || inputShape0.getRank() != outputShape.getRank())
         OPENVINO_THROW(errorPrefix, " has invalid dims count");
@@ -325,9 +329,14 @@ void MatMul::getSupportedDescriptors() {
         }
     }
 
+    for (auto&& node : fusedWith) {
+        outputShape = mergeShapes(outputShape, node->getOutputShapeAtPort(0));
+    }
+
     std::vector<Shape> staticInputShapes{inputShape0, inputShape1};
     if (inputShape0.isDynamic() || inputShape1.isDynamic()) {
-        std::tie(staticInputShapes[0], staticInputShapes[1]) = makeDummyInputShapes(inputShape0, inputShape1);
+        std::tie(staticInputShapes[0], staticInputShapes[1]) =
+            makeDummyInputShapes(inputShape0, inputShape1, outputShape);
     }
 
     auto staticOutputShape = outputShape.isStatic() ? outputShape : Shape(shapeInferGeneric(staticInputShapes).front());
@@ -342,14 +351,13 @@ void MatMul::getSupportedDescriptors() {
     createDescriptor({inDataDesc[0], inDataDesc[1]}, {outDataDesc});
 }
 
-std::pair<Shape, Shape> MatMul::makeDummyInputShapes(const Shape& in0, const Shape& in1) const {
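+// Descriptive note (added): any dimension already defined in `out` (possibly
+// narrowed by mergeShapes() against the fused ops' shapes) pins the corresponding
+// dummy M/N/batch value instead of the DEFAULT_DUMMY_VAL fallback below.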
+std::pair<Shape, Shape>
+MatMul::makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const {
     if (in0.getRank() < 2 || in1.getRank() < 2) {
         OPENVINO_THROW("Can't create dummy inputs with rank less 2");
     }
 
-    if (in0.getRank() != in1.getRank()) {
-        OPENVINO_THROW("Can't create dummy inputs if input's rank not equal");
-    }
+    OPENVINO_ASSERT((in0.getRank() == in1.getRank()) && (in1.getRank() == out.getRank()),
+                    "Can't create dummy inputs if argument shapes ranks are not equal");
 
     auto swapTranspDims = [&](VectorDims& in0, VectorDims& in1) {
         if (transposeIn[0]) {
@@ -362,6 +370,7 @@ std::pair<Shape, Shape> MatMul::makeDummyInputShapes(const Shape& in0, const Sha
 
     auto inDims0 = in0.getDims();
     auto inDims1 = in1.getDims();
+    auto outDims = out.getDims();
 
     auto minDims0 = in0.getMinDims();
     auto maxDims0 = in0.getMaxDims();
@@ -397,18 +406,28 @@ std::pair<Shape, Shape> MatMul::makeDummyInputShapes(const Shape& in0, const Sha
     fillDummy(inDims0.size() - 1, inDims1.size() - 2);
 
     // fill m, n
-    if (inDims0[inDims0.size() - 2] == Shape::UNDEFINED_DIM) {
+    if (outDims[outDims.size() - 2] != Shape::UNDEFINED_DIM) {
+        inDims0[inDims0.size() - 2] = outDims[outDims.size() - 2];
+    } else if (inDims0[inDims0.size() - 2] == Shape::UNDEFINED_DIM) {
         inDims0[inDims0.size() - 2] = std::min(maxDims0[inDims0.size() - 2],
                                                std::max(minDims0[inDims0.size() - 2], static_cast<Dim>(MemoryDescUtils::DEFAULT_DUMMY_VAL)));
     }
-    if (inDims1[inDims1.size() - 1] == Shape::UNDEFINED_DIM) {
+
+    if (outDims[outDims.size() - 1] != Shape::UNDEFINED_DIM) {
+        inDims1[inDims1.size() - 1] = outDims[outDims.size() - 1];
+    } else if (inDims1[inDims1.size() - 1] == Shape::UNDEFINED_DIM) {
         inDims1[inDims1.size() - 1] = std::min(maxDims1[inDims1.size() - 1],
                                                std::max(minDims1[inDims1.size() - 1], static_cast<Dim>(MemoryDescUtils::DEFAULT_DUMMY_VAL)));
     }
 
     // fill batches
     for (size_t i = 0; i < inDims0.size() - 2; i++) {
-        fillDummy(i, i);
+        if (outDims[i] != Shape::UNDEFINED_DIM) {
+            inDims0[i] = outDims[i];
+            inDims1[i] = outDims[i];
+        } else {
+            fillDummy(i, i);
+        }
     }
 
     swapTranspDims(inDims0, inDims1);
diff --git a/src/plugins/intel_cpu/src/nodes/matmul.h b/src/plugins/intel_cpu/src/nodes/matmul.h
index 697de0a4f4fcdc..8b1eec8797b40c 100644
--- a/src/plugins/intel_cpu/src/nodes/matmul.h
+++ b/src/plugins/intel_cpu/src/nodes/matmul.h
@@ -51,7 +51,8 @@ class MatMul : public Node {
     using executorPtr = std::shared_ptr<DnnlExecutor>;
     executorPtr execPtr = nullptr;
     dnnl::memory::desc getBiasDescFrom(const DnnlMemoryDescCPtr outMemDesc);
-    std::pair<Shape, Shape> makeDummyInputShapes(const Shape& in0, const Shape& in1) const;
+    std::pair<Shape, Shape>
+    makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const;
 
     bool withBiases;
 
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
index 6850cd585ae1ab..878176c900e101 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
@@ -795,6 +795,46 @@ const auto testParams3D_nightly =
     ::testing::Combine(fullyConnectedParams3D_nightly,
                        ::testing::ValuesIn(filterCPUInfo(filterSpecificParams())));
 
 INSTANTIATE_TEST_SUITE_P(nightly_FC_3D, MatMulLayerCPUTest, testParams3D_nightly, MatMulLayerCPUTest::getTestCaseName);
+
+class MatMulLayerCPUTestUndefShapes : public MatMulLayerCPUTest {
+};
+
+TEST_P(MatMulLayerCPUTestUndefShapes, CompareWithRefs) {
+    auto second_shape = inputDynamicShapes.at(1);
+    PartialShape new_second_shape(std::vector<ov::Dimension>(second_shape.rank().get_length(), -1));
+    std::map<size_t, ov::PartialShape> new_inputs;
+    new_inputs[0] = inputDynamicShapes.at(0);
+    new_inputs[1] = new_second_shape;
+    function->reshape(new_inputs);
+    run();
+    CheckPluginRelatedResults(compiledModel, cpuNodeType);
+}
+
+const fusingSpecificParams matmulFullDynInputsFusingParams[] = {
+    fusingMultiplyPerChannel,
+    fusingMultiplyAddPerChannel,
+    fusingAddPerChannel
+};
+
+const auto matMulParamsDynamicFusingFullUndefShapes = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing),
+                                                                         ::testing::Values(ElementType::f32),
+                                                                         ::testing::Values(ElementType::undefined),
+                                                                         ::testing::Values(ElementType::undefined),
+                                                                         ::testing::Values(utils::InputLayerType::PARAMETER),
+                                                                         ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                                                         ::testing::Values(emptyAdditionalConfig()));
+
+const auto testParamsDynamicFusingFullUndefShapes = ::testing::Combine(matMulParamsDynamicFusingFullUndefShapes,
+                                                                       ::testing::Values(MatMulNodeType::MatMul),
+                                                                       ::testing::ValuesIn(matmulFullDynInputsFusingParams),
+                                                                       ::testing::ValuesIn(filterCPUInfo(filterSpecificParams())));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_MM_Dynamic_Fusing_Full_Undef_Shapes,
+    MatMulLayerCPUTestUndefShapes,
+    testParamsDynamicFusingFullUndefShapes,
+    MatMulLayerCPUTest::getTestCaseName);
+
 } // namespace
 } // namespace MatMul
 } // namespace test

From afea0474082daaf7cf5e985a56c38c0372d7fcad Mon Sep 17 00:00:00 2001
From: Georgy Krivoruchko
Date: Fri, 19 Jan 2024 02:44:15 -0800
Subject: [PATCH 090/122] Refactoring operations C-D (#22230)
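
The pattern applied across these files (an illustrative sketch, not an exhaustive
list of the edits):

    // before: aliases from the removed default_opset.hpp
    return {std::make_shared<default_opset::Convert>(data, elem_type)};
    // after: versioned opset types, with "using namespace ov::op;"
    return {std::make_shared<v0::Convert>(data, elem_type)};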
.../onnx/frontend/src/op/depth_to_space.hpp | 1 - .../frontend/src/op/dequantize_linear.cpp | 54 ++++---- .../frontend/src/op/dequantize_linear.hpp | 8 +- src/frontends/onnx/frontend/src/op/dft.hpp | 1 - src/frontends/onnx/frontend/src/op/div.hpp | 10 +- .../onnx/frontend/src/op/dropout.cpp | 17 ++- .../src/op/dynamic_quantize_linear.cpp | 109 ++++++++-------- .../src/op/dynamic_quantize_linear.hpp | 1 - 39 files changed, 275 insertions(+), 291 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/cast.cpp b/src/frontends/onnx/frontend/src/op/cast.cpp index ba53f7aa9e3d13..4e1d01f26f6a8f 100644 --- a/src/frontends/onnx/frontend/src/op/cast.cpp +++ b/src/frontends/onnx/frontend/src/op/cast.cpp @@ -4,12 +4,11 @@ #include "op/cast.hpp" -#include - -#include "default_opset.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/op/convert.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -21,7 +20,7 @@ OutputVector cast(const Node& node) { int64_t target_type = node.get_attribute_value("to"); element::Type elem_type = common::get_ov_element_type(target_type); - return {std::make_shared(data, elem_type)}; + return {std::make_shared(data, elem_type)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/cast.hpp b/src/frontends/onnx/frontend/src/op/cast.hpp index f645bbc3666d58..ff0afe050e4a84 100644 --- a/src/frontends/onnx/frontend/src/op/cast.hpp +++ b/src/frontends/onnx/frontend/src/op/cast.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/cast_like.cpp b/src/frontends/onnx/frontend/src/op/cast_like.cpp index 6d9edc85a3043b..f678057194a632 100644 --- a/src/frontends/onnx/frontend/src/op/cast_like.cpp +++ b/src/frontends/onnx/frontend/src/op/cast_like.cpp @@ -4,11 +4,9 @@ #include "op/cast_like.hpp" -#include +#include "openvino/op/convert_like.hpp" -#include "default_opset.hpp" -#include "ngraph/type/element_type.hpp" -#include "utils/common.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -18,7 +16,7 @@ namespace set_1 { OutputVector cast_like(const Node& node) { auto inputs = node.get_ng_inputs(); - return {std::make_shared(inputs.at(0), inputs.at(1))}; + return {std::make_shared(inputs.at(0), inputs.at(1))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/cast_like.hpp b/src/frontends/onnx/frontend/src/op/cast_like.hpp index 84387c59e59749..a51d209f1b1c65 100644 --- a/src/frontends/onnx/frontend/src/op/cast_like.hpp +++ b/src/frontends/onnx/frontend/src/op/cast_like.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/ceil.hpp b/src/frontends/onnx/frontend/src/op/ceil.hpp index 283c7c372338ce..c31dc61e0490d1 100644 --- a/src/frontends/onnx/frontend/src/op/ceil.hpp +++ b/src/frontends/onnx/frontend/src/op/ceil.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/ceiling.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector ceil(const Node& node) 
{ - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index cad2cc6ebcbd67..06b634797c46c1 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -5,11 +5,14 @@ #include "op/clip.hpp" #include -#include -#include "default_opset.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -23,7 +26,7 @@ OutputVector clip(const Node& node) { const double min_value = node.get_attribute_value("min", std::numeric_limits::lowest()); - return {std::make_shared(data, min_value, max_value)}; + return {std::make_shared(data, min_value, max_value)}; } } // namespace set_1 @@ -31,10 +34,10 @@ OutputVector clip(const Node& node) { namespace set_11 { OutputVector clip(const Node& node) { const OutputVector inputs{node.get_ng_inputs()}; - const Output data = inputs.at(0); + const Output data = inputs.at(0); const element::Type data_type = data.get_element_type(); - Output min; - Output max; + Output min; + Output max; // If second input is provided, assign to min input, otherwise set lowest // numeric limit of data type as min input. @@ -56,9 +59,9 @@ OutputVector clip(const Node& node) { OPENVINO_SUPPRESS_DEPRECATED_END } - const auto max_of_min_and_data = std::make_shared(min, data); + const auto max_of_min_and_data = std::make_shared(min, data); - return {std::make_shared(max, max_of_min_and_data)}; + return {std::make_shared(max, max_of_min_and_data)}; } } // namespace set_11 diff --git a/src/frontends/onnx/frontend/src/op/clip.hpp b/src/frontends/onnx/frontend/src/op/clip.hpp index 6281e318eda278..dfddb5cd7e8f59 100644 --- a/src/frontends/onnx/frontend/src/op/clip.hpp +++ b/src/frontends/onnx/frontend/src/op/clip.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index c5ad926f30de80..4b2f326c784499 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -4,7 +4,6 @@ #include "op/com.microsoft/attention.hpp" -#include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" diff --git a/src/frontends/onnx/frontend/src/op/compress.cpp b/src/frontends/onnx/frontend/src/op/compress.cpp index d1d31f02a192fd..50141a03ef2a68 100644 --- a/src/frontends/onnx/frontend/src/op/compress.cpp +++ b/src/frontends/onnx/frontend/src/op/compress.cpp @@ -4,11 +4,14 @@ #include "op/compress.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/non_zero.hpp" +#include "openvino/op/squeeze.hpp" #include "ov_models/ov_builders/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -22,14 +25,16 @@ OutputVector compress(const Node& node) { if (node.has_attribute("axis")) { axis = 
node.get_attribute_value("axis");
     } else {
-        data = std::make_shared(ov::op::util::flatten(data, static_cast(axis)));
+        data = std::make_shared(ov::op::util::flatten(data, static_cast(axis)));
     }
-    auto axis_node = default_opset::Constant::create(element::i64, Shape{}, {axis});
-    auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0});
-    auto result = std::make_shared(
-        data,
-        std::make_shared(std::make_shared(condition), zero_node),
-        axis_node);
+    auto axis_node = v0::Constant::create(element::i64, Shape{}, {axis});
+    auto zero_node = v0::Constant::create(element::i64, Shape{}, {0});
+    auto result =
+        std::make_shared(data,
+                         std::make_shared(std::make_shared(condition), zero_node),
+                         axis_node);

     return {result};
 }
diff --git a/src/frontends/onnx/frontend/src/op/compress.hpp b/src/frontends/onnx/frontend/src/op/compress.hpp
index 9135b07155005a..e667aa04ee60f8 100644
--- a/src/frontends/onnx/frontend/src/op/compress.hpp
+++ b/src/frontends/onnx/frontend/src/op/compress.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/output_vector.hpp"
 #include "onnx_import/core/node.hpp"

 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/concat.cpp b/src/frontends/onnx/frontend/src/op/concat.cpp
index 63931703559f19..2a121cf6b9567b 100644
--- a/src/frontends/onnx/frontend/src/op/concat.cpp
+++ b/src/frontends/onnx/frontend/src/op/concat.cpp
@@ -4,11 +4,11 @@

 #include "op/concat.hpp"

-#include
-
-#include "default_opset.hpp"
+#include "openvino/op/concat.hpp"
 #include "utils/common.hpp"

+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -21,7 +21,7 @@ OutputVector concat(const Node& node) {
     std::copy_if(inputs.begin(), inputs.end(), std::back_inserter(valid_inputs), [](ov::Output& in) -> bool {
         return !common::is_failsafe_node(in.get_node_shared_ptr());
     });
-    return {std::make_shared(valid_inputs, axis)};
+    return {std::make_shared(valid_inputs, axis)};
 }

 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/concat.hpp b/src/frontends/onnx/frontend/src/op/concat.hpp
index 65e51e0e17823b..be3736f1a46ed0 100644
--- a/src/frontends/onnx/frontend/src/op/concat.hpp
+++ b/src/frontends/onnx/frontend/src/op/concat.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp
index 66a7f978b2d5a7..087bbe5eaadb29 100644
--- a/src/frontends/onnx/frontend/src/op/constant.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant.cpp
@@ -9,9 +9,11 @@
 #include "core/attribute.hpp"
 #include "core/sparse_tensor.hpp"
 #include "core/tensor.hpp"
-#include "default_opset.hpp"
-#include "ngraph/validation_util.hpp"
+#include "openvino/core/validation_util.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/constant.hpp"
+
+using namespace ov::op;

 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -35,24 +37,24 @@ std::vector get_dense_vector(const std::vector& values, const std::vector<
 }

 template
-std::shared_ptr make_dense_tensor_as_constant(const std::vector& indices,
-                                              const Tensor& values_tensor,
-                                              const Shape& shape) {
+std::shared_ptr
make_dense_tensor_as_constant(const std::vector& indices, + const Tensor& values_tensor, + const Shape& shape) { auto values = values_tensor.get_data(); auto dense_vector = get_dense_vector(values, indices, shape_size(shape)); - return default_opset::Constant::create(values_tensor.get_ov_type(), shape, dense_vector); + return v0::Constant::create(values_tensor.get_ov_type(), shape, dense_vector); } -std::shared_ptr get_dense_tensor_as_constant(const std::vector& absolute_indices, - const Tensor& values_tensor, - const Shape& shape) { +std::shared_ptr get_dense_tensor_as_constant(const std::vector& absolute_indices, + const Tensor& values_tensor, + const Shape& shape) { switch (values_tensor.get_ov_type()) { case element::boolean: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::f32: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::f16: - return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); + return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::f64: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::i8: @@ -72,7 +74,7 @@ std::shared_ptr get_dense_tensor_as_constant(const std: case element::u64: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::bf16: - return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); + return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); default: FRONT_END_THROW("Tensor has an unsupported data type"); } @@ -123,15 +125,15 @@ OutputVector constant(const onnx_import::Node& node) { auto& attribute = node.get_attribute(attributes_names[0]); if (attribute.is_float()) { - return {default_opset::Constant::create(element::f32, ngraph::Shape{}, {attribute.get_float()})}; + return {v0::Constant::create(element::f32, ov::Shape{}, {attribute.get_float()})}; } else if (attribute.is_float_array()) { auto values = attribute.get_float_array(); - return {default_opset::Constant::create(element::f32, ngraph::Shape{values.size()}, values)}; + return {v0::Constant::create(element::f32, ov::Shape{values.size()}, values)}; } else if (attribute.is_integer()) { - return {default_opset::Constant::create(element::i64, ngraph::Shape{}, {attribute.get_integer()})}; + return {v0::Constant::create(element::i64, ov::Shape{}, {attribute.get_integer()})}; } else if (attribute.is_integer_array()) { auto values = attribute.get_integer_array(); - return {default_opset::Constant::create(element::i64, ngraph::Shape{values.size()}, values)}; + return {v0::Constant::create(element::i64, ov::Shape{values.size()}, values)}; } else if (attribute.is_sparse_tensor()) { auto sparse_tensor = attribute.get_sparse_tensor(); const Tensor& values_tensor = sparse_tensor.get_values(); diff --git a/src/frontends/onnx/frontend/src/op/constant.hpp b/src/frontends/onnx/frontend/src/op/constant.hpp index fd95f64261c6ff..d4e89450c44dc8 100644 --- a/src/frontends/onnx/frontend/src/op/constant.hpp +++ b/src/frontends/onnx/frontend/src/op/constant.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.cpp b/src/frontends/onnx/frontend/src/op/constant_fill.cpp index beb08bfd3e536b..e4641c29be1cf3 100644 --- a/src/frontends/onnx/frontend/src/op/constant_fill.cpp +++ 
b/src/frontends/onnx/frontend/src/op/constant_fill.cpp @@ -6,11 +6,12 @@ #include // onnx types -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/concat.hpp" #include "onnx_common/utils.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +using namespace ov::op; using namespace ov::frontend::onnx::common; OPENVINO_SUPPRESS_DEPRECATED_START @@ -19,7 +20,7 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector constant_fill(const Node& node) { - Output target_shape; + Output target_shape; const auto dtype = node.get_attribute_value("dtype", static_cast(TensorProto_DataType_FLOAT)); const auto ng_type = onnx_to_ov_data_type(static_cast(dtype)); const auto const_val_to_fill = node.get_attribute_as_constant("value", 0.f, ng_type); @@ -33,14 +34,14 @@ OutputVector constant_fill(const Node& node) { if (node.has_attribute("extra_shape")) { const auto extra_shape_const = node.get_attribute_as_constant>("extra_shape", target_shape.get_element_type()); - target_shape = std::make_shared(OutputVector{target_shape, extra_shape_const}, 0); + target_shape = std::make_shared(OutputVector{target_shape, extra_shape_const}, 0); } } else // use shape attribute as target shape { target_shape = node.get_attribute_as_constant>("shape", ng_type); } - return {std::make_shared(const_val_to_fill, target_shape)}; + return {std::make_shared(const_val_to_fill, target_shape)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.hpp b/src/frontends/onnx/frontend/src/op/constant_fill.hpp index 1f3b9c618d7561..f0bbf50855a76f 100644 --- a/src/frontends/onnx/frontend/src/op/constant_fill.hpp +++ b/src/frontends/onnx/frontend/src/op/constant_fill.hpp @@ -7,9 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp index 1d117f341ce5ad..e9d628ce628db7 100644 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp @@ -5,32 +5,35 @@ #include "op/constant_of_shape.hpp" #include "core/tensor.hpp" -#include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "op/constant.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector constant_of_shape(const onnx_import::Node& node) { - Output constant_value; + Output constant_value; if (node.has_attribute("value")) { auto value_tensor = node.get_attribute_value("value"); constant_value = value_tensor.get_ov_constant(); constant_value = reshape::interpret_as_scalar(constant_value); } else { - constant_value = default_opset::Constant::create(element::f32, {}, {0}); + constant_value = v0::Constant::create(element::f32, {}, {0}); } const auto& inputs = node.get_ng_inputs(); if (inputs.size() == 0 || common::is_failsafe_node(inputs[0].get_node_shared_ptr()) || ov::op::util::is_null(inputs[0])) { return {constant_value}; } - return {std::make_shared(constant_value, inputs[0])}; + return {std::make_shared(constant_value, inputs[0])}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp 
b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp index 3bd2fbd16e7f4b..9f09462ac3f9a2 100644 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp +++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp index 2f4b69dd338740..35c4228dca6c74 100644 --- a/src/frontends/onnx/frontend/src/op/conv.cpp +++ b/src/frontends/onnx/frontend/src/op/conv.cpp @@ -4,39 +4,33 @@ #include "op/conv.hpp" -#include -#include -#include - -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/shape_of.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { namespace detail { -std::shared_ptr add_bias(const Output& ng_conv, const Output& bias) { - const auto conv_shape = std::make_shared(ng_conv); - const auto conv_rank = std::make_shared(conv_shape); +std::shared_ptr add_bias(const Output& ng_conv, const Output& bias) { + const auto conv_shape = std::make_shared(ng_conv); + const auto conv_rank = std::make_shared(conv_shape); - return { - std::make_shared(ng_conv, reshape::reshape_channel_shaped_node_to_nchw(bias, conv_rank))}; + return {std::make_shared(ng_conv, reshape::reshape_channel_shaped_node_to_nchw(bias, conv_rank))}; } -OutputVector conv(const Node& node, - Output data, - Output filters, - Output bias) { +OutputVector conv(const Node& node, Output data, Output filters, Output bias) { // in the current implementation we assume that the data input rank is static // and only the 'batch' dimension can be dynamic const auto groups = node.get_attribute_value("group", 1); @@ -47,7 +41,7 @@ OutputVector conv(const Node& node, const auto strides = convpool::get_strides(node); const auto dilations = convpool::get_dilations(node); const auto paddings = convpool::get_pads(node); - const ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node); + const ov::op::PadType auto_pad_type = convpool::get_auto_pad(node); const auto& padding_below = paddings.first; const auto& padding_above = paddings.second; diff --git a/src/frontends/onnx/frontend/src/op/conv.hpp b/src/frontends/onnx/frontend/src/op/conv.hpp index 90eaaa206df9f2..85e75b5cf61202 100644 --- a/src/frontends/onnx/frontend/src/op/conv.hpp +++ b/src/frontends/onnx/frontend/src/op/conv.hpp @@ -7,21 +7,21 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { namespace detail { -OutputVector conv(const Node& node, Output data, Output filters, Output bias); +OutputVector conv(const Node& node, Output data, Output filters, Output bias); } /// \brief Performs ONNX Conv operation. /// /// \param node The ONNX node object representing this operation. 
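/// (For reference: detail::conv dispatches on the ONNX `group` attribute read
/// from the node — group == 1 yields a regular convolution, group > 1 a grouped
/// one — before the optional bias input is added back; the exact factory call
/// is not shown in this hunk.)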
/// -/// \return The vector containing Ngraph nodes producing output of ONNX convolution +/// \return The vector containing OV nodes producing output of ONNX convolution /// operation. OutputVector conv(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp index 0fcd58d900f310..565696be7fb86d 100644 --- a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp @@ -4,41 +4,39 @@ #include "op/conv_transpose.hpp" -#include -#include -#include -#include -#include -#include - -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/output_vector.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convolution.hpp" +#include "openvino/op/group_conv.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/subtract.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "utils/convpool.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { namespace { -Output make_group_conv_backprop(const Output& data, - const Output& filters, - const Strides& strides, - const Strides& dilations, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const ngraph::op::PadType& auto_pad_type, - const std::vector& output_shape, - const std::vector& output_padding) { +Output make_group_conv_backprop(const Output& data, + const Output& filters, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const ov::op::PadType& auto_pad_type, + const std::vector& output_shape, + const std::vector& output_padding) { if (output_shape.empty()) { - return std::make_shared( + return std::make_shared( data, filters, strides, @@ -48,10 +46,10 @@ Output make_group_conv_backprop(const Output& data, auto_pad_type, CoordinateDiff(std::begin(output_padding), std::end(output_padding))); } else { - return std::make_shared( + return std::make_shared( data, filters, - default_opset::Constant::create(element::i64, Shape{output_shape.size()}, output_shape), + v0::Constant::create(element::i64, Shape{output_shape.size()}, output_shape), strides, dilations, auto_pad_type, @@ -59,17 +57,17 @@ Output make_group_conv_backprop(const Output& data, } } -Output make_conv_backprop(const Output& data, - const Output& filters, - const Strides& strides, - const Strides& dilations, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end, - const ngraph::op::PadType& auto_pad_type, - const std::vector& output_shape, - const std::vector& output_padding) { +Output make_conv_backprop(const Output& data, + const Output& filters, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const ov::op::PadType& auto_pad_type, + const std::vector& output_shape, + const std::vector& output_padding) { if (output_shape.empty()) { - return std::make_shared( + return std::make_shared( data, filters, strides, @@ -79,10 +77,10 @@ Output make_conv_backprop(const Output& data, 
                                                                  auto_pad_type,
                                                                  CoordinateDiff(std::begin(output_padding), std::end(output_padding)));
     } else {
-        return std::make_shared(
+        return std::make_shared(
             data,
             filters,
-            default_opset::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
+            v0::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
             strides,
             pads_begin,
             pads_end,
@@ -92,39 +90,37 @@ Output make_conv_backprop(const Output& data,
     }
 }

-Output get_prepared_bias(const Output& bias, const Output& conv) {
+Output get_prepared_bias(const Output& bias, const Output& conv) {
     // Prepare bias shape [1, C, 1, 1]
     const auto& conv_pshape = conv.get_partial_shape();
-    std::shared_ptr bias_shape_node;
+    std::shared_ptr bias_shape_node;
     if (conv_pshape.rank().is_static() && conv_pshape[1].is_static()) {
         Shape new_bias_shape(conv_pshape.rank().get_length(), 1);
         new_bias_shape[1] = conv_pshape[1].get_length();
-        bias_shape_node = default_opset::Constant::create(element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
+        bias_shape_node = v0::Constant::create(element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
     } else {
-        const auto conv_shape = std::make_shared(conv);
-        const auto conv_rank = std::make_shared(conv_shape);
+        const auto conv_shape = std::make_shared(conv);
+        const auto conv_rank = std::make_shared(conv_shape);

         // Prepare new bias shape base: [1, 1, 1, 1, ... ]
-        const auto one_node = default_opset::Constant::create(element::i64, Shape{1}, {1});
-        const auto two_node = default_opset::Constant::create(element::i64, Shape{1}, {2});
-        const auto remaining_shape_length = std::make_shared(conv_rank, two_node);
-        const auto remaining_bias_shape_ones =
-            std::make_shared(one_node, remaining_shape_length);
-
-        const auto C_dim = std::make_shared(conv_shape,
-                                            one_node,          // begin
-                                            two_node,          // end
-                                            std::vector{0},    // begin mask
-                                            std::vector{0});   // end mask
+        const auto one_node = v0::Constant::create(element::i64, Shape{1}, {1});
+        const auto two_node = v0::Constant::create(element::i64, Shape{1}, {2});
+        const auto remaining_shape_length = std::make_shared(conv_rank, two_node);
+        const auto remaining_bias_shape_ones = std::make_shared(one_node, remaining_shape_length);
+
+        const auto C_dim = std::make_shared(conv_shape,
+                                            one_node,          // begin
+                                            two_node,          // end
+                                            std::vector{0},    // begin mask
+                                            std::vector{0});   // end mask

         // Construct new bias shape: [1, C, 1, 1, ... ]
-        bias_shape_node =
-            std::make_shared(OutputVector{one_node, C_dim, remaining_bias_shape_ones}, 0);
+        bias_shape_node = std::make_shared(OutputVector{one_node, C_dim, remaining_bias_shape_ones}, 0);
     }

-    return std::make_shared(bias, bias_shape_node, false);
+    return std::make_shared(bias, bias_shape_node, false);
 }
 }  // namespace

@@ -145,7 +141,7 @@ OutputVector conv_transpose(const Node& node) {
     std::size_t num_spatial_dims = 0;
     Strides strides, dilations;
     std::pair paddings;
-    ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
+    ov::op::PadType auto_pad_type = convpool::get_auto_pad(node);

     // Get attributes or infer them from the input data rank if it's static.
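    // (A minimal sketch of the static-rank branch that follows; the convpool
    //  helper overloads taking an explicit kernel rank are an assumption here,
    //  not something this hunk shows:
    //
    //      num_spatial_dims = data_pshape.rank().get_length() - 2;  // drop N and C
    //      strides = convpool::get_strides(node, num_spatial_dims);
    //      dilations = convpool::get_dilations(node, num_spatial_dims);
    //      paddings = convpool::get_pads(node, num_spatial_dims);
    //
    //  i.e. a rank-4 NCHW input implies 2 spatial dims, rank-5 NCDHW implies 3.)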
if (data_pshape.rank().is_static()) { @@ -180,7 +176,7 @@ OutputVector conv_transpose(const Node& node) { CHECK_VALID_NODE(node, groups >= 0, "Incorrect value of 'group' attribute: ", groups); - Output conv_node; + Output conv_node; if (groups > 1) { filters = convpool::get_reshaped_filters(filters, groups); @@ -211,7 +207,7 @@ OutputVector conv_transpose(const Node& node) { } const auto reshaped_bias = get_prepared_bias(inputs[2], conv_node); - return {std::make_shared(conv_node, reshaped_bias)}; + return {std::make_shared(conv_node, reshaped_bias)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp index f5a9866cdfebdb..94fd5cdc1f3efb 100644 --- a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp +++ b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { @@ -18,7 +17,7 @@ namespace set_1 { /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of ONNX convolution +/// \return The vector containing OV nodes producing output of ONNX convolution /// operation. OutputVector conv_transpose(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/cos.cpp b/src/frontends/onnx/frontend/src/op/cos.cpp index 05fb3bbd78ffe9..63a565246f4402 100644 --- a/src/frontends/onnx/frontend/src/op/cos.cpp +++ b/src/frontends/onnx/frontend/src/op/cos.cpp @@ -4,9 +4,9 @@ #include "op/cos.hpp" -#include +#include "openvino/op/cos.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,7 +14,7 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector cos(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/cos.hpp b/src/frontends/onnx/frontend/src/op/cos.hpp index f79f066a4ba4d4..b7998734ac3804 100644 --- a/src/frontends/onnx/frontend/src/op/cos.hpp +++ b/src/frontends/onnx/frontend/src/op/cos.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/output_vector.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/cosh.cpp b/src/frontends/onnx/frontend/src/op/cosh.cpp index 4928a16bd62db4..eaaf27ab2c886a 100644 --- a/src/frontends/onnx/frontend/src/op/cosh.cpp +++ b/src/frontends/onnx/frontend/src/op/cosh.cpp @@ -4,9 +4,9 @@ #include "op/cosh.hpp" -#include +#include "openvino/op/cosh.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,7 +14,7 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector cosh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/cosh.hpp b/src/frontends/onnx/frontend/src/op/cosh.hpp index 22c7eb0d3ce3b9..293b2a6534ca76 100644 --- a/src/frontends/onnx/frontend/src/op/cosh.hpp +++ b/src/frontends/onnx/frontend/src/op/cosh.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/output_vector.hpp" 
#include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/crop.cpp b/src/frontends/onnx/frontend/src/op/crop.cpp index 175b18bf218680..cc1d54f3b5a803 100644 --- a/src/frontends/onnx/frontend/src/op/crop.cpp +++ b/src/frontends/onnx/frontend/src/op/crop.cpp @@ -4,9 +4,13 @@ #include "op/crop.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/shape.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/strided_slice.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -23,12 +27,11 @@ OutputVector crop(const Node& node) { // Border values: leftBorder, topBorder, rightBorder, bottomBorder. const auto border = node.get_attribute_value>("border"); - std::shared_ptr end; + std::shared_ptr end; // Set slice begin values to border values (note order of indexes) - const auto begin = default_opset::Constant::create(ngraph::element::i64, - Shape{4}, - std::vector{0, 0, border[1], border[0]}); + const auto begin = + v0::Constant::create(ov::element::i64, Shape{4}, std::vector{0, 0, border[1], border[0]}); // If scale is given, then start crop at left/top `border` // and end on left/top `border` + `scale`. @@ -43,10 +46,9 @@ OutputVector crop(const Node& node) { // Set slice end values to topBorder+heightScale and leftBorder+widthScale // Note that indexes don't match, e.g. border[0] + scale[1] - end = default_opset::Constant::create( - ngraph::element::i64, - Shape{4}, - std::vector{0, 0, border[1] + scale[0], border[0] + scale[1]}); + end = v0::Constant::create(ov::element::i64, + Shape{4}, + std::vector{0, 0, border[1] + scale[0], border[0] + scale[1]}); } // If scale is not provided, crop the image by values provided in `border`. 
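    // (Worked example with hypothetical sizes: for an NCHW input of shape
    //  [1, 3, 20, 30] and border = {1, 2, 3, 4}, i.e. left=1, top=2, right=3,
    //  bottom=4, the slice becomes begin = [0, 0, 2, 1] and
    //  end = shape(input) + [0, 0, -4, -3] = [1, 3, 16, 27], keeping a
    //  14x26 spatial window.)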
else { @@ -56,19 +58,17 @@ OutputVector crop(const Node& node) { border.size()); // Calculate ends as shape(input) - border[2:3] - const auto input_shape = std::make_shared(input_data); + const auto input_shape = std::make_shared(input_data); const auto end_offset = - default_opset::Constant::create(ngraph::element::i64, - Shape{4}, - std::vector{0, 0, -border[3], -border[2]}); - end = std::make_shared(input_shape, end_offset); + v0::Constant::create(ov::element::i64, Shape{4}, std::vector{0, 0, -border[3], -border[2]}); + end = std::make_shared(input_shape, end_offset); } // Input data shape [N,C,H,W], slicing only along spatial dimensions std::vector begin_mask{1, 1, 0, 0}; std::vector end_mask{1, 1, 0, 0}; - return {std::make_shared(input_data, begin, end, begin_mask, end_mask)}; + return {std::make_shared(input_data, begin, end, begin_mask, end_mask)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/crop.hpp b/src/frontends/onnx/frontend/src/op/crop.hpp index 58310c65a762fb..6acc222c580f2e 100644 --- a/src/frontends/onnx/frontend/src/op/crop.hpp +++ b/src/frontends/onnx/frontend/src/op/crop.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.cpp b/src/frontends/onnx/frontend/src/op/cum_sum.cpp index 7f4bef11793799..f3bcf29fde38dc 100644 --- a/src/frontends/onnx/frontend/src/op/cum_sum.cpp +++ b/src/frontends/onnx/frontend/src/op/cum_sum.cpp @@ -4,11 +4,12 @@ #include "op/cum_sum.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/cum_sum.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -19,16 +20,16 @@ OutputVector cum_sum(const Node& node) { auto data = inputs.at(0); bool exclusive = node.get_attribute_value("exclusive", 0); bool reverse = node.get_attribute_value("reverse", 0); - Output axis; + Output axis; if (inputs.size() > 1) { // optional input, 0-D or 1-D tensor const auto& axis_shape = inputs.at(1).get_partial_shape(); axis = axis_shape.is_dynamic() ? 
inputs.at(1) : ngraph::onnx_import::reshape::interpret_as_scalar(inputs.at(1)); } else { - axis = default_opset::Constant::create(element::i64, Shape{}, {0}); // default + axis = v0::Constant::create(element::i64, Shape{}, {0}); // default } - return OutputVector{std::make_shared(data, axis, exclusive, reverse)}; + return OutputVector{std::make_shared(data, axis, exclusive, reverse)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.hpp b/src/frontends/onnx/frontend/src/op/cum_sum.hpp index 4e3a39c8e297c2..f12e32e6ced3df 100644 --- a/src/frontends/onnx/frontend/src/op/cum_sum.hpp +++ b/src/frontends/onnx/frontend/src/op/cum_sum.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp index 8940e77eb74b35..d644f35447a89d 100644 --- a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp +++ b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp @@ -4,8 +4,10 @@ #include "op/depth_to_space.hpp" -#include "default_opset.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/depth_to_space.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -18,16 +20,16 @@ OutputVector depth_to_space(const Node& node) { FRONT_END_GENERAL_CHECK(shape.rank().is_static() && shape.rank().get_length() == 4, "Input must be 4-dimensional"); const auto mode = node.get_attribute_value("mode", "DCR"); - default_opset::DepthToSpace::DepthToSpaceMode ngraph_mode; + v0::DepthToSpace::DepthToSpaceMode ov_mode; if (mode == "DCR") - ngraph_mode = default_opset::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST; + ov_mode = v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST; else if (mode == "CRD") - ngraph_mode = default_opset::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST; + ov_mode = v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST; else FRONT_END_GENERAL_CHECK(false, "only 'DCR' and 'CRD' modes are supported"); const auto block_size = node.get_attribute_value("blocksize"); - return OutputVector{std::make_shared(data, ngraph_mode, block_size)}; + return OutputVector{std::make_shared(data, ov_mode, block_size)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp index dae6bf41e6541a..9e30edc25431e7 100644 --- a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp +++ b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index efe5b9f8bd9078..5e234a39b1a5d0 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -7,25 +7,29 @@ #include #include -#include "default_opset.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/multiply.hpp" +#include 
"openvino/op/reshape.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { namespace detail { -std::shared_ptr get_zero_point(const OutputVector& inputs) { +std::shared_ptr get_zero_point(const OutputVector& inputs) { if (inputs.size() == 3 && !ov::op::util::is_null(inputs[2])) { const auto& zero_point = inputs[2]; if (zero_point.get_element_type() != element::f32) { - return std::make_shared(zero_point, element::f32); + return std::make_shared(zero_point, element::f32); } return zero_point.get_node_shared_ptr(); @@ -47,22 +51,20 @@ OutputVector dequantize_linear(const Node& node) { common::validate_scalar_input("Dequantization scale", scale.get_node_shared_ptr(), {element::f32}); - const auto converted_x = std::make_shared(x, element::f32); + const auto converted_x = std::make_shared(x, element::f32); if (zero_point) { common::validate_scalar_input("Zero point", zero_point); - return {std::make_shared( - std::make_shared(converted_x, zero_point), - scale)}; + return {std::make_shared(std::make_shared(converted_x, zero_point), scale)}; } else { - return {std::make_shared(converted_x, scale)}; + return {std::make_shared(converted_x, scale)}; } } } // namespace set_1 namespace set_13 { namespace detail { -void validate_scale(const Output scale, const Output x, const int64_t axis) { +void validate_scale(const Output scale, const Output x, const int64_t axis) { const auto& scale_shape = scale.get_partial_shape(); FRONT_END_GENERAL_CHECK(scale_shape.rank().get_length() == 0 || scale_shape.rank().get_length() == 1, "Dequantization scale needs to be a scalar or a vector."); @@ -82,7 +84,7 @@ void validate_scale(const Output scale, const Output } } -void validate_zero_point(const Output zero_point, const Output x, const int64_t axis) { +void validate_zero_point(const Output zero_point, const Output x, const int64_t axis) { const auto& zero_point_shape = zero_point.get_partial_shape(); FRONT_END_GENERAL_CHECK(zero_point_shape.rank().get_length() == 0 || zero_point_shape.rank().get_length() == 1, "Zero point needs to be a scalar or a vector."); @@ -102,9 +104,9 @@ void validate_zero_point(const Output zero_point, const Output reshape_input(const Output& input, - const int64_t axis, - const PartialShape& x_shape) { +std::shared_ptr reshape_input(const Output& input, + const int64_t axis, + const PartialShape& x_shape) { // these reshapes make sure that dequantization happens over the specified axis auto input_rank = input.get_partial_shape().rank(); @@ -129,14 +131,14 @@ std::shared_ptr reshape_input(const Output& input, target_dims.push_back(1); } - const auto target_shape = default_opset::Constant::create(element::i64, Shape{target_dims.size()}, target_dims); + const auto target_shape = v0::Constant::create(element::i64, Shape{target_dims.size()}, target_dims); - return std::make_shared(input, target_shape, true); + return std::make_shared(input, target_shape, true); } -OutputVector dequantize_linear(const Output& x, - const Output& scale, - const std::shared_ptr& zero_point, +OutputVector dequantize_linear(const Output& x, + const Output& scale, + const std::shared_ptr& zero_point, int64_t axis, const Node& node) { const auto& x_shape = x.get_partial_shape(); @@ -144,20 +146,20 @@ OutputVector dequantize_linear(const Output& x, FRONT_END_GENERAL_CHECK(x_shape.rank().is_static(), "Rank of the input data tensor has to be known (static)."); 
OPENVINO_SUPPRESS_DEPRECATED_START - axis = ngraph::normalize_axis(node.get_description(), axis, x_shape.rank()); + axis = ov::normalize_axis(node.get_description(), axis, x_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END validate_scale(scale, x, axis); const auto scale_reshaped = reshape_input(scale, axis, x_shape); - const auto converted_x = std::make_shared(x, element::f32); + const auto converted_x = std::make_shared(x, element::f32); if (zero_point) { validate_zero_point(zero_point, x, axis); - return {std::make_shared( - std::make_shared(converted_x, reshape_input(zero_point, axis, x_shape)), + return {std::make_shared( + std::make_shared(converted_x, reshape_input(zero_point, axis, x_shape)), scale_reshaped)}; } else { - return {std::make_shared(converted_x, scale_reshaped)}; + return {std::make_shared(converted_x, scale_reshaped)}; } } } // namespace detail diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp index d44a019adabd8f..7bb121d7e2df29 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp @@ -7,8 +7,8 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/core/node.hpp" namespace ngraph { namespace onnx_import { @@ -21,9 +21,9 @@ OutputVector dequantize_linear(const Node& node); namespace set_13 { namespace detail { -OutputVector dequantize_linear(const Output& x, - const Output& scale, - const std::shared_ptr& zero_point, +OutputVector dequantize_linear(const Output& x, + const Output& scale, + const std::shared_ptr& zero_point, int64_t axis, const Node& node); } diff --git a/src/frontends/onnx/frontend/src/op/dft.hpp b/src/frontends/onnx/frontend/src/op/dft.hpp index 0390bbedc0c875..bae572d84e51a8 100644 --- a/src/frontends/onnx/frontend/src/op/dft.hpp +++ b/src/frontends/onnx/frontend/src/op/dft.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/div.hpp b/src/frontends/onnx/frontend/src/op/div.hpp index 8d37bae67a81f9..a2d6afbfcf4940 100644 --- a/src/frontends/onnx/frontend/src/op/div.hpp +++ b/src/frontends/onnx/frontend/src/op/div.hpp @@ -7,26 +7,22 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/shape.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/divide.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector div(const Node& node) { - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { inline OutputVector div(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/dropout.cpp b/src/frontends/onnx/frontend/src/op/dropout.cpp index 0cfd8f2d941f47..553006e97cef7d 100644 --- a/src/frontends/onnx/frontend/src/op/dropout.cpp +++ b/src/frontends/onnx/frontend/src/op/dropout.cpp @@ -4,14 +4,15 @@ #include "op/dropout.hpp" -#include - -#include "default_opset.hpp" #include 
"exceptions.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/util/op_types.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -24,9 +25,8 @@ OutputVector build_dropout(const Node& node, bool training_mode) { const bool return_mask = node.get_outputs_size() > 1; if (return_mask) { - const auto mask = std::make_shared( - default_opset::Constant::create(ngraph::element::boolean, Shape{}, {true}), - std::make_shared(input_data)); + const auto mask = std::make_shared(v0::Constant::create(ov::element::boolean, Shape{}, {true}), + std::make_shared(input_data)); return {input_data, mask}; } else { return {input_data}; @@ -44,8 +44,7 @@ OutputVector dropout(const Node& node) { CHECK_VALID_NODE(node, ov::op::util::is_constant(ng_inputs.at(2).get_node_shared_ptr()), "Non-constant training_mode input is not supported."); - training_mode = - ov::as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; + training_mode = ov::as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; } return build_dropout(node, training_mode); } diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp index c64b87b28d1ef5..fabe0c784d14b5 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp @@ -4,70 +4,76 @@ #include "op/dynamic_quantize_linear.hpp" -#include -#include - -#include "default_opset.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_max.hpp" +#include "openvino/op/reduce_min.hpp" +#include "openvino/op/round.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace { -std::shared_ptr find_min_value(const ov::Output& input) { - const auto& zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto& one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); +std::shared_ptr find_min_value(const ov::Output& input) { + const auto& zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto& one_node = v0::Constant::create(element::i64, Shape{}, {1}); - const auto& input_shape = std::make_shared(input); - const auto& input_rank = std::make_shared(input_shape); - const auto& input_rank_as_scalar = std::make_shared(input_rank); + const auto& input_shape = std::make_shared(input); + const auto& input_rank = std::make_shared(input_shape); + const auto& input_rank_as_scalar = std::make_shared(input_rank); - const auto& reduce_axes = - std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); + const auto& reduce_axes = std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); - const 
auto& input_min = std::make_shared(input, reduce_axes); + const auto& input_min = std::make_shared(input, reduce_axes); - const auto& zero_node_u8 = default_opset::Constant::create(element::f32, Shape{}, {0}); - return std::make_shared(zero_node_u8, input_min); + const auto& zero_node_u8 = v0::Constant::create(element::f32, Shape{}, {0}); + return std::make_shared(zero_node_u8, input_min); } -std::shared_ptr find_max_value(const ov::Output& input) { - const auto& zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto& one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); +std::shared_ptr find_max_value(const ov::Output& input) { + const auto& zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto& one_node = v0::Constant::create(element::i64, Shape{}, {1}); - const auto& input_shape = std::make_shared(input); - const auto& input_rank = std::make_shared(input_shape); - const auto& input_rank_as_scalar = std::make_shared(input_rank); + const auto& input_shape = std::make_shared(input); + const auto& input_rank = std::make_shared(input_shape); + const auto& input_rank_as_scalar = std::make_shared(input_rank); - const auto& reduce_axes = - std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); + const auto& reduce_axes = std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); - const auto& input_max = std::make_shared(input, reduce_axes); + const auto& input_max = std::make_shared(input, reduce_axes); - const auto& zero_node_u8 = default_opset::Constant::create(element::f32, Shape{}, {0}); - return std::make_shared(zero_node_u8, input_max); + const auto& zero_node_u8 = v0::Constant::create(element::f32, Shape{}, {0}); + return std::make_shared(zero_node_u8, input_max); } -std::shared_ptr quantize_linear(Output x, - Output x_span, - Output quant_range_span, - Output y_zero_point) { - const auto& x_scaled = - std::make_shared(std::make_shared(x, quant_range_span), x_span); +std::shared_ptr quantize_linear(Output x, + Output x_span, + Output quant_range_span, + Output y_zero_point) { + const auto& x_scaled = std::make_shared(std::make_shared(x, quant_range_span), x_span); - const auto& x_rounded = - std::make_shared(x_scaled, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); + const auto& x_rounded = std::make_shared(x_scaled, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - const auto& y_zero_point_f32 = std::make_shared(y_zero_point, ov::element::f32); + const auto& y_zero_point_f32 = std::make_shared(y_zero_point, ov::element::f32); - const auto& result_shifted = std::make_shared(x_rounded, y_zero_point_f32); - const auto& result_clamped = std::make_shared(result_shifted, 0, 255); + const auto& result_shifted = std::make_shared(x_rounded, y_zero_point_f32); + const auto& result_clamped = std::make_shared(result_shifted, 0, 255); - return std::make_shared(result_clamped, ov::element::u8); + return std::make_shared(result_clamped, ov::element::u8); } } // namespace namespace op { @@ -77,24 +83,23 @@ OutputVector dynamic_quantize_linear(const Node& node) { const auto& x = inputs.at(0); // quantization range in case of uint8 is [0, 255] - const auto& quant_range_min = default_opset::Constant::create(element::f32, Shape{}, {0}); - const auto& quant_range_max = default_opset::Constant::create(element::f32, Shape{}, {255}); - const auto& quant_range_span = std::make_shared(quant_range_max, quant_range_min); + const auto& quant_range_min = v0::Constant::create(element::f32, Shape{}, {0}); + const auto& 
quant_range_max = v0::Constant::create(element::f32, Shape{}, {255}); + const auto& quant_range_span = std::make_shared(quant_range_max, quant_range_min); const auto& x_max = find_max_value(x); const auto& x_min = find_min_value(x); - const auto& x_span = std::make_shared(x_max, x_min); + const auto& x_span = std::make_shared(x_max, x_min); - const auto& y_scale = std::make_shared(x_span, quant_range_max); + const auto& y_scale = std::make_shared(x_span, quant_range_max); - const auto& x_min_shifted = std::make_shared(quant_range_min, x_min); + const auto& x_min_shifted = std::make_shared(quant_range_min, x_min); const auto& intermediate_zero_point = - std::make_shared(std::make_shared(x_min_shifted, y_scale), - ov::op::v5::Round::RoundMode::HALF_TO_EVEN); + std::make_shared(std::make_shared(x_min_shifted, y_scale), + ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - const auto& y_zero_point = std::make_shared( - std::make_shared(intermediate_zero_point, 0, 255), - ov::element::u8); + const auto& y_zero_point = + std::make_shared(std::make_shared(intermediate_zero_point, 0, 255), ov::element::u8); const auto& y = quantize_linear(x, x_span, quant_range_span, y_zero_point); diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp index 84d8bc852628d7..8962f6602145fa 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { From 608d94e05119cba9e2837c5c82641ca54849a5a1 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 19 Jan 2024 02:59:28 -0800 Subject: [PATCH 091/122] [ONNX] Added new expanded operations (#22254) * Added new expanded operations * Update test_backend.py: removed xpassed * Update __init__.py --- src/frontends/onnx/frontend/src/core/transform.hpp | 5 ++++- src/frontends/onnx/tests/__init__.py | 1 - src/frontends/onnx/tests/tests_python/test_backend.py | 11 ----------- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/src/frontends/onnx/frontend/src/core/transform.hpp b/src/frontends/onnx/frontend/src/core/transform.hpp index 9d8fd4b8c081f1..4a4e3707315043 100644 --- a/src/frontends/onnx/frontend/src/core/transform.hpp +++ b/src/frontends/onnx/frontend/src/core/transform.hpp @@ -10,8 +10,11 @@ namespace ngraph { namespace onnx_import { namespace transform { -static const std::vector onnx_functions_to_expand = {"Bernoulli", +static const std::vector onnx_functions_to_expand = {"AffineGrid", + "Bernoulli", "Celu", + "CenterCropPad", + "Gelu", "NegativeLogLikelihoodLoss", "SoftmaxCrossEntropyLoss", "LayerNormalization"}; diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py index e47f08323e0a48..f2843e89ce33b2 100644 --- a/src/frontends/onnx/tests/__init__.py +++ b/src/frontends/onnx/tests/__init__.py @@ -168,7 +168,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): # ONNX 1.15 xfail_issue_125485 = xfail_test(reason="AffineGrid operation is not supported") -xfail_issue_125486 = xfail_test(reason="Gelu operation is not supported") xfail_issue_125488 = xfail_test(reason="ImageDecoder operation is not supported") skip_issue_125487 = pytest.mark.skip(reason="GridSample doesn't support cubic and linear modes, and 4D tensor") # Need to enable after bumping to 1.15 
skip_issue_125489 = pytest.mark.skip(reason="IsInf changed behavior since opset-20") # Need to enable after opset-20 will be released diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index f131ce48ed58a9..3f59e94c3f3bd1 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -74,7 +74,6 @@ xfail_issue_119925, xfail_issue_119926, xfail_issue_125485, - xfail_issue_125486, xfail_issue_125488, skip_issue_125487, skip_issue_125489, @@ -422,13 +421,10 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ( xfail_issue_99950, - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_and_pad_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_expanded_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_pad_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu", ), @@ -700,13 +696,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_cpu", "OnnxBackendNodeModelTest.test_affine_grid_3d_cpu", ), - ( - xfail_issue_125486, - "OnnxBackendNodeModelTest.test_gelu_default_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_default_2_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_2_cpu", - ), ( xfail_issue_125488, "OnnxBackendNodeModelTest.test_image_decoder_decode_bmp_rgb_cpu", From f4fbbb954d7a98fb43314970665001ff62547191 Mon Sep 17 00:00:00 2001 From: Haiqi Pan Date: Fri, 19 Jan 2024 19:18:52 +0800 Subject: [PATCH 092/122] [API 2.0] Add memory_states test cases (#21627) * add memory_states test case * add memory_states.cpp * fix m_cached_output_names.count(tensor_name) failed * add inferreq_smoke_VariableState_SetState * add inferreq_smoke_VariableState_Reset * add inferreq_smoke_VariableState_2infers_set * add inferreq_smoke_VariableState_2infers * fix name style * Update src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp Co-authored-by: Pawel Raasz * move SKIP_IF_CURRENT_TEST_IS_DISABLED before std::tie * Update src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp Co-authored-by: Vitaliy Urusovskij * Update src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp Co-authored-by: Vitaliy Urusovskij * Update src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp Co-authored-by: Vitaliy Urusovskij * fix comment * use vector to replace array * Update src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp Co-authored-by: Pawel Raasz * avoid temporary vector creation * fix element * fix element * remove test in arm --------- Co-authored-by: Pawel Raasz Co-authored-by: Vitaliy Urusovskij Co-authored-by: Chen Peter --- .../ov_infer_request/memory_states.cpp | 25 ++ .../skip_tests_config.cpp | 1 + .../ov_infer_request/memory_states.hpp | 36 +++ .../ov_infer_request/memory_states.cpp | 270 ++++++++++++++++++ 4 files changed, 332 insertions(+) create mode 100644 
src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp create mode 100644 src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp create mode 100644 src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp new file mode 100644 index 00000000000000..57533fdc278211 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/memory_states.hpp" + +using namespace ov::test::behavior; +using namespace ov; + +namespace { +std::vector memoryStateTestCases = { + memoryStateParams(OVInferRequestVariableStateTest::get_network(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_CPU, + {}), + memoryStateParams(OVInferRequestVariableStateTest::get_network(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_HETERO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; + +INSTANTIATE_TEST_SUITE_P(smoke_VariableState, + OVInferRequestVariableStateTest, + ::testing::ValuesIn(memoryStateTestCases), + OVInferRequestVariableStateTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 447898598cc806..ddff2c7d345de3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -287,6 +287,7 @@ std::vector disabledTestPatterns() { R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=FakeQuantizeBinaryConvolution.*)"); // Issue: 124395 retVector.emplace_back(R"(smoke_VariableStateBasic/InferRequestVariableStateTest.*)"); + retVector.emplace_back(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)"); # endif #endif diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp new file mode 100644 index 00000000000000..d9b93c2f8352f5 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/behavior_test_utils.hpp" +#include "common_test_utils/test_common.hpp" + +namespace ov { +namespace test { +namespace behavior { + +using memoryStateParams = std::tuple, // Model to work with + std::vector, // Memory States to query + std::string, // Target device name + ov::AnyMap>; // device configuration + +class OVInferRequestVariableStateTest : public testing::WithParamInterface, + public OVInferRequestTestBase { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void SetUp() override; + void TearDown() override; + static std::shared_ptr get_network(); + +protected: + std::shared_ptr net; + std::vector statesToQuery; + std::string deviceName; + ov::AnyMap configuration; + 
ov::CompiledModel prepare_network(); +}; +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp new file mode 100644 index 00000000000000..b075da8365ddad --- /dev/null +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp @@ -0,0 +1,270 @@ +// // Copyright (C) 2018-2023 Intel Corporation +// // SPDX-License-Identifier: Apache-2.0 +// // + +#include "behavior/ov_infer_request/memory_states.hpp" + +#include "base/behavior_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/sigmoid.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::ostringstream result; + std::shared_ptr net; + std::string deviceName; + std::vector statesToQuery; + ov::AnyMap configuration; + std::tie(net, statesToQuery, deviceName, configuration) = obj.param; + result << "targetDevice=" << deviceName; + if (!configuration.empty()) { + using namespace ov::test::utils; + for (auto& configItem : configuration) { + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + } + } + return result.str(); +} + +void OVInferRequestVariableStateTest::SetUp() { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + std::tie(net, statesToQuery, deviceName, configuration) = GetParam(); + OVInferRequestTestBase::SetUp(); +} + +void OVInferRequestVariableStateTest::TearDown() { + OVInferRequestTestBase::TearDown(); +} + +std::shared_ptr OVInferRequestVariableStateTest::get_network() { + ov::Shape shape = {1, 200}; + ov::element::Type type = ov::element::f32; + + auto input = std::make_shared(type, shape); + auto mem_i1 = std::make_shared(type, shape, 0); + auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); + auto mul1 = std::make_shared(mem_r1, input); + + auto mem_i2 = std::make_shared(type, shape, 0); + auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); + auto mul2 = std::make_shared(mem_r2, mul1); + auto mem_w2 = std::make_shared(mul2, "c_1-3"); + + auto mem_w1 = std::make_shared(mul2, "r_1-3"); + auto sigm = std::make_shared(mul2); + sigm->set_friendly_name("sigmod_state"); + sigm->get_output_tensor(0).set_names({"sigmod_state"}); + mem_r1->set_friendly_name("Memory_1"); + mem_r1->get_output_tensor(0).set_names({"Memory_1"}); + mem_w1->add_control_dependency(mem_r1); + sigm->add_control_dependency(mem_w1); + + mem_r2->set_friendly_name("Memory_2"); + mem_r2->get_output_tensor(0).set_names({"Memory_2"}); + mem_w2->add_control_dependency(mem_r2); + sigm->add_control_dependency(mem_w2); + + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "add_output"); + return function; +} + +ov::CompiledModel OVInferRequestVariableStateTest::prepare_network() { + net->add_output("Memory_1"); + net->add_output("Memory_2"); + ov::Core core = createCoreWithTemplate(); + return core.compile_model(net, deviceName, configuration); +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_QueryState) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + + auto states = 
infer_req.query_state(); + ASSERT_TRUE(states.size() == 2) << "Incorrect number of VariableStates"; + + for (auto&& state : states) { + auto name = state.get_name(); + ASSERT_TRUE(std::find(statesToQuery.begin(), statesToQuery.end(), name) != statesToQuery.end()) + << "State " << name << "expected to be in memory states but it is not!"; + } +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_SetState) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + + const float new_state_val = 13.0f; + for (auto&& state : infer_req.query_state()) { + state.reset(); + auto state_val = state.get_state(); + auto element_count = state_val.get_size(); + auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count})); + std::fill_n(state_tensor.data(), element_count, new_state_val); + state.set_state(state_tensor); + } + + for (auto&& state : infer_req.query_state()) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + for (int i = 0; i < last_state_size; i++) { + EXPECT_NEAR(new_state_val, last_state_data[i], 1e-5); + } + } +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_Reset) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + + const float new_state_val = 13.0f; + for (auto&& state : infer_req.query_state()) { + state.reset(); + auto state_val = state.get_state(); + auto element_count = state_val.get_size(); + + auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count})); + std::fill_n(state_tensor.data(), element_count, new_state_val); + state.set_state(state_tensor); + } + + infer_req.query_state().front().reset(); + + auto states = infer_req.query_state(); + for (int i = 0; i < states.size(); ++i) { + auto last_state = states[i].get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + if (i == 0) { + for (int j = 0; j < last_state_size; ++j) { + EXPECT_NEAR(0, last_state_data[j], 1e-5); + } + } else { + for (int j = 0; j < last_state_size; ++j) { + EXPECT_NEAR(new_state_val, last_state_data[j], 1e-5); + } + } + } +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_2infers_set) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + auto infer_req2 = executable_net.create_infer_request(); + + const float new_state_val = 13.0f; + for (auto&& state : infer_req.query_state()) { + state.reset(); + auto state_val = state.get_state(); + auto element_count = state_val.get_size(); + + auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count})); + std::fill_n(state_tensor.data(), element_count, new_state_val); + state.set_state(state_tensor); + } + for (auto&& state : infer_req2.query_state()) { + state.reset(); + } + + auto states = infer_req.query_state(); + auto states2 = infer_req2.query_state(); + for (int i = 0; i < states.size(); ++i) { + auto last_state = states[i].get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (int j = 0; j < last_state_size; ++j) 
{ + EXPECT_NEAR(13.0f, last_state_data[j], 1e-5); + } + } + for (int i = 0; i < states2.size(); ++i) { + auto last_state = states2[i].get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (int j = 0; j < last_state_size; ++j) { + EXPECT_NEAR(0, last_state_data[j], 1e-5); + } + } +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_2infers) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + auto infer_req2 = executable_net.create_infer_request(); + const float new_state_val = 13.0f; + + // set the input data for the network + auto input = executable_net.input(); + auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + infer_req.set_tensor(input, tensor); + // } + + // initial state for 2nd infer request + for (auto&& state : infer_req2.query_state()) { + auto state_val = state.get_state(); + auto element_count = state_val.get_size(); + + auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count})); + std::fill_n(state_tensor.data(), element_count, new_state_val); + state.set_state(state_tensor); + } + + // reset state for 1st infer request + for (auto&& state : infer_req.query_state()) { + state.reset(); + } + + infer_req.infer(); + auto states = infer_req.query_state(); + auto states2 = infer_req2.query_state(); + // check the output and state of 1st request + auto output_tensor = infer_req.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + auto data = static_cast(output_data); + for (int i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, data[i], 1e-5); + } + for (int i = 0; i < states.size(); ++i) { + auto last_state = states[i].get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (int j = 0; j < last_state_size; ++j) { + EXPECT_NEAR(0.0, last_state_data[j], 1e-5); + } + } + + // // check the output and state of 2nd request + for (int i = 0; i < states2.size(); ++i) { + auto last_state = states2[i].get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (int j = 0; j < last_state_size; ++j) { + EXPECT_NEAR(new_state_val, last_state_data[j], 1e-5); + } + } +} + +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file From 15a098f31426510e8553858422881a60d4c5ae05 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Fri, 19 Jan 2024 15:44:30 +0400 Subject: [PATCH 093/122] [GPU] Make update_shape_info_tensor a virtual method (#22255) --- .../src/graph/include/primitive_inst.h | 3 + .../intel_gpu/src/graph/primitive_inst.cpp | 118 +++++++----------- 2 files changed, 51 insertions(+), 70 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index cd56778fa7bbda..e2973d892e9fdd 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ -384,6 +384,9 @@ class primitive_inst { virtual void update_shape(); virtual event::ptr update_weights(); + virtual void update_shape_info_tensor(const 
kernel_impl_params& params); + + void fill_shape_info_data(const layout& runtime_layout, const layout& node_layout, int32_t* shape_info_ptr, size_t& offset); bool use_async_compilation(); // if primitive_inst doesn't replace impl to new impl(static impl with opt kerenl or dynamic impl), return false bool update_impl(); diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 5a9110edc19412..e52c1505bb2cae 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -689,80 +689,58 @@ bool primitive_inst::use_async_compilation() { _node->get_selected_impl()->get_kernel_name().find("softmax_gpu_ref") != std::string::npos)); } +void primitive_inst::fill_shape_info_data(const layout& runtime_layout, const layout& node_layout, int32_t* shape_info_ptr, size_t& offset) { + if (node_layout.is_static()) { + GPU_DEBUG_TRACE_DETAIL << "tensor is static. Skipping" << std::endl; + return; + } + auto pshape = runtime_layout.get_partial_shape(); + auto shape_with_max_rank = layout::transform(pshape, + format::get_default_format(pshape.size()), + format::get_default_format(layout::max_rank())).to_shape(); + for (size_t j = 0; j < shape_with_max_rank.size(); ++j) { + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << shape_with_max_rank[j] << std::endl; + shape_info_ptr[offset++] = static_cast(shape_with_max_rank[j]); + } + auto dynamic_pad = node_layout.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); + auto data_padding = runtime_layout.data_padding; + for (size_t j = 0; j < shape_with_max_rank.size(); ++j) { + if (dynamic_pad[j] == 1) { + auto lower_pads = data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] + << "(pad_before for " << j << "-th dim)" << std::endl; + shape_info_ptr[offset++] = lower_pads[j]; // pad_before + auto upper_pads = data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] + << "(pad_after for " << j << "-th dim)" << std::endl; + shape_info_ptr[offset++] = upper_pads[j]; // pad_after + } + } +} + +void primitive_inst::update_shape_info_tensor(const kernel_impl_params& params) { + mem_lock lock(_shape_info_memory, _network.get_stream()); + auto shape_info_ptr = lock.data(); + size_t offset = 0; + for (size_t i = 0; i < _node->get_dependencies().size(); i++) { + GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for input[" << i << "]" << std::endl; + const auto& node_in_lay = _node->get_dependency(i).get_output_layout(); + const auto& runtime_in_lay = params.input_layouts[i]; + fill_shape_info_data(runtime_in_lay, node_in_lay, shape_info_ptr, offset); + } + for (size_t i = 0; i < _node->get_output_layouts().size(); i++) { + GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for output[" << i << "]" << std::endl; + const auto& node_out_lay = _node->get_output_layout(i); + const auto& runtime_out_lay = params.output_layouts[i]; + fill_shape_info_data(runtime_out_lay, node_out_lay, shape_info_ptr, offset); + } +} + bool primitive_inst::update_impl() { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("update_impl: " + id())); GPU_DEBUG_PROFILED_STAGE(instrumentation::pipeline_stage::update_implementation); auto prev_impl_str = _impl != nullptr ? 
_impl->get_kernel_name() : "nullptr"; - auto update_shape_info = [this, prev_impl_str](const kernel_impl_params& params) { - mem_lock lock(_shape_info_memory, _network.get_stream()); - size_t offset = 0; - for (size_t i = 0; i < _node->get_dependencies().size(); i++) { - auto node_in_lay = _node->get_dependency(i).get_output_layout(); - if (node_in_lay.is_dynamic()) { - auto pshape = params.get_input_layout(i).get_partial_shape(); - GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for input[" << i << "]" << std::endl; - auto input_shape_max_rank = layout::transform(pshape, - format::get_default_format(pshape.size()), - format::get_default_format(layout::max_rank())).to_shape(); - for (size_t j = 0; j < input_shape_max_rank.size(); ++j) { - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << input_shape_max_rank[j] << std::endl; - lock[offset++] = static_cast(input_shape_max_rank[j]); - } - auto is_dynamic_pad = node_in_lay.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); - auto data_padding = params.input_layouts[i].data_padding; - for (size_t j = 0; j < input_shape_max_rank.size(); ++j) { - if (is_dynamic_pad[j] == 1) { - auto lower_pads = - data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] - << "(pad_before for input[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = lower_pads[j]; // pad_before - auto upper_pads = - data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] - << "(pad_after for input[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = upper_pads[j]; // pad_after - } - } - } - } - for (size_t i = 0; i < _node->get_output_layouts().size(); i++) { - auto node_out_lay = _node->get_output_layout(i); - if (node_out_lay.is_dynamic()) { - GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for output[" << i << "]" << std::endl; - auto pshape = params.get_output_layout(i).get_partial_shape(); - auto output_shape_max_rank = layout::transform(pshape, - format::get_default_format(pshape.size()), - format::get_default_format(layout::max_rank())) - .to_shape(); - for (size_t j = 0; j < output_shape_max_rank.size(); j++) { - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << output_shape_max_rank[j] << std::endl; - lock[offset++] = static_cast(output_shape_max_rank[j]); - } - auto is_dynamic_pad = node_out_lay.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); - auto data_padding = params.output_layouts[i].data_padding; - for (size_t j = 0; j < output_shape_max_rank.size(); j++) { - if (is_dynamic_pad[j] == 1) { - auto lower_pads = data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] - << "(pad_before for output[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = lower_pads[j]; - auto upper_pads = data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] - << "(pad_after for output[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = upper_pads[j]; // pad_after - } - } - } - } - std::stringstream s; - s << "shapes: "; - for (size_t i = 0; i < offset; i++) - s << lock[i] << " "; - 
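// Illustrative note (assumed max_rank of 6 and made-up sizes): the old lambda
// here and the new fill_shape_info_data() above serialize the same buffer
// layout for each dynamic tensor -- the runtime shape promoted to
// layout::max_rank() (missing dims filled with 1), followed by a lower/upper
// pad pair for every dim whose dynamic-pad mask is set. A {1, 8, 32} input
// with a dynamic upper pad of 16 on one dim could look like:
//   shape_info = { 1, 8, 32, 1, 1, 1,   // dims promoted to max_rank
//                  0, 16 };             // pad_before, pad_after for that dim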
GPU_DEBUG_TRACE_DETAIL << id() << ": update dynamic impl " << prev_impl_str << " to new shape: " << s.str() << std::endl; - }; - if (_impl != nullptr && (_impl->is_cpu() || can_be_optimized())) { // Return false if shape not changed, otherwise return true to trigger realloc_if_needed, but do not change impl itself return shape_changed(); @@ -836,7 +814,7 @@ bool primitive_inst::update_impl() { _impl = std::move(_dynamic_impl); auto new_impl_params = _impl->canonicalize_shapes(*_impl_params); _impl->update_dispatch_data(new_impl_params); - update_shape_info(new_impl_params); + update_shape_info_tensor(new_impl_params); } } else { _impl = _node->type()->choose_impl(*_node, updated_params_no_dyn_pad); From d0619edd211d9447f1474817d150fdf1f4930db3 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 19 Jan 2024 16:48:27 +0400 Subject: [PATCH 094/122] Removed IE and ngraph headers / cmake interfaces from package (#22203) * Revert "Revert IE cmake config back" * Update src/cmake/openvino.cmake * Fixed for NVIDIA plugin * Fixed tests compilation * Use ilya's contrib repo * Fixes for ONNX Runtime tests * Use custom nVIDIA repo * Disable ONNX Runtime --- .github/workflows/job_onnx_runtime.yml | 12 +-- .github/workflows/linux.yml | 10 +- .github/workflows/linux_arm64.yml | 10 +- .../InferenceEngineConfig-version.cmake.in | 29 ----- .../templates/InferenceEngineConfig.cmake.in | 87 --------------- cmake/templates/ngraphConfig.cmake.in | 102 ------------------ install_build_dependencies.sh | 2 +- src/cmake/openvino.cmake | 13 --- src/core/CMakeLists.txt | 14 --- src/inference/dev_api/blob_factory.hpp | 13 +++ .../dev_api/openvino/runtime/make_tensor.hpp | 17 ++- src/inference/src/dev/core_impl_ie.cpp | 1 + src/inference/src/dev/make_tensor.cpp | 2 +- src/inference/tests/unit/ie_blob_test.cpp | 1 + .../ov_executable_network/properties.cpp | 1 + .../behavior/ov_plugin/properties_tests.cpp | 1 + .../ov_executable_network/exec_net_base.cpp | 1 + .../behavior/ov_infer_request/callback.cpp | 1 + .../ov_infer_request/cancellation.cpp | 1 + .../behavior/ov_infer_request/io_tensor.cpp | 1 + .../ov_infer_request/multithreading.cpp | 1 + .../ov_infer_request/perf_counters.cpp | 1 + .../behavior/ov_infer_request/wait.cpp | 1 + .../ov_executable_network/properties.cpp | 1 + .../behavior/ov_plugin/properties_tests.cpp | 1 + .../include/base/behavior_test_utils.hpp | 1 + .../compiled_model/properties_hetero.cpp | 2 +- .../ov_executable_network/get_metric.cpp | 4 +- .../src/behavior/ov_infer_request/wait.cpp | 1 + .../behavior/ov_plugin/properties_tests.cpp | 1 + .../base/layer_test_utils.hpp | 3 +- .../functional_test_utils/plugin_cache.hpp | 7 +- .../src/plugin_cache.cpp | 3 +- 33 files changed, 65 insertions(+), 281 deletions(-) delete mode 100644 cmake/templates/InferenceEngineConfig-version.cmake.in delete mode 100644 cmake/templates/InferenceEngineConfig.cmake.in delete mode 100644 cmake/templates/ngraphConfig.cmake.in diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml index 1bf4d4aa96eea3..3dce4a9d66fa35 100644 --- a/.github/workflows/job_onnx_runtime.yml +++ b/.github/workflows/job_onnx_runtime.yml @@ -80,7 +80,10 @@ jobs: popd - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + run: | + ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + # since we are on Ubuntu 22.04, but compiled OpenVINO on Ubuntu 20.04, we need to install `libtbb2` + apt-get 
install --assume-yes --no-install-recommends libtbb2 - name: Clone ONNX Runtime run: | @@ -139,13 +142,6 @@ jobs: ./onnxruntime_global_thread_pools_test working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo -# Test removed in onnxruntime 1.16.2 -# - name: Run onnxruntime_api_tests_without_env -# run: | -# source ${INSTALL_DIR}/setupvars.sh -# ./onnxruntime_api_tests_without_env -# working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - name: Run pytorch-converted tests run: | source ${INSTALL_DIR}/setupvars.sh diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4e37bfe6b6c0da..d619cedc3cb109 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -407,14 +407,16 @@ jobs: ONNX_Runtime: name: ONNX Runtime Integration - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || - fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE + # Enable back once https://github.com/microsoft/onnxruntime/pull/19184 is merged + if: ${{ 'false' }} + # if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || + # fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE needs: [ Build, Smart_CI ] uses: ./.github/workflows/job_onnx_runtime.yml with: runner: 'aks-linux-16-cores-32gb' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' - sccache-azure-key-prefix: 'ubuntu20_x86_64_onnxruntime' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' + sccache-azure-key-prefix: 'ubuntu22_x86_64_onnxruntime' ONNX_Models: name: ONNX Models Tests diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 9894bccaa48615..d2b79bcc82d19c 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -308,14 +308,16 @@ jobs: ONNX_Runtime: name: ONNX Runtime Integration - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || - fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE + # Enable back once https://github.com/microsoft/onnxruntime/pull/19184 is merged + if: ${{ 'false' }} + # if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || + # fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE needs: [ Build, Smart_CI ] uses: ./.github/workflows/job_onnx_runtime.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' - sccache-azure-key-prefix: 'ubuntu20_aarch64_onnxruntime' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' + sccache-azure-key-prefix: 'ubuntu22_aarch64_onnxruntime' CXX_Unit_Tests: name: C++ unit tests diff --git a/cmake/templates/InferenceEngineConfig-version.cmake.in b/cmake/templates/InferenceEngineConfig-version.cmake.in deleted file mode 100644 index 2da3f42e1c6a54..00000000000000 --- a/cmake/templates/InferenceEngineConfig-version.cmake.in +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - 
-set(PACKAGE_VERSION_MAJOR @OpenVINO_VERSION_MAJOR@) -set(PACKAGE_VERSION_MINOR @OpenVINO_VERSION_MINOR@) -set(PACKAGE_VERSION_PATCH @OpenVINO_VERSION_PATCH@) -set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}") - -set(PACKAGE_VERSION_EXACT False) -set(PACKAGE_VERSION_COMPATIBLE False) - -# Compatibility with old versioning for 2.x -if(PACKAGE_FIND_VERSION_MAJOR VERSION_EQUAL 2) - set(PACKAGE_VERSION_COMPATIBLE True) - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) - message(WARNING "Inference Engine versioning has changed. Use ${PACKAGE_VERSION} instead of ${PACKAGE_FIND_VERSION}") - endif() -endif() - -if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) - set(PACKAGE_VERSION_EXACT True) - set(PACKAGE_VERSION_COMPATIBLE True) -endif() - -if(PACKAGE_FIND_VERSION_MAJOR EQUAL PACKAGE_VERSION_MAJOR AND - PACKAGE_FIND_VERSION VERSION_LESS PACKAGE_VERSION) - set(PACKAGE_VERSION_COMPATIBLE True) -endif() diff --git a/cmake/templates/InferenceEngineConfig.cmake.in b/cmake/templates/InferenceEngineConfig.cmake.in deleted file mode 100644 index f94124a5e88708..00000000000000 --- a/cmake/templates/InferenceEngineConfig.cmake.in +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -# -# Inference Engine cmake config -# ------ -# -# This config defines the following variables: -# -# InferenceEngine_FOUND - True if the system has the Inference Engine library -# InferenceEngine_INCLUDE_DIRS - Inference Engine include directories -# InferenceEngine_LIBRARIES - Inference Engine libraries -# -# and the following imported targets: -# -# IE::inference_engine - The Inference Engine library -# IE::inference_engine_c_api - The Inference Engine C API library -# -# Inference Engine version variables: -# -# InferenceEngine_VERSION_MAJOR - major version component -# InferenceEngine_VERSION_MINOR - minor version component -# InferenceEngine_VERSION_PATCH - patch version component -# - -@PACKAGE_INIT@ - -message(WARNING "find_package(InferenceEngine) is deprecated and will be removed in 2024.0 release. 
Please, use find_package(OpenVINO)") - -if(NOT DEFINED CMAKE_FIND_PACKAGE_NAME) - set(CMAKE_FIND_PACKAGE_NAME InferenceEngine) - set(_ie_need_package_name_reset ON) -endif() - -# need to store current PACKAGE_PREFIX_DIR, because it's overwritten by sub-package one -set(_ie_package_prefix_dir "${PACKAGE_PREFIX_DIR}") - -include(CMakeFindDependencyMacro) - -find_dependency(OpenVINO - PATHS "${CMAKE_CURRENT_LIST_DIR}" - "${CMAKE_CURRENT_LIST_DIR}/../openvino${InferenceEngine_VERSION}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -# create targets with old names for compatibility -if(TARGET openvino::runtime AND NOT TARGET IE::inference_engine) - add_library(IE::inference_engine INTERFACE IMPORTED) - set_target_properties(IE::inference_engine PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime) -endif() - -if(TARGET openvino::runtime::c AND NOT TARGET IE::inference_engine_c_api) - add_library(IE::inference_engine_c_api INTERFACE IMPORTED) - set_target_properties(IE::inference_engine_c_api PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime::c) -endif() - -# mark components as available -foreach(comp inference_engine inference_engine_c_api) - set(${CMAKE_FIND_PACKAGE_NAME}_${comp}_FOUND ON) -endforeach() - -if(NOT ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) - set(${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS inference_engine inference_engine_c_api) -endif() - -unset(InferenceEngine_LIBRARIES) -foreach(comp IN LISTS ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) - # check if the component is available - if(${CMAKE_FIND_PACKAGE_NAME}_${comp}_FOUND) - set(pcomp IE::${comp}) - - list(APPEND InferenceEngine_LIBRARIES ${pcomp}) - endif() -endforeach() - -# restore PACKAGE_PREFIX_DIR -set(PACKAGE_PREFIX_DIR ${_ie_package_prefix_dir}) -unset(_ie_package_prefix_dir) - -check_required_components(${CMAKE_FIND_PACKAGE_NAME}) - -if(_ie_need_package_name_reset) - unset(CMAKE_FIND_PACKAGE_NAME) - unset(_ie_need_package_name_reset) -endif() diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in deleted file mode 100644 index a0111c2302195f..00000000000000 --- a/cmake/templates/ngraphConfig.cmake.in +++ /dev/null @@ -1,102 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ****************************************************************************** -# -# -# ngraph config file -# ------ -# -# This script defines the following variables and imported targets: -# -# ngraph::ngraph - nGraph core target -# ngraph_FOUND - True if the system has the nGraph library -# NGRAPH_LIBRARIES - nGraph libraries -# -# Frontends: -# -# ngraph_onnx_frontend_FOUND - True if the system has ngraph::onnx_frontend library -# ngraph::onnx_frontend - ONNX FrontEnd target (optional) -# -# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend -# ngraph::paddle_frontend - nGraph Paddle frontend (optional) -# -# ngraph_ir_frontend_FOUND - True if the system has OpenVINO IR frontend -# -# ngraph_tensorflow_frontend_FOUND - True if the system has TensorFlow frontend -# ngraph::tensorflow_frontend - nGraph TensorFlow frontend (optional) -# - -@PACKAGE_INIT@ - -include(CMakeFindDependencyMacro) - -message(WARNING "find_package(ngraph) is deprecated and will be removed in 2024.0 release. Please, use find_package(OpenVINO)") - -find_dependency(OpenVINO - PATHS "${CMAKE_CURRENT_LIST_DIR}" - "${CMAKE_CURRENT_LIST_DIR}/../openvino${ngraph_VERSION}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -# create targets with old names for compatibility -if(TARGET openvino::runtime AND NOT TARGET ngraph::ngraph) - add_library(ngraph::ngraph INTERFACE IMPORTED) - set_target_properties(ngraph::ngraph PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime) -endif() - -if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::onnx_frontend) - add_library(ngraph::onnx_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::onnx_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::onnx) -endif() - -if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::paddle_frontend) - add_library(ngraph::paddle_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::paddle_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::paddle) -endif() - -if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::tensorflow_frontend) - add_library(ngraph::tensorflow_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::tensorflow_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::tensorflow) -endif() - -set(ngraph_ngraph_FOUND ON) -set(NGRAPH_LIBRARIES ngraph::ngraph) - -set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) -set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND}) -set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND}) -set(ngraph_onnx_importer_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) - -if(ngraph_onnx_importer_FOUND) - set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_frontend) - # ngraph::onnx_importer target and variables are deprecated - # but need to create a dummy target for BW compatibility - if(NOT TARGET ngraph::onnx_importer) - add_library(ngraph::onnx_importer INTERFACE IMPORTED) - set_target_properties(ngraph::onnx_importer PROPERTIES - INTERFACE_LINK_LIBRARIES ngraph::onnx_frontend) - endif() -endif() - -set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND}) -set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND}) -set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) -set(ngraph_ir_frontend_FOUND ${OpenVINO_Frontend_IR_FOUND}) - -check_required_components(ngraph) diff --git a/install_build_dependencies.sh b/install_build_dependencies.sh index 2875785f6d0a04..de2a53a25c9b77 100755 --- a/install_build_dependencies.sh +++ 
b/install_build_dependencies.sh @@ -218,7 +218,7 @@ elif command -v cmake3 &> /dev/null; then fi current_cmake_ver=$($cmake_command --version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\{0,4\}[0-9][^.]\).*/\1/p') -required_cmake_ver=3.20.0 +required_cmake_ver=3.24.0 if [ ! "$(printf '%s\n' "$required_cmake_ver" "$current_cmake_ver" | sort -V | head -n1)" = "$required_cmake_ver" ]; then installed_cmake_ver=3.26.0 arch=$(uname -m) diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index 04867095248a24..58b074fd6160b7 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -184,27 +184,14 @@ string(REPLACE "$" "" OPENVINO_LIB_DIR "${OV_CPACK_LIBRARYDIR}") set(OV_TBB_DIR "${OV_TBB_DIR_INSTALL}") set(OV_TBBBIND_DIR "${OV_TBBBIND_DIR_INSTALL}") -configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig.cmake.in" - "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" - INSTALL_DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - PATH_VARS ${PATH_VARS} ${INSTALL_PATH_VARS}) - configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig.cmake.in" "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" INSTALL_DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} PATH_VARS ${PATH_VARS} ${INSTALL_PATH_VARS}) -configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" @ONLY) configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" @ONLY) -install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" - "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" - DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV} - ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) - install(FILES "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 15767de248131d..351978fec651f5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -157,17 +157,3 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ FILES_MATCHING PATTERN "*.hpp" PATTERN "*.h") - -configure_package_config_file(${OpenVINO_SOURCE_DIR}/cmake/templates/ngraphConfig.cmake.in - ${CMAKE_BINARY_DIR}/ngraphConfig.cmake - INSTALL_DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR}) - -write_basic_package_version_file(${CMAKE_BINARY_DIR}/ngraphConfigVersion.cmake - VERSION ${OpenVINO_VERSION_MAJOR}.${OpenVINO_VERSION_MINOR}.${OpenVINO_VERSION_PATCH} - COMPATIBILITY SameMajorVersion) - -install(FILES ${CMAKE_BINARY_DIR}/ngraphConfig.cmake - ${CMAKE_BINARY_DIR}/ngraphConfigVersion.cmake - DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV} - ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) diff --git a/src/inference/dev_api/blob_factory.hpp b/src/inference/dev_api/blob_factory.hpp index 9258e781fa690e..f37f813604c62f 100644 --- a/src/inference/dev_api/blob_factory.hpp +++ b/src/inference/dev_api/blob_factory.hpp @@ -16,6 +16,8 @@ #include "ie_blob.h" #include "ie_data.h" #include "ie_memcpy.h" +#include "openvino/runtime/itensor.hpp" +#include "openvino/runtime/so_ptr.hpp" IE_SUPPRESS_DEPRECATED_START /** @@ -137,4 +139,15 @@ void CopyVectorToBlob(const InferenceEngine::Blob::Ptr outputBlob, const std::ve IE_THROW() << "Element size mismatch between blob and vector"; ie_memcpy(outputBlob->buffer().as(), outputBlob->byteSize(), 
&inputVector[0], inputVector.size() * sizeof(T)); } + +namespace ov { + +ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); + +OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, + bool unwrap = true, + InferenceEngine::TensorDesc desc = {}); + +} // namespace ov + IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/dev_api/openvino/runtime/make_tensor.hpp b/src/inference/dev_api/openvino/runtime/make_tensor.hpp index fb80e7ab5d84eb..7433b22a7fc38c 100644 --- a/src/inference/dev_api/openvino/runtime/make_tensor.hpp +++ b/src/inference/dev_api/openvino/runtime/make_tensor.hpp @@ -4,11 +4,16 @@ #pragma once -#include "ie_blob.h" #include "openvino/runtime/common.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/so_ptr.hpp" +namespace InferenceEngine { + +class Blob; + +} // namespace InferenceEngine + namespace ov { /** @@ -65,14 +70,4 @@ OPENVINO_RUNTIME_API ov::Tensor make_tensor(const ov::SoPtr& tensor); */ OPENVINO_RUNTIME_API ov::SoPtr get_tensor_impl(const ov::Tensor& tensor); -IE_SUPPRESS_DEPRECATED_START -/** @cond INTERNAL */ -ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); - -OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, - bool unwrap = true, - InferenceEngine::TensorDesc desc = {}); -/** @endcond */ - -IE_SUPPRESS_DEPRECATED_END } // namespace ov diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index 456ea871b6c2e0..c6dae8eab29e52 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -5,6 +5,7 @@ #include #include "any_copy.hpp" +#include "blob_factory.hpp" #include "compilation_context.hpp" #include "core_impl.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 8e4eaa8f01a9a3..021ff0c15e312d 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -6,7 +6,7 @@ #include -#include "ie_blob.h" +#include "blob_factory.hpp" #include "ie_ngraph_utils.hpp" #include "openvino/runtime/iremote_tensor.hpp" #include "openvino/runtime/properties.hpp" diff --git a/src/inference/tests/unit/ie_blob_test.cpp b/src/inference/tests/unit/ie_blob_test.cpp index 368ea50a9bf2d9..ae7dd7e7a8d5c4 100644 --- a/src/inference/tests/unit/ie_blob_test.cpp +++ b/src/inference/tests/unit/ie_blob_test.cpp @@ -6,6 +6,7 @@ #include #include +#include "blob_factory.hpp" #include "openvino/runtime/make_tensor.hpp" #include "unit_test_utils/mocks/mock_allocator.hpp" diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 18f0a61a64a003..982e8cac50981b 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -4,6 +4,7 @@ #include "behavior/compiled_model/properties.hpp" +#include "ie_plugin_config.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/runtime/system_conf.hpp" diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 
1a6f122e0b590b..50dcc1b0d4fc7f 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -4,6 +4,7 @@ #include "behavior/ov_plugin/properties_tests.hpp" +#include "ie_plugin_config.hpp" #include "openvino/runtime/auto/properties.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index f9cbbc18e0662c..8c39fd63103c24 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -3,6 +3,7 @@ // #include "behavior/compiled_model/compiled_model_base.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index b0cbb1e06788da..7e99f01c921077 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_infer_request/callback.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index b8a6a9cd0d10b8..29b1e8f52a17bd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -3,6 +3,7 @@ // #include "behavior/ov_infer_request/cancellation.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 1116822d236da2..295da790320971 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_infer_request/io_tensor.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index fb30761e1e60d2..d400ed1108b23b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -5,6 
+5,7 @@ #include #include "behavior/ov_infer_request/multithreading.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index b10d622fb56138..70cd59abbae41a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -3,6 +3,7 @@ // #include "behavior/ov_infer_request/perf_counters.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 8fcccf2944694a..fff7b3331e5c32 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -5,6 +5,7 @@ #include #include "behavior/ov_infer_request/wait.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 5622be5f16cce5..d279e2154c2bce 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -4,6 +4,7 @@ #include "behavior/compiled_model/properties.hpp" +#include "ie_plugin_config.hpp" #include "openvino/runtime/properties.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index bfc64eb0454962..f6d55269b6cbf2 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -4,6 +4,7 @@ #include "behavior/ov_plugin/properties_tests.hpp" +#include "ie_plugin_config.hpp" #include "openvino/runtime/properties.hpp" using namespace ov::test::behavior; diff --git a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp index 95311b492f3f59..a8646cd3881a66 100644 --- a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp @@ -6,6 +6,7 @@ #include "ov_behavior_test_utils.hpp" +#include "ie_core.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp index 5e2c3bbfd9ea80..9436b8426cda00 100644 --- 
a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp @@ -49,7 +49,7 @@ TEST_P(OVClassHeteroCompiledModelGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricN ov::Any heteroConfigValue = heteroExeNetwork.get_property(deviceConf); ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf && + if (ov::internal::exclusive_async_requests.name() != deviceConf && ov::supported_properties.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp index c05b5540243984..2e098ed80884af 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp @@ -261,7 +261,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet ov::Any heteroConfigValue = heteroExeNetwork.get_property(deviceConf); ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf && + if (ov::internal::exclusive_async_requests.name() != deviceConf && ov::supported_properties.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); @@ -298,7 +298,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); // HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) { + if (ov::internal::exclusive_async_requests.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); strm << " "; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp index 8ff25a6b3771e8..5b1231df8e382d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp @@ -4,6 +4,7 @@ #include "behavior/ov_infer_request/wait.hpp" #include "openvino/runtime/exception.hpp" +#include "ie_plugin_config.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index 4246bf3496a75f..6db5526b33e941 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_plugin/properties_tests.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" #include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 8558504cbf6a14..eb29fa1094c984 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -11,8 +11,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp index bdecdd2079a5a9..20ebb92ed0d10e 100644 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp +++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp @@ -4,11 +4,16 @@ #pragma once -#include #include #include #include +namespace InferenceEngine { + +class Core; + +} // namespace InferenceEngine + class PluginCache { public: std::shared_ptr ie(const std::string& deviceToCheck = std::string()); diff --git a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp index 3db56da99c3a0e..9c7e34af62ddc2 100644 --- a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp +++ b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp @@ -7,12 +7,13 @@ #include #include -#include #include #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_constants.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" +#include "ie_core.hpp" +#include "ie_plugin_config.hpp" #include "openvino/util/file_util.hpp" namespace { From 8302573686cb0025531e421b7866b6a252d57b94 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Fri, 19 Jan 2024 14:08:08 +0100 Subject: [PATCH 095/122] add save model info (#22267) --- .../quantization_w_accuracy_control.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.rst b/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.rst index cf05760069b7e6..eddde03eb6bb4d 100644 --- a/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.rst +++ b/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.rst @@ -18,6 +18,12 @@ This is the advanced quantization flow that allows to apply 8-bit quantization t The steps for the quantization with accuracy control are described below. +Prepare model +############################################ + +When working with an original model in FP32 precision, it is recommended to use the model as-is, without compressing weights, as the input for the quantization method with accuracy control. This ensures optimal performance relative to a given accuracy drop. Utilizing compression techniques, such as compressing the original model weights to FP16, may significantly increase the number of reverted layers and lead to reduced performance for the quantized model. +If the original model is converted to OpenVINO and saved through ``openvino.save_model()`` before using it in the quantization method with accuracy control, disable the compression of weights to FP16 by setting ``compress_to_fp16=False``. This is necessary because, by default, ``openvino.save_model()`` saves models in FP16. 
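+
+A minimal sketch of this recommendation (the model path is a placeholder for
+your own pipeline):
+
+.. code-block:: python
+
+   import openvino as ov
+
+   ov_model = ov.convert_model("model.onnx")  # original FP32 weights kept as-is
+
+   # openvino.save_model() compresses weights to FP16 by default; keep FP32
+   # when saving a model that will be passed to quantization with accuracy control
+   ov.save_model(ov_model, "model_fp32.xml", compress_to_fp16=False)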
+ Prepare calibration and validation datasets ############################################ @@ -75,7 +81,7 @@ After that the model can be compiled and run with OpenVINO: :language: python :fragment: [inference] -To save the model in the OpenVINO Intermediate Representation (IR), use ``ov.save_model()``. When dealing with an original model in FP32 precision, it's advisable to preserve FP32 precision in the most impactful model operations that were reverted from INT8 to FP32. To do this, consider using compress_to_fp16=False during the saving process. This recommendation is based on the default functionality of ``ov.save_model()``, which saves models in FP16, potentially impacting accuracy through this conversion. +To save the model in the OpenVINO Intermediate Representation (IR), use ``openvino.save_model()``. When dealing with an original model in FP32 precision, it's advisable to preserve FP32 precision in the most impactful model operations that were reverted from INT8 to FP32. To do this, consider using compress_to_fp16=False during the saving process. This recommendation is based on the default functionality of ``openvino.save_model()``, which saves models in FP16, potentially impacting accuracy through this conversion. .. tab-set:: @@ -99,6 +105,6 @@ Examples of NNCF post-training quantization with control of accuracy metric: See also #################### -* :doc:`Optimizing Models at Training Time ` +* :doc:`Optimizing Models at Training Time ` From b263a80bd5c2c4dd8f0081893d1cc7b0ae63efd8 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Fri, 19 Jan 2024 17:29:06 +0400 Subject: [PATCH 096/122] [TRANSFORMATIONS -> CONSTANT FOLDING] Add evaluate for SqDiff due to using by constant folding (#22236) * [TRANSFORMATIONS -> CONSTANT FOLDING] Add evaluate for SqDiff due to using by constant folding * Exclude extra prc * code style fix --- .../openvino/op/squared_difference.hpp | 2 + src/core/src/op/squared_difference.cpp | 53 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/src/core/include/openvino/op/squared_difference.hpp b/src/core/include/openvino/op/squared_difference.hpp index 3c52268759d02d..845ef75b28b1cb 100644 --- a/src/core/include/openvino/op/squared_difference.hpp +++ b/src/core/include/openvino/op/squared_difference.hpp @@ -29,6 +29,8 @@ class OPENVINO_API SquaredDifference : public util::BinaryElementwiseArithmetic const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 } // namespace op diff --git a/src/core/src/op/squared_difference.cpp b/src/core/src/op/squared_difference.cpp index 700e0a4a809d12..e91c46aace291f 100644 --- a/src/core/src/op/squared_difference.cpp +++ b/src/core/src/op/squared_difference.cpp @@ -5,6 +5,31 @@ #include "openvino/op/squared_difference.hpp" #include "itt.hpp" +#include "openvino/reference/squared_difference.hpp" +#include "utils.hpp" + +namespace squared_difference { +struct Evaluate : ov::element::NoAction { + using ov::element::NoAction::visit; + + template + static result_type visit(const ov::Tensor& in0, + const ov::Tensor& in1, + ov::Tensor& out, + const ov::Shape& shape0, + const ov::Shape& shape1, + const ov::op::AutoBroadcastSpec& broadcast_spec) { + using T = typename ov::element_type_traits::value_type; + ov::reference::squared_difference(in0.data(), + in1.data(), 
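// the reference kernel computes out[i] = (in0[i] - in1[i])^2 element-wise,
// broadcasting shape0 against shape1 according to broadcast_spec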
+ out.data(), + shape0, + shape1, + broadcast_spec); + return true; + } +}; +} // namespace squared_difference // ------------------------------ v0 ------------------------------------------- @@ -20,3 +45,31 @@ std::shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(c check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } + +bool ov::op::v0::SquaredDifference::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v0_SquaredDifference_evaluate); + OPENVINO_ASSERT(outputs.size() == 1); + + outputs[0].set_shape(infer_broadcast_shape(this, inputs)); + using namespace ov::element; + return IF_TYPE_OF(v0_SquaredDifference_evaluate, + OV_PP_ET_LIST(f32), + squared_difference::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); +} + +bool ov::op::v0::SquaredDifference::has_evaluate() const { + OV_OP_SCOPE(v0_SquaredDifference_has_evaluate); + switch (get_input_element_type(0)) { + case element::f32: + return true; + default: + return false; + } +} From d7c50d09c650bd0948b30e77e68feba6f95bbf87 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 19 Jan 2024 06:01:19 -0800 Subject: [PATCH 097/122] [ONNX] Frontend refactoring: operations (#22262) * Refactoring operations E-H * Fixed code style --- src/frontends/onnx/frontend/src/op/einsum.cpp | 6 +- src/frontends/onnx/frontend/src/op/einsum.hpp | 1 - src/frontends/onnx/frontend/src/op/elu.cpp | 7 +- src/frontends/onnx/frontend/src/op/elu.hpp | 1 - src/frontends/onnx/frontend/src/op/equal.hpp | 7 +- src/frontends/onnx/frontend/src/op/erf.hpp | 7 +- src/frontends/onnx/frontend/src/op/exp.hpp | 7 +- src/frontends/onnx/frontend/src/op/expand.cpp | 19 +++--- src/frontends/onnx/frontend/src/op/expand.hpp | 1 - .../onnx/frontend/src/op/flatten.cpp | 8 +-- .../onnx/frontend/src/op/flatten.hpp | 1 - src/frontends/onnx/frontend/src/op/floor.hpp | 7 +- src/frontends/onnx/frontend/src/op/gather.hpp | 7 +- .../onnx/frontend/src/op/gather_elements.hpp | 5 +- .../onnx/frontend/src/op/gather_nd.cpp | 7 +- .../onnx/frontend/src/op/gather_nd.hpp | 1 - src/frontends/onnx/frontend/src/op/gemm.cpp | 43 ++++++------ src/frontends/onnx/frontend/src/op/gemm.hpp | 1 - .../frontend/src/op/global_average_pool.cpp | 27 ++++---- .../frontend/src/op/global_average_pool.hpp | 5 +- .../onnx/frontend/src/op/global_max_pool.cpp | 27 ++++---- .../onnx/frontend/src/op/global_max_pool.hpp | 5 +- .../onnx/frontend/src/op/greater.hpp | 7 +- .../onnx/frontend/src/op/greater_or_equal.cpp | 12 ++-- .../onnx/frontend/src/op/greater_or_equal.hpp | 1 - .../onnx/frontend/src/op/grid_sample.cpp | 14 ++-- .../onnx/frontend/src/op/grid_sample.hpp | 1 - .../frontend/src/op/group_normalization.cpp | 36 ++++++---- .../frontend/src/op/group_normalization.hpp | 1 - src/frontends/onnx/frontend/src/op/gru.cpp | 53 +++++++-------- src/frontends/onnx/frontend/src/op/gru.hpp | 1 - .../onnx/frontend/src/op/hammingwindow.cpp | 57 ++++++++-------- .../onnx/frontend/src/op/hammingwindow.hpp | 1 - .../onnx/frontend/src/op/hannwindow.cpp | 49 +++++++------- .../onnx/frontend/src/op/hannwindow.hpp | 1 - .../onnx/frontend/src/op/hard_sigmoid.cpp | 20 +++--- .../onnx/frontend/src/op/hard_sigmoid.hpp | 1 - .../onnx/frontend/src/op/hard_swish.hpp | 5 +- .../onnx/frontend/src/op/hardmax.cpp | 65 +++++++++---------- .../onnx/frontend/src/op/hardmax.hpp | 1 - 40 files changed, 253 insertions(+), 273 deletions(-) diff 
--git a/src/frontends/onnx/frontend/src/op/einsum.cpp b/src/frontends/onnx/frontend/src/op/einsum.cpp index c2732ae4210198..77375139cc5ac0 100644 --- a/src/frontends/onnx/frontend/src/op/einsum.cpp +++ b/src/frontends/onnx/frontend/src/op/einsum.cpp @@ -4,7 +4,9 @@ #include "op/einsum.hpp" -#include "default_opset.hpp" +#include "openvino/op/einsum.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,7 +16,7 @@ namespace set_1 { OutputVector einsum(const Node& node) { const std::string& equation{node.get_attribute_value("equation")}; - return OutputVector{std::make_shared(node.get_ng_inputs(), equation)}; + return OutputVector{std::make_shared(node.get_ng_inputs(), equation)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/einsum.hpp b/src/frontends/onnx/frontend/src/op/einsum.hpp index 7252006fdffba6..b037fd8652f99b 100644 --- a/src/frontends/onnx/frontend/src/op/einsum.hpp +++ b/src/frontends/onnx/frontend/src/op/einsum.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/elu.cpp b/src/frontends/onnx/frontend/src/op/elu.cpp index 4af7e99c1a549a..ca74d8fe92c044 100644 --- a/src/frontends/onnx/frontend/src/op/elu.cpp +++ b/src/frontends/onnx/frontend/src/op/elu.cpp @@ -4,10 +4,9 @@ #include "op/elu.hpp" -#include -#include +#include "openvino/op/elu.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -18,7 +17,7 @@ OutputVector elu(const Node& node) { auto data = node.get_ng_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1); - return OutputVector{std::make_shared(data, alpha)}; + return OutputVector{std::make_shared(data, alpha)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/elu.hpp b/src/frontends/onnx/frontend/src/op/elu.hpp index d0b6cc0daf107e..4304a197ec23c8 100644 --- a/src/frontends/onnx/frontend/src/op/elu.hpp +++ b/src/frontends/onnx/frontend/src/op/elu.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/equal.hpp b/src/frontends/onnx/frontend/src/op/equal.hpp index 108ab06527bbcf..cf8be7ceb632b9 100644 --- a/src/frontends/onnx/frontend/src/op/equal.hpp +++ b/src/frontends/onnx/frontend/src/op/equal.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/equal.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector equal(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/erf.hpp b/src/frontends/onnx/frontend/src/op/erf.hpp index 76933ef8cd370a..1dd303743352c2 100644 --- a/src/frontends/onnx/frontend/src/op/erf.hpp +++ b/src/frontends/onnx/frontend/src/op/erf.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include 
"onnx_import/core/node.hpp" +#include "openvino/op/erf.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector erf(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/exp.hpp b/src/frontends/onnx/frontend/src/op/exp.hpp index ecd5a7c6b46b53..33a7ba16c5da65 100644 --- a/src/frontends/onnx/frontend/src/op/exp.hpp +++ b/src/frontends/onnx/frontend/src/op/exp.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/exp.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector exp(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/expand.cpp b/src/frontends/onnx/frontend/src/op/expand.cpp index e6736a9b436298..cb5c1d462dede6 100644 --- a/src/frontends/onnx/frontend/src/op/expand.cpp +++ b/src/frontends/onnx/frontend/src/op/expand.cpp @@ -4,30 +4,29 @@ #include "op/expand.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector expand(const Node& node) { - const Output data{node.get_ng_inputs().at(0)}; - const Output shape{node.get_ng_inputs().at(1)}; + const Output data{node.get_ng_inputs().at(0)}; + const Output shape{node.get_ng_inputs().at(1)}; if (common::is_failsafe_node(shape.get_node_shared_ptr())) { // in case the "shape" input is connected to a failsafe node created in place of an invalid initializer // the target shape should be ignored and this Expand operation should not modify its input tensor // the Broadcast created below should be eliminated later on by an appropriate optimization pass - const auto identity_broadcast = default_opset::Constant::create(element::i64, Shape{1}, {1}); - return {std::make_shared(data, - identity_broadcast, - ngraph::op::BroadcastType::BIDIRECTIONAL)}; + const auto identity_broadcast = v0::Constant::create(element::i64, Shape{1}, {1}); + return {std::make_shared(data, identity_broadcast, ov::op::BroadcastType::BIDIRECTIONAL)}; } else { - return {std::make_shared(data, shape, ngraph::op::BroadcastType::BIDIRECTIONAL)}; + return {std::make_shared(data, shape, ov::op::BroadcastType::BIDIRECTIONAL)}; } } diff --git a/src/frontends/onnx/frontend/src/op/expand.hpp b/src/frontends/onnx/frontend/src/op/expand.hpp index d76745d0cd7fd5..0f9454b9bd1464 100644 --- a/src/frontends/onnx/frontend/src/op/expand.hpp +++ b/src/frontends/onnx/frontend/src/op/expand.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/flatten.cpp b/src/frontends/onnx/frontend/src/op/flatten.cpp index ebf4499a279d4a..cbdc74697540b1 100644 --- a/src/frontends/onnx/frontend/src/op/flatten.cpp +++ b/src/frontends/onnx/frontend/src/op/flatten.cpp @@ -4,12 +4,12 @@ #include "op/flatten.hpp" -#include - #include "exceptions.hpp" 
-#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" #include "ov_models/ov_builders/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -25,7 +25,7 @@ OutputVector flatten(const Node& node) { const std::int64_t data_rank_value = data_rank.get_length(); // Accepted range is [-r, r] where r = rank(input). OPENVINO_SUPPRESS_DEPRECATED_START - axis = ngraph::normalize_axis(node.get_description(), axis, data_rank_value, -data_rank_value, data_rank_value); + axis = ov::normalize_axis(node.get_description(), axis, data_rank_value, -data_rank_value, data_rank_value); OPENVINO_SUPPRESS_DEPRECATED_END } return {ov::op::util::flatten(data, static_cast(axis))}; diff --git a/src/frontends/onnx/frontend/src/op/flatten.hpp b/src/frontends/onnx/frontend/src/op/flatten.hpp index 8f0c7e8458c543..f810b438f12779 100644 --- a/src/frontends/onnx/frontend/src/op/flatten.hpp +++ b/src/frontends/onnx/frontend/src/op/flatten.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/floor.hpp b/src/frontends/onnx/frontend/src/op/floor.hpp index 2925a6deb293b3..b721a5c4a20efd 100644 --- a/src/frontends/onnx/frontend/src/op/floor.hpp +++ b/src/frontends/onnx/frontend/src/op/floor.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/floor.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector floor(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/gather.hpp b/src/frontends/onnx/frontend/src/op/gather.hpp index 15f826f5f809b6..f56adcb5531851 100644 --- a/src/frontends/onnx/frontend/src/op/gather.hpp +++ b/src/frontends/onnx/frontend/src/op/gather.hpp @@ -7,11 +7,8 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "ngraph/node.hpp" -#include "ngraph/validation_util.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" namespace ngraph { @@ -26,7 +23,7 @@ inline OutputVector gather(const Node& node) { return {std::make_shared(data, indices, - default_opset::Constant::create(element::i64, Shape{}, {axis}))}; + ov::op::v0::Constant::create(element::i64, Shape{}, {axis}))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/gather_elements.hpp b/src/frontends/onnx/frontend/src/op/gather_elements.hpp index a5052a52fc87df..3785be1a672a2a 100644 --- a/src/frontends/onnx/frontend/src/op/gather_elements.hpp +++ b/src/frontends/onnx/frontend/src/op/gather_elements.hpp @@ -7,8 +7,7 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/output_vector.hpp" +#include "openvino/op/gather_elements.hpp" namespace ngraph { namespace onnx_import { @@ -20,7 +19,7 @@ inline OutputVector gather_elements(const Node& node) { auto indices = ng_inputs.at(1); auto axis = node.get_attribute_value("axis", 0); - return {std::make_shared(data, indices, axis)}; + return {std::make_shared(data, indices, axis)}; } } // 
namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.cpp b/src/frontends/onnx/frontend/src/op/gather_nd.cpp index fe50c1689b30ac..3332bb923c2c02 100644 --- a/src/frontends/onnx/frontend/src/op/gather_nd.cpp +++ b/src/frontends/onnx/frontend/src/op/gather_nd.cpp @@ -7,8 +7,9 @@ #include "op/gather_nd.hpp" -#include "default_opset.hpp" -#include "utils/common.hpp" +#include "openvino/op/gather_nd.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -21,7 +22,7 @@ OutputVector gather_nd(const Node& node) { const auto indices = ng_inputs.at(1); const auto batch_dims = node.get_attribute_value("batch_dims", 0); - return {std::make_shared(data, indices, batch_dims)}; + return {std::make_shared(data, indices, batch_dims)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.hpp b/src/frontends/onnx/frontend/src/op/gather_nd.hpp index ad2092f21275b7..c16e40b89baa6b 100644 --- a/src/frontends/onnx/frontend/src/op/gather_nd.hpp +++ b/src/frontends/onnx/frontend/src/op/gather_nd.hpp @@ -10,7 +10,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp index c0c858b6db3799..a2988cc013aa2a 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.cpp +++ b/src/frontends/onnx/frontend/src/op/gemm.cpp @@ -4,11 +4,14 @@ #include "op/gemm.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/multiply.hpp" #include "ov_models/ov_builders/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -16,14 +19,14 @@ namespace op { namespace set_1 { OutputVector gemm(const Node& node) { OutputVector inputs{node.get_ng_inputs()}; - Output input_a = inputs.at(0); - Output input_b = inputs.at(1); - Output input_c; + Output input_a = inputs.at(0); + Output input_b = inputs.at(1); + Output input_c; if (inputs.size() == 3) { input_c = inputs.at(2); } else { - input_c = default_opset::Constant::create(input_b.get_element_type(), ngraph::Shape{}, {0}); + input_c = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {0}); } const auto alpha = node.get_attribute_value("alpha", 1); @@ -43,16 +46,16 @@ OutputVector gemm(const Node& node) { input_a = ov::op::util::flatten(input_a, 1); input_b = ov::op::util::flatten(input_b, 1); - std::shared_ptr matmul_node = std::make_shared(input_a, input_b); + std::shared_ptr matmul_node = std::make_shared(input_a, input_b); if (alpha != 1) { - const auto alpha_node = default_opset::Constant::create(input_b.get_element_type(), Shape{}, {alpha}); - matmul_node = std::make_shared(matmul_node, alpha_node); + const auto alpha_node = v0::Constant::create(input_b.get_element_type(), Shape{}, {alpha}); + matmul_node = std::make_shared(matmul_node, alpha_node); } - auto beta_times_input_c = std::make_shared(beta_node, input_c); + auto beta_times_input_c = std::make_shared(beta_node, input_c); - return OutputVector{std::make_shared(matmul_node, beta_times_input_c)}; + return OutputVector{std::make_shared(matmul_node, beta_times_input_c)}; } } // namespace set_1 @@ -60,14 +63,14 @@ OutputVector gemm(const Node& node) { namespace set_6 { OutputVector gemm(const Node& node) { OutputVector 
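// collect the ONNX Gemm inputs A, B and the optional C;
// the op computes Y = alpha * A' * B' + beta * C (A' and B' optionally transposed)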
inputs{node.get_ng_inputs()}; - Output input_a = inputs.at(0); - Output input_b = inputs.at(1); - Output input_c; + Output input_a = inputs.at(0); + Output input_b = inputs.at(1); + Output input_c; if (inputs.size() == 3) { input_c = inputs.at(2); } else { - input_c = default_opset::Constant::create(input_b.get_element_type(), ngraph::Shape{}, {0}); + input_c = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {0}); } const auto alpha_node = node.get_attribute_as_constant("alpha", 1, input_b.get_element_type()); @@ -76,13 +79,13 @@ OutputVector gemm(const Node& node) { const bool trans_a = node.get_attribute_value("transA", 0); const bool trans_b = node.get_attribute_value("transB", 0); - const auto matmul_node = std::make_shared(input_a, input_b, trans_a, trans_b); - const auto matmul_times_alpha = std::make_shared(matmul_node, alpha_node); + const auto matmul_node = std::make_shared(input_a, input_b, trans_a, trans_b); + const auto matmul_times_alpha = std::make_shared(matmul_node, alpha_node); - const auto beta_times_input_c = std::make_shared(beta_node, input_c); + const auto beta_times_input_c = std::make_shared(beta_node, input_c); const std::string onnx_name = !node.get_name().empty() ? node.get_name() : node.output(0); matmul_node->set_friendly_name(onnx_name + "/WithoutBiases"); - return {std::make_shared(matmul_times_alpha, beta_times_input_c)}; + return {std::make_shared(matmul_times_alpha, beta_times_input_c)}; } } // namespace set_6 diff --git a/src/frontends/onnx/frontend/src/op/gemm.hpp b/src/frontends/onnx/frontend/src/op/gemm.hpp index fd28462c9c1c5f..7b1e801404a88b 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.hpp +++ b/src/frontends/onnx/frontend/src/op/gemm.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp index 750c97c6c2d5f4..83ff50bb6c7279 100644 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp @@ -4,11 +4,13 @@ #include "op/global_average_pool.hpp" -#include -#include +#include "openvino/op/constant.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_mean.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -27,18 +29,17 @@ OutputVector global_average_pool(const Node& node) { // Expected spatial dims indexes: [2, 3, 4] auto data = node.get_ng_inputs()[0]; - const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); - const auto two_node = default_opset::Constant::create(element::i64, Shape{}, {2}); + const auto zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto one_node = v0::Constant::create(element::i64, Shape{}, {1}); + const auto two_node = v0::Constant::create(element::i64, Shape{}, {2}); - const auto data_shape = std::make_shared(data); - const auto data_rank = std::make_shared(data_shape); - const auto data_rank_as_scalar = std::make_shared(data_rank); + const auto data_shape = std::make_shared(data); + const auto data_rank = std::make_shared(data_shape); + const auto data_rank_as_scalar = 
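// squeeze the one-element rank tensor to a scalar so it can serve as the
// stop value of the Range that enumerates the spatial axes [2, rank) below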
std::make_shared(data_rank); - const auto reduce_axes = - std::make_shared(two_node, data_rank_as_scalar, one_node, element::i64); + const auto reduce_axes = std::make_shared(two_node, data_rank_as_scalar, one_node, element::i64); - return {std::make_shared(data, reduce_axes, true)}; + return {std::make_shared(data, reduce_axes, true)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp index 6d267bab74ba61..e503ce011cc4ab 100644 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp @@ -7,18 +7,17 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX GlobalAveragePool operation to an nGraph node. +/// \brief Convert ONNX GlobalAveragePool operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of ONNX +/// \return The vector containing OV nodes producing output of ONNX /// GlobalAveragePool operation. OutputVector global_average_pool(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp index e7a90f31af0635..2a28a21eeb2c5c 100644 --- a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp @@ -4,11 +4,13 @@ #include "op/global_max_pool.hpp" -#include -#include +#include "openvino/op/constant.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_max.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -27,18 +29,17 @@ OutputVector global_max_pool(const Node& node) { // Expected spatial dims indexes: [2, 3, 4] auto data = node.get_ng_inputs()[0]; - const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); - const auto two_node = default_opset::Constant::create(element::i64, Shape{}, {2}); + const auto zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto one_node = v0::Constant::create(element::i64, Shape{}, {1}); + const auto two_node = v0::Constant::create(element::i64, Shape{}, {2}); - const auto data_shape = std::make_shared(data); - const auto data_rank = std::make_shared(data_shape); - const auto data_rank_as_scalar = std::make_shared(data_rank); + const auto data_shape = std::make_shared(data); + const auto data_rank = std::make_shared(data_shape); + const auto data_rank_as_scalar = std::make_shared(data_rank); - const auto reduce_axes = - std::make_shared(two_node, data_rank_as_scalar, one_node, element::i64); + const auto reduce_axes = std::make_shared(two_node, data_rank_as_scalar, one_node, element::i64); - return {std::make_shared(data, reduce_axes, true)}; + return {std::make_shared(data, reduce_axes, true)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp index f3e1c6221f30b0..2b9053ddf4528b 100644 --- 
a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp @@ -7,18 +7,17 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX GlobalMaxPool operation to an nGraph node. +/// \brief Convert ONNX GlobalMaxPool operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing Ngraph nodes producing output of ONNX +/// \return The vector containing OV nodes producing output of ONNX /// GlobalMaxPool operation. OutputVector global_max_pool(const Node& node); diff --git a/src/frontends/onnx/frontend/src/op/greater.hpp b/src/frontends/onnx/frontend/src/op/greater.hpp index 73642577f41838..da9d950c3353be 100644 --- a/src/frontends/onnx/frontend/src/op/greater.hpp +++ b/src/frontends/onnx/frontend/src/op/greater.hpp @@ -7,18 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/greater.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector greater(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp index ce239143c7edd5..9eb595e216f62d 100644 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp +++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp @@ -4,11 +4,11 @@ #include "op/greater_or_equal.hpp" -#include -#include - -#include "default_opset.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -22,7 +22,7 @@ OutputVector greater_or_equal(const Node& node) { FRONT_END_GENERAL_CHECK(A.get_element_type() != ov::element::bf16 && B.get_element_type() != ov::element::bf16, "The input data bfloat16 isn't supported in opset 12"); - const auto C = std::make_shared(A, B); + const auto C = std::make_shared(A, B); return {C}; } @@ -33,7 +33,7 @@ OutputVector greater_or_equal(const Node& node) { const auto A = node.get_ng_inputs().at(0); const auto B = node.get_ng_inputs().at(1); - const auto C = std::make_shared(A, B); + const auto C = std::make_shared(A, B); return {C}; } diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp index c623c17b964e06..30e00067477fba 100644 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp +++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.cpp b/src/frontends/onnx/frontend/src/op/grid_sample.cpp index 63129b4c3dec18..651ca69f5e3d89 100644 --- a/src/frontends/onnx/frontend/src/op/grid_sample.cpp +++ b/src/frontends/onnx/frontend/src/op/grid_sample.cpp @@ -4,7 +4,9 @@ #include 
"op/grid_sample.hpp" -#include "openvino/opsets/opset9.hpp" +#include "openvino/op/grid_sample.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -15,16 +17,16 @@ OutputVector grid_sample(const Node& node) { const auto data = node.get_ng_inputs().at(0); const auto grid = node.get_ng_inputs().at(1); - ov::opset9::GridSample::Attributes attributes{}; + v9::GridSample::Attributes attributes{}; attributes.align_corners = node.get_attribute_value("align_corners", 0); - attributes.mode = EnumNames::as_enum( + attributes.mode = EnumNames::as_enum( node.get_attribute_value("mode", "bilinear")); - attributes.padding_mode = EnumNames::as_enum( - node.get_attribute_value("padding_mode", "zeros")); + attributes.padding_mode = + EnumNames::as_enum(node.get_attribute_value("padding_mode", "zeros")); - return {std::make_shared(data, grid, attributes)}; + return {std::make_shared(data, grid, attributes)}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.hpp b/src/frontends/onnx/frontend/src/op/grid_sample.hpp index 5c2a4715fbe2f6..b081064bda7196 100644 --- a/src/frontends/onnx/frontend/src/op/grid_sample.hpp +++ b/src/frontends/onnx/frontend/src/op/grid_sample.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.cpp b/src/frontends/onnx/frontend/src/op/group_normalization.cpp index b0dc8f786d8aef..1217ed03232e14 100644 --- a/src/frontends/onnx/frontend/src/op/group_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/group_normalization.cpp @@ -4,7 +4,16 @@ #include "op/group_normalization.hpp" -#include "default_opset.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/group_normalization.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/unsqueeze.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -22,26 +31,25 @@ OutputVector group_normalization(const Node& node) { const auto eps = node.get_attribute_value("epsilon", 1e-05f); const auto num_groups = node.get_attribute_value("num_groups"); - const auto zero = default_opset::Constant::create(element::i64, Shape{1}, {0}); - const auto one = default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto c_dim = - std::make_shared(std::make_shared(data), one, zero); - const auto g_dim = default_opset::Constant::create(element::i64, Shape{1}, {num_groups}); + const auto zero = v0::Constant::create(element::i64, Shape{1}, {0}); + const auto one = v0::Constant::create(element::i64, Shape{1}, {1}); + const auto c_dim = std::make_shared(std::make_shared(data), one, zero); + const auto g_dim = v0::Constant::create(element::i64, Shape{1}, {num_groups}); - const auto c_g_div = std::make_shared(c_dim, g_dim); + const auto c_g_div = std::make_shared(c_dim, g_dim); // Adjust scale and bias shape, [G] -> [G, C/G] -> [C] - const auto scale_unsq = std::make_shared(scale, one); + const auto scale_unsq = std::make_shared(scale, one); const auto broadcast_scale = - std::make_shared(scale_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL); - const auto c_scale = std::make_shared(broadcast_scale, c_dim, false); + std::make_shared(scale_unsq, c_g_div, 
ov::op::BroadcastType::BIDIRECTIONAL); + const auto c_scale = std::make_shared(broadcast_scale, c_dim, false); - const auto bias_unsq = std::make_shared(bias, one); + const auto bias_unsq = std::make_shared(bias, one); const auto broadcast_bias = - std::make_shared(bias_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL); - const auto c_bias = std::make_shared(broadcast_bias, c_dim, false); + std::make_shared(bias_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL); + const auto c_bias = std::make_shared(broadcast_bias, c_dim, false); - return {std::make_shared(data, c_scale, c_bias, num_groups, eps)}; + return {std::make_shared(data, c_scale, c_bias, num_groups, eps)}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.hpp b/src/frontends/onnx/frontend/src/op/group_normalization.hpp index fbd38d3667d4dd..4484c6a052e7cc 100644 --- a/src/frontends/onnx/frontend/src/op/group_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/group_normalization.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/gru.cpp b/src/frontends/onnx/frontend/src/op/gru.cpp index 4a88d3cbfc0ab9..fa38b87c7c7e44 100644 --- a/src/frontends/onnx/frontend/src/op/gru.cpp +++ b/src/frontends/onnx/frontend/src/op/gru.cpp @@ -4,16 +4,17 @@ #include "op/gru.hpp" -#include -#include - -#include "default_opset.hpp" -#include "ngraph/shape.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gru_sequence.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "ov_models/ov_builders/split.hpp" #include "utils/recurrent.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -34,8 +35,8 @@ struct GRUInputMap : public recurrent::OpInputMap { // gates_count * 2 since B is: [Wb, Rb] const int split_parts = 2 * 3; const auto split_bias = ov::op::util::split(bias, split_parts, 1); - const auto wr_z_bias = std::make_shared(split_bias.at(0), split_bias.at(3)); - const auto wr_r_bias = std::make_shared(split_bias.at(1), split_bias.at(4)); + const auto wr_z_bias = std::make_shared(split_bias.at(0), split_bias.at(3)); + const auto wr_r_bias = std::make_shared(split_bias.at(1), split_bias.at(4)); // The result has shape: [num_directions, 4 * hidden_size] // and data layout: // [ @@ -45,17 +46,17 @@ struct GRUInputMap : public recurrent::OpInputMap { // [Rb_h], // // num_directions times // ] - m_map[recurrent::OpInput::B] = std::make_shared( - OutputVector{wr_z_bias, wr_r_bias, split_bias.at(2), split_bias.at(5)}, - 1); + m_map[recurrent::OpInput::B] = + std::make_shared(OutputVector{wr_z_bias, wr_r_bias, split_bias.at(2), split_bias.at(5)}, + 1); } else { const std::size_t hidden_size = m_map[recurrent::OpInput::R].get_shape().back(); const std::size_t num_directions = m_map[recurrent::OpInput::W].get_shape().front(); m_map[recurrent::OpInput::B] = - std::make_shared(el_type, - Shape{num_directions, (gates_count + 1) * hidden_size}, - 0.f); + std::make_shared(el_type, + Shape{num_directions, (gates_count + 1) * hidden_size}, + 0.f); } } } @@ -81,19 +82,19 @@ OutputVector gru(const Node& node) { GRUInputMap input_map{node, gates_count}; GRUAttributes attributes{node}; - auto gru_sequence = 
std::make_shared(input_map.at(recurrent::OpInput::X), - input_map.at(recurrent::OpInput::INIT_H), - input_map.at(recurrent::OpInput::SEQ_LENGTHS), - input_map.at(recurrent::OpInput::W), - input_map.at(recurrent::OpInput::R), - input_map.at(recurrent::OpInput::B), - attributes.m_hidden_size, - attributes.m_direction, - attributes.m_activations, - attributes.m_activations_alpha, - attributes.m_activations_beta, - attributes.m_clip_threshold, - attributes.m_linear_before_reset); + auto gru_sequence = std::make_shared(input_map.at(recurrent::OpInput::X), + input_map.at(recurrent::OpInput::INIT_H), + input_map.at(recurrent::OpInput::SEQ_LENGTHS), + input_map.at(recurrent::OpInput::W), + input_map.at(recurrent::OpInput::R), + input_map.at(recurrent::OpInput::B), + attributes.m_hidden_size, + attributes.m_direction, + attributes.m_activations, + attributes.m_activations_alpha, + attributes.m_activations_beta, + attributes.m_clip_threshold, + attributes.m_linear_before_reset); const auto Y = gru_sequence->output(0); const auto Y_h = gru_sequence->output(1); diff --git a/src/frontends/onnx/frontend/src/op/gru.hpp b/src/frontends/onnx/frontend/src/op/gru.hpp index 94f0dcba72533e..b77d6c0ab66246 100644 --- a/src/frontends/onnx/frontend/src/op/gru.hpp +++ b/src/frontends/onnx/frontend/src/op/gru.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 702e0695422037..f59b6e3e3bf7d5 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -7,11 +7,17 @@ #include -#include - -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/cos.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -27,43 +33,38 @@ OutputVector hammingwindow(const Node& node) { // Weights as described in ONNX HammingWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow - const auto float_size = std::make_shared(size, ov::element::f32); - const auto a_0 = std::make_shared( - std::make_shared(ov::element::f32, ov::Shape(), std::vector{25.0f}), - std::make_shared(ov::element::f32, ov::Shape(), std::vector{46.0f})); - const auto a_1 = std::make_shared( - std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}), + const auto float_size = std::make_shared(size, ov::element::f32); + const auto a_0 = std::make_shared( + std::make_shared(ov::element::f32, ov::Shape(), std::vector{25.0f}), + std::make_shared(ov::element::f32, ov::Shape(), std::vector{46.0f})); + const auto a_1 = std::make_shared( + std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}), a_0); - const auto start = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto one_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto two_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); - const auto range = std::make_shared(start, size, one_const, ov::element::f32); - const auto pi = - 
default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto two_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); std::shared_ptr factor; if (periodic) { - factor = std::make_shared( + factor = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - float_size)); + std::make_shared(std::make_shared(pi, two_const), float_size)); } else { - factor = std::make_shared( + factor = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - std::make_shared(float_size, one_const))); + std::make_shared(std::make_shared(pi, two_const), + std::make_shared(float_size, one_const))); } - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); if (output_datatype == element::f32) { return {y_values}; } else { - return {std::make_shared(y_values, output_datatype)}; + return {std::make_shared(y_values, output_datatype)}; } } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index d088b4105abc3a..c6523178aae138 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -6,7 +6,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 47911bf1771c36..33d306636eee1a 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -7,11 +7,17 @@ #include -#include - -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/cos.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -27,39 +33,34 @@ OutputVector hannwindow(const Node& node) { // Weights as described in ONNX HannWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow - const auto float_size = std::make_shared(size, ov::element::f32); - const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.5f}); + const auto float_size = std::make_shared(size, ov::element::f32); + const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.5f}); const auto a_1 = a_0; - const auto start = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto one_const = - std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto two_const = - std::make_shared(ov::element::f32, ov::Shape(), 
std::vector{2.0f}); - const auto range = std::make_shared(start, size, one_const, ov::element::f32); - const auto pi = - default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto two_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); std::shared_ptr factor; if (periodic) { - factor = std::make_shared( + factor = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - float_size)); + std::make_shared(std::make_shared(pi, two_const), float_size)); } else { - factor = std::make_shared( + factor = std::make_shared( range, - std::make_shared(std::make_shared(pi, two_const), - std::make_shared(float_size, one_const))); + std::make_shared(std::make_shared(pi, two_const), + std::make_shared(float_size, one_const))); } - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); if (output_datatype == element::f32) { return {y_values}; } else { - return {std::make_shared(y_values, output_datatype)}; + return {std::make_shared(y_values, output_datatype)}; } } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index 0c9e6993048ef3..0798b5f132208b 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -6,7 +6,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp index 0d7703373671a2..d23deeae05fd2c 100644 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp +++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp @@ -4,9 +4,10 @@ #include "op/hard_sigmoid.hpp" -#include +#include "openvino/op/constant.hpp" +#include "openvino/op/hard_sigmoid.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -17,16 +18,15 @@ OutputVector hard_sigmoid(const Node& node) { const auto data = node.get_ng_inputs().at(0); const auto alpha = - default_opset::Constant::create(data.get_element_type(), - Shape{}, - std::vector{node.get_attribute_value("alpha", 0.2)}); + v0::Constant::create(data.get_element_type(), + Shape{}, + std::vector{node.get_attribute_value("alpha", 0.2)}); - const auto beta = - default_opset::Constant::create(data.get_element_type(), - Shape{}, - std::vector{node.get_attribute_value("beta", 0.5)}); + const auto beta = v0::Constant::create(data.get_element_type(), + Shape{}, + std::vector{node.get_attribute_value("beta", 0.5)}); - return {std::make_shared(data, alpha, beta)}; + return {std::make_shared(data, alpha, beta)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp 
b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp index 66d4c0b5ac04f1..e3161760a75741 100644 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp +++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hard_swish.hpp b/src/frontends/onnx/frontend/src/op/hard_swish.hpp index 7a771ab39d2cdd..50c2e7000364e5 100644 --- a/src/frontends/onnx/frontend/src/op/hard_swish.hpp +++ b/src/frontends/onnx/frontend/src/op/hard_swish.hpp @@ -7,16 +7,15 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/hswish.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector hard_swish(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp index cb799fb66d8e6f..3bb60bfee46e9d 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.cpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp @@ -5,7 +5,7 @@ #include "op/hardmax.hpp" #include "exceptions.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/gather.hpp" @@ -17,6 +17,8 @@ #include "utils/common.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -29,7 +31,7 @@ OutputVector hardmax(const Node& node) { auto axis = node.get_attribute_value("axis", 1); if (input_shape.rank().is_static()) { OPENVINO_SUPPRESS_DEPRECATED_START - axis = ngraph::normalize_axis(node.get_description(), axis, input_shape.rank()); + axis = ov::normalize_axis(node.get_description(), axis, input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END } @@ -37,29 +39,26 @@ OutputVector hardmax(const Node& node) { const auto coerced_tensor = ov::op::util::flatten(input, static_cast(axis)); const auto coerced_tensor_shape = std::make_shared(coerced_tensor); - Output row_size = - std::make_shared(coerced_tensor_shape, - ov::op::v0::Constant::create(element::i64, {1}, {1}), - ov::op::v0::Constant::create(element::i64, {}, {0})); + Output row_size = std::make_shared(coerced_tensor_shape, + ov::op::v0::Constant::create(element::i64, {1}, {1}), + ov::op::v0::Constant::create(element::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); const auto indices_axis = 1; - const auto topk = - std::make_shared(coerced_tensor, - ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}), - indices_axis, - ov::op::v11::TopK::Mode::MAX, - ov::op::v11::TopK::SortType::NONE); + const auto topk = std::make_shared(coerced_tensor, + ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}), + indices_axis, + ov::op::v11::TopK::Mode::MAX, + ov::op::v11::TopK::SortType::NONE); - const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}); - const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}); + 
const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto results = - std::make_shared(topk->output(1), row_size, on_value, off_value, indices_axis); - const auto converted_results = std::make_shared(results, input.get_element_type()); + const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, indices_axis); + const auto converted_results = std::make_shared(results, input.get_element_type()); const auto output_shape = std::make_shared(input); - return {std::make_shared(converted_results, output_shape, false)}; + return {std::make_shared(converted_results, output_shape, false)}; } } // namespace set_1 @@ -70,31 +69,29 @@ OutputVector hardmax(const Node& node) { auto axis = node.get_attribute_value("axis", -1); OPENVINO_SUPPRESS_DEPRECATED_START - axis = ngraph::normalize_axis(node.get_description(), axis, input_shape.rank()); + axis = ov::normalize_axis(node.get_description(), axis, input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END const auto input_runtime_shape = std::make_shared(input); - Output row_size = - std::make_shared(input_runtime_shape, - ov::op::v0::Constant::create(element::i64, {1}, {axis}), - ov::op::v0::Constant::create(element::i64, {}, {0})); + Output row_size = std::make_shared(input_runtime_shape, + ov::op::v0::Constant::create(element::i64, {1}, {axis}), + ov::op::v0::Constant::create(element::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); - const auto topk = - std::make_shared(input, - ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}), - axis, - ov::op::v11::TopK::Mode::MAX, - ov::op::v11::TopK::SortType::NONE); + const auto topk = std::make_shared(input, + ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}), + axis, + ov::op::v11::TopK::Mode::MAX, + ov::op::v11::TopK::SortType::NONE); - const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}); - const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}); + const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, axis); - const auto converted_results = std::make_shared(results, input.get_element_type()); + const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, axis); + const auto converted_results = std::make_shared(results, input.get_element_type()); const auto output_shape = std::make_shared(input); - return {std::make_shared(converted_results, output_shape, false)}; + return {std::make_shared(converted_results, output_shape, false)}; } } // namespace set_13 diff --git a/src/frontends/onnx/frontend/src/op/hardmax.hpp b/src/frontends/onnx/frontend/src/op/hardmax.hpp index 2bc5bf414472d7..820bac69e5e3ab 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.hpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { From 8e7b1a43af8cf6d41827e046397bb573825a0b5f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 19 Jan 2024 18:25:01 +0400 Subject: [PATCH 098/122] Fix frontends loading under JS (#22260) --- src/frontends/common/src/manager.cpp | 6 +-- src/frontends/common/src/plugin_loader.hpp | 7 +-- 
src/frontends/common/src/utils.cpp | 62 ---------------------- src/frontends/common/src/utils.hpp | 6 --- 4 files changed, 3 insertions(+), 78 deletions(-) delete mode 100644 src/frontends/common/src/utils.cpp diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp index 6194fca7583937..22ce0ed4b772d9 100644 --- a/src/frontends/common/src/manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -4,11 +4,9 @@ #include "openvino/frontend/manager.hpp" -#include -#include - #include "openvino/frontend/exception.hpp" #include "openvino/util/env_util.hpp" +#include "openvino/util/file_util.hpp" #include "openvino/util/log.hpp" #include "plugin_loader.hpp" #include "utils.hpp" @@ -210,7 +208,7 @@ class FrontEndManager::Impl { } void search_all_plugins() { - auto fe_lib_dir = get_frontend_library_path(); + auto fe_lib_dir = ov::util::get_ov_lib_path(); if (!fe_lib_dir.empty()) find_plugins(fe_lib_dir, m_plugins); } diff --git a/src/frontends/common/src/plugin_loader.hpp b/src/frontends/common/src/plugin_loader.hpp index dccf8ddf7a39f3..388402f0139550 100644 --- a/src/frontends/common/src/plugin_loader.hpp +++ b/src/frontends/common/src/plugin_loader.hpp @@ -10,12 +10,7 @@ #include #include "openvino/frontend/manager.hpp" - -#ifdef _WIN32 -static const char PathSeparator[] = ";"; -#else -static const char PathSeparator[] = ":"; -#endif // _WIN32 +#include "openvino/util/file_util.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/common/src/utils.cpp b/src/frontends/common/src/utils.cpp deleted file mode 100644 index 3a0db585fd2eaa..00000000000000 --- a/src/frontends/common/src/utils.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "utils.hpp" - -#include "openvino/frontend/exception.hpp" -#include "openvino/util/file_util.hpp" -#include "plugin_loader.hpp" - -#ifndef _WIN32 -# include -# include -# include -#else -# if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP -# error "Only WINAPI_PARTITION_DESKTOP is supported, because of GetModuleHandleEx[A|W]" -# endif -# ifndef NOMINMAX -# define NOMINMAX -# endif -# include -#endif - -namespace { - -static std::string _get_frontend_library_path() { -#ifdef _WIN32 -# ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - WCHAR ie_library_path[MAX_PATH]; - HMODULE hm = NULL; - if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(ov::frontend::get_frontend_library_path), - &hm)) { - FRONT_END_INITIALIZATION_CHECK(false, "GetModuleHandle returned ", GetLastError()); - } - GetModuleFileNameW(hm, (LPWSTR)ie_library_path, sizeof(ie_library_path) / sizeof(ie_library_path[0])); - return ov::util::wstring_to_string(ov::util::get_directory(std::wstring(ie_library_path))); -# else - CHAR ie_library_path[MAX_PATH]; - HMODULE hm = NULL; - if (!GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(ov::frontend::get_frontend_library_path), - &hm)) { - FRONT_END_INITIALIZATION_CHECK(false, "GetModuleHandle returned ", GetLastError()); - } - GetModuleFileNameA(hm, (LPSTR)ie_library_path, sizeof(ie_library_path)); - return ov::util::get_directory(std::string(ie_library_path)); -# endif -#elif defined(__APPLE__) || defined(__linux__) || defined(__EMSCRIPTEN__) - Dl_info info; - dladdr(reinterpret_cast(ov::frontend::get_frontend_library_path), &info); - return 
ov::util::get_directory(ov::util::get_absolute_file_path(std::string(info.dli_fname))).c_str(); -#else -# error "Unsupported OS" -#endif // _WIN32 -} -} // namespace - -std::string ov::frontend::get_frontend_library_path() { - return _get_frontend_library_path(); -} diff --git a/src/frontends/common/src/utils.hpp b/src/frontends/common/src/utils.hpp index 24f1bb547e6ca2..bdf29be618f185 100644 --- a/src/frontends/common/src/utils.hpp +++ b/src/frontends/common/src/utils.hpp @@ -48,9 +48,3 @@ catch (...) { \ OPENVINO_ASSERT(false, (MESSAGE)); \ } - -namespace ov { -namespace frontend { -std::string get_frontend_library_path(); -} // namespace frontend -} // namespace ov From 635665ae181e5e27980189fb057f954428c01298 Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Fri, 19 Jan 2024 06:47:27 -0800 Subject: [PATCH 099/122] Delete ngraph `AlignedBuffer`/`SharedBuffer` (#22182) * Delete ngraph `SharedBuffer` * Delete ngraph `AlignedBuffer` * Fix `DenormalNullifyCheck` * Fix comments --- src/common/snippets/src/pass/hash.cpp | 11 --- .../include/ngraph/runtime/aligned_buffer.hpp | 93 ------------------- .../include/ngraph/runtime/shared_buffer.hpp | 45 --------- src/core/include/openvino/op/constant.hpp | 8 +- src/core/src/op/constant.cpp | 1 - src/core/src/pass/serialize.cpp | 14 --- src/core/src/runtime/aligned_buffer.cpp | 62 +------------ .../subgraph_tests/src/denormal_check.cpp | 34 ++----- .../src/fuse_non0_output_port.cpp | 1 - 9 files changed, 9 insertions(+), 260 deletions(-) delete mode 100644 src/core/include/ngraph/runtime/aligned_buffer.hpp delete mode 100644 src/core/include/ngraph/runtime/shared_buffer.hpp diff --git a/src/common/snippets/src/pass/hash.cpp b/src/common/snippets/src/pass/hash.cpp index cea21e37e861cf..40fb2c96a8f6f5 100644 --- a/src/common/snippets/src/pass/hash.cpp +++ b/src/common/snippets/src/pass/hash.cpp @@ -170,17 +170,6 @@ class SnippetsHasher : public ov::AttributeVisitor { void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override { if (const auto& a = ov::as_type>>(&adapter)) { m_hash = hash_combine(hash_combine(m_hash, name), a->get()->get_info().variable_id); - } else if (const auto& a = - ov::as_type>>(&adapter)) { - if (name == "value" && m_node_type_name == "Constant") { - m_hash = hash_combine(m_hash, AttrType::constant); - const int64_t size = a->get()->size(); - m_hash = hash_combine(hash_combine(m_hash, AttrType::size), size); - auto data = static_cast(a->get()->get_ptr()); - for (int64_t i = 0; i < size; i++) { - m_hash = hash_combine(m_hash, data[i]); - } - } } else if (const auto& a = ov::as_type>>(&adapter)) { if (name == "value" && m_node_type_name == "Constant") { diff --git a/src/core/include/ngraph/runtime/aligned_buffer.hpp b/src/core/include/ngraph/runtime/aligned_buffer.hpp deleted file mode 100644 index 4ac11da07b1bc1..00000000000000 --- a/src/core/include/ngraph/runtime/aligned_buffer.hpp +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include 
-
-#include "ngraph/util.hpp"
-#include "openvino/core/rtti.hpp"
-
-namespace ngraph {
-namespace runtime {
-NGRAPH_SUPPRESS_DEPRECATED_START
-/// \brief Allocates a block of memory on the specified alignment. The actual size of the
-/// allocated memory is larger than the requested size by the alignment, so allocating 1
-/// byte
-/// on 64 byte alignment will allocate 65 bytes.
-class NGRAPH_API NGRAPH_API_DEPRECATED AlignedBuffer {
-public:
-    // Allocator objects and the allocation interfaces are owned by the
-    // creators of AlignedBuffers. They need to ensure that the lifetime of
-    // allocator exceeds the lifetime of this AlignedBuffer.
-    AlignedBuffer(size_t byte_size, size_t alignment = 64);
-
-    AlignedBuffer();
-    virtual ~AlignedBuffer();
-
-    AlignedBuffer(AlignedBuffer&& other);
-    AlignedBuffer& operator=(AlignedBuffer&& other);
-
-    size_t size() const {
-        return m_byte_size;
-    }
-    void* get_ptr(size_t offset) const {
-        return m_aligned_buffer + offset;
-    }
-    void* get_ptr() {
-        return m_aligned_buffer;
-    }
-    const void* get_ptr() const {
-        return m_aligned_buffer;
-    }
-    template <typename T>
-    T* get_ptr() {
-        return reinterpret_cast<T*>(m_aligned_buffer);
-    }
-    template <typename T>
-    const T* get_ptr() const {
-        return reinterpret_cast<const T*>(m_aligned_buffer);
-    }
-
-    template <typename T>
-    explicit operator T*() {
-        return get_ptr<T>();
-    }
-
-private:
-    AlignedBuffer(const AlignedBuffer&) = delete;
-    AlignedBuffer& operator=(const AlignedBuffer&) = delete;
-
-protected:
-    char* m_allocated_buffer;
-    char* m_aligned_buffer;
-    size_t m_byte_size;
-};
-NGRAPH_SUPPRESS_DEPRECATED_END
-}  // namespace runtime
-}  // namespace ngraph
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-namespace ov {
-template <>
-class NGRAPH_API AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>
-    : public DirectValueAccessor<std::shared_ptr<ngraph::runtime::AlignedBuffer>> {
-public:
-    AttributeAdapter(std::shared_ptr<ngraph::runtime::AlignedBuffer>& value);
-
-    OPENVINO_RTTI("AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>");
-};
-NGRAPH_SUPPRESS_DEPRECATED_END
-
-}  // namespace ov
diff --git a/src/core/include/ngraph/runtime/shared_buffer.hpp b/src/core/include/ngraph/runtime/shared_buffer.hpp
deleted file mode 100644
index 576c9888561c6a..00000000000000
--- a/src/core/include/ngraph/runtime/shared_buffer.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-# define NGRAPH_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-# pragma message( \
-    "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include 
-
-#include "ngraph/runtime/aligned_buffer.hpp"
-
-namespace ngraph {
-namespace runtime {
-NGRAPH_SUPPRESS_DEPRECATED_START
-/// \brief SharedBuffer class to store pointer to pre-acclocated buffer.
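Both deleted headers have direct counterparts under the ov namespace (openvino/runtime/aligned_buffer.hpp and openvino/runtime/shared_buffer.hpp), which is what the test fixes further down migrate to. A small usage sketch of the replacement, relying only on the interface visible in this patch: the byte-size constructor with a default 64-byte alignment, size(), and the typed get_ptr():

    #include <cassert>
    #include <cstdint>

    #include "openvino/runtime/aligned_buffer.hpp"

    int main() {
        // The doc comment above holds for the ov:: class too: requesting 1 byte
        // at 64-byte alignment over-allocates by the alignment (65 bytes), so
        // the usable pointer can always be bumped to the boundary.
        ov::AlignedBuffer one_byte(1, 64);
        assert(one_byte.size() == 1);  // logical byte size, not the padded allocation
        assert(reinterpret_cast<std::uintptr_t>(one_byte.get_ptr()) % 64 == 0);

        // Typed access mirrors the template get_ptr<T>() of the deleted class.
        ov::AlignedBuffer floats(16 * sizeof(float));
        floats.get_ptr<float>()[0] = 1.0f;
        return 0;
    }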
-template -class NGRAPH_API_DEPRECATED SharedBuffer : public ngraph::runtime::AlignedBuffer { -public: - SharedBuffer(char* data, size_t size, const T& shared_object) : _shared_object(shared_object) { - m_allocated_buffer = data; - m_aligned_buffer = data; - m_byte_size = size; - } - - virtual ~SharedBuffer() { - m_aligned_buffer = nullptr; - m_allocated_buffer = nullptr; - m_byte_size = 0; - } - -private: - T _shared_object; -}; -NGRAPH_SUPPRESS_DEPRECATED_END -} // namespace runtime -} // namespace ngraph diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index 20280887fae17c..222b006c094ba8 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -12,7 +12,8 @@ # define WAS_OV_LIBRARY_DEFINED_CONSTANT #endif -#include "ngraph/runtime/shared_buffer.hpp" +#include "ngraph/util.hpp" +#include "openvino/core/rtti.hpp" #ifdef WAS_OV_LIBRARY_DEFINED_CONSTANT # undef IN_OV_COMPONENT @@ -400,11 +401,6 @@ class OPENVINO_API Constant : public Op { private: Constant(bool memset_allocation, const element::Type& type, const Shape& shape); - OPENVINO_SUPPRESS_DEPRECATED_START - std::shared_ptr legacy_to_ov_aligned_buffer( - const std::shared_ptr& buffer); - OPENVINO_SUPPRESS_DEPRECATED_END - template , typename std::enable_if>>(&adapter)) { m_xml_node.append_attribute(name.c_str()).set_value(a->get()->get_info().variable_id.c_str()); - } else if (const auto& a = - ov::as_type>>(&adapter)) { - if (name == "value" && translate_type_name(m_node_type_name) == "Const") { - const int64_t size = a->get()->size(); - size_t new_size; - int64_t offset = m_constant_write_handler.write(static_cast(a->get()->get_ptr()), - size, - &new_size, - m_compress_to_fp16, - m_output_element_type); - - m_xml_node.append_attribute("offset").set_value(static_cast(offset)); - m_xml_node.append_attribute("size").set_value(static_cast(new_size)); - } } else if (const auto& a = ov::as_type>>(&adapter)) { if (name == "value" && translate_type_name(m_node_type_name) == "Const") { const int64_t size = a->get()->size(); diff --git a/src/core/src/runtime/aligned_buffer.cpp b/src/core/src/runtime/aligned_buffer.cpp index 4207eefe5db9b7..993a3928e79c93 100644 --- a/src/core/src/runtime/aligned_buffer.cpp +++ b/src/core/src/runtime/aligned_buffer.cpp @@ -2,71 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/runtime/aligned_buffer.hpp" +#include "openvino/runtime/aligned_buffer.hpp" #include #include -#include "ngraph/util.hpp" -#include "openvino/runtime/aligned_buffer.hpp" -#include "openvino/util/log.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph { - -runtime::AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {} - -runtime::AlignedBuffer::AlignedBuffer(size_t byte_size, size_t alignment) : m_byte_size(byte_size) { - m_byte_size = std::max(1, byte_size); - size_t allocation_size = m_byte_size + alignment; - m_allocated_buffer = new char[allocation_size]; - m_aligned_buffer = m_allocated_buffer; - size_t mod = (alignment != 0) ? 
size_t(m_aligned_buffer) % alignment : 0; - - if (mod != 0) { - m_aligned_buffer += (alignment - mod); - } -} - -runtime::AlignedBuffer::AlignedBuffer(AlignedBuffer&& other) - : m_allocated_buffer(other.m_allocated_buffer), - m_aligned_buffer(other.m_aligned_buffer), - m_byte_size(other.m_byte_size) { - other.m_allocated_buffer = nullptr; - other.m_aligned_buffer = nullptr; - other.m_byte_size = 0; -} - -runtime::AlignedBuffer::~AlignedBuffer() { - if (m_allocated_buffer != nullptr) { - delete[] m_allocated_buffer; - } -} - -runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other) { - if (this != &other) { - if (m_allocated_buffer != nullptr) { - delete[] m_allocated_buffer; - } - m_allocated_buffer = other.m_allocated_buffer; - m_aligned_buffer = other.m_aligned_buffer; - m_byte_size = other.m_byte_size; - other.m_allocated_buffer = nullptr; - other.m_aligned_buffer = nullptr; - other.m_byte_size = 0; - } - return *this; -} -} // namespace ngraph - -namespace ov { -AttributeAdapter>::AttributeAdapter( - std::shared_ptr& value) - : DirectValueAccessor>(value) {} -} // namespace ov -NGRAPH_SUPPRESS_DEPRECATED_END - namespace ov { AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {} diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp index c9bb296c72dcb4..8af12820d7b19d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp @@ -6,36 +6,14 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" +#include "openvino/runtime/aligned_buffer.hpp" namespace ov { namespace test { -template -class AlignedBufferWrapper { -public: - AlignedBufferWrapper(size_t size, size_t alignment) { - _buffer.reset(new ngraph::runtime::AlignedBuffer(size * sizeof(T), alignment)); - } - AlignedBufferWrapper(const AlignedBufferWrapper&) = delete; - AlignedBufferWrapper& operator=(const AlignedBufferWrapper&) = delete; - AlignedBufferWrapper(AlignedBufferWrapper&&) = default; - AlignedBufferWrapper& operator=(AlignedBufferWrapper&&) = default; - - T* get_ptr() { - return _buffer->get_ptr(); - } - - size_t size() const { - return _buffer->size() / sizeof(T); - } -private: - std::unique_ptr _buffer = nullptr; -}; - class DenormalNullifyCheck : public SubgraphBaseTest { protected: -std::unique_ptr> pConstStorage; +std::unique_ptr pConstStorage; void validate() override { const auto& actualOutputs = get_plugin_outputs(); @@ -63,7 +41,7 @@ void SetUp() override { const auto elemsCount = shape_size(inpShape); const auto rtPrc = ov::element::f32; ov::ParameterVector params {std::make_shared(rtPrc, ov::Shape(inpShape))}; - pConstStorage.reset(new AlignedBufferWrapper(elemsCount, alignment)); + pConstStorage.reset(new ov::AlignedBuffer(elemsCount, alignment)); auto constTensor = ov::Tensor(rtPrc, inpShape, pConstStorage->get_ptr()); auto constNode = std::make_shared(constTensor); @@ -78,7 +56,7 @@ void SetUp() override { TEST_F(DenormalNullifyCheck, smoke_CPU_Denormal_Check) { using indexInterval = std::pair; - size_t elemsCount = pConstStorage->size(); + size_t elemsCount = pConstStorage->size() / sizeof(float); const indexInterval intervals[] = { {0, elemsCount/2}, {elemsCount/2, elemsCount}, @@ 
-99,9 +77,9 @@ TEST_F(DenormalNullifyCheck, smoke_CPU_Denormal_Check) { auto denormal = random.Generate(denormalsRange) + 1; float tmp; memcpy(&tmp, &denormal, sizeof(float)); - pConstStorage->get_ptr()[i] = tmp; + pConstStorage->get_ptr()[i] = tmp; } else { - pConstStorage->get_ptr()[i] = randomRange[i]; + pConstStorage->get_ptr()[i] = randomRange[i]; } } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp index 61b362a2dee39e..4c5119c25a5cc7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp @@ -4,7 +4,6 @@ #include "common_test_utils/ov_tensor_utils.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" From c8f9033959ad70dae809e6287cce7274afaab42e Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Fri, 19 Jan 2024 06:50:27 -0800 Subject: [PATCH 100/122] LPT `ngraph` cleanup (#22265) --- .../add_transformation.cpp | 50 ++--- .../assign_and_read_value_transformation.cpp | 8 +- .../batch_to_space_transformation.cpp | 4 +- .../clamp_transformation.cpp | 10 +- .../concat_transformation.cpp | 42 ++-- .../concat_with_child_and_output.cpp | 18 +- ...t_with_different_precision_on_children.cpp | 22 +-- ...oncat_with_intermediate_transformation.cpp | 2 +- .../concat_with_neighbors_transformation.cpp | 2 +- .../concat_with_split_transformation.cpp | 18 +- ...nvolution_backprop_data_transformation.cpp | 48 ++--- .../convolution_qdq_transformation.cpp | 92 ++++----- .../convolution_transformation.cpp | 70 +++---- ...ntwise_branch_selection_transformation.cpp | 34 ++-- ...eliminate_fake_quantize_transformation.cpp | 12 +- .../fq_and_avg_pool_transformation.cpp | 2 +- .../fq_and_max_pool_transformation.cpp | 2 +- ...d_two_output_branches_with_convolution.cpp | 2 +- .../fq_precision_selection_transformation.cpp | 14 +- .../fq_transformation.cpp | 2 +- .../fq_with_dq_not_optimal_transformation.cpp | 66 +++---- .../fully_connected_transformation.cpp | 12 +- .../fuse_convert_transformation.cpp | 6 +- .../fuse_dequantize_to_fq_transformation.cpp | 50 ++--- ...fuse_fq_and_scale_shift_transformation.cpp | 2 +- .../gather_transformation.cpp | 8 +- .../gemm_transformation.cpp | 2 +- .../group_convolution_transformation.cpp | 56 +++--- .../groupconvolution_qdq_transformation.cpp | 184 +++++++++--------- .../interpolate_transformation.cpp | 8 +- .../mat_mul_transformation.cpp | 22 +-- .../mat_mul_with_constant_transformation.cpp | 22 +-- .../mat_mul_with_optimized_constant_fq.cpp | 6 +- .../move_fake_quantize_transformation.cpp | 24 +-- .../multiply_to_group_convolution.cpp | 24 +-- .../multiply_transformation.cpp | 54 ++--- .../multiply_with_one_parent.cpp | 4 +- .../mvn_transformation.cpp | 2 +- .../normalize_transformation.cpp | 2 +- .../pad_transformation.cpp | 56 +++--- .../prelu_transformation.cpp | 10 +- .../pull_reshape_through_dequantization.cpp | 32 +-- .../recurrent_cell_transformation.cpp | 40 ++-- .../reduce_max_transformation.cpp | 18 +- .../reduce_mean_transformation.cpp | 22 +-- .../reduce_min_transformation.cpp | 18 +- .../reduce_sum_transformation.cpp | 14 +- .../relu_transformation.cpp | 10 +- .../reshape_transformation.cpp | 28 +-- .../shuffle_channels_transformation.cpp | 14 +- 
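The whole commit is one mechanical substitution: ngraph::Shape, ngraph::PartialShape and ngraph::element were already aliases of the ov:: types, so every hunk below swaps the spelling without changing semantics. The FakeQuantizeOnData tuples that dominate these files are shorthand for real graph nodes; a hedged sketch of what an entry such as { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } } expands to (make_fq is an illustrative name, the actual builders live in the shared ov_lpt_models helpers):

    #include <memory>

    #include "openvino/core/node.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/fake_quantize.hpp"

    // Tuple order: levels, constant shape, input low/high, output low/high.
    // 256 levels with a 0..25.5 output range is the u8-friendly pattern that
    // LPT later folds into a dequantization multiply.
    std::shared_ptr<ov::op::v0::FakeQuantize> make_fq(const ov::Output<ov::Node>& data) {
        auto c = [](float v) {
            return ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {v});
        };
        return std::make_shared<ov::op::v0::FakeQuantize>(data, c(0.f), c(255.f), c(0.f), c(25.5f), 256);
    }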
.../space_to_batch_transformation.cpp | 4 +- .../split_transformation.cpp | 14 +- .../squeeze_transformation.cpp | 8 +- .../strided_slice_transformation.cpp | 12 +- .../subtract_multiply_to_multiply_add.cpp | 16 +- .../subtract_transformation.cpp | 2 +- .../transpose_after_matmul_transformation.cpp | 2 +- .../transpose_transformation.cpp | 6 +- .../unsqueeze_transformation.cpp | 10 +- .../variadic_split_transformation.cpp | 14 +- .../add_transformation.cpp | 16 +- ...nvolution_backprop_data_transformation.cpp | 8 +- .../convolution_qdq_transformation.cpp | 10 +- ...ntwise_branch_selection_transformation.cpp | 8 +- .../fq_precision_selection_transformation.cpp | 4 +- .../fq_with_dq_not_optimal_transformation.cpp | 8 +- .../groupconvolution_qdq_transformation.cpp | 26 +-- .../interpolate_transformation.cpp | 6 +- .../mat_mul_with_constant_transformation.cpp | 2 +- .../multiply_transformation.cpp | 18 +- ...nvolution_backprop_data_transformation.cpp | 2 +- .../mat_mul_with_optimized_constant_fq.cpp | 4 +- .../layer_transformation.hpp | 3 +- 73 files changed, 736 insertions(+), 737 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp index 2798e6441d64fd..0348ac2354db8d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp @@ -16,52 +16,52 @@ const std::vector netPrecisions = { const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + {ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + 
{ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { {}, {}, false }, { {}, {}, true }, }; @@ -69,7 +69,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, AddTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(params)), AddTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp index c6567dbdf9ea05..3fe2ca1394b398 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp @@ -29,22 +29,22 @@ const std::vector trasform const std::vector params{ // u8 { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u16 { - { 65536ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 65536ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u32 { - { 4294967296ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 4294967296ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, 
{ 0.f }, { 12.8f } }, }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, AssignAndReadValueTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::ValuesIn(opsetVersions), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp index ab49ffa3597f3b..420bf2a22961dc 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp @@ -19,7 +19,7 @@ const std::vector params = { { { 4, 3, 50, 86 }, { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, "BatchToSpace", "u8" }, @@ -29,7 +29,7 @@ const std::vector params = { { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp index e06bfa30f24dc0..64fc05849b0793 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp @@ -26,7 +26,7 @@ const std::vector trasform const std::vector params{ // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -37,7 +37,7 @@ const std::vector params{ }, // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -49,7 +49,7 @@ const std::vector params{ // per-channel quantization with the same values { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -63,7 +63,7 @@ const std::vector params{ { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -78,7 +78,7 @@ const std::vector params{ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ClampTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index b107e68669b005..b97120a378193f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -18,71 +18,71 @@ const std::vector testValues = { // U8 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, // I8 { {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // mixed: U8 + I8 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // mixed: I8 + U8 { {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, // FQ with unexpected quantizationLevels { {}, - { 14ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 14ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 14ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 14ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {} }, // FQ with INT4 quantizationLevels { {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {} }, // FQ with INT4+INT8 quantizationLevels { {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, }; -const std::vector shapes = { - ngraph::Shape({ 1, 3, 16, 16 }), - ngraph::Shape({ 4, 3, 16, 16 }) +const std::vector shapes = { + ov::Shape({ 1, 3, 16, 16 }), + ov::Shape({ 4, 3, 16, 16 }) }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, @@ -96,15 +96,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, namespace concat_transformation_mixed { -const std::vector precisions = { - ngraph::element::f16 +const std::vector precisions = { + ov::element::f16 }; const std::vector testValues = { // mixed dequantization: FP32 & FP16 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), {}, @@ -119,7 +119,7 @@ const std::vector testValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, 
::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues)), ConcatTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp index 058f376ec162af..3a66bac15d8b97 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp @@ -21,30 +21,30 @@ const std::vector trasform const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithChildAndOutputTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp index 89b41087d5b79d..41131e1a9e362a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -22,39 +22,39 @@ const std::vector testValues = { // U8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // U8 and unsupported concat axis { 2, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), 
{0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp index 169addf4fa8051..7bdfb00b6b6061 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -24,7 +24,7 @@ const std::vector trasform const std::vector transparentIntermediateValues = { true, false }; const std::vector multiChannelValues = { /*true,*/ false }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp index 48862426e0dced..f8ccfff534ecb0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp @@ -21,7 +21,7 @@ const std::vector trasform // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp index c098953d4eaa0e..324734790730f4 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp @@ -24,30 +24,30 @@ const std::vector trasform const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithSplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp index b1aedb764c239d..266aa0fe34c977 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -21,90 +21,90 @@ const std::vector trasform const std::vector params = { // FQ on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "u8" }, // FQ on weights { - {256ul, ngraph::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "u8" }, // FQ on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 
0.f }, { 254.f }, { -127.f }, { 127.f }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, "", "" }, // with incorrect zero point on activations { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, "", "" }, // with incorrect zero point on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, "", "" }, // QDq on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {{ngraph::element::f32}, {}, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {{ov::element::f32}, {}, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on activations { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {{ov::element::f32}, { {1000.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // issue #56886: with incorrect dequantization on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {{ngraph::element::f32}, {}, { {4.f, 2.f, 4.f, 2.f, 4.f, 2.f, 4.f, 2.f}, ngraph::element::f32, {8, 1, 1, 1}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {{ov::element::f32}, {}, { {4.f, 2.f, 4.f, 2.f, 4.f, 2.f, 4.f, 2.f}, ov::element::f32, {8, 1, 1, 1}, false }}, "", "" } }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 8, 16, 16 }, true} }; -const std::vector outputShapes = { +const std::vector outputShapes = { { 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp index 41ecc00ea1cad2..6944b93f6b4efd 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp @@ -56,20 +56,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "f32" @@ -110,20 +110,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" @@ -161,20 +161,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, 
ngraph::element::u8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "f32" @@ -212,47 +212,47 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" }, { - { 16ul, {{ 1, 1, 1, 1 }}, { -0.8f }, { 0.f }, { 0.f }, { 15.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 16ul, {{ 1, 1, 1, 1 }}, { -0.8f }, { 0.f }, { 0.f }, { 15.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 4, 4 }, { 4, 3, 4, 4 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp index ebbe0e273ae324..066a46e0dbb520 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp @@ -21,7 +21,7 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, {}, false, @@ -31,72 +31,72 @@ const std::vector params { {}, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, 
+ { 256ul, ov::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, true, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, - { 255ul, ngraph::Shape { 1 }, { 0.f }, { 254.f }, { -18.7f }, { 18.7f } }, + { 255ul, ov::Shape { 1 }, { 0.f }, { 254.f }, { -18.7f }, { 18.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -0.2f, -0.3f, -0.3f, -0.2f, -0.1f }, { 1.52806e-39f, 0.2f, 0.3f, 0.3f, 0.2f, 0.1f } }, false, @@ -104,10 +104,10 @@ const std::vector params "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f }, { 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f } }, @@ -117,34 +117,34 @@ const std::vector params }, // not supported quantization level on data { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 255ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "f32" }, // not supported quantization level on data & weights { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, 
+        { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         false,
-        { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}},
+        { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}},
         false,
         "Convolution",
         "f32"
     },
     // not supported quantization level on weights
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         false,
-        { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}},
+        { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}},
         false,
         "Convolution",
         "f32"
     }
 };
-const std::vector<ngraph::PartialShape> shapes = {
+const std::vector<ov::PartialShape> shapes = {
     { 1, 3, 16, 16 },
     { 4, 3, 16, 16 }
 };
@@ -161,14 +161,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionTransformation,
 const std::vector incorrectWeightsParams = {
     // incorrect weights
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
-        { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } },
         false
     },
     // correct weights
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
-        { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } },
         true
     }
 };
@@ -176,7 +176,7 @@ const std::vector i
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionWIthIncorrectWeightsTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::Shape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::Shape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(incorrectWeightsParams)),
@@ -185,16 +185,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionWIthIncorrectWeightsTransformatio
 namespace convolution3D {
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         false,
-        { 255ul, ngraph::Shape { 1, 1, 1}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
+        { 255ul, ov::Shape { 1, 1, 1}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
         false,
         "Convolution",
         "u8"
     },
 };
-const std::vector<ngraph::PartialShape> shapes = {
+const std::vector<ov::PartialShape> shapes = {
     { 1, 3, 16 },
     { 4, 3, 16 }
 };
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp
index f6001604033c55..ace6b989f1c99c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp
@@ -22,24 +22,24 @@ const std::vector elementwiseTypes = {
 const std::vector params = {
     {
         {
-            { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+            { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
             {
                 {},
-                { std::vector<float>(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} },
-                { {ngraph::element::f32}, {}, {std::vector<float>(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} }
+                { std::vector<float>(9, 1.f), ov::element::i8, {3, 3, 1, 1} },
+                { {ov::element::f32}, {}, {std::vector<float>(3, 1.f), ov::element::f32, {3, 1, 1, 1}} }
             },
-            { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+            { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         {
             {"fakeQuantizeBefore1", "convolution1"},
             {"fakeQuantizeBefore2", "convolution2"},
@@ -53,24 +53,24 @@ const std::vector p
     },
     {
         {
-            { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+            { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
             {
                 {},
-                { std::vector<float>(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} },
-                { {ngraph::element::f32}, {}, {std::vector<float>(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} }
+                { std::vector<float>(9, 1.f), ov::element::i8, {3, 3, 1, 1} },
+                { {ov::element::f32}, {}, {std::vector<float>(3, 1.f), ov::element::f32, {3, 1, 1, 1}} }
            },
            {}
        },
        {
-            { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+            { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
            {
                 {},
-                { std::vector<float>(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} },
-                { {ngraph::element::f32}, {}, {std::vector<float>(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} }
+                { std::vector<float>(9, 1.f), ov::element::i8, {3, 3, 1, 1} },
+                { {ov::element::f32}, {}, {std::vector<float>(3, 1.f), ov::element::f32, {3, 1, 1, 1}} }
            },
-            { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+            { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
        },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
        {
            {"fakeQuantizeBefore1", "convolution1"},
            {"fakeQuantizeBefore2", "convolution2"},
@@ -87,7 +87,7 @@ const std::vector p
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ElementwiseBranchSelectionTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(params),
         ::testing::ValuesIn(elementwiseTypes)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
index 41177df7cc51cb..c4392c4426d098 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
@@ -16,9 +16,9 @@ const std::vector testValues = {
     {1, 3, 16, 16},
     LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(),
     {
-        ngraph::element::f32,
-        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 },
-        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 }
+        ov::element::f32,
+        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 },
+        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 }
     },
     {
         { "fakeQuantize1" },
@@ -30,9 +30,9 @@ const std::vector testValues = {
     {1, 3, 16, 16},
     LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(),
     {
-        ngraph::element::f32,
-        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 },
-        { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ngraph::element::f32 }
+        ov::element::f32,
+        { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 },
+        { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ov::element::f32 }
     },
     {
         { "fakeQuantize1", "fakeQuantize2" }, // not fused
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp
index 7feb4ce5537911..83532f091dc4c1 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp
@@ -28,7 +28,7 @@ const std::vector fakeQuantizes =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndAvgPoolTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(precisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })),
+        ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(fakeQuantizes)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp
index c954184ef6ff31..5016feaa761214 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp
@@ -28,7 +28,7 @@ const std::vector fakeQuantizes =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndMaxPoolTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(precisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })),
+        ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(fakeQuantizes)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp
index 64930a8be44aaa..5f873a1057f7e8 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp
@@ -30,7 +30,7 @@ const std::vector testValues =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })),
+        ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(testValues)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp
index a2fb067cab8e8e..5a399e3b6ea464 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp
@@ -22,22 +22,22 @@ const std::vector trasformationParamValues = {
 const std::vector testValues = {
     {
-        { ngraph::element::u8, ngraph::element::i8 },
-        { ngraph::element::u8 },
+        { ov::element::u8, ov::element::i8 },
+        { ov::element::u8 },
         true,
         {
             { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
             { 255ul, { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }
         },
         {
-            ngraph::element::u8,
+            ov::element::u8,
             { 256ul, { }, { 0.f }, { 2.55f }, { 0.f }, { 255.f } },
             { }
         },
     },
     {
-        { ngraph::element::u8, ngraph::element::i8 },
-        { ngraph::element::i8 }, // Convolution on CPU doesn't support it, but it will be not used
+        { ov::element::u8, ov::element::i8 },
+        { ov::element::i8 }, // Convolution on CPU doesn't support it, but it will be not used
         // INT8 is not available for limited operation (Convolution)
         false,
         {
@@ -46,7 +46,7 @@ const std::vector testVa
     },
     {
         // original precision is used
-        ngraph::element::u8,
+        ov::element::u8,
         // FakeQuantize has to select the first available: U8, not limited operation required I8 but this fact doesn't affect
         { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 255.f } },
         // FakeQuantize on weights is not changed
         {
@@ -58,7 +58,7 @@ const std::vector testVa
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizePrecisionSelectionTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })),
+        ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(testValues)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp
index b6feeab81ff3a6..5b2244139f2107 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp
+++
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp @@ -88,7 +88,7 @@ const std::vector fakeQuantizeOnDataValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index 6a7f581c03a699..7fabd71421e5a4 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -24,77 +24,77 @@ const std::vector trasformationParamValues = { const std::vector fakeQuantizeOnDataValues = { { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.3f}, ov::element::f32, {}, false } }, {}, "f32" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "i8" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + 
{ ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.3f}, ov::element::f32, {}, false } }, {}, "f32" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "u8" @@ -104,7 +104,7 @@ const std::vector fakeQuanti INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeWithNotOptimalTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp index f7efaf173eb2a0..43a868ffcaaf4e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp @@ -16,20 +16,20 @@ const std::vector netPrecisions = { const std::vector shapes = { { - ngraph::PartialShape{ 1, 16 }, - ngraph::PartialShape{ 16, 8 }, + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 16, 8 }, false, false }, { - ngraph::PartialShape{ 1, 16 }, - ngraph::PartialShape{ 8, 16 }, + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 8, 16 }, false, true }, { - ngraph::PartialShape{ 16, 1 }, - ngraph::PartialShape{ 16, 8 }, + ov::PartialShape{ 16, 1 }, + ov::PartialShape{ 16, 8 }, true, false }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp index 207fa8c07722bc..04e5ec8139b2d9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp @@ -11,18 +11,18 @@ const std::vector precisions = { ov::element::f32 }; -const std::vector< ngraph::PartialShape > inputAndQuantizationShapes = { +const std::vector< ov::PartialShape > inputAndQuantizationShapes = { { 1, 4, 16, 16 }, }; const std::vector deqOperations = { { - { ngraph::element::f32 }, + { ov::element::f32 }, {1.f}, {0.45f} }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, {0.45f} } diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp index baf3927a029f2c..3c2af72db8f566 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp @@ -17,11 +17,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -29,11 +29,11 @@ const std::vector testValu {128, 3}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, - { {}, {}, { {0.01f, 0.1f, 1.f}, ngraph::element::f32, {1, 3} } }, - ngraph::element::f32, + ov::element::f32, + { {}, {}, { {0.01f, 0.1f, 1.f}, ov::element::f32, {1, 3} } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -42,11 +42,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { {0.01f, 0.f, 0.01f} } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -55,11 +55,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -68,11 +68,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -81,11 +81,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { {128}, ngraph::element::f32 }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, + { {128}, ov::element::f32 }, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -96,9 +96,9 @@ const std::vector testValu { { }, { }, - ngraph::element::i32, - { {ngraph::element::f32}, {}, {} }, - ngraph::element::f32, + ov::element::i32, + { {ov::element::f32}, {}, {} }, + ov::element::f32, { 256ul, {}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } } } }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp index 163a58cdf12b22..a3d26436350cc2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp @@ -36,7 +36,7 @@ const std::vector fakeQuantizeOnD INSTANTIATE_TEST_SUITE_P(smoke_LPT, FuseFakeQuantizeAndScaleShiftTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 9, 9 })), + ::testing::Values(ov::PartialShape({ 1, 3, 9, 9 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp index d1c58593896249..1cbd7152fbccfb 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp @@ -27,7 +27,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, // U8: per-channel quantization @@ -38,7 +38,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {1, 3, 1}, @@ -56,7 +56,7 @@ const std::vector testValues = { {1}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {3, 1}, @@ -74,7 +74,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp index 1a92960ac0f976..3e0b7e149482ac 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp @@ -15,7 +15,7 @@ const std::vector netPrecisions = { ov::element::f32 }; -const std::vector dimensions = { +const std::vector dimensions = { { 1, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp index 943d2265b5f6ac..3ccc4c23d752cc 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp 
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp @@ -20,7 +20,7 @@ const std::vector trasform const std::vector addPrecisionPreserved = { true, false }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }}, {{ 1, 6, 24 }, { 1, 24, 18 }} }; @@ -30,8 +30,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -40,8 +40,8 @@ const std::vector pa { 3ul, 0, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -50,8 +50,8 @@ const std::vector pa { 3ul, 1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -68,7 +68,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -77,8 +77,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -96,7 +96,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, GroupConvolutionTransformation::getTestCaseName); namespace test_values_4d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }}, }; @@ -105,8 +105,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -115,8 +115,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -148,7 +148,7 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_4d namespace test_values_3d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24 }, { 1, 24, 18 }}, }; @@ -157,8 +157,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -167,8 +167,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -200,7 +200,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_3d namespace depthwise { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 6, 18, 18 }}, {{ 1, 6, 24 }, { 1, 6, 18 }}, }; @@ -210,8 +210,8 @@ const std::vector pa { 6ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -228,7 +228,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -245,7 +245,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -264,7 +264,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace depthwise namespace i8_3d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{1, 6, 1, 24, 24}, {1, 24, 1, 18, 18}}, {{1, 24, 8, 12, 12}, {1, 24, 1, 1, 1}} }; @@ -274,8 +274,8 @@ const std::vector pa { 3ul, -1, - {256ul, ngraph::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}}, - {255ul, ngraph::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, + {256ul, ov::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}}, + {255ul, ov::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, true, "Convolution", "i8" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp index 5f04f68568e63e..f59641eb25a54d 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -62,20 +62,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -126,20 +126,20 @@ const std::vector // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -178,20 +178,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 
25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -233,20 +233,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5}}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5}}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -290,20 +290,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -350,20 +350,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, 
false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -410,20 +410,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ngraph::element::i8, true }, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { ov::element::f32, false }, + { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ov::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ov::element::i8, true }, + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -467,20 +467,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -524,20 +524,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -584,20 +584,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { 
-12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -606,7 +606,7 @@ const std::vector }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 6, 24, 24 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp index 94b3016055d832..83110bab6dc9a1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp @@ -11,28 +11,28 @@ const std::vector precisions = { ov::element::f32 }; -const std::vector> shapes = { +const std::vector> shapes = { {{1, 4, 16, 16}, {32, 32}}, {{1, 2, 48, 80}, {50, 60}}, }; const std::vector interpAttrs = { interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, true, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "linear", false, false, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp index 25fa68253b5172..a19455e85c1957 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp @@ -16,41 +16,41 @@ const std::vector precisions = { std::vector testValues = { { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, + { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "u8" }, { { 8, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, + { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, { 8, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "u8" }, { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, 
{12.7f}, {-12.8f}, {12.7f} },
+        { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         "matMul_original",
         "i8"
     },
     {
         { 1, 1, 1, 4, 12, 2 },
-        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
+        { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         { 1, 1, 1, 4, 2, 12 },
-        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
+        { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         "matMul_original",
         "i8"
     },
     {
         { 12 },
-        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
+        { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         { 12 },
-        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
+        { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         "matMul_original/MM",
         "i8"
     }
@@ -59,7 +59,7 @@ std::vector testValues = {
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, MatMulTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(precisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 384, 1024 })),
+        ::testing::Values(ov::PartialShape({ 1, 384, 1024 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(testValues)),
     MatMulTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp
index 0236b7c4d76d28..51578d3c1a993e 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp
@@ -19,7 +19,7 @@ std::vector testValues = {
     {
         { 2, 3, 4 },
         { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} },
-        { std::vector<float>(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } },
+        { std::vector<float>(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } },
         { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} },
         { {}, {}, {} },
         "FullyConnected",
@@ -29,9 +29,9 @@ std::vector testValues = {
     {
         { 2, 3, 4 },
         { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} },
-        { std::vector<float>(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } },
+        { std::vector<float>(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } },
         {},
-        { ngraph::element::f32, {}, {0.1f} },
+        { ov::element::f32, {}, {0.1f} },
         "FullyConnected",
         "u8"
     },
     {
         { 1, 3, 4 },
         { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}, {-10.5f}, {4.5f}, {-10.5f}, {4.5f} },
-        { std::vector<float>(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } },
+        { std::vector<float>(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } },
         { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} },
         { {}, {}, {} },
         "FullyConnected",
@@ -49,7 +49,7 @@ std::vector testValues = {
     {
         { 1, 1, 3, 4 },
         { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} },
-        { std::vector<float>(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } },
+        { std::vector<float>(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } },
         { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} },
         { {}, {}, {} },
         "FullyConnected",
@@ -59,9 +59,9 @@ std::vector testValues = {
     {
         { 1, 1, 3, 4 },
         { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} },
-        { std::vector<float>(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } },
+        { std::vector<float>(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } },
         {},
-        { ngraph::element::f32, {}, {{0.1f, 0.01f}, ngraph::element::f32, ngraph::Shape{ 2, 1 }} },
+        { ov::element::f32, {}, {{0.1f, 0.01f}, ov::element::f32, ov::Shape{ 2, 1 }} },
         "FullyConnected",
         "u8"
     },
     {
         { 1, 3, 4 },
         { 256ul, {{1}, {1}, {1}, {1}}, {0.f}, {255.f}, {0.f}, {25.5f} },
-        { std::vector<float>(4 * 4, 2.f), ngraph::element::f32, ngraph::Shape{ 4, 4 } },
+        { std::vector<float>(4 * 4, 2.f), ov::element::f32, ov::Shape{ 4, 4 } },
         { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-128.f}, {127.f} },
         { {}, {}, {} },
         "FullyConnected",
@@ -79,7 +79,7 @@ std::vector testValues = {
     {
         { 2, 3 },
         { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} },
-        { std::vector<float>{1, 2, 3, 4, 5, 6}, ngraph::element::f32, ngraph::Shape{ 2, 3 } },
+        { std::vector<float>{1, 2, 3, 4, 5, 6}, ov::element::f32, ov::Shape{ 2, 3 } },
         { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-12.8f}, {12.7f} },
         { {}, {}, {} },
         "FullyConnected",
@@ -89,9 +89,9 @@ std::vector testValues = {
     {
         { 2, 3 },
         { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} },
-        { std::vector<float>{1, 2, 3, 4, 5, 6}, ngraph::element::i8, ngraph::Shape{ 2, 3 } },
+        { std::vector<float>{1, 2, 3, 4, 5, 6}, ov::element::i8, ov::Shape{ 2, 3 } },
         {},
-        { ngraph::element::f32, {}, {0.1f} },
+        { ov::element::f32, {}, {0.1f} },
         "FullyConnected",
         "u8"
     }
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
index 2c4707bea7d044..a1d920576f74e1 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
@@ -16,12 +16,12 @@ const std::vector netPrecisions = {
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
-        { 255ul, ngraph::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }
+        { 256ul, ov::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
+        { 255ul, ov::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }
     },
 };
-const std::vector> inputShapes = {
+const std::vector> inputShapes = {
     {{ 1, 16 }, { 10, 16 }},
     {{ 1, 16 }, { 16, 10 }}
 };
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp
index 4c696bcb7bef97..a933b2170511ff 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp
@@ -47,9 +47,9 @@ const std::vector pa
     3,
     "",
     { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} },
-    { ngraph::element::u8 },
+    { ov::element::u8 },
     {
-        { ngraph::element::f32 },
+        {
ov::element::f32 }, {}, { 0.01f } }, @@ -62,9 +62,9 @@ const std::vector pa 3, "relu", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, { 0.01f } }, @@ -102,11 +102,11 @@ const std::vector pa {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, - { {0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f}, ngraph::element::f32, {1, 6, 1, 1} }, + { {0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f}, ov::element::f32, {1, 6, 1, 1} }, }, "Concatenation", "u8", @@ -124,10 +124,10 @@ const std::vector pa {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, - { {-127.f, -127.f / 2.f, -127.f / 3.f, -127.f / 4.f, -127.f / 5.f, -127.f / 6.f}, ngraph::element::f32, {1, 6, 1, 1} }, + { ov::element::f32 }, + { {-127.f, -127.f / 2.f, -127.f / 3.f, -127.f / 4.f, -127.f / 5.f, -127.f / 6.f}, ov::element::f32, {1, 6, 1, 1} }, { 0.01f }, }, "Concatenation", @@ -136,7 +136,7 @@ const std::vector pa }, }; -const std::vector> shapes = { +const std::vector> shapes = { {{ 1, 1, 16, 16 }, { 1, 2, 16, 16 }, { 1, 3, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 2, 16, 16 }, { 4, 3, 16, 16 }} }; @@ -167,7 +167,7 @@ namespace testValues2 { -1 }, }; - const std::vector> shapes = { + const std::vector> shapes = { {{ 1, 1, 16, 16 }, { 1, 1, 16, 16 }, { 1, 1, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 1, 16, 16 }, { 4, 1, 16, 16 }} }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp index 850e4f0bf5927f..510c6bac6dfc46 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp @@ -12,14 +12,14 @@ const std::vector precisions = { }; namespace shape4d { -const std::vector inputShapes = { +const std::vector inputShapes = { { 1ul, 3ul, 16ul, 16ul }, { 4ul, 3ul, 16ul, 16ul } }; const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -27,7 +27,7 @@ const std::vector params = { }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{1, 1, 1, 1}}, "output/GroupConvolution", "", @@ -35,7 +35,7 @@ const std::vector params = { }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{}}, "output/GroupConvolution", "", @@ -43,7 +43,7 @@ const std::vector params = { }, // Zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 
1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}},
         "output/GroupConvolution",
         "U8",
@@ -51,7 +51,7 @@ const std::vector params = {
     },
     // Zero point
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} },
         {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}},
         "output/GroupConvolution",
         "U8",
@@ -72,42 +72,42 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyToGroupConvolutionTransformation,
 } // namespace shape4d
 
 namespace shape5d {
-const std::vector<ngraph::PartialShape> inputShapes = {
+const std::vector<ov::PartialShape> inputShapes = {
     { 1ul, 3ul, 16ul, 16ul, 16ul },
     { 4ul, 3ul, 16ul, 16ul, 16ul }
 };
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}},
         "output/GroupConvolution",
         "U8"
     },
     // Multiply with scalar is not transformed to GroupConvolution
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         {{4.f}, element::f32, Shape{1, 1, 1, 1, 1}},
         "output/GroupConvolution",
         ""
     },
     // Multiply with scalar is not transformed to GroupConvolution
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         {{4.f}, element::f32, Shape{}},
         "output/GroupConvolution",
         ""
     },
     // Zero point
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}},
         "output/GroupConvolution",
         "U8"
     },
     // Zero point
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} },
         {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}},
         "output/GroupConvolution",
         "U8"
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp
index 56f113491eea8e..4e58bdaa91466f 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp
@@ -21,84 +21,84 @@ const auto precision_for_fused_cases = ov::element::undefined;
 const std::vector params = {
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         false,
-        { 256ul, ngraph::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         precision_for_fused_cases,
         true
     },
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         precision_for_fused_cases,
         false
     },
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         precision_for_fused_cases,
         false
     },
     {
         true,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         precision_for_fused_cases,
         false
     },
     {
         true,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         precision_for_fused_cases,
         false
     },
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         true,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
         precision_for_fused_cases,
         false
     },
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } },
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         precision_for_fused_cases,
         false
     },
     {
         false,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         true,
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } },
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
         precision_for_fused_cases,
         false
     },
-    { false, {}, false, {}, {}, ngraph::element::f32, false },
-    { true, {}, true, {}, {}, ngraph::element::f32, false },
+    { false, {}, false, {}, {}, ov::element::f32, false },
+    { true, {}, true, {}, {}, ov::element::f32, false },
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(params)),
     MultiplyTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp
index 50eb68bbfe968e..7bc084b95a3d1c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp
@@ -16,14 +16,14 @@ const std::vector netPrecisions = {
 
 const std::vector values = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }
     }
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyWithOneParentTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(values)),
     MultiplyWithOneParentTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp
index a2f9a9b82fe404..3eebef6330e9a1 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp
@@ -11,7 +11,7 @@ const std::vector precisions = {
     ov::element::f32
 };
 
-const std::vector<ngraph::PartialShape> inputAndQuantizationShapes = {
+const std::vector<ov::PartialShape> inputAndQuantizationShapes = {
     { 1ul, 4ul, 16ul, 16ul },
 };
 
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp
index af7ae7ddc32c20..378ad22a804f58 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp
@@ -14,7 +14,7 @@ const std::vector precisions = {
     ov::element::f32
 };
 
-const std::vector<std::pair<ngraph::PartialShape, ngraph::Shape> > inputAndQuantizationShapes = {
+const std::vector<std::pair<ov::PartialShape, ov::Shape> > inputAndQuantizationShapes = {
     { { 1ul, 4ul, 16ul, 16ul }, { 1ul } },
     { { 1ul, 4ul, 16ul, 16ul }, { 1ul, 4ul, 1ul, 1ul } },
 };
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
index ef919dc0b97943..3a91b39d1c89da 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
@@ -16,7 +16,7 @@ const std::vector netPrecisions = {
     ov::element::f32
 };
 
-const std::vector<ngraph::PartialShape> inputShapes = {
+const std::vector<ov::PartialShape> inputShapes = {
     { 1, 3, 16, 16},
     { 4, 3, 16, 16}
 };
@@ -26,17 +26,17 @@ const std::vector trasform
 };
 
 namespace commonTestCases {
-const std::vector<ngraph::op::PadMode> padModes = {
-    ngraph::op::PadMode::CONSTANT,
-    ngraph::op::PadMode::EDGE,
-    ngraph::op::PadMode::REFLECT,
-    ngraph::op::PadMode::SYMMETRIC
+const std::vector<ov::op::PadMode> padModes = {
+    ov::op::PadMode::CONSTANT,
+    ov::op::PadMode::EDGE,
+    ov::op::PadMode::REFLECT,
+    ov::op::PadMode::SYMMETRIC
 };
 
 const std::vector params = {
     // tensor quantization
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, 1, 1 },
         { 0, 0, 1, 1 },
         0.f,
@@ -46,7 +46,7 @@ const std::vector params = {
     // per-channel quantization with the same values
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -62,7 +62,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -91,7 +91,7 @@ namespace testCasesForConstantMode {
 const std::vector params = {
     // tensor quantization
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
         { 0, 0, 1, 1 },
         { 0, 0, 1, 1 },
         0.f,
         "f32"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
         { 0, 0, -1, 1 },
         { 0, 0, 1, -1 },
         0.f,
         "f32"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
         { 0, 0, 0, 0 },
         { 0, 0, -1, -1 },
         0.f,
         "f32"
     },
     // tensor quantization with subtract, non zero padValue and pad by unique dimension
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, 2, 0 },
         { 0, 0, 1, 0 },
         2.f,
         "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, 2, 0 },
         { 0, 0, -1, 0 },
         2.f,
         "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, -1, 0 },
         { 0, 0, -1, 0 },
         2.f,
         "u8"
     },
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -159,7 +159,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -174,7 +174,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -190,7 +190,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }
         },
@@ -203,7 +203,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }
         },
@@ -216,7 +216,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }
         },
@@ -232,7 +232,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
         ::testing::ValuesIn(inputShapes),
-        ::testing::Values(ngraph::op::PadMode::CONSTANT),
+        ::testing::Values(ov::op::PadMode::CONSTANT),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
@@ -240,16 +240,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation,
 } // namespace testCasesForConstantMode
 
 namespace testCasesForOtherModes {
-const std::vector<ngraph::op::PadMode> modesWithoutConstant = {
-    ngraph::op::PadMode::EDGE,
-    ngraph::op::PadMode::REFLECT,
-    ngraph::op::PadMode::SYMMETRIC
+const std::vector<ov::op::PadMode> modesWithoutConstant = {
+    ov::op::PadMode::EDGE,
+    ov::op::PadMode::REFLECT,
+    ov::op::PadMode::SYMMETRIC
 };
 
 const std::vector params = {
     // tensor quantization
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } },
         { 0, 0, 1, 1 },
         { 0, 0, 1, 1 },
         0.f,
@@ -260,7 +260,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f },
             { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }
         },
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp
index 854026354cca24..7782168311ba7a 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp
@@ -16,16 +16,16 @@ const std::vector precisions = {
 
 std::vector testValues = {
     { {}, false},
-    { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false },
-    { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true },
-    { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true },
-    { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true }
+    { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false },
+    { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true },
+    { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true },
+    { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true }
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, PReluTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(precisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(testValues)),
     PReluTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp
index 19ee02775300b1..303ae4f05ca975 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp
@@ -21,52 +21,52 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        ngraph::element::f32,
+        ov::element::f32,
         { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
         {},
-        { std::vector<float>{ 2.f }, ngraph::element::i8, {9, 16}},
+        { std::vector<float>{ 2.f }, ov::element::i8, {9, 16}},
         {
-            { ngraph::element::f32, false },
+            { ov::element::f32, false },
             {},
-            { {0.03f}, ngraph::element::f32, {/* from parameter */}, false }
+            { {0.03f}, ov::element::f32, {/* from parameter */}, false }
         },
         { {3, 3, 16, 1} },
-        { {2}, ngraph::element::f32, {1, 1, 16, 1}, false },
+        { {2}, ov::element::f32, {1, 1, 16, 1}, false },
         { {2, 3, 0, 1} },
         { {16, 1, 1, 3, 3} },
-        ngraph::element::f32,
+        ov::element::f32,
         {},
         "output_original",
         "u8"
     },
    {
-        ngraph::element::f32,
+        ov::element::f32,
        { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } },
        {},
-        { std::vector<float>{ 2.f }, ngraph::element::i8, {9, 16}},
+        { std::vector<float>{ 2.f }, ov::element::i8, {9, 16}},
        {
-            { ngraph::element::f32, false },
-            { {127.0f}, ngraph::element::f32, {/* from parameter */}, false},
-            { {0.03f}, ngraph::element::f32, {/* from parameter */}, false }
+            { ov::element::f32, false },
+            { {127.0f}, ov::element::f32, {/* from parameter */}, false},
+            { {0.03f}, ov::element::f32, {/* from parameter */}, false }
        },
        { {3, 3, 16, 1} },
-        { {2}, ngraph::element::f32, {1, 1, 16, 1}, false },
+        { {2}, ov::element::f32, {1, 1, 16, 1}, false },
        { {2, 3, 0, 1} },
        { {16, 1, 1, 3, 3} },
-        ngraph::element::f32,
+        ov::element::f32,
        {},
        "output_original",
        "f32"
    }
 };
 
-const std::vector<ngraph::PartialShape> inputShapes = {
+const std::vector<ov::PartialShape> inputShapes = {
     { 1, 16, 9, 9 },
     { 4, 16, 9, 9 }
 };
 
-const std::vector<ngraph::Shape> dequantizationOnWeightElementwiseConstantShapes = {
-    { ngraph::Shape({1, 16}) }
+const std::vector<ov::Shape> dequantizationOnWeightElementwiseConstantShapes = {
+    { ov::Shape({1, 16}) }
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, PullReshapeThroughDequantizationTransformation,
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp
index e2b278a4f5eb2e..5b50dcce917a5a 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp
@@ -24,17 +24,17 @@ const std::vector param
     {
         // X
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
         // H
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
@@ -54,17 +54,17 @@ const std::vector param
     {
         // X
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
         // H
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
@@ -82,8 +82,8 @@ const std::vector param
     }
 };
 
-const std::vector<std::vector<ngraph::PartialShape>> activations_shapes = {{{1, 2, 16}, {1, 1, 128}, {1, 1, 128}}};
-const std::vector<std::vector<ngraph::Shape>> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}};
+const std::vector<std::vector<ov::PartialShape>> activations_shapes = {{{1, 2, 16}, {1, 1, 128}, {1, 1, 128}}};
+const std::vector<std::vector<ov::Shape>> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}};
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, RecurrentCellTransformation,
     ::testing::Combine(
@@ -103,17 +103,17 @@ const std::vector param
     {
         // X
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
         // H
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
@@ -133,17 +133,17 @@ const std::vector param
     {
         // X
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
         // H
         {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}},
-        {ngraph::element::u8},
+        {ov::element::u8},
         {
-            {ngraph::element::f32},
+            {ov::element::f32},
             {},
             {0.01f},
         },
@@ -161,8 +161,8 @@ const std::vector param
     }
 };
 
-const std::vector<std::vector<ngraph::PartialShape>> activations_shapes = {{{1, 1, 3}, {1, 1, 3}, {}}};
-const std::vector<std::vector<ngraph::Shape>> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}};
+const std::vector<std::vector<ov::PartialShape>> activations_shapes = {{{1, 1, 3}, {1, 1, 3}, {}}};
+const std::vector<std::vector<ov::Shape>> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}};
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, RecurrentCellTransformation,
     ::testing::Combine(
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp
index ca90bddce69815..525e0201626283 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp
@@ -22,28 +22,28 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 2, 3 },
         true,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 2, 3 },
         false,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 1 },
         true,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 1 },
         false,
        "Output_original",
@@ -51,7 +51,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -64,7 +64,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -77,7 +77,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -90,7 +90,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -106,7 +106,7 @@ const std::vector params =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMaxTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp
index 4ac21b8fa6660e..73db6da96a879a 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp
@@ -22,7 +22,7 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 1.27f }, { 0.f }, { 1.27f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 1.27f }, { 0.f }, { 1.27f } },
         {},
         {},
         {{ 2, 3 }, true},
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 },
         { ov::element::u8 },
         {
             { ov::element::f32 },
@@ -44,7 +44,7 @@ const std::vector params =
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 },
         { ov::element::u8 },
         {
             { ov::element::f32 },
@@ -57,7 +57,7 @@ const std::vector params =
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         {},
         {},
         {{ 2, 3 }, false},
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         {},
         {},
         {{ 1 }, true},
@@ -75,7 +75,7 @@ const std::vector params =
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         {},
         {},
         {{ 1 }, false},
@@ -85,7 +85,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -100,7 +100,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -115,7 +115,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -130,7 +130,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -148,7 +148,7 @@ const std::vector params =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMeanTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp
index c2ce9ff382a8f9..f44e1926120360 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp
@@ -22,28 +22,28 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 2, 3 },
         true,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 2, 3 },
         false,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 1 },
         true,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 1 },
         false,
        "Output_original",
@@ -51,7 +51,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -64,7 +64,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -77,7 +77,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -90,7 +90,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -106,7 +106,7 @@ const std::vector params =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMinTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp
index 93a6f95fc8be0e..6c2f89f1e88fd2 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp
@@ -22,14 +22,14 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } },
         { 2, 3 },
         true,
        "Output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } },
         { 2, 3 },
         false,
        "Output_original",
@@ -37,7 +37,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -50,7 +50,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -63,7 +63,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -76,7 +76,7 @@ const std::vector params =
     },
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -92,7 +92,7 @@ const std::vector params =
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceSumTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp
index 1884e56c210d56..600af536d4404c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp
@@ -16,16 +16,16 @@ const std::vector precisions = {
 
 std::vector testValues = {
     { {}, false},
-    { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false },
-    { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true },
-    { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true },
-    { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true }
+    { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false },
+    { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true },
+    { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true },
+    { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true }
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReluTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(precisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(testValues)),
     ReluTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp
index cb32195d087d53..34d7ad6ea2ecf7 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp
@@ -23,7 +23,7 @@ const std::vector params = {
     {
         { 1, 3, 32 },
         { 1, 3, 4, 8 },
-        { 256ul, ngraph::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        "Reshape",
        "u8"
     },
@@ -31,7 +31,7 @@ const std::vector params = {
     {
         { 1, 3, 32 },
         { -1 },
-        { 256ul, ngraph::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        "Reshape",
        "u8"
     },
@@ -39,7 +39,7 @@ const std::vector params = {
     {
         { 1, 3, 16, 16 },
         { 1, 3, 256 },
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        "Reshape",
        "u8"
     },
@@ -47,7 +47,7 @@ const std::vector params = {
     {
         { 1, 3, 16, 16 },
         { 0, 3, -1 },
-        { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
+        { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
        "u8"
     },
@@ -55,7 +55,7 @@ const std::vector params = {
     {
         { 1, 3, 4, 8 },
         { 1, -1 },
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        "Reshape",
        "u8"
     },
@@ -63,7 +63,7 @@ const std::vector params = {
     {
         { 1, 3, 4, 8 },
         { 1, 3, 4, 8, 1, 1 },
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        "Reshape",
        "u8"
     },
@@ -73,7 +73,7 @@ const std::vector params = {
         { 1, -1 },
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 255.f/2.f, 255.f/3.f },
             { 0.f, 0.f, 0.f },
@@ -88,7 +88,7 @@ const std::vector params = {
         { 1, 3, -1 },
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 255.f/2.f, 255.f/3.f },
             { 0.f, 0.f, 0.f },
@@ -104,7 +104,7 @@ const std::vector params = {
         { 1, -1, 8 },
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 255.f/2.f, 255.f/3.f },
             { 0.f, 0.f, 0.f },
@@ -117,7 +117,7 @@ const std::vector params = {
     {
         { 1, 3, 16, 16 },
         { 1, 1, 48, 16 },
-        { 256ul, ngraph::Shape{ 1, 3, 1, 1 },
+        { 256ul, ov::Shape{ 1, 3, 1, 1 },
           { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f },
           { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
@@ -127,7 +127,7 @@ const std::vector params = {
     {
         { 1, 3, 16 },
         { 1, 1, 6, 8 },
-        { 256ul, ngraph::Shape{ 1, 3, 1 },
+        { 256ul, ov::Shape{ 1, 3, 1 },
          { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f },
          { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
@@ -137,7 +137,7 @@ const std::vector params = {
     {
         { 1, 3, 2, 4 },
         { 1, 1, 24 },
-        { 256ul, ngraph::Shape{ 1, 3, 1, 1 },
+        { 256ul, ov::Shape{ 1, 3, 1, 1 },
          { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f },
          { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
@@ -147,7 +147,7 @@ const std::vector params = {
     {
         { 1, 3, 2, 4, 2 },
         { 1, 1, 48 },
-        { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 },
+        { 256ul, ov::Shape{ 1, 3, 1, 1, 1 },
          { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f },
          { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
@@ -157,7 +157,7 @@ const std::vector params = {
     {
         { 1, 3, 2, 4, 2 },
         { 1, 1, 3, 16 },
-        { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 },
+        { 256ul, ov::Shape{ 1, 3, 1, 1, 1 },
          { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f },
          { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } },
        "Reshape",
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp
index 10b33b99146e47..8753d9002fdf77 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp
@@ -14,7 +14,7 @@ const std::vector netPrecisions = {
     ov::element::f32
 };
 
-const std::vector<ngraph::PartialShape> inputShapes = {
+const std::vector<ov::PartialShape> inputShapes = {
     { 1, 3, 16, 16 },
     { 4, 3, 16, 16 }
 };
@@ -25,14 +25,14 @@ const std::vector trasform
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         0,
         1,
        "output_original",
        "u8"
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         -3,
         1,
        "output_original",
@@ -41,7 +41,7 @@ const std::vector par
     {
         {
             256ul,
-            ngraph::Shape { 1, 3, 1, 1 },
+            ov::Shape { 1, 3, 1, 1 },
             { 0.f },
             { 25.5f },
             { 0.f, 0.f, 0.f },
@@ -55,7 +55,7 @@ const std::vector par
     {
         {
             256ul,
-            ngraph::Shape { 1, 3, 1, 1 },
+            ov::Shape { 1, 3, 1, 1 },
             { 0.f },
             { 25.5f },
             { -4.f, -3.f, 0.f },
@@ -67,7 +67,7 @@ const std::vector par
        "u8"
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         2,
         4,
        "output_original",
@@ -76,7 +76,7 @@ const std::vector par
     {
         {
             256ul,
-            ngraph::Shape { 1, 3, 1, 1 },
+            ov::Shape { 1, 3, 1, 1 },
             { 0.f },
             { 25.5f },
             { 0.f, 0.f, 0.f },
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp
index b610d8498151a9..94908afacac49a 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp
@@ -18,7 +18,7 @@ const std::vector params = {
     {
         { 1, 3, 100, 171 },
         { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 },
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
        "SpaceToBatch",
        "u8"
     },
@@ -27,7 +27,7 @@ const std::vector params = {
         { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 },
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 255.f/2.f, 255.f/3.f },
             { 0.f, 0.f, 0.f },
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp
index 39f7ffdefe7377..635a53c74b37e6 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp
@@ -27,18 +27,18 @@ const std::vector trasform
 const std::vector params = {
     // tensor quantization, split second dimension
     {
-        { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } },
+        { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } },
         2, 2ul
     },
     // tensor quantization, split third dimension
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } },
         -1, 2ul
     },
     // per-channel quantization with the same values, split second dimension
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -49,7 +49,7 @@ const std::vector params = {
     // per-channel quantization with the same values, per-channel split
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -61,7 +61,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -73,7 +73,7 @@ const std::vector params = {
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -86,7 +86,7 @@ const std::vector params = {
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, SplitTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp
index 9af94d93064c9a..56ccc48355b896 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp
@@ -23,22 +23,22 @@ namespace {
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 3 },
         { 1, 3, 5, 1}
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 2, 3 },
         { 1, 1, 1, 1 }
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 3 },
         { 1, 64, 32, 1 }
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 2.0, 3.0 },
         { 1, 32, 1, 1 }
     }
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp
index 141cc31aa0c709..ebd9cca91a8e80 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp
@@ -23,7 +23,7 @@ const std::vector trasform
 const std::vector params = {
     // channel slice, tensor quantization
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, 0, 0 }, // begin
         { 1, 2, 1, 1 }, // end
         { 1, 1, 1, 1 }, // strided
@@ -35,7 +35,7 @@ const std::vector params
     },
     // special dimension slice, tensor quantization
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } },
         { 0, 0, 0, 0 },
         { 1, 3, 20, 24 },
         { 1, 1, 1, 1 },
@@ -49,7 +49,7 @@ const std::vector params
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 25.5f, 2.55f },
             { 0.f, 0.f, 0.f },
@@ -68,7 +68,7 @@ const std::vector params
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 25.5f, 2.55f },
             { 0.f, 0.f, 0.f },
@@ -87,7 +87,7 @@ const std::vector params
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { 0.f, 0.f, 0.f },
             { 255.f, 25.5f, 2.55f },
             { 0.f, 0.f, 0.f },
@@ -107,7 +107,7 @@ const std::vector params
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, StridedSliceTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 24, 24 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 24, 24 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp
index 690c5103c1f4a0..03ec01495394ba 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp
@@ -15,16 +15,16 @@ const std::vector testVal
     // U8: Multiply {} => Multiply (ScaleShift)
     {
         {1, 3, 16, 16},
-        ngraph::element::f32,
-        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
+        ov::element::f32,
+        { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
     },
     // U8: Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift)
     {
         {1, 3, 16, 16},
-        ngraph::element::f32,
+        ov::element::f32,
         {
             256ul,
-            ngraph::Shape({1, 3, 1, 1}),
+            ov::Shape({1, 3, 1, 1}),
             {0.f, 0.f, 0.f},
             {2.55f, 2.55f / 2.f, 2.55f / 3.f},
             {0.f, 0.f, 0.f},
@@ -34,10 +34,10 @@ const std::vector testVal
     // U8: Subtract + Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift)
     {
         {1, 3, 16, 16},
-        ngraph::element::f32,
+        ov::element::f32,
         {
             256ul,
-            ngraph::Shape({1, 3, 1, 1}),
+            ov::Shape({1, 3, 1, 1}),
             {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f},
             {2.55f, 2.55f / 2.f, 2.55f / 3.f},
             {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f},
@@ -46,10 +46,10 @@ const std::vector testVal
     },
     {
         {1, 3, 16, 16},
-        ngraph::element::f32,
+        ov::element::f32,
         {
             256ul,
-            ngraph::Shape({1}),
+            ov::Shape({1}),
             {2.55f / 2},
             {2.55f},
             {2.55f / 2},
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp
index 768e4911a57a91..89fcd6dfc271ca 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp
@@ -25,7 +25,7 @@ const std::vector trasformationParamValues = {
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, SubtractTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues)),
     SubtractTransformation::getTestCaseName);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp
index 53d2c6db762d64..7d29f015c8c0b3 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp
@@ -28,7 +28,7 @@ const std::vector transposeChannelDimValues = { true, false };
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, TransposeAfterMatMulTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(perTensorValues),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp
index e9554599355f7c..1d458f02445152 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp
@@ -20,7 +20,7 @@ const std::vector testValues = {
         { 1, 1000, 1, 1},
         { 0, 2, 3, 1},
         LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(),
-        ngraph::element::f32,
+        ov::element::f32,
         {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}}
     },
     // U8: per-channel quantization
     {
         { 1, 3, 1, 1},
         { 0, 2, 3, 1},
         LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(),
-        ngraph::element::f32,
+        ov::element::f32,
         {
             256,
             {1, 3, 1, 1},
@@ -43,7 +43,7 @@ const std::vector testValues = {
         { 1, 1000, 1, 1, 3, 4},
         { 0, 2, 1, 3, 5, 4},
         LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(),
-        ngraph::element::f32,
+        ov::element::f32,
         {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}}
     },
 };
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp
index 83b61268b52d06..d8f92ba36531bd 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp
@@ -23,27 +23,27 @@ namespace {
 
 const std::vector params = {
     {
-        { 256ul, ngraph::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 3.0 },
         { 3, 3, 5}
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } },
         { 3.0 },
         { 3, 3, 3, 5 }
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 3.0 },
         { 3, 4, 5, 6 }
     },
     {
-        { 256ul, ngraph::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 2.0, 3.0 },
         { 3, 4 }
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
+        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } },
         { 4.0 },
         { 46, 128, 2, 3 }
     }
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp
index 12c19e0d10cc98..4d3827f31cdc23 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp
@@ -27,13 +27,13 @@ const std::vector trasform
 const std::vector params{
     // tensor quantization, split second dimension
     {
-        { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } },
+        { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } },
         2,
         std::vector<size_t>{9, 7}
     },
     // tensor quantization, split third dimension
     {
-        { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } },
+        { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } },
         -1,
         std::vector<size_t>{15, 1}
     },
@@ -41,7 +41,7 @@ const std::vector param
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -54,7 +54,7 @@ const std::vector param
     {
         {
             256ul,
-            ngraph::Shape{ 1, 3, 1, 1 },
+            ov::Shape{ 1, 3, 1, 1 },
             { -127.f, 0.f, 128.f / 2.f },
             { 128.f / 4.f, 128.f / 2.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -66,7 +66,7 @@ const std::vector param
     // per-channel quantization with the same values, per-channel split
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -78,7 +78,7 @@ const std::vector param
     // per-channel quantization with the same values, split third dimension
     {
         {
-            256ul, ngraph::Shape{ 1, 3, 1, 1 },
+            256ul, ov::Shape{ 1, 3, 1, 1 },
             { -127.f, -127.f, -127.f },
             { 128.f, 128.f, 128.f },
             { 0.f, 0.f, 0.f },
@@ -92,7 +92,7 @@ const std::vector param
 INSTANTIATE_TEST_SUITE_P(smoke_LPT, VariadicSplitTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })),
+        ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp
index f2a6c3cc7b99c6..8b7cd59c8395cb 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp
@@ -20,49 +20,49 @@ const std::vector params = {
     {
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } },
         false,
-        {ov::element::i8}, {ngraph::element::f32, ngraph::element::i8}
+        {ov::element::i8}, {ov::element::f32, ov::element::i8}
     },
     {
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         false,
-        {ov::element::i8}, {ngraph::element::f32, ngraph::element::i8}
+        {ov::element::i8}, {ov::element::f32, ov::element::i8}
     },
     {
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } },
         true,
-        {ov::element::i8}, {ngraph::element::i8, ngraph::element::f32}
+        {ov::element::i8}, {ov::element::i8, ov::element::f32}
    },
    {
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } },
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
        true,
-        {ov::element::i8}, {ngraph::element::i8, ngraph::element::f32}
+        {ov::element::i8}, {ov::element::i8, ov::element::f32}
    },
    {
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } },
        false,
-        {ov::element::u8}, {ngraph::element::f32, ngraph::element::u8}
+        {ov::element::u8}, {ov::element::f32, ov::element::u8}
    },
    {
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        false,
-        {ov::element::u8}, {ngraph::element::f32, ngraph::element::u8}
+        {ov::element::u8}, {ov::element::f32, ov::element::u8}
    },
    {
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } },
        true,
-        {ov::element::u8}, {ngraph::element::u8, ngraph::element::f32}
+        {ov::element::u8}, {ov::element::u8, ov::element::f32}
    },
    {
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } },
        { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
        true,
-        {ov::element::u8}, {ngraph::element::u8, ngraph::element::f32}
+        {ov::element::u8}, {ov::element::u8, ov::element::f32}
    },
    { {}, {}, false }, { {}, {}, true },
 };
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp
index 50b24fb99285fc..51ef48c52bd6e6 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp
@@ -75,28 +75,28 @@ const std::vector para
         { ov::element::u8, false },
         {
             {ov::element::f32},
-            { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true },
+            { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true },
             { {0.1f}, ov::element::f32, {}, false }
         },
         { std::vector<float>{ 15.f }, ov::element::f32},
@@ -69,7 +69,7 @@ const std::vector para
         { ov::element::i8, false },
         {
             { ov::element::f32, false },
-            { {-128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true },
+            { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true },
             { {0.2f}, ov::element::f32, {}, false }
         },
        "Convolution",
@@ -166,7 +166,7 @@ const std::vector para
         { ov::element::u8, false },
         {
             { ov::element::f32, false },
-            { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true },
+            { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true },
             { {0.1f}, ov::element::f32, {}, false }
         },
         {{0.5f}, ov::element::i8},
@@ -174,7 +174,7 @@ const std::vector para
         {},
         {
             { ov::element::f32, false },
-            { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true },
+            { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true },
             { {0.2f}, ov::element::f32, {}, false }
         },
        "Convolution",
@@ -217,7 +217,7 @@ const std::vector para
         { ov::element::u8, false },
         {
             { ov::element::f32, false },
-            { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true },
+            { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true },
             { {0.1f}, ov::element::f32, {}, false }
         },
         {{0.5f}, ov::element::i8},
ov::element::i8}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp index 6039991ce0ef80..e89e1d4732f846 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -26,7 +26,7 @@ const std::vector p { {}, { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, - { {ov::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, @@ -35,7 +35,7 @@ const std::vector p { {}, { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, - { {ov::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, @@ -53,7 +53,7 @@ const std::vector p { {}, { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, - { {ov::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, @@ -62,7 +62,7 @@ const std::vector p { {}, { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, - { {ov::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp index 92b387fba06189..8539648d1d0cc9 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp @@ -23,7 +23,7 @@ const std::vector trasformationParamValues = { const std::vector testValues = { { - { ov::element::u8, ngraph::element::i8 }, + { ov::element::u8, ov::element::i8 }, { ov::element::u8 }, true, { @@ -37,7 +37,7 @@ const std::vector testVa }, }, { - { ov::element::u8, ngraph::element::i8 }, + { ov::element::u8, ov::element::i8 }, { ov::element::i8 }, // INT8 is not available for limited operation (Convolution) false, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index 1c7f50e49ec413..91e57fe3ed6e82 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -28,7 +28,7 @@ const std::vector fakeQuanti { ov::element::i8, false }, { { ov::element::f32, false }, - { {-128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.1f}, ov::element::f32, {}, false } }, {{5.f}, ov::element::i8}, @@ -36,7 +36,7 @@ const std::vector fakeQuanti {}, { { ov::element::f32, false }, - { {127.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.3f}, ov::element::f32, {}, false } }, {}, @@ -74,7 +74,7 @@ const std::vector fakeQuanti {}, { { ov::element::f32, false }, - { {127.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.3f}, ov::element::f32, {}, false } }, {}, @@ -85,7 +85,7 @@ const std::vector fakeQuanti { ov::element::i8, false }, { { ov::element::f32, false }, - { {-128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.1f}, ov::element::f32, {}, false } }, {{5.f}, ov::element::i8}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp index 1dadc68802c98a..514149f327168d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -67,7 +67,7 @@ const std::vector { ov::element::u8, false }, { {ov::element::f32}, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, @@ -75,7 +75,7 @@ const std::vector { ov::element::i8, false }, { { ov::element::f32, false }, - { {-128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, @@ -131,7 +131,7 @@ const std::vector { ov::element::u8, false }, { {ov::element::f32}, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, @@ -139,7 +139,7 @@ const std::vector { ov::element::i8, false }, { { ov::element::f32, false }, - { {-128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, @@ -295,7 +295,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), 
ov::element::i8, {6, 2, 5, 5} }, @@ -303,7 +303,7 @@ const std::vector {}, { { ov::element::f32, false }, - { {127.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, @@ -355,7 +355,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, @@ -363,7 +363,7 @@ const std::vector {}, { { ov::element::f32, false }, - { {127.f}, ov::element::f32, {}, false, 1ul, ngraph::element::i8, true }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, @@ -415,7 +415,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, @@ -423,7 +423,7 @@ const std::vector {}, { { ov::element::f32, false }, - { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ov::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ngraph::element::i8, true }, + { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ov::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ov::element::i8, true }, { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, @@ -472,7 +472,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, @@ -529,7 +529,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, @@ -589,7 +589,7 @@ const std::vector { ov::element::u8, false }, { { ov::element::f32, false }, - { {128.f}, ov::element::f32, {}, false, 1ul, ngraph::element::u8, true }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, { {0.1f}, ov::element::f32, {}, false } }, { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp index 5d15afc56e3052..f7db97cf6c0551 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp @@ -19,21 +19,21 @@ const std::vector> shapes = { const std::vector interpAttrs = { interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, 
"nearest", false, true, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "linear", false, false, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp index cd5c4cd682d24c..7c97eb25af837f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -57,7 +57,7 @@ std::vector testValues = { { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f} }, { std::vector(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } }, {}, - { ov::element::f32, {}, {{0.1f, 0.01}, ngraph::element::f32, ov::Shape{ 2, 1 }} }, + { ov::element::f32, {}, {{0.1f, 0.01}, ov::element::f32, ov::Shape{ 2, 1 }} }, "FullyConnected", "U8" }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp index 62af0f771aec0f..5a2f09f48b3699 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp @@ -22,7 +22,7 @@ const std::vector params = { false, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ngraph::element::i8 + ov::element::undefined, // ov::element::i8 false }, { @@ -31,7 +31,7 @@ const std::vector params = { false, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // ngraph::element::u8 + ov::element::undefined, // ov::element::u8 false }, { @@ -40,7 +40,7 @@ const std::vector params = { false, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, //ngraph::element::u8 + ov::element::undefined, //ov::element::u8 false }, { @@ -49,7 +49,7 @@ const std::vector params = { false, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ngraph::element::i8 + ov::element::undefined, // ov::element::i8 false }, { @@ -58,7 +58,7 @@ const std::vector params = { true, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ngraph::element::i8 + ov::element::undefined, // ov::element::i8 false }, { @@ -67,7 +67,7 @@ const std::vector params = { false, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // 
ngraph::element::u8 + ov::element::undefined, // ov::element::u8 false }, { @@ -76,11 +76,11 @@ const std::vector params = { true, { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // ngraph::element::u8 + ov::element::undefined, // ov::element::u8 false }, - { false, {}, false, {}, {}, ov::element::undefined /* ngraph::element::f32 */, false }, - { true, {}, true, {}, {}, ov::element::undefined /* ngraph::element::f32 */, false }, + { false, {}, false, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, + { true, {}, true, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation, diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp index 67ca9cc8fc46af..d61cd20dac8560 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -42,7 +42,7 @@ void ConvolutionBackpropDataTransformation::SetUp() { std::tie(netPrecision, inputShapeAndHandling, outputShape, targetDevice, params, param) = this->GetParam(); - std::shared_ptr weights; + std::shared_ptr weights; const auto inputShape = inputShapeAndHandling.first; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 51b130d463df22..eac38bdfa4429b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -22,7 +22,7 @@ namespace LayerTestsDefinitions { std::string MatMulWithOptimizedConstantFq::getTestCaseName( const testing::TestParamInfo& obj) { ov::element::Type netPrecision; - std::pair shapes; + std::pair shapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param; @@ -43,7 +43,7 @@ void MatMulWithOptimizedConstantFq::SetUp() { abs_threshold = 2.1; ov::element::Type precision; - std::pair shapes; + std::pair shapes; ov::pass::low_precision::LayerTransformation::Params params; MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param; std::tie(precision, shapes, targetDevice, param) = this->GetParam(); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp index 6e780dc2c30c44..20806c90f728c3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp @@ -11,8 +11,7 @@ #include #include -#include -#include +#include "ov_ops/type_relaxed.hpp" #include 
"low_precision/layer_transformation.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" From 3407706ac94ebf8c2f9d10468a0a88e16cfe81e6 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 19 Jan 2024 20:44:37 +0400 Subject: [PATCH 101/122] Supported dynamic protobuf in ONNX tests (#22272) --- src/frontends/onnx/CMakeLists.txt | 1 - src/frontends/onnx/tests/CMakeLists.txt | 21 +- src/frontends/onnx/tests/onnx_editor.cpp | 2 - .../tests/onnx_editor_topological_sort.cpp | 2 - .../onnx/tests/onnx_import_exceptions.cpp | 5 +- .../onnx/tests/onnx_import_library.cpp | 29 --- .../tests/onnx_import_org_openvino.in.cpp | 4 +- .../onnx/tests/onnx_import_with_editor.in.cpp | 1 - .../onnx/tests/onnx_ops_registration.cpp | 3 - src/frontends/onnx/tests/onnx_test_util.cpp | 228 ------------------ src/frontends/onnx/tests/onnx_test_util.hpp | 48 ---- .../onnx/tests/onnx_transformations.cpp | 3 - 12 files changed, 8 insertions(+), 339 deletions(-) delete mode 100644 src/frontends/onnx/tests/onnx_import_library.cpp delete mode 100644 src/frontends/onnx/tests/onnx_test_util.cpp delete mode 100644 src/frontends/onnx/tests/onnx_test_util.hpp diff --git a/src/frontends/onnx/CMakeLists.txt b/src/frontends/onnx/CMakeLists.txt index c3f02f98cedcf4..8585c8b80c5641 100644 --- a/src/frontends/onnx/CMakeLists.txt +++ b/src/frontends/onnx/CMakeLists.txt @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # - add_subdirectory(onnx_common) add_subdirectory(frontend) diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt index e94432041ba8b7..eea80d57ecbf1a 100644 --- a/src/frontends/onnx/tests/CMakeLists.txt +++ b/src/frontends/onnx/tests/CMakeLists.txt @@ -74,10 +74,8 @@ set(SRC onnx_editor.cpp onnx_editor_topological_sort.cpp onnx_import_exceptions.cpp - onnx_import_library.cpp onnx_importer_test.cpp onnx_tensor_names.cpp - onnx_test_util.cpp onnx_utils.cpp onnx_transformations.cpp op_extension.cpp @@ -118,6 +116,8 @@ foreach(BACKEND_NAME IN LISTS ACTIVE_BACKEND_LIST) message(STATUS "Adding unit test for backend ${BACKEND_NAME}") endforeach() +# Create target + add_executable(ov_onnx_frontend_tests ${SRC}) add_test(NAME ov_onnx_frontend_tests COMMAND ov_onnx_frontend_tests --gtest_filter=-*IE_GPU*) set_property(TEST ov_onnx_frontend_tests PROPERTY LABELS OV UNIT ONNX_FE) @@ -141,27 +141,14 @@ endif() target_link_libraries(ov_onnx_frontend_tests PRIVATE gtest_main_manifest frontend_shared_test_classes - openvino::runtime::dev - openvino_onnx_frontend - openvino_onnx_common + openvino::frontend::onnx func_test_utils) -# It's needed by onnx_import_library.cpp and onnx_import_exceptions.cpp tests to include onnx_pb.h. -# Not linking statically to libprotobuf (linked into libonnx) avoids false-failing onnx_editor tests. 
-target_include_directories(ov_onnx_frontend_tests - SYSTEM PRIVATE - $ - $ - $) -target_compile_definitions(ov_onnx_frontend_tests PRIVATE $) -target_compile_definitions(ov_onnx_frontend_tests PRIVATE ENABLE_OV_ONNX_FRONTEND) - if(OV_COMPILER_IS_CLANG) target_compile_options(ov_onnx_frontend_tests PRIVATE -Wno-undef -Wno-reserved-id-macro) endif() -target_include_directories(ov_onnx_frontend_tests PRIVATE - $) +# Install rules install(TARGETS ov_onnx_frontend_tests RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/frontends/onnx/tests/onnx_editor.cpp b/src/frontends/onnx/tests/onnx_editor.cpp index d8dc11bac5e0fe..56aa60642ec667 100644 --- a/src/frontends/onnx/tests/onnx_editor.cpp +++ b/src/frontends/onnx/tests/onnx_editor.cpp @@ -9,9 +9,7 @@ #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" diff --git a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp index e9ea726415622a..7caea8199c64e3 100644 --- a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp +++ b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp @@ -7,9 +7,7 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_exceptions.cpp b/src/frontends/onnx/tests/onnx_import_exceptions.cpp index 09446f39738db7..62a4b332f148a7 100644 --- a/src/frontends/onnx/tests/onnx_import_exceptions.cpp +++ b/src/frontends/onnx/tests/onnx_import_exceptions.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/type_prop.hpp" -#include "exceptions.hpp" #include "gtest/gtest.h" #include "onnx_utils.hpp" @@ -34,10 +33,10 @@ TEST(onnx_importer, exception_msg_onnx_node_validation_failure) { convert_model("instance_norm_bad_scale_type.onnx"); // Should have thrown, so fail if it didn't FAIL() << "ONNX Importer did not detected incorrect model!"; - } catch (const ::ov::frontend::onnx_error::OnnxNodeValidationFailure& e) { + } catch (const ::ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("While validating ONNX node ' - -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_control.hpp" -#include "gtest/gtest.h" -#include "onnx_utils.hpp" - -using namespace ov::frontend::onnx::tests; - -static std::string s_manifest = onnx_backend_manifest(MANIFEST); - -OPENVINO_TEST(onnx, check_ir_version_support) { - // It appears you've changed the ONNX library version used by OpenVINO. Please update the value - // tested below to make sure it equals the current IR_VERSION enum value defined in ONNX headers - // - // You should also check the onnx_common/src/onnx_model_validator.cpp file and make sure that - // the details::onnx::is_correct_onnx_field() handles any new fields added in the new release - // of the ONNX library. Make sure to update the "Field" enum and the function mentioned above. 
- // - // The last step is to also update the details::onnx::contains_onnx_model_keys() function - // in the same file to make sure that prototxt format validation also covers the changes in ONNX - EXPECT_EQ(ONNX_NAMESPACE::Version::IR_VERSION, 9) - << "The IR_VERSION defined in ONNX does not match the version that OpenVINO supports. " - "Please check the source code of this test for details and explanation how to proceed."; -} diff --git a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp index eb7edeccbb8198..28814dd17b3efa 100644 --- a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp @@ -117,7 +117,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_priorbox_clustered_first_input_bad_shape) { } catch (const ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("Only 4D inputs are supported. First input rank: 5 (should be 4)")); } catch (...) { - FAIL() << "Expected OnnxNodeValidationFailure exception was not thrown"; + FAIL() << "Expected ov::Exception exception was not thrown"; } } @@ -128,7 +128,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_priorbox_clustered_second_input_bad_shape) { } catch (const ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("Only 4D inputs are supported. Second input rank: 5 (should be 4)")); } catch (...) { - FAIL() << "Expected OnnxNodeValidationFailure exception was not thrown"; + FAIL() << "Expected ov::Exception exception was not thrown"; } } diff --git a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp index a7e204414253d2..b365c64f253e6c 100644 --- a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp @@ -14,7 +14,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" diff --git a/src/frontends/onnx/tests/onnx_ops_registration.cpp b/src/frontends/onnx/tests/onnx_ops_registration.cpp index d4bca0639ecb73..2ac414abb6d5b2 100644 --- a/src/frontends/onnx/tests/onnx_ops_registration.cpp +++ b/src/frontends/onnx/tests/onnx_ops_registration.cpp @@ -10,13 +10,10 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; -using namespace ov::onnx_editor; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); diff --git a/src/frontends/onnx/tests/onnx_test_util.cpp b/src/frontends/onnx/tests/onnx_test_util.cpp deleted file mode 100644 index 4a1025cc9103ef..00000000000000 --- a/src/frontends/onnx/tests/onnx_test_util.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "onnx_test_util.hpp" - -#include - -#include -#include -#include - -#include "onnx_common/parser.hpp" - -using namespace ov::frontend::onnx::tests; -using namespace ov::frontend::onnx::common; - -namespace { -ComparisonResult compare_nodes(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph, - CompType comp) { - if (graph.node_size() != ref_graph.node_size()) { - 
return ComparisonResult::fail("The number of nodes in compared models doesn't match"); - } else { - for (int i = 0; i < graph.node_size(); ++i) { - const auto& lhs = graph.node(i); - const auto& rhs = ref_graph.node(i); - - if (lhs.op_type() != rhs.op_type()) { - return ComparisonResult::fail("Operation types are different at index " + std::to_string(i) + ": " + - lhs.op_type() + " vs " + rhs.op_type()); - } - - for (int j = 0; j < lhs.input_size(); ++j) { - if (!comp(lhs.input(j), rhs.input(j))) { - return ComparisonResult::fail("Input names don't match for nodes at index " + std::to_string(i) + - ": " + lhs.input(j) + " vs " + rhs.input(j)); - } - } - - for (int j = 0; j < lhs.output_size(); ++j) { - if (!comp(lhs.output(j), rhs.output(j))) { - return ComparisonResult::fail("Output names don't match for nodes at index " + std::to_string(i) + - ": " + lhs.output(j) + " vs " + rhs.output(j)); - } - } - } - } - - return ComparisonResult::pass(); -} - -ComparisonResult compare_value_info(const ONNX_NAMESPACE::ValueInfoProto& lhs, - const ONNX_NAMESPACE::ValueInfoProto& rhs, - const std::string& item_type) { - if (lhs.name() != rhs.name()) { - return ComparisonResult::fail(item_type + " names in the graph don't match: " + lhs.name() + " vs " + - rhs.name()); - } - - const auto& lhs_tensor = lhs.type().tensor_type(); - const auto& rhs_tensor = rhs.type().tensor_type(); - if (lhs_tensor.elem_type() != rhs_tensor.elem_type()) { - return ComparisonResult::fail("Element types don't match for " + item_type + " " + lhs.name() + ": " + - std::to_string(lhs_tensor.elem_type()) + " vs " + - std::to_string(rhs_tensor.elem_type())); - } - - const auto& lhs_shape = lhs_tensor.shape(); - const auto& rhs_shape = rhs_tensor.shape(); - if (lhs_shape.dim_size() != rhs_shape.dim_size()) { - return ComparisonResult::fail("Tensor ranks don't match for " + item_type + " " + lhs.name() + ": " + - std::to_string(lhs_shape.dim_size()) + " vs " + - std::to_string(rhs_shape.dim_size())); - } else { - for (int j = 0; j < lhs_shape.dim_size(); ++j) { - const auto& lhs_dim = lhs_shape.dim(j); - const auto& rhs_dim = rhs_shape.dim(j); - if ((lhs_dim.has_dim_value() && rhs_dim.has_dim_param()) || - (rhs_dim.has_dim_value() && lhs_dim.has_dim_param())) { - return ComparisonResult::fail("Dynamic vs static dimension mismatch for " + item_type + " " + - lhs.name() + " at index: " + std::to_string(j)); - } else if (lhs_dim.has_dim_value() && lhs_dim.dim_value() != rhs_dim.dim_value()) { - return ComparisonResult::fail("Shape dimensions don't match for " + item_type + " " + lhs.name() + - " at index: " + std::to_string(j) + ". 
" + - std::to_string(lhs_dim.dim_value()) + " vs " + - std::to_string(rhs_dim.dim_value())); - } - } - } - - return ComparisonResult::pass(); -} - -ComparisonResult compare_inputs(const ONNX_NAMESPACE::GraphProto& graph, const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.input_size() != ref_graph.input_size()) { - return ComparisonResult::fail( - "The number of inputs in compared models doesn't match: " + std::to_string(graph.input_size()) + " vs " + - std::to_string(ref_graph.input_size())); - } else { - for (int i = 0; i < graph.input_size(); ++i) { - const auto& lhs = graph.input(i); - const auto& rhs = ref_graph.input(i); - - const auto res = compare_value_info(lhs, rhs, "input"); - if (!res.is_ok) { - return res; - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_outputs(const ONNX_NAMESPACE::GraphProto& graph, const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.output_size() != ref_graph.output_size()) { - return ComparisonResult::fail("The number of outputs in compared models doesn't match" + - std::to_string(graph.output_size()) + " vs " + - std::to_string(ref_graph.output_size())); - } else { - for (int i = 0; i < graph.output_size(); ++i) { - const auto& lhs = graph.output(i); - const auto& rhs = ref_graph.output(i); - - const auto res = compare_value_info(lhs, rhs, "output"); - if (!res.is_ok) { - return res; - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_initializers(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.initializer_size() != ref_graph.initializer_size()) { - return ComparisonResult::fail("The number of initializers in compared models doesn't match" + - std::to_string(graph.initializer_size()) + " vs " + - std::to_string(ref_graph.initializer_size())); - } else { - for (int i = 0; i < graph.initializer_size(); ++i) { - const auto& lhs = graph.initializer(i); - const auto& rhs = ref_graph.initializer(i); - - if (lhs.name() != rhs.name()) { - return ComparisonResult::fail("Initializer names in the graph don't match: " + lhs.name() + " vs " + - rhs.name()); - } else if (lhs.data_type() != rhs.data_type()) { - return ComparisonResult::fail( - "Initializer data types in the graph don't match: " + std::to_string(lhs.data_type()) + " vs " + - std::to_string(rhs.data_type())); - } else if (lhs.dims_size() != rhs.dims_size()) { - return ComparisonResult::fail( - "Initializer ranks in the graph don't match: " + std::to_string(lhs.dims_size()) + " vs " + - std::to_string(rhs.dims_size())); - } else { - for (int j = 0; j < lhs.dims_size(); ++j) { - if (lhs.dims(j) != rhs.dims(j)) { - return ComparisonResult::fail("Shape dimensions don't match for initializer " + lhs.name() + - " at index: " + std::to_string(j) + ". 
" + - std::to_string(lhs.dims(j)) + " vs " + - std::to_string(rhs.dims(j))); - } - } - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_onnx_graphs(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph, - CompType comp = default_name_comparator) { - ComparisonResult comparison = compare_inputs(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - comparison = compare_outputs(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - comparison = compare_initializers(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - return compare_nodes(graph, ref_graph, comp); -} -} // namespace -namespace ov { -namespace frontend { -namespace onnx { -namespace tests { - -bool default_name_comparator(std::string lhs, std::string rhs) { - return lhs == rhs; -} - -ComparisonResult compare_onnx_models(const std::string& model, const std::string& reference_model_path, CompType comp) { - std::stringstream model_stream{model}; - const auto model_proto = parse_from_istream(model_stream); - const auto ref_model = parse_from_file(reference_model_path); - return compare_onnx_graphs(model_proto.graph(), ref_model.graph(), comp); -} - -std::string change_opset_version(const std::string& model, - const std::vector& new_opset_version, - const std::string& domain) { - std::stringstream model_stream{model}; - auto model_proto = parse_from_istream(model_stream); - model_proto.clear_opset_import(); - for (const auto& opset_version : new_opset_version) { - auto* opset_import = model_proto.add_opset_import(); - opset_import->set_version(opset_version); - opset_import->set_domain(domain); - } - - return model_proto.SerializeAsString(); -} - -} // namespace tests -} // namespace onnx -} // namespace frontend -} // namespace ov \ No newline at end of file diff --git a/src/frontends/onnx/tests/onnx_test_util.hpp b/src/frontends/onnx/tests/onnx_test_util.hpp deleted file mode 100644 index a2aa222905e0c5..00000000000000 --- a/src/frontends/onnx/tests/onnx_test_util.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -namespace ov { -namespace frontend { -namespace onnx { -namespace tests { -struct ComparisonResult { - ComparisonResult() = default; - ComparisonResult(std::string error) : is_ok{false}, error_message{std::move(error)} {} - ComparisonResult(ComparisonResult&&) = default; - ComparisonResult(const ComparisonResult&) = default; - ComparisonResult& operator=(ComparisonResult&&) = default; - ComparisonResult& operator=(const ComparisonResult&) = default; - - bool is_ok = true; - std::string error_message; - - static ComparisonResult pass() { - return {}; - } - static ComparisonResult fail(std::string error) { - return ComparisonResult{std::move(error)}; - } -}; - -bool default_name_comparator(std::string lhs, std::string rhs); - -// comp is a function to compare inputs and outputs names (as default it is a usual std::string comparison) -using CompType = std::function; -ComparisonResult compare_onnx_models(const std::string& model, - const std::string& reference_model_path, - CompType comp = default_name_comparator); - -std::string change_opset_version(const std::string& model, - const std::vector& new_opset_version, - const std::string& domain = "ai.onnx"); -} // namespace tests -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git 
a/src/frontends/onnx/tests/onnx_transformations.cpp b/src/frontends/onnx/tests/onnx_transformations.cpp index c3edd053d5d836..389032c9bbde34 100644 --- a/src/frontends/onnx/tests/onnx_transformations.cpp +++ b/src/frontends/onnx/tests/onnx_transformations.cpp @@ -5,13 +5,10 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; -using namespace ov::onnx_editor; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); From 0ab9694d429689f60c2a135289ce089def2b39df Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 19 Jan 2024 20:45:48 +0400 Subject: [PATCH 102/122] Removed ngraph/pattern (#22275) --- .../include/ngraph/pass/graph_rewrite.hpp | 1 - src/core/include/ngraph/pattern/matcher.hpp | 44 ------------ src/core/include/ngraph/pattern/op/any.hpp | 27 -------- src/core/include/ngraph/pattern/op/any_of.hpp | 27 -------- .../include/ngraph/pattern/op/any_output.hpp | 27 -------- src/core/include/ngraph/pattern/op/branch.hpp | 27 -------- .../include/ngraph/pattern/op/capture.hpp | 27 -------- src/core/include/ngraph/pattern/op/label.hpp | 29 -------- src/core/include/ngraph/pattern/op/or.hpp | 27 -------- .../include/ngraph/pattern/op/pattern.hpp | 69 ------------------- src/core/include/ngraph/pattern/op/skip.hpp | 27 -------- src/core/include/ngraph/pattern/op/true.hpp | 27 -------- .../include/ngraph/pattern/op/wrap_type.hpp | 29 -------- .../cpu_opset/common/pass/rope_fusion.cpp | 54 ++++++++------- 14 files changed, 28 insertions(+), 414 deletions(-) delete mode 100644 src/core/include/ngraph/pattern/matcher.hpp delete mode 100644 src/core/include/ngraph/pattern/op/any.hpp delete mode 100644 src/core/include/ngraph/pattern/op/any_of.hpp delete mode 100644 src/core/include/ngraph/pattern/op/any_output.hpp delete mode 100644 src/core/include/ngraph/pattern/op/branch.hpp delete mode 100644 src/core/include/ngraph/pattern/op/capture.hpp delete mode 100644 src/core/include/ngraph/pattern/op/label.hpp delete mode 100644 src/core/include/ngraph/pattern/op/or.hpp delete mode 100644 src/core/include/ngraph/pattern/op/pattern.hpp delete mode 100644 src/core/include/ngraph/pattern/op/skip.hpp delete mode 100644 src/core/include/ngraph/pattern/op/true.hpp delete mode 100644 src/core/include/ngraph/pattern/op/wrap_type.hpp diff --git a/src/core/include/ngraph/pass/graph_rewrite.hpp b/src/core/include/ngraph/pass/graph_rewrite.hpp index 0931a9c704f7a0..ea11d0dff9351c 100644 --- a/src/core/include/ngraph/pass/graph_rewrite.hpp +++ b/src/core/include/ngraph/pass/graph_rewrite.hpp @@ -19,7 +19,6 @@ #include #include "ngraph/pass/pass.hpp" -#include "ngraph/pattern/matcher.hpp" #include "openvino/pass/graph_rewrite.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/pattern/matcher.hpp b/src/core/include/ngraph/pattern/matcher.hpp deleted file mode 100644 index c37244bed60834..00000000000000 --- a/src/core/include/ngraph/pattern/matcher.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/any.hpp" -#include "ngraph/pattern/op/any_of.hpp" -#include "ngraph/pattern/op/any_output.hpp" -#include "ngraph/pattern/op/label.hpp" -#include "ngraph/pattern/op/skip.hpp" -#include "openvino/pass/pattern/matcher.hpp" - -namespace ov { -namespace pass { -class GraphRewrite; -} -} // namespace ov -namespace ngraph { -namespace pass { -using ov::pass::GraphRewrite; -} - -namespace pattern { -using ov::pass::pattern::Matcher; -using ov::pass::pattern::MatcherState; -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any.hpp b/src/core/include/ngraph/pattern/op/any.hpp deleted file mode 100644 index a7a5aaf194ca25..00000000000000 --- a/src/core/include/ngraph/pattern/op/any.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Any; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any_of.hpp b/src/core/include/ngraph/pattern/op/any_of.hpp deleted file mode 100644 index fbf6652f273d90..00000000000000 --- a/src/core/include/ngraph/pattern/op/any_of.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any_of.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::AnyOf; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any_output.hpp b/src/core/include/ngraph/pattern/op/any_output.hpp deleted file mode 100644 index 4f733d48d4187d..00000000000000 --- a/src/core/include/ngraph/pattern/op/any_output.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any_output.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::AnyOutput; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/branch.hpp b/src/core/include/ngraph/pattern/op/branch.hpp deleted file mode 100644 index e93b7b1b00976b..00000000000000 --- a/src/core/include/ngraph/pattern/op/branch.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/branch.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Branch; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/capture.hpp b/src/core/include/ngraph/pattern/op/capture.hpp deleted file mode 100644 index 25031c5c3cdc71..00000000000000 --- a/src/core/include/ngraph/pattern/op/capture.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/capture.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Capture; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/label.hpp b/src/core/include/ngraph/pattern/op/label.hpp deleted file mode 100644 index c780effa3130c6..00000000000000 --- a/src/core/include/ngraph/pattern/op/label.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/label.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Label; -} // namespace op - -using ov::pass::pattern::any_input; -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/or.hpp b/src/core/include/ngraph/pattern/op/or.hpp deleted file mode 100644 index 2c6926aa7d99cd..00000000000000 --- a/src/core/include/ngraph/pattern/op/or.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/or.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Or; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/pattern.hpp b/src/core/include/ngraph/pattern/op/pattern.hpp deleted file mode 100644 index 1ff997f306a4a2..00000000000000 --- a/src/core/include/ngraph/pattern/op/pattern.hpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "openvino/pass/pattern/op/pattern.hpp" - -namespace ov { -namespace pass { -namespace pattern { -namespace op { -class Label; -} - -class Matcher; -class MatchState; -} // namespace pattern -} // namespace pass -} // namespace ov -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Label; -} - -using ov::pass::pattern::Matcher; -using ov::pass::pattern::MatcherState; - -using ov::pass::pattern::PatternValueMap; -using ov::pass::pattern::PatternValueMaps; -using ov::pass::pattern::RPatternValueMap; - -using ov::pass::pattern::PatternMap; - -using ov::pass::pattern::as_pattern_map; -using ov::pass::pattern::as_pattern_value_map; -using ov::pass::pattern::consumers_count; -using ov::pass::pattern::has_class; -using ov::pass::pattern::has_static_dim; -using ov::pass::pattern::has_static_dims; -using ov::pass::pattern::has_static_rank; -using ov::pass::pattern::has_static_shape; -using ov::pass::pattern::rank_equals; -using ov::pass::pattern::type_matches; -using ov::pass::pattern::type_matches_any; - -namespace op { -using ov::pass::pattern::op::NodePredicate; -using ov::pass::pattern::op::ValuePredicate; - -using ov::pass::pattern::op::as_value_predicate; -using ov::pass::pattern::op::Pattern; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/skip.hpp b/src/core/include/ngraph/pattern/op/skip.hpp deleted file mode 100644 index 9cfb2f967b0e39..00000000000000 --- a/src/core/include/ngraph/pattern/op/skip.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/skip.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Skip; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/true.hpp b/src/core/include/ngraph/pattern/op/true.hpp deleted file mode 100644 index 62a564a96f1258..00000000000000 --- a/src/core/include/ngraph/pattern/op/true.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/true.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::True; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/wrap_type.hpp b/src/core/include/ngraph/pattern/op/wrap_type.hpp deleted file mode 100644 index 50a3a781bc7745..00000000000000 --- a/src/core/include/ngraph/pattern/op/wrap_type.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::WrapType; -} // namespace op - -using ov::pass::pattern::wrap_type; -} // namespace pattern -} // namespace ngraph diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp index b4fded221efcbe..8552013de64294 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp @@ -6,13 +6,15 @@ #include #include -#include + +#include "openvino/core/rt_info.hpp" #include "openvino/opsets/opset1.hpp" -#include -#include -#include -#include -#include +#include "openvino/opsets/opset6.hpp" +#include "openvino/opsets/opset8.hpp" +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "transformations/utils/utils.hpp" #include "itt.hpp" #include "ov_ops/type_relaxed.hpp" @@ -55,7 +57,7 @@ ov::intel_cpu::RoPEFusionGPTNEOX::RoPEFusionGPTNEOX() { // [x1, x2]*cos + [-x2, x1]*sin auto result = makePattern({mul_cos, mul_sin}, {{"auto_broadcast", "numpy"}}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { PatternValidator validator(m); if (!validator) { return false; @@ -94,7 +96,7 @@ ov::intel_cpu::RoPEFusionGPTNEOX::RoPEFusionGPTNEOX() { return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -154,7 +156,7 @@ ov::intel_cpu::RoPEFusionCosSinPreprocess::RoPEFusionCosSinPreprocess() { auto x = makePattern(ov::Rank(4)); auto rope = makePattern({x, cos_tab, sin_tab}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { PatternValidator validator(m); if (!validator) { return false; @@ -186,7 +188,7 @@ ov::intel_cpu::RoPEFusionCosSinPreprocess::RoPEFusionCosSinPreprocess() { register_new_node(rope_node); return true; }; - auto m = std::make_shared(rope, matcher_name); + auto m = std::make_shared(rope, matcher_name); this->register_matcher(m, callback); } @@ -202,7 +204,7 @@ ov::intel_cpu::RoPEFusionIOSlicing::RoPEFusionIOSlicing() { auto x_emb = makePattern({x, {}, {}}) | makePattern({x, {}, {}, {}}); auto result = makePattern({x_emb, y}, {{"axis", -1}}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); auto root = m.get_match_root(); @@ -229,7 +231,7 @@ ov::intel_cpu::RoPEFusionIOSlicing::RoPEFusionIOSlicing() { register_new_node(rope_node); return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -250,7 +252,7 @@ ov::intel_cpu::RoPEFusionPreprocess::RoPEFusionPreprocess() { auto x = makePattern({input_slice | input_to_trans, {0, 2, 1, 3}}); auto result = makePattern({x, {}, {}}) | makePattern({x, {}, {}, 
-    matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+    matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
         PatternValidator validator(m);
         if (!validator) {
             return false;
         }
@@ -279,17 +281,17 @@ ov::intel_cpu::RoPEFusionPreprocess::RoPEFusionPreprocess() {
         register_new_node(rope_node);
         return true;
     };
-    auto m = std::make_shared<ngraph::pattern::Matcher>(result, matcher_name);
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(result, matcher_name);
     this->register_matcher(m, callback);
 }

 // remove stridedslice from 0 to int32_max with stride 1
 ov::intel_cpu::EliminateStridedSlice::EliminateStridedSlice() {
     MATCHER_SCOPE(EliminateStridedSlice);
-    auto data = ov::pass::pattern::any_input(ngraph::pattern::has_static_rank());
-    auto begin = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32));
-    auto end = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32));
-    auto stride = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32));
+    auto data = ov::pass::pattern::any_input(ov::pass::pattern::has_static_rank());
+    auto begin = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32));
+    auto end = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32));
+    auto stride = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32));
     auto strided_slice = ov::pass::pattern::wrap_type({data, begin, end, stride},
         [](const Output& value) {
@@ -351,12 +353,12 @@ ov::intel_cpu::EliminateStridedSlice::EliminateStridedSlice() {
             return true;
         });
-    matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+    matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
         auto root = m.get_match_root();
         return replace_output_update_name(root->output(0), root->input_value(0));
     };
-    auto m = std::make_shared<ngraph::pattern::Matcher>(strided_slice, matcher_name);
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(strided_slice, matcher_name);
     this->register_matcher(m, callback);
 }
@@ -423,7 +425,7 @@ ov::intel_cpu::RoPEFusionGPTJ::RoPEFusionGPTJ() {
     auto result = permute_Transpose_1213;
-    matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+    matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
         auto root = m.get_match_root();
         PatternValidator validator(m);
@@ -451,7 +453,7 @@ ov::intel_cpu::RoPEFusionGPTJ::RoPEFusionGPTJ() {
         return true;
     };
-    auto m = std::make_shared<ngraph::pattern::Matcher>(result, matcher_name);
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(result, matcher_name);
     this->register_matcher(m, callback);
 }
@@ -539,7 +541,7 @@ ov::intel_cpu::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id) {
     auto result = cat_Concat_505;
-    matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+    matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
         auto root = m.get_match_root();
         PatternValidator validator(m);
@@ -576,7 +578,7 @@ ov::intel_cpu::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id) {
         return true;
     };
-    auto m = std::make_shared<ngraph::pattern::Matcher>(result, matcher_name);
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(result, matcher_name);
     this->register_matcher(m, callback);
 }
@@ -679,7 +681,7 @@ ov::intel_cpu::RoPEFusionQwen::RoPEFusionQwen(int split_output_id) {
     auto result = add_Add_597;
-    matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+    matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
         auto root = m.get_match_root();
        PatternValidator validator(m);
@@ -715,6 +717,6 @@ ov::intel_cpu::RoPEFusionQwen::RoPEFusionQwen(int split_output_id) {
         return true;
     };
-    auto m = std::make_shared<ngraph::pattern::Matcher>(result, matcher_name);
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(result, matcher_name);
     this->register_matcher(m, callback);
 }
\ No newline at end of file

From 6e67749b25e320a6693744cead810eb105d2f1ae Mon Sep 17 00:00:00 2001
From: Vishniakov Nikolai
Date: Fri, 19 Jan 2024 20:36:57 +0100
Subject: [PATCH 103/122] Fix wstring join_paths (#22271)

* Fix wstring join_paths

* Fix codestyle
---
 src/common/util/src/file_util.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/common/util/src/file_util.cpp b/src/common/util/src/file_util.cpp
index 9ff7923cfd89b9..7b78461da518fc 100644
--- a/src/common/util/src/file_util.cpp
+++ b/src/common/util/src/file_util.cpp
@@ -148,7 +148,11 @@ std::wstring join_paths(const std::wstring& s1, const std::wstring& s2) {
     } else if (s1.size() > 0) {
         rc = s1;
         if (rc[rc.size() - 1] != '/') {
+# ifndef _WIN32
             rc += '/';
+# else
+            rc += '\\';
+# endif
         }
         rc += s2;
     } else {

From 83a8db31c41909e8c2a7f6bfec325984fe5d8d96 Mon Sep 17 00:00:00 2001
From: Wilson Seok
Date: Sat, 20 Jan 2024 07:41:40 +0900
Subject: [PATCH 104/122] [GPU] Add dynamic input shape support in loop body network (#22181)

* add dynamic input shape support in loop body network

* add single layer test for the case
---
 src/plugins/intel_gpu/src/graph/loop.cpp      |  54 ++++-
 .../tests/functional/subgraph_tests/loop.cpp  | 215 ++++++++++++++++++
 2 files changed, 259 insertions(+), 10 deletions(-)

diff --git a/src/plugins/intel_gpu/src/graph/loop.cpp b/src/plugins/intel_gpu/src/graph/loop.cpp
index c2ec118c378ff7..36ca523093f595 100644
--- a/src/plugins/intel_gpu/src/graph/loop.cpp
+++ b/src/plugins/intel_gpu/src/graph/loop.cpp
@@ -1040,24 +1040,58 @@ std::vector loop_inst::handle_buffers_for_next_iteration(const loop_
         }
     } else if (mapping.type == loop_inst::backedge_memory_mapping::SINGLE) {
         memory::ptr to_mem = mapping.to_primitive->output_memory_ptr();
-        if (iter == 0) {
-            auto ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem));
-            if (ev) event_vec = {ev};
-            GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from inintal_mem(" << mapping.initial_mem << ")" << std::endl;
-        } else {
-            if (is_dynamic()) {
-                // In dynamic model, do not swap memory buffer between input and output in inner body network.
-                // Just copy data from input buffer memory to output buffer memory.
+
+        if (is_dynamic()) {
+            // In dynamic model, do not swap memory buffer between input and output in inner body network.
+            // Check size of input buffer memory and output buffer memory.
+            // If size is different, allocate new input memory for the required size,
+            // else just copy data from input buffer memory to output buffer memory.
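// Illustrative sketch (not part of the patch above): the comments describe a
// "reallocate on layout change, otherwise just copy" policy for the loop
// backedge buffer. A minimal self-contained C++ model of that policy follows;
// Layout/Memory/update_backedge are hypothetical stand-ins, not the cldnn API.
#include <memory>
#include <vector>

struct Layout {
    std::vector<int> dims;
    bool identical(const Layout& o) const { return dims == o.dims; }
    size_t count() const {
        size_t n = 1;
        for (int d : dims) n *= static_cast<size_t>(d);
        return n;
    }
};

struct Memory {
    Layout layout;
    std::vector<float> data;
    explicit Memory(const Layout& l) : layout(l), data(l.count()) {}
};

// Returns the buffer the backedge input should read on the next iteration:
// a fresh allocation when the producer's layout changed, the old buffer otherwise.
std::shared_ptr<Memory> update_backedge(const std::shared_ptr<Memory>& from,
                                        std::shared_ptr<Memory> to) {
    if (!from->layout.identical(to->layout)) {
        to = std::make_shared<Memory>(from->layout);  // shape changed: reallocate and rebind
    }
    to->data = from->data;  // copy_from() equivalent: data always crosses the backedge
    return to;
}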
+            cldnn::event::ptr ev;
+            if (iter == 0) {
+                auto to_id = mapping.to_primitive->id();
+                // Check whether the backedge_to shape needs to be updated from initial_mem
+                if (!mapping.initial_mem->get_layout().identical(to_mem->get_layout())) {
+                    to_mem = body_network->get_engine().allocate_memory(mapping.initial_mem->get_layout(), false);
+                    body_network->set_input_data(to_id, to_mem);
+                    ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem));
+                    GPU_DEBUG_LOG << iter << ") [SINGLE] Backedge_to node(" << to_id << ") is set to new memory("
+                                  << to_mem << ", " << to_mem->get_layout().to_short_string()
+                                  << ") because of shape update from initial memory("
+                                  << mapping.initial_mem << "," << mapping.initial_mem->get_layout().to_short_string() << ")" << std::endl;
+                } else {
+                    ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem));
+                    GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from initial_mem(" << mapping.initial_mem << ")" << std::endl;
+                }
+            } else {
                 auto from_id = mapping.from_primitive->id();
+                auto to_id = mapping.to_primitive->id();
                 if (body_network->has_event(from_id)) {
                     auto ev = body_network->get_primitive_event(from_id);
                     if (ev) ev->wait();
                 }
                 memory::ptr from_mem = mapping.from_primitive->output_memory_ptr();
-                auto ev = to_mem->copy_from(body_network->get_stream(), *(from_mem));
-                if (ev) event_vec = {ev};
+
+                // Check whether the backedge_to shape needs to be updated from backedge_from
+                if (!from_mem->get_layout().identical(to_mem->get_layout())) {
+                    to_mem = body_network->get_engine().allocate_memory(from_mem->get_layout(), false);
+                    GPU_DEBUG_LOG << iter << ") [SINGLE] Backedge_to node(" << to_id << ") is set to new memory("
+                                  << to_mem << ", " << to_mem->get_layout().to_short_string()
+                                  << ") because of shape update from backedge_from(" << from_id
+                                  << ")'s memory(" << from_mem << "," << from_mem->get_layout().to_short_string() << ")" << std::endl;
+                    body_network->set_input_data(to_id, to_mem);
+                    ev = to_mem->copy_from(body_network->get_stream(), *(from_mem));
+                } else {
+                    ev = to_mem->copy_from(body_network->get_stream(), *(from_mem));
+                }
                 GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from [" << mapping.from_primitive->id() << "(" << from_mem
                               << ")] to [" << mapping.to_primitive->id() << "(" << to_mem << ")]" << std::endl;
+            }
+            if (ev) event_vec = {ev};
+        } else {
+            if (iter == 0) {
+                auto ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem));
+                if (ev) event_vec = {ev};
+                GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from initial_mem(" << mapping.initial_mem << ")" << std::endl;
             } else {
                 // In static model, swap memory buffer between output and input in inner body network
                 memory::ptr from_mem = mapping.from_primitive->output_memory_ptr();
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp
index 8200700e0bd902..39501c67e1bbb7 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp
@@ -294,4 +294,219 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_dynamic_exit, DynamicShapeLoopTe
                          /* device */ testing::Values(ov::test::utils::DEVICE_GPU)),
                          DynamicShapeLoopTest::getTestCaseName);
+
+using DynamicShapeLoopDynamicInputParams = typename std::tuple<
+        bool,
+        std::tuple<
+            bool,
+            int64_t,
+            int64_t,
+            int64_t
+        >,
+        int64_t,
+        InputShape,
+        InputShape,
+        ov::element::Type,
+        std::string>;
+
+class DynamicShapeLoopDynamicInputTest : public testing::WithParamInterface<DynamicShapeLoopDynamicInputParams>,
virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj) { + bool static_iter_num; + bool static_continue_cond; + int64_t max_iter_num; + int64_t dynamic_exit; + int64_t axis; + int64_t start_value; + InputShape data_shapes; + InputShape constant_shapes; + ov::element::Type model_type; + std::string targetDevice; + auto args_pack = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); + std::tie( + static_continue_cond, + args_pack, + start_value, + data_shapes, + constant_shapes, + model_type, + targetDevice) = obj.param; + + std::ostringstream result; + result << "static_iter_num=" << std::to_string(static_iter_num) << "_"; + result << "static_continue_cond=" << std::to_string(static_continue_cond) << "_"; + result << "max_iter_num=" << std::to_string(max_iter_num) << "_"; + result << "dynamic_exit=" << std::to_string(dynamic_exit) << "_"; + result << "axis=" << std::to_string(axis) << "_"; + result << "start_value=" << std::to_string(start_value) << "_"; + result << "max_iter_num=" << std::to_string(max_iter_num) << "_"; + result << "IS=("; + result << ov::test::utils::partialShape2str({data_shapes.first}) << "_"; + for (size_t i = 0lu; i < data_shapes.second.size(); i++) { + result << "{"; + result << ov::test::utils::vec2str(data_shapes.second[i]) << "_"; + result << "}_"; + } + result << ")_"; + result << "netType=" << model_type << "_"; + result << "targetDevice=" << targetDevice << "_"; + + auto res_str = result.str(); + std::replace(res_str.begin(), res_str.end(), '-', '_'); + return res_str; + } + +private: + bool static_iter_num; // trip count provided by constant node + bool static_continue_cond; // initial_cond provided by constant node + int64_t max_iter_num; // -1 means infinity loop (expected dynamic exit condition in body) + int64_t dynamic_exit; // -1 means always true + int64_t axis; // -1 means no auto concatenation + int64_t start_value; + InputShape data_shapes; + InputShape constant_shapes; + ov::element::Type model_type; + +protected: + void SetUp() override { + auto args_pack = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); + std::tie( + static_continue_cond, + args_pack, + start_value, + data_shapes, + constant_shapes, + model_type, + targetDevice) = GetParam(); + + const auto inputShape = data_shapes.first; + const auto scalarShape = ov::Shape{}; + init_input_shapes({data_shapes, data_shapes, constant_shapes}); + + ov::ParameterVector params{}; + auto cond_input_create = [¶ms] (ov::element::Type model_type, + const ov::PartialShape &shape, + int value = 0, + bool is_static = false) -> std::shared_ptr { + if (is_static) + return std::make_shared(model_type, shape.to_shape(), value); + + auto input = std::make_shared(model_type, shape); + params.push_back(input); + return input; + }; + + // Create function that has smaller shape of init input backedge-to and bigger shape backedge-from + // It should be updated during iteration + auto start_add = cond_input_create(model_type, inputShape, start_value); + start_add->set_friendly_name("start_add"); + auto start_mul = cond_input_create(model_type, inputShape, 1); + start_mul->set_friendly_name("start_mul"); + auto count = cond_input_create(ov::element::i64, scalarShape, max_iter_num, static_iter_num); + count->set_friendly_name("count"); + auto skip = cond_input_create(ov::element::boolean, scalarShape, true, static_continue_cond); + skip->set_friendly_name("skip"); + auto init_const = cond_input_create(model_type, 
constant_shapes.first, 1); + init_const->set_friendly_name("init_const"); + + auto b_indx = std::make_shared(ov::element::i64, ov::Shape{}); + b_indx->set_friendly_name("body_index"); + auto b_data_add = std::make_shared(model_type, inputShape); + b_data_add->set_friendly_name("b_data_add"); + auto b_data_mul = std::make_shared(model_type, inputShape); + b_data_mul->set_friendly_name("b_data_mul"); + auto b_data_broadcast = std::make_shared(model_type, constant_shapes.first); + b_data_broadcast->set_friendly_name("b_data_broadcast"); + auto b_indx_cast = std::make_shared(b_indx, model_type); + b_indx_cast->set_friendly_name("body_index_cast"); + auto b_add = std::make_shared(b_data_add, b_indx_cast); + b_add->set_friendly_name("body_add"); + auto b_mul = std::make_shared(b_data_mul, b_indx_cast); + b_mul->set_friendly_name("body_mul"); + auto b_shapeof1 = std::make_shared(b_data_mul); + b_shapeof1->set_friendly_name("b_shapeof1"); + auto b_shapeof2 = std::make_shared(b_data_broadcast); + b_shapeof2->set_friendly_name("b_shapeof2"); + auto b_max = std::make_shared(b_shapeof1, b_shapeof2); + b_max->set_friendly_name("b_max"); + auto b_broadcast = std::make_shared(b_data_broadcast, b_max); + b_broadcast->set_friendly_name("b_broadcast"); + auto b_mul2 = std::make_shared(b_broadcast, b_mul); + b_mul2->set_friendly_name("b_mul2"); + + std::shared_ptr b_cond; + if (dynamic_exit == -1) { + b_cond = std::make_shared(ov::element::boolean, ov::Shape{}, true); + b_cond->set_friendly_name("body_condition"); + } else { + auto b_exit_value = std::make_shared(ov::element::i64, scalarShape, dynamic_exit); + b_exit_value->set_friendly_name("body_exit_value"); + b_cond = std::make_shared(b_indx, b_exit_value); + b_cond->set_friendly_name("body_condition_with_exit_value"); + } + + auto body = std::make_shared( + ov::OutputVector {b_cond, b_add, b_mul, b_mul2}, // TODO: check with reverse + ov::ParameterVector {b_indx, b_data_add, b_data_mul, b_data_broadcast}); // TODO: check with reverse + body->set_friendly_name("body_network"); + + auto loop = std::make_shared(count, skip); + loop->set_friendly_name("loop"); + loop->set_function(body); + loop->set_special_body_ports({0, 0}); + loop->set_merged_input(b_data_add, start_add, b_add); + loop->set_merged_input(b_data_mul, start_mul, b_mul); + loop->set_merged_input(b_data_broadcast, init_const, b_mul2); + if (axis == -1) { + loop->get_iter_value(b_add, -1); + loop->get_iter_value(b_mul, -1); + loop->get_iter_value(b_mul2, -1); + } else { + loop->get_concatenated_slices(b_add, 0, 1, 1, -1, axis); + loop->get_concatenated_slices(b_mul, 0, 1, 1, -1, axis); + } + + ov::ResultVector results; + for (size_t i = 0; i < loop->get_output_size(); i++) { + auto res = std::make_shared(loop->output(i)); + res->set_friendly_name("loop_output_" + std::to_string(i)); + results.push_back(res); + } + function = std::make_shared( + results, + params); + function->set_friendly_name("outer_body_network"); + } +}; + +TEST_P(DynamicShapeLoopDynamicInputTest, Inference) { + run(); +} + +static const std::vector> dynamic_loop_input { + // GCC4.8 limitation: have to specify type of each element in list + // static_trip_count | max | dynamic_exit | axis + std::tuple{ true , 5, 3, -1 }, // n_iter 3, dynamic exit on 3 + std::tuple{ true , -1, 5, -1 }, // n_iter 5, inf loop with dynamic exit on 5 +}; + +std::vector inputs_dynamic_shape = { + InputShape(ov::PartialShape({-1, 1, -1}), {{4, 1, 2}, {10, 1, 2}, {12, 1, 2}}), +}; + +std::vector constant_dynamic_shape = { + 
InputShape(ov::PartialShape({-1, 1, -1}), {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}), +}; + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_dynamic, DynamicShapeLoopDynamicInputTest, + testing::Combine( + /* static_continue_cond */ testing::Values(true), + /* args_pack */ testing::ValuesIn(dynamic_loop_input), + /* start_value */ testing::Values(0), + /* data_shape */ testing::ValuesIn(inputs_dynamic_shape), + /* constant_shape */ testing::ValuesIn(constant_dynamic_shape), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), + DynamicShapeLoopDynamicInputTest::getTestCaseName); } // namespace \ No newline at end of file From 3a70f53a2dad681055fe943f8b685747f15187fe Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Sat, 20 Jan 2024 07:53:28 +0000 Subject: [PATCH 105/122] Bump sccache version (#22282) --- .github/workflows/android_arm64.yml | 2 +- .github/workflows/coverity.yml | 2 +- .github/workflows/fedora.yml | 2 +- .github/workflows/job_onnx_runtime.yml | 2 +- .github/workflows/linux.yml | 4 ++-- .github/workflows/linux_arm64.yml | 2 +- .github/workflows/linux_conditional_compilation.yml | 4 ++-- .github/workflows/webassembly.yml | 2 +- .github/workflows/windows.yml | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml index d313929a1b016e..2405d2a84d71af 100644 --- a/.github/workflows/android_arm64.yml +++ b/.github/workflows/android_arm64.yml @@ -126,7 +126,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" # # Build diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 206d80cd02f0af..3ac8019fba8c66 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -77,7 +77,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 7863d04f47dac4..e7bb07af01707b 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -88,7 +88,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install python dependencies run: | diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml index 3dce4a9d66fa35..e4dc420ea2fbf1 100644 --- a/.github/workflows/job_onnx_runtime.yml +++ b/.github/workflows/job_onnx_runtime.yml @@ -100,7 +100,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Build Lin ONNX Runtime run: | diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index d619cedc3cb109..01512464638de2 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -114,7 +114,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python @@ -565,7 +565,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install CUDA run: | diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 
d2b79bcc82d19c..89c8fd8b1f1e40 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -114,7 +114,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index 4d72a4ada3e0e0..607cbfd2dcf720 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -112,7 +112,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python @@ -290,7 +290,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" # # Build # diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml index 135e6a76c49fbb..89d447bea40b53 100644 --- a/.github/workflows/webassembly.yml +++ b/.github/workflows/webassembly.yml @@ -69,7 +69,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: emcmake cmake - configure run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index bcc0db607eb733..71bfa325ab4c1f 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -120,7 +120,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install build dependencies run: choco install --no-progress ninja From efa7e3bc6e69fba2f0b6be7c15e7097e96d498b0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 20 Jan 2024 19:41:57 +0400 Subject: [PATCH 106/122] Ported conda-forge patch to work with latest protobuf (#22277) --- thirdparty/dependencies.cmake | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake index fc6ef40c64c9bd..78860f2cd86aa3 100644 --- a/thirdparty/dependencies.cmake +++ b/thirdparty/dependencies.cmake @@ -323,7 +323,15 @@ if(ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND # otherwise, fallback to existing default find_package(Protobuf 3.20.3 REQUIRED ${protobuf_config}) endif() - set(PROTOC_EXECUTABLE protobuf::protoc) + + # with newer protobuf versions (4.22 and newer), we use CONFIG first + # so, the Protobuf_PROTOC_EXECUTABLE variable must be checked explicitly, + # because it's not used in this case (oppositely to MODULE case) + if(Protobuf_VERSION VERSION_GREATER_EQUAL 22 AND DEFINED Protobuf_PROTOC_EXECUTABLE) + set(PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE}) + else() + set(PROTOC_EXECUTABLE protobuf::protoc) + endif() else() add_subdirectory(thirdparty/protobuf EXCLUDE_FROM_ALL) endif() From 312ffb1a5c613b59dc02cb886ca0ddcddbe8da2c Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Sat, 20 Jan 2024 16:24:34 +0000 Subject: [PATCH 107/122] [SCRIPTS] Introduce `build_samples` for Powershell (#22185) * add pwsh build_samples script * install ps1 script, use it for win workflow * correct syntax * use pwsh script in win cc samples building * align args; add aliases; rm postfixes * return postfix for bat --- .github/workflows/windows.yml | 75 +++++++++++-------- 
.../windows_conditional_compilation.yml | 2 +- samples/CMakeLists.txt | 3 +- samples/cpp/build_samples.ps1 | 66 ++++++++++++++++ 4 files changed, 113 insertions(+), 33 deletions(-) create mode 100644 samples/cpp/build_samples.ps1 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 71bfa325ab4c1f..06b51728ca0a74 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -280,20 +280,22 @@ jobs: should-setup-pip-paths: 'false' self-hosted-runner: 'true' + # Test with the short names of the arguments - name: Build cpp samples run: | - & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/cpp_samples + & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples.ps1 -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/cpp_samples env: CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + # Test with the full names of the arguments - name: Build c samples run: | - & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples + & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples.ps1 -InstallDirectory ${{ env.INSTALL_DIR }} -BuildDirectory ${{ env.BUILD_DIR }}/c_samples - name: Samples tests run: | python3 -m pip install --ignore-installed PyYAML -r ${{ env.INSTALL_TEST_DIR }}/smoke_tests/requirements.txt - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/smoke_tests --env_conf ${{ env.INSTALL_TEST_DIR }}/smoke_tests/env_config.yml --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-SamplesSmokeTests.xml env: IE_APP_PATH: ${{ env.INSTALL_DIR }}/samples_bin @@ -301,6 +303,17 @@ jobs: SHARE: ${{ env.INSTALL_TEST_DIR }}/smoke_tests/samples_smoke_tests_data WORKSPACE: ${{ env.INSTALL_DIR }} + # Test .bat scripts for samples building + - name: Build cpp samples (bat) + run: | + & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }}/samples_bat -b ${{ env.BUILD_DIR }}/cpp_samples_bat + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + - name: Build c samples (bat) + run: | + & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }}/samples_bat -b ${{ env.BUILD_DIR }}/c_samples_bat + - name: Upload Test Results uses: actions/upload-artifact@v3 if: ${{ !cancelled() }} @@ -552,7 +565,7 @@ jobs: if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml - name: OVC unit tests @@ -605,156 +618,156 @@ jobs: - name: OpenVINO Core unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml - name: OpenVINO Inference functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml - name: OpenVINO Inference unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml - name: Low Precision Transformations Tests if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml - name: OpenVINO Conditional compilation tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml - name: IR frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml - name: PaddlePaddle frontend tests # Disabled because of CVS-95904 if: ${{ 'false' }} run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - name: ONNX frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: TensorFlow Common frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml - name: TensorFlow frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml - name: TensorFlow Lite frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test run: | # Skip ticket: 126320 - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml - name: Transformations func tests if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml - name: Common test utils tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml - name: Snippets func tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml - name: CPU plugin unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml - name: ov_subgraphs_dumper_tests tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 --device=TEMPLATE --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SubgraphsDumperTests.xml - name: Template OpImpl tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml - name: AUTO unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml - name: AUTO func Tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml - name: Template plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - name: OpenVINO C API tests if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml - name: AutoBatch unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml - name: AutoBatch func tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml - name: Proxy Plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml - name: Hetero Unit Tests if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml - name: Hetero Func Tests if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml - name: Upload Test Results @@ -830,7 +843,7 @@ jobs: - name: Intel CPU plugin func tests (parallel) run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke* timeout-minutes: 60 diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 7902f360efc954..3e038d9def9d0b 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -182,7 +182,7 @@ jobs: - name: Build C samples - OpenVINO install tree run: | - & ${{ env.INSTALL_DIR }}/samples/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples + & ${{ env.INSTALL_DIR }}/samples/c/build_samples.ps1 -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples - name: Ctest - OpenVINO unit tests shell: cmd diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index e2aeebc9c35e7f..8dec09c0528be4 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -24,6 +24,7 @@ if(UNIX) COMPONENT ${OV_CPACK_COMP_CPP_SAMPLES} ${OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL} PATTERN *.bat EXCLUDE + PATTERN *.ps1 EXCLUDE PATTERN *.sh EXCLUDE PATTERN .clang-format EXCLUDE) @@ -52,7 +53,7 @@ if(UNIX) COMPONENT ${OV_CPACK_COMP_C_SAMPLES} ${OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL}) elseif(WIN32) - install(PROGRAMS cpp/build_samples_msvc.bat + install(PROGRAMS cpp/build_samples_msvc.bat cpp/build_samples.ps1 DESTINATION ${OV_CPACK_SAMPLESDIR}/c COMPONENT ${OV_CPACK_COMP_C_SAMPLES} ${OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL}) diff --git a/samples/cpp/build_samples.ps1 b/samples/cpp/build_samples.ps1 new file mode 100644 index 00000000000000..965a41969cc07f --- /dev/null +++ b/samples/cpp/build_samples.ps1 @@ -0,0 +1,66 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Arguments parsing +param ( + [Alias("b")] + [string]$BuildDirectory = "", + + [Alias("i")] + [string]$InstallDirectory = "", + + [Alias("h")] + [switch]$Help +) + +$ErrorActionPreference = "Stop" + +$SourceDirectory = Split-Path $MyInvocation.MyCommand.Path +$SamplesType = (Get-Item $SourceDirectory).Name +$DefaultBuildDirectory = "$Env:USERPROFILE/Documents/Intel/OpenVINO/openvino_${SamplesType}_samples_build" + +if ($Help) { + Write-Host " + Build OpenVINO Runtime samples + Options: + -h/-Help Print the help message and exit + -b/-BuildDirectory Specify the samples build directory. Default is '$DefaultBuildDirectory' + -i/-InstallDirectory Specify the samples install directory + " + exit 0 +} + +$BuildDirectory = if ($BuildDirectory) {$BuildDirectory} else {$DefaultBuildDirectory} + +if (-not $Env:INTEL_OPENVINO_DIR) { + $SetupVars = Join-Path $SourceDirectory "../../setupvars.ps1" + if (Test-Path $SetupVars) { + . $SetupVars + } + else + { + Write-Host " + Failed to set the environment variables automatically. To fix, run the following command: + /setupvars.ps1 + where is the OpenVINO installation directory + " + exit 1 + } +} + +Set-Location -Path $SourceDirectory +New-Item -Path $BuildDirectory -ItemType Directory -Force +Set-Location $BuildDirectory +cmake -DCMAKE_DISABLE_FIND_PACKAGE_PkgConfig=ON $SourceDirectory + +Write-Host "Building command: cmake --build . --config Release --parallel" +cmake --build . 
--config Release --parallel + +if ($InstallDirectory) { + cmake -DCMAKE_INSTALL_PREFIX="$InstallDirectory" -DCOMPONENT=samples_bin -P "$BuildDirectory/cmake_install.cmake" + Write-Host "Samples are built and installed into $InstallDirectory" +} +else +{ + Write-Host "Samples are built in $BuildDirectory" +} From 02c47e47b22ffaf6d7fde518993e04cbcfc418a6 Mon Sep 17 00:00:00 2001 From: River Li Date: Sun, 21 Jan 2024 21:50:32 +0800 Subject: [PATCH 108/122] [Core] remove ie_metric_helper (#22251) * [Core] remove ie_metric_helper * Update new autobatch test case --- src/inference/dev_api/ie_metric_helpers.hpp | 57 ------------ src/plugins/auto/src/common.hpp | 2 - src/plugins/intel_cpu/src/compiled_model.cpp | 34 +------ src/plugins/intel_cpu/src/compiled_model.h | 2 - src/plugins/intel_cpu/src/plugin.cpp | 90 ------------------- src/plugins/intel_cpu/src/plugin.h | 2 - .../auto_batching_tests.cpp | 4 +- 7 files changed, 3 insertions(+), 188 deletions(-) delete mode 100644 src/inference/dev_api/ie_metric_helpers.hpp rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/{plugin => ov_plugin}/auto_batching_tests.cpp (93%) diff --git a/src/inference/dev_api/ie_metric_helpers.hpp b/src/inference/dev_api/ie_metric_helpers.hpp deleted file mode 100644 index e45c9a9760e1dc..00000000000000 --- a/src/inference/dev_api/ie_metric_helpers.hpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Defines set of macro to safely set plugin and executable network metric values - * @file ie_metric_helpers.hpp - */ - -#pragma once - -#include -#include - -/** - * @cond - */ - -namespace InferenceEngine { -namespace Metrics { - -template -struct MetricType; - -#define DECLARE_METRIC_KEY_IMPL(name, ...) \ - struct name {}; \ - template <> \ - struct MetricType { \ - using type = __VA_ARGS__; \ - } - -} // namespace Metrics -} // namespace InferenceEngine - -/** - * @endcond - */ - -/** - * @def IE_SET_METRIC_RETURN(name, ...) - * @ingroup ie_dev_api - * @brief Return metric value with specified @p name and arguments `...`. Example: - * @code - * IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys); - * @endcode - * - * @param name The metric name - * @param ... A metric value - * - * @return A metric value wrapped with Parameter and returned to a calling function - */ -#define IE_SET_METRIC_RETURN(name, ...) 
\ - typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::name>::type _##name##_value = \ - __VA_ARGS__; \ - return _##name##_value - -#include "ie_plugin_config.hpp" diff --git a/src/plugins/auto/src/common.hpp b/src/plugins/auto/src/common.hpp index dbb833e34f7909..e0827181aab86b 100644 --- a/src/plugins/auto/src/common.hpp +++ b/src/plugins/auto/src/common.hpp @@ -8,8 +8,6 @@ #include #include #include "ie_icore.hpp" -#include "ie_metric_helpers.hpp" -#include #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/isync_infer_request.hpp" #include "openvino/runtime/iasync_infer_request.hpp" diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index f81f59f94ae418..e11445781e34e8 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -1,7 +1,6 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "ie_metric_helpers.hpp" // must be included first #include "compiled_model.h" #include "async_infer_request.h" @@ -164,35 +163,6 @@ std::shared_ptr CompiledModel::get_runtime_model() const { return get_graph()._graph.dump(); } -ov::Any CompiledModel::get_metric_legacy(const std::string& name, const GraphGuard& graph) const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (name == METRIC_KEY(NETWORK_NAME)) { - IE_SET_METRIC_RETURN(NETWORK_NAME, graph.dump()->get_friendly_name()); - } else if (name == METRIC_KEY(SUPPORTED_METRICS)) { - std::vector metrics; - metrics.push_back(METRIC_KEY(NETWORK_NAME)); - metrics.push_back(METRIC_KEY(SUPPORTED_METRICS)); - metrics.push_back(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - metrics.push_back(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); - IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics); - } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - std::vector configKeys; - for (auto&& key : graph.getConfig()._config) { - configKeys.push_back(key.first); - } - IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys); - } else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) { - Config engConfig = graph.getConfig(); - auto option = engConfig._config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)); - OPENVINO_ASSERT(option != engConfig._config.end()); - auto streams = std::stoi(option->second); - IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast(streams ? streams : 1)); - } else { - OPENVINO_THROW("Unsupported property: ", name); - } - OPENVINO_SUPPRESS_DEPRECATED_END -} - ov::Any CompiledModel::get_property(const std::string& name) const { if (m_graphs.empty()) OPENVINO_THROW("No graph was found"); @@ -298,9 +268,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type( config.fcSparseWeiDecompressionRate); } - /* Internally legacy parameters are used with new API as part of migration procedure. 
- * This fallback can be removed as soon as migration completed */ - return get_metric_legacy(name, graph); + OPENVINO_THROW("Unsupported property: ", name); } void CompiledModel::export_model(std::ostream& modelStream) const { diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index a715713094399f..d1527ef9202603 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -73,8 +73,6 @@ class CompiledModel : public ov::ICompiledModel { * even from main thread */ GraphGuard::Lock get_graph() const; - - ov::Any get_metric_legacy(const std::string& name, const GraphGuard& graph) const; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 7fa87cae09f948..a6afa0386ae851 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -183,10 +183,6 @@ Engine::~Engine() { } static bool streamsSet(const ov::AnyMap& config) { - OPENVINO_SUPPRESS_DEPRECATED_START - if (config.count(InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS)) - return true; - OPENVINO_SUPPRESS_DEPRECATED_END return config.count(ov::num_streams.name()); } @@ -625,24 +621,7 @@ bool Engine::is_legacy_api() const { return !get_core()->is_new_api(); } -ov::Any Engine::get_property_legacy(const std::string& name, const ov::AnyMap& options) const { - ov::Any result; - auto option = engConfig._config.find(name); - if (option != engConfig._config.end()) { - result = option->second; - } else { - return get_metric_legacy(name, options); - } - return result; -} - ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) const { - if (is_legacy_api()) { - auto ret = get_property_legacy(name, options); - if (!ret.empty()) - return ret; - } - if (name == ov::optimal_number_of_infer_requests) { const auto streams = engConfig.streamExecutorConfig._streams; return decltype(ov::optimal_number_of_infer_requests)::value_type( @@ -717,71 +696,7 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) return get_ro_property(name, options); } -ov::Any Engine::get_metric_legacy(const std::string& name, const ov::AnyMap& options) const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (name == METRIC_KEY(SUPPORTED_METRICS)) { - std::vector metrics = { - METRIC_KEY(AVAILABLE_DEVICES), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(FULL_DEVICE_NAME), - METRIC_KEY(OPTIMIZATION_CAPABILITIES), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS), - METRIC_KEY(RANGE_FOR_STREAMS), - METRIC_KEY(IMPORT_EXPORT_SUPPORT), - }; - return metrics; - } else if (name == ov::device::full_name.name()) { - return decltype(ov::device::full_name)::value_type(deviceFullName); - } else if (name == ov::available_devices.name()) { - std::vector availableDevices = {""}; - return decltype(ov::available_devices)::value_type(std::move(availableDevices)); - } else if (name == ov::device::capabilities.name()) { - std::vector capabilities; - if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16)) - capabilities.push_back(METRIC_VALUE(BF16)); - if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) - capabilities.push_back(METRIC_VALUE(WINOGRAD)); - capabilities.push_back(METRIC_VALUE(FP32)); - capabilities.push_back(METRIC_VALUE(FP16)); - capabilities.push_back(METRIC_VALUE(INT8)); - capabilities.push_back(METRIC_VALUE(BIN)); - return 
decltype(ov::device::capabilities)::value_type(std::move(capabilities)); - } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - std::vector configKeys; - for (auto&& opt : engConfig._config) - configKeys.push_back(opt.first); - return configKeys; - } else if (name == ov::range_for_async_infer_requests.name()) { - std::tuple range = std::make_tuple(1, 1, 1); - return decltype(ov::range_for_async_infer_requests)::value_type(range); - } else if (name == ov::range_for_streams.name()) { - std::tuple range = std::make_tuple(1, parallel_get_max_threads()); - return decltype(ov::range_for_streams)::value_type(range); - } else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) { - return true; - } else if (ov::internal::supported_properties.name() == name) { - return decltype(ov::internal::supported_properties)::value_type{ - ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}, - ov::PropertyName{ov::internal::exclusive_async_requests.name(), ov::PropertyMutability::RW}, - ov::PropertyName{ov::internal::compiled_model_runtime_properties.name(), ov::PropertyMutability::RO}, - ov::PropertyName{ov::internal::compiled_model_runtime_properties_supported.name(), ov::PropertyMutability::RO}}; - } else if (name == ov::internal::caching_properties) { - std::vector cachingProperties = {ov::device::full_name.name()}; - return decltype(ov::internal::caching_properties)::value_type(std::move(cachingProperties)); - } - - return {}; - OPENVINO_SUPPRESS_DEPRECATED_END -} - ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& options) const { - if (is_legacy_api()) { - ov::Any ret = get_metric_legacy(name, options); - if (!ret.empty()) - return ret; - } - auto RO_property = [](const std::string& propertyName) { return ov::PropertyName(propertyName, ov::PropertyMutability::RO); }; @@ -858,11 +773,6 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio } else if (name == ov::intel_cpu::sparse_weights_decompression_rate) { return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type(engConfig.fcSparseWeiDecompressionRate); } - /* Internally legacy parameters are used with new API as part of migration procedure. 
- * This fallback can be removed as soon as migration completed */ - auto ret = get_metric_legacy(name, options); - if(!ret.empty()) - return ret; OPENVINO_THROW("Cannot get unsupported property: ", name); } diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 756387aa48a13d..53f52706f3c0fd 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -47,9 +47,7 @@ class Engine : public ov::IPlugin { bool is_legacy_api() const; ov::Any get_ro_property(const std::string& name, const ov::AnyMap& options) const; - ov::Any get_metric_legacy(const std::string& name, const ov::AnyMap& options) const; - ov::Any get_property_legacy(const std::string& name, const ov::AnyMap& options) const; void apply_performance_hints(ov::AnyMap &config, const std::shared_ptr& model) const; void get_performance_streams(Config &config, const std::shared_ptr& model) const; StreamCfg get_streams_num(ov::threading::IStreamsExecutor::ThreadBindingType thread_binding_type, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp similarity index 93% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp index 6d05ac79e467f7..d848a5a45a21a6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp @@ -1,13 +1,13 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include +#include "behavior/ov_plugin/auto_batching_tests.hpp" const std::vector get_vs_set{ true, false }; const std::vector num_streams{ 1, 2 }; const std::vector num_requests{ 1, 3, 8, 9, 16, 64 }; const std::vector num_batch{ 1, 4, 8, 16, 32, 64, 128, 256 }; -using namespace AutoBatchingTests; +using namespace ov::test::behavior; namespace { INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_CPU, AutoBatching_Test, From c39b1f68ba2ff9b029820a5688132bded48114b1 Mon Sep 17 00:00:00 2001 From: River Li Date: Mon, 22 Jan 2024 14:26:42 +0800 Subject: [PATCH 109/122] [CPU] remove OPENVINO_SUPPRESS_DEPRECATED_START/END (#22290) --- src/plugins/intel_cpu/src/config.cpp | 43 ------------------------- src/plugins/intel_cpu/src/plugin.cpp | 10 ------ src/plugins/intel_cpu/src/serialize.cpp | 5 --- 3 files changed, 58 deletions(-) diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 8064339682160c..6b84bb5a3283e1 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -197,11 +197,6 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { ov::internal::exclusive_async_requests.name(), ". Expected only true/false"); } - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key.compare(InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT) == 0) { - // empty string means that dumping is switched off - dumpToDot = val.as(); - OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::intel_cpu::lp_transforms_mode.name()) { try { lpTransformsMode = val.as() ? 
LPTransformsMode::On : LPTransformsMode::Off; @@ -217,29 +212,6 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (!device_id.empty()) { OPENVINO_THROW("CPU plugin supports only '' as device id"); } - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key == InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16) { - bool enable; - try { - enable = val.as(); - } catch (ov::Exception&) { - OPENVINO_THROW("Wrong value ", - val.as(), - " for property key ", - key, - ". Expected only true/false"); - } - if (enable) { - if (hasHardwareSupport(ov::element::bf16)) { - inferencePrecision = ov::element::bf16; - } else { - OPENVINO_THROW("Platform doesn't support BF16 format"); - } - } else { - inferencePrecision = ov::element::f32; - } - inferencePrecisionSetExplicitly = true; - OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::hint::inference_precision.name()) { try { auto const prec = val.as(); @@ -391,21 +363,6 @@ void Config::updateProperties() { _config.insert({ov::hint::performance_mode.name(), ov::util::to_string(hintPerfMode)}); _config.insert({ov::hint::num_requests.name(), std::to_string(hintNumRequests)}); - - OPENVINO_SUPPRESS_DEPRECATED_START - if (inferencePrecision == ov::element::bf16) { - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}); - } else { - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}); - } - _config.insert({InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - std::to_string(streamExecutorConfig._streams)}); - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads)}); - _config.insert({InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot}); - OPENVINO_SUPPRESS_DEPRECATED_END } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index a6afa0386ae851..12b29a5cbb3192 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -293,14 +293,8 @@ void Engine::apply_performance_hints(ov::AnyMap& config, const std::shared_ptr& model, bool imported) const { diff --git a/src/plugins/intel_cpu/src/serialize.cpp b/src/plugins/intel_cpu/src/serialize.cpp index 777d7ea8a04ecc..0b91061684e741 100644 --- a/src/plugins/intel_cpu/src/serialize.cpp +++ b/src/plugins/intel_cpu/src/serialize.cpp @@ -27,7 +27,6 @@ static void setInfo(pugi::xml_node& root, std::shared_ptr& model) { ModelSerializer::ModelSerializer(std::ostream& ostream) : _ostream(ostream) {} void ModelSerializer::operator<<(const std::shared_ptr& model) { - OPENVINO_SUPPRESS_DEPRECATED_START auto serializeInfo = [&](std::ostream& stream) { const std::string name = "cnndata"; pugi::xml_document xml_doc; @@ -41,9 +40,7 @@ void ModelSerializer::operator<<(const std::shared_ptr& model) { xml_doc.save(stream); }; - // Serialize to old representation in case of old API ov::pass::StreamSerialize serializer(_ostream, serializeInfo); - OPENVINO_SUPPRESS_DEPRECATED_END serializer.run_on_model(std::const_pointer_cast(model->clone())); } @@ -64,7 +61,6 @@ void ModelDeserializer::operator>>(std::shared_ptr& model) { // read model input/output precisions _istream.seekg(hdr.custom_data_offset); - OPENVINO_SUPPRESS_DEPRECATED_START pugi::xml_document xmlInOutDoc; if (hdr.custom_data_size > 0) { std::string xmlInOutString; @@ -75,7 +71,6 @@ void 
ModelDeserializer::operator>>(std::shared_ptr& model) { OPENVINO_THROW("NetworkNotRead: The inputs and outputs information is invalid."); } } - OPENVINO_SUPPRESS_DEPRECATED_END // read blob content _istream.seekg(hdr.consts_offset); From 639c155331db710d9d17199b43967b4e70200585 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Mon, 22 Jan 2024 10:27:40 +0400 Subject: [PATCH 110/122] [GPU] Remove internal swish fusion pass (#22261) --- .../prepare_primitive_fusing.cpp | 66 --------------- .../src/graph/include/pass_manager.h | 1 - .../unit/fusions/convolution_fusion_test.cpp | 83 ------------------- 3 files changed, 150 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index c6faa671dc8cd5..57947bc6da9c83 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -56,7 +56,6 @@ using namespace cldnn; void prepare_primitive_fusing::run(program& p) { fuse_reorders(p); remove_redundant_reshape(p); - fuse_sigmoid_mul_to_swish(p); fuse_bias(p); fuse_simple_primitives(p); fuse_constant_transposes(p); @@ -124,71 +123,6 @@ void prepare_primitive_fusing::remove_redundant_reshape(program &p) { } } -void prepare_primitive_fusing::fuse_sigmoid_mul_to_swish(program &p) { - auto itr = p.get_processing_order().begin(); - while (itr != p.get_processing_order().end()) { - auto node_itr = itr++; - auto& node = (*node_itr); - - if (node->is_output()) - continue; - - program_helpers::do_for_types(*node, [&p](eltwise_node& node) { - if (node.get_dependencies().size() != 2) - return; - - if (node.get_primitive()->mode != eltwise_mode::prod) - return; - - auto& mul = node; - program_node* activation_input = nullptr; - size_t values_id = 1; - if (node.get_dependency(0).is_type()) { - activation_input = &node.get_dependency(0); - } else if (node.get_dependency(1).is_type()) { - activation_input = &node.get_dependency(1); - values_id = 0; - } - - if (!activation_input) - return; - - if (activation_input->as().get_primitive()->activation_function != activation_func::logistic) - return; - - auto& sigmoid = activation_input->as(); - - if (sigmoid.is_output() || sigmoid.get_users().size() != 1) - return; - - auto& input = node.get_dependency(values_id); - - if (&input != &sigmoid.input()) - return; - - activation_additional_params swish_params = {1.0f, 0.0f}; - auto swish_prim = std::make_shared(mul.id() + "_swish", input.id(), activation_func::swish, swish_params); - auto& swish = p.get_or_create(swish_prim); - - p.add_optimized_primitive_info(node.id(), {swish.id()}); - p.add_optimized_primitive_info(sigmoid.id(), {swish.id()}); - - p.add_connection(input, swish); - p.replace_all_usages(mul, swish); - - p.remove_all_connections(mul); - p.remove_all_connections(sigmoid); - - p.remove_if_dangling(mul); - p.remove_if_dangling(sigmoid); - - p.get_processing_order().insert_next(&input, &swish); - - swish.recalc_output_layout(); - }); - } -} - void prepare_primitive_fusing::fuse_reorders(program &p) { // This loop tries fusing several reorders one by one (if present) into one reorder auto itr = p.get_processing_order().begin(); diff --git a/src/plugins/intel_gpu/src/graph/include/pass_manager.h b/src/plugins/intel_gpu/src/graph/include/pass_manager.h index 93bc4338072094..8c92ec4f5c6886 100644 --- a/src/plugins/intel_gpu/src/graph/include/pass_manager.h +++ 
b/src/plugins/intel_gpu/src/graph/include/pass_manager.h @@ -177,7 +177,6 @@ class prepare_primitive_fusing : public base_pass { private: void run(program& p) override; - void fuse_sigmoid_mul_to_swish(program &p); void fuse_bias(program &p); void fuse_reorders(program& p); void fuse_simple_primitives(program &p); diff --git a/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp index df20a50e33b9b2..abbfea890f9cd0 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp @@ -1488,43 +1488,6 @@ TEST_P(conv_fp32_eltwise_b_fs_zyx_fsv16, vector_ops) { execute(p); } -class conv_fp32_swish : public ConvFusingTest {}; -TEST_P(conv_fp32_swish, basic) { - auto p = GetParam(); - create_topologies( - input_layout("input", get_input_layout(p)), - data("weights", get_mem(get_weights_layout(p))), - data("bias", get_mem(get_per_channel_layout(p))), - convolution("conv_prim", input_info("input"), "weights", "bias", p.groups, p.stride, p.dilation, p.pad, p.pad, format::is_grouped(get_weights_layout(p).format)), - activation("sigmoid", input_info("conv_prim"), activation_func::logistic), - eltwise("mul", { input_info("conv_prim"), input_info("sigmoid") }, eltwise_mode::prod), - reorder("reorder_bfyx", input_info("mul"), p.default_format, data_types::f32) - ); - - if (engine.get_device_info().supports_immad && - p.default_type == data_types::f16) { - GTEST_SKIP(); // Issue: 94154 - } - - tolerance = default_tolerance(p.default_type); - if (p.default_type == data_types::f16) { - tolerance *= 3.f; // Issue: 94154 - } - execute(p); -} - -INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_fp32_swish, ::testing::ValuesIn(std::vector{ - // convolution_test_params{ CASE_CONV_FP32_1, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_2, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_3, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_4, 2, 2, 4 }, - - // convolution_test_params{ CASE_CONV_FP32_1, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_2, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_3, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_4, 2, 2, 4 }, -})); - INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_fp32_eltwise_b_fs_zyx_fsv16, ::testing::ValuesIn(std::vector{ convolution_test_params{ CASE_CONV_FP32_6, 2, 2, 3 }, convolution_test_params{ CASE_CONV_FP32_7, 2, 2, 3 }, @@ -2030,52 +1993,6 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_eltwise, ::testing::ValuesIn(std convolution_test_params{ CASE_CONV3D_S8S8_5, 2, 2, 3 }, })); -class conv_int8_scale_shift_swish : public ConvFusingTest {}; -TEST_P(conv_int8_scale_shift_swish, basic) { - auto p = GetParam(); - create_topologies( - input_layout("input", get_input_layout(p)), - data("weights", get_mem(get_weights_layout(p))), - data("bias", get_mem(get_per_channel_layout(p))), - data("scale_data", get_mem(get_per_channel_layout(p), 1.0f/255.f)), - data("shift_data", get_mem(get_per_channel_layout(p), 1)), - convolution("conv_prim", input_info("input"), "weights", "bias", p.groups, p.stride, p.dilation, p.pad, p.pad, format::is_grouped(get_weights_layout(p).format)), - eltwise("scale0", { input_info("conv_prim"), input_info("scale_data") }, eltwise_mode::prod), - eltwise("scale1", { input_info("conv_prim"), input_info("scale_data") }, eltwise_mode::prod), - eltwise("shift0", { input_info("scale0"), input_info("shift_data") }, eltwise_mode::sum), - 
eltwise("shift1", { input_info("scale1"), input_info("shift_data") }, eltwise_mode::sum), - activation("sigmoid", input_info("shift0"), activation_func::logistic), - eltwise("mul", { input_info("shift1"), input_info("sigmoid") }, eltwise_mode::prod), - reorder("reorder_bfyx", input_info("mul"), p.default_format, data_types::f32) - ); - - // high tolerance because many eltwise operations - tolerance = default_tolerance(p.default_type) * 10; - execute(p, -20, 20); -} - -INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_scale_shift_swish, ::testing::ValuesIn(std::vector{ - convolution_test_params{ CASE_CONV_U8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_4, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_4, 2, 2, 8 }, - - convolution_test_params{ CASE_CONV3D_U8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_4, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_5, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_4, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_5, 2, 2, 8 }, -})); - class conv_int8_prelu_eltwise : public ConvFusingTest {}; TEST_P(conv_int8_prelu_eltwise, basic) { auto p = GetParam(); From 959306fa98f85e95eaf4e44b8c34dbe4e107aa7a Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Mon, 22 Jan 2024 09:25:26 +0100 Subject: [PATCH 111/122] Add runable torchvision preprocessing script to samples (#22288) * Add runable docs * Update docs/snippets/torchvision_preprocessing.py Co-authored-by: Anastasia Kuporosova --------- Co-authored-by: Ilya Lavrenov Co-authored-by: Anastasia Kuporosova --- .../torchvision_preprocessing_converter.rst | 35 +--------- docs/snippets/torchvision_preprocessing.py | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+), 32 deletions(-) create mode 100644 docs/snippets/torchvision_preprocessing.py diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst index 264edda073b2d6..5d6bd9c4633e4b 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst @@ -37,35 +37,6 @@ and enabling additional performance optimizations. Example ################### -.. 
code-block:: py - - preprocess_pipeline = torchvision.transforms.Compose( - [ - torchvision.transforms.Resize(256, interpolation=transforms.InterpolationMode.NEAREST), - torchvision.transforms.CenterCrop((216, 218)), - torchvision.transforms.Pad((2, 3, 4, 5), fill=3), - torchvision.transforms.ToTensor(), - torchvision.transforms.ConvertImageDtype(torch.float32), - torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - - torch_model = SimpleConvnet(input_channels=3) - - torch.onnx.export(torch_model, torch.randn(1, 3, 224, 224), "test_convnet.onnx", verbose=False, input_names=["input"], output_names=["output"]) - core = Core() - ov_model = core.read_model(model="test_convnet.onnx") - - test_input = np.random.randint(255, size=(260, 260, 3), dtype=np.uint16) - ov_model = PreprocessConverter.from_torchvision( - model=ov_model, transform=preprocess_pipeline, input_example=Image.fromarray(test_input.astype("uint8"), "RGB") - ) - ov_model = core.compile_model(ov_model, "CPU") - ov_input = np.expand_dims(test_input, axis=0) - output = ov_model.output(0) - ov_result = ov_model(ov_input)[output] - - - - - +.. doxygensnippet:: docs/snippets/torchvision_preprocessing.py + :language: Python + :fragment: torchvision_preprocessing diff --git a/docs/snippets/torchvision_preprocessing.py b/docs/snippets/torchvision_preprocessing.py new file mode 100644 index 00000000000000..45452059102bce --- /dev/null +++ b/docs/snippets/torchvision_preprocessing.py @@ -0,0 +1,65 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +def main(): + + #! [torchvision_preprocessing] + import torch.nn.functional as f + import openvino as ov + import numpy as np + import torchvision + import torch + import os + + from openvino.preprocess.torchvision import PreprocessConverter + from PIL import Image + + + # 1. Create a sample model + class Convnet(torch.nn.Module): + def __init__(self, input_channels): + super(Convnet, self).__init__() + self.conv1 = torch.nn.Conv2d(input_channels, 6, 5) + self.conv2 = torch.nn.Conv2d(6, 16, 3) + + def forward(self, data): + data = f.max_pool2d(f.relu(self.conv1(data)), 2) + data = f.max_pool2d(f.relu(self.conv2(data)), 2) + return data + + + # 2. Define torchvision preprocessing pipeline + preprocess_pipeline = torchvision.transforms.Compose( + [ + torchvision.transforms.Resize(256, interpolation=torchvision.transforms.InterpolationMode.NEAREST), + torchvision.transforms.CenterCrop((216, 218)), + torchvision.transforms.Pad((2, 3, 4, 5), fill=3), + torchvision.transforms.ToTensor(), + torchvision.transforms.ConvertImageDtype(torch.float32), + torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + + # 3. Read the model into OpenVINO + torch_model = Convnet(input_channels=3) + torch.onnx.export(torch_model, torch.randn(1, 3, 224, 224), "test_convnet.onnx", verbose=False, input_names=["input"], output_names=["output"]) + core = ov.Core() + ov_model = core.read_model(model="test_convnet.onnx") + if os.path.exists("test_convnet.onnx"): + os.remove("test_convnet.onnx") + test_input = np.random.randint(255, size=(260, 260, 3), dtype=np.uint16) + + # 4. Embed the torchvision preprocessing into OpenVINO model + ov_model = PreprocessConverter.from_torchvision( + model=ov_model, transform=preprocess_pipeline, input_example=Image.fromarray(test_input.astype("uint8"), "RGB") + ) + ov_model = core.compile_model(ov_model, "CPU") + + # 5. 
Perform inference + ov_input = np.expand_dims(test_input, axis=0) + output = ov_model.output(0) + ov_result = ov_model(ov_input)[output] + #! [torchvision_preprocessing] + + return 0 From b3a8f414c99008db788539c5e3074c565cfee758 Mon Sep 17 00:00:00 2001 From: Gorokhov Dmitriy Date: Mon, 22 Jan 2024 13:10:52 +0400 Subject: [PATCH 112/122] Added debug caps build for Linux CC config (#22298) --- .github/workflows/linux_conditional_compilation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index 607cbfd2dcf720..b537810ff5c414 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -151,6 +151,7 @@ jobs: -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DENABLE_PROFILING_ITT=ON \ -DSELECTIVE_BUILD=COLLECT \ + -DENABLE_DEBUG_CAPS=ON \ -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ -S ${OPENVINO_REPO} \ From 92a1f959d688e4bccbf510fdc9f4f41c3141bac5 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Mon, 22 Jan 2024 10:40:00 +0100 Subject: [PATCH 113/122] [PT FE] Save state_dict names in Constant name (#22242) * Save state_dict names in Constant name * Fix issue with cert * Fix cert issue again --- .github/workflows/windows.yml | 6 +++++ .../openvino/frontend/pytorch/ts_decoder.py | 20 ++++++++++++---- .../src/openvino/frontend/pytorch/utils.py | 4 +++- src/frontends/pytorch/src/node_context.cpp | 1 - src/frontends/pytorch/src/op/get_attr.cpp | 4 ++++ .../py_frontend_tests/test_torch_frontend.py | 23 +++++++++++++++++++ 6 files changed, 52 insertions(+), 6 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 06b51728ca0a74..daf14e16fd4185 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -442,6 +442,12 @@ jobs: # TODO: replace with Python API tests requirements python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt + # For getting rid of SSL issues during model downloading for unit tests + python3 -m pip install certifi + + - name: Set SSL_CERT_FILE for model downloading for unit tests + run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV + - name: Python API Tests #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 2ea7eb6d68b74b..7c2c53e68b0065 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -64,6 +64,7 @@ def __init__(self, pt_module, graph_element=None, example_input=None, alias_db=N self._transform_tensor_list_constants_to_listconstruct( self.graph_element) self._transform_optional_constants(self.graph_element) + self.out_debug_name_overwrites = {} @staticmethod def _get_preserved_attributes(model) -> list: @@ -165,6 +166,8 @@ def get_input_type(self, index: int): return self.get_type_for_value(raw_input) def get_output_debug_name(self, index: int) -> str: + if index in self.out_debug_name_overwrites: + return self.out_debug_name_overwrites[index] return self._raw_output(index).debugName() def get_output_shape(self, index: int): @@ -283,7 +286,7 @@ def mark_node(self, node): return node def try_decode_get_attr(self): - pt_value = 
get_value_from_getattr(self.graph_element, self.pt_module) + pt_value, name = get_value_from_getattr(self.graph_element, self.pt_module) assert pt_value is not None, "Couldn't retrieve value from prim::GetAttr" if isinstance(pt_value, torch.ScriptObject): # We assume this is __torch__.torch.classes.quantized.Conv2dPackedParamsBase or __torch__.torch.classes.quantized.LinearPackedParamsBase @@ -313,7 +316,12 @@ def try_decode_get_attr(self): pass return res elif not isinstance(pt_value, (torch.jit.ScriptModule, torch.jit.TracedModule)): - return ivalue_to_constant(pt_value, shared_memory=self._shared_memory) + const = ivalue_to_constant(pt_value, shared_memory=self._shared_memory) + if len(const) > 0: + # set name corresponding to state_dict name + const[0].get_node().set_friendly_name(name) + self.out_debug_name_overwrites[0] = name + return const else: return [] @@ -328,7 +336,11 @@ def as_constant(self): return ivalue_to_constant(pt_value.toIValue(), shared_memory=self._shared_memory) if isinstance(pt_type, torch.ListType): return self._as_constant_list(pt_value) - return ivalue_to_constant(pt_value.toIValue(), shared_memory=self._shared_memory) + const = ivalue_to_constant(pt_value.toIValue(), shared_memory=self._shared_memory) + if len(const) > 0: + # set name corresponding to state_dict name + const[0].get_node().set_friendly_name(self.get_output_debug_name(0)) + return const def as_string(self): if self.get_op_type() == "prim::Constant": @@ -377,7 +389,7 @@ def input_is_none(self, index: int) -> bool: else: in_node = r_input.node() if in_node.kind() == "prim::GetAttr": - pt_value = get_value_from_getattr(in_node, self.pt_module) + pt_value, _ = get_value_from_getattr(in_node, self.pt_module) return pt_value is None return False diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index 0d47227c7c6f1d..0225603e78ae54 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -99,13 +99,15 @@ def get_value_from_getattr(getattr_node, self_module): break getattr_node = inputs[0].node() module = self_module + path_name = "self" while len(stack) > 0: node = stack.pop() attr_name = node.s("name") assert hasattr( module, attr_name), f"No attribute with name \"{attr_name}\" found in module." 
+ path_name = ".".join([path_name, attr_name]) module = getattr(module, attr_name) - return module + return module, path_name def graph_has_ops(graph, op_types:list) -> bool: res = False diff --git a/src/frontends/pytorch/src/node_context.cpp b/src/frontends/pytorch/src/node_context.cpp index 364999ccbe13cb..0ebd26d3b8e901 100644 --- a/src/frontends/pytorch/src/node_context.cpp +++ b/src/frontends/pytorch/src/node_context.cpp @@ -43,7 +43,6 @@ OutputVector NodeContext::as_constant() const { } else { auto c_outs = m_decoder->as_constant(); FRONT_END_OP_CONVERSION_CHECK(c_outs.size() == 1, "Constant must have exactly one output."); - c_outs[0].get_node_shared_ptr()->set_friendly_name(m_decoder->get_output_debug_name(0)); return c_outs; } } diff --git a/src/frontends/pytorch/src/op/get_attr.cpp b/src/frontends/pytorch/src/op/get_attr.cpp index 9896ec65525ba9..58bc63e60a700e 100644 --- a/src/frontends/pytorch/src/op/get_attr.cpp +++ b/src/frontends/pytorch/src/op/get_attr.cpp @@ -17,6 +17,10 @@ OutputVector translate_get_attr(const NodeContext& context) { "Failed to obtain data from GetAttr with output tensor name: ", context.get_decoder()->get_output_debug_name(0)); if (res.size() == 1) { + auto node = res[0].get_node(); + if (node->get_friendly_name() != node->get_name()) { + res[0].add_names({node->get_friendly_name()}); + } return res; } else { // Packed params case diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index f76c7b1fa97ba8..7c0051c0feea11 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -295,3 +295,26 @@ def add_ext(front_end, stat): assert tel_stat["send_event"] == 2 assert tel_stat["send_error"] == 0 assert tel_stat["send_stack_trace"] == 0 + + +def test_state_dict_names(): + from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder + + import torchvision + model = torch.hub.load("pytorch/vision", "resnet18", weights="DEFAULT") + decoder = TorchScriptPythonDecoder( + model, example_input=(torch.randn(1, 3, 224, 224),)) + fe_manager = FrontEndManager() + fe = fe_manager.load_by_framework("pytorch") + im = fe.load(decoder) + om = fe.convert(im) + state_dict_keys = set( + name for name in model.state_dict().keys() if "_tracked" not in name) + common_names = set() + for n in om.get_ops(): + if "Constant" in n.get_type_name(): + for name in n.output(0).names: + matches = [k for k in state_dict_keys if name.startswith("self." 
+ k)] + if (len(matches) > 0): + common_names.update(matches) + assert state_dict_keys == common_names, f"Not all names exist:\nstate_dict:{state_dict_keys}" From 0d95f16ad334492dfc396180fd5657a1c0e95bb1 Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Mon, 22 Jan 2024 09:42:18 +0000 Subject: [PATCH 114/122] [Snippets] Fix PerfCount infrastructure (#22162) --- .../snippets/lowered/expression_factory.hpp | 17 +++++ .../lowered/pass/insert_perf_count.hpp | 5 +- .../src/lowered/expression_factory.cpp | 25 +++++++ .../src/lowered/pass/insert_perf_count.cpp | 65 +++++++++---------- src/common/snippets/src/op/subgraph.cpp | 4 +- .../x64/jit_perf_count_rdtsc_emitters.cpp | 4 +- .../snippets/x64/op/perf_count_rdtsc.hpp | 15 ++++- 7 files changed, 91 insertions(+), 44 deletions(-) diff --git a/src/common/snippets/include/snippets/lowered/expression_factory.hpp b/src/common/snippets/include/snippets/lowered/expression_factory.hpp index f179abf746c313..71337c807b1667 100644 --- a/src/common/snippets/include/snippets/lowered/expression_factory.hpp +++ b/src/common/snippets/include/snippets/lowered/expression_factory.hpp @@ -24,6 +24,12 @@ class LinearIR::ExpressionFactory { return create(loop_begin, params...); } else if (const auto loop_end = ov::as_type_ptr(n)) { return create(loop_end, params...); +#ifdef SNIPPETS_DEBUG_CAPS + } else if (const auto perf_counter = ov::as_type_ptr(n)) { + return create(perf_counter, params...); + } else if (const auto perf_counter = ov::as_type_ptr(n)) { + return create(perf_counter, params...); +#endif } return create(n, params...); } @@ -49,6 +55,17 @@ class LinearIR::ExpressionFactory { static ExpressionPtr create(const std::shared_ptr& n, const std::vector& inputs, const LinearIR& linear_ir); static ExpressionPtr create(const std::shared_ptr& n, const std::vector& inputs, const LinearIR& linear_ir); + // Note: PerfCountBegin nodes have a PerfCountEnd ov::Output, but corresponding expression should not have any outputs to avoid register allocation +#ifdef SNIPPETS_DEBUG_CAPS + static ExpressionPtr create(const std::shared_ptr& n, + const std::vector& inputs, + const LinearIR& linear_ir); + static ExpressionPtr create(const std::shared_ptr& n, + const std::vector& inputs, + const LinearIR& linear_ir); + static ExpressionPtr create_without_connections(const std::shared_ptr& n, const LinearIR& linear_ir); +#endif + // Creates inputs for expression using parent output port connectors static void create_expression_inputs(const LinearIR& linear_ir, const ExpressionPtr& expr); // Creates new output port connectors diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp index 752a2d18dfa5fd..bad6dd3504fdc5 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp @@ -24,8 +24,11 @@ namespace pass { class InsertPerfCount: public Pass { public: OPENVINO_RTTI("InsertPerfCount", "Pass") - InsertPerfCount() = default; + InsertPerfCount(std::map boundary_op_names); bool run(LinearIR& linear_ir) override; + +private: + std::map m_boundary_op_names; }; } // namespace pass diff --git a/src/common/snippets/src/lowered/expression_factory.cpp b/src/common/snippets/src/lowered/expression_factory.cpp index 64b5e2e99be817..3707ad26120c8e 100644 --- a/src/common/snippets/src/lowered/expression_factory.cpp +++ b/src/common/snippets/src/lowered/expression_factory.cpp @@ -117,6 
+117,31 @@ ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr& n, + const std::vector& inputs, + const LinearIR& linear_ir) { + OPENVINO_ASSERT(inputs.empty(), "PerfCountBegin factory does not accept any input connectors"); + return create_without_connections(n, linear_ir); +} + +ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr& n, + const std::vector& inputs, + const LinearIR& linear_ir) { + OPENVINO_ASSERT(inputs.empty(), "PerfCountEnd factory does not accept any input connectors"); + return create_without_connections(n, linear_ir); +} + +ExpressionPtr LinearIR::ExpressionFactory::create_without_connections(const std::shared_ptr& n, + const LinearIR& linear_ir) { + auto expr = std::shared_ptr(new Expression(n, linear_ir.m_shape_infer_factory)); + expr->m_input_port_descriptors.clear(); + expr->m_output_port_descriptors.clear(); + expr->validate(); + return expr; +} +#endif + ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr& n, const std::vector& inputs, const LinearIR& linear_ir) { diff --git a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp index 3214962ef333b5..6ccfbf1094cdc3 100644 --- a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp +++ b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp @@ -5,7 +5,6 @@ #include "snippets/lowered/pass/insert_perf_count.hpp" #include "snippets/lowered/linear_ir.hpp" -#include "snippets/snippets_isa.hpp" #include "snippets/itt.hpp" namespace ov { @@ -13,7 +12,6 @@ namespace snippets { namespace lowered { namespace pass { +InsertPerfCount::InsertPerfCount(std::map boundary_op_names) + : Pass(), m_boundary_op_names(std::move(boundary_op_names)) { +} + bool InsertPerfCount::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InsertPerfCount") if (linear_ir.empty()) return false; + if (m_boundary_op_names.empty()) { + const auto& first_op_name = linear_ir.begin()->get()->get_node()->get_friendly_name(); + const auto& last_op_name = linear_ir.rbegin()->get()->get_node()->get_friendly_name(); + m_boundary_op_names.insert({first_op_name, last_op_name}); + } - auto is_parameter = [](const std::shared_ptr& node) { - return ov::is_type(node); - }; - auto is_result = [](const std::shared_ptr& node) { - return ov::is_type(node); - }; - - // mark perf_count_begin and perf_count_end position - auto perf_count_begin_pos = linear_ir.cbegin(); - auto perf_count_end_pos = perf_count_begin_pos; - bool first_result_marked = false; + size_t seq_number = 0; for (auto expr_it = linear_ir.cbegin(); expr_it != linear_ir.cend(); expr_it++) { - const auto expr = *expr_it; - const auto& node = expr->get_node(); - if (is_parameter(node)) - perf_count_begin_pos = expr_it; - - if (is_result(node) && !first_result_marked) { - perf_count_end_pos = expr_it; - first_result_marked = true; + const auto& op_name = expr_it->get()->get_node()->get_friendly_name(); + const auto& found = m_boundary_op_names.find(op_name); + if (found != m_boundary_op_names.end()) { + const auto perf_count_begin_pos = expr_it; + auto perf_count_end_pos = expr_it; + while (perf_count_end_pos->get()->get_node()->get_friendly_name() != found->second && + perf_count_end_pos != linear_ir.cend()) { + perf_count_end_pos++; + } + OPENVINO_ASSERT(perf_count_end_pos != linear_ir.cend(), "Failed to find requested op name to insert PerfCountEnd"); + const auto& perf_count_begin = std::make_shared(); + 
perf_count_begin->set_friendly_name(std::string("PerfCount_Begin_") + std::to_string(seq_number)); + const auto empty_inputs = std::vector{}; + linear_ir.insert_node(perf_count_begin, empty_inputs, perf_count_begin_pos->get()->get_loop_ids(), false, perf_count_begin_pos); + + const auto& perf_count_end = std::make_shared(perf_count_begin->output(0)); + perf_count_end->set_friendly_name(std::string("PerfCount_End_") + std::to_string(seq_number)); + // linear_ir.insert_node inserts before the given position, so pass the incremented perf_count_end_pos + linear_ir.insert_node(perf_count_end, empty_inputs, perf_count_end_pos->get()->get_loop_ids(), false, next(perf_count_end_pos)); + seq_number++; } } - - // insert perf_count_begin after last parameter - // linear_ir.insert has insert before behavior, need move to next. - const auto empty_inputs = std::vector{}; - const auto last_param_it = perf_count_begin_pos; - perf_count_begin_pos = std::next(perf_count_begin_pos); - const auto& perf_count_begin = std::make_shared(); - linear_ir.insert_node(perf_count_begin, empty_inputs, last_param_it->get()->get_loop_ids(), false, perf_count_begin_pos); - - // insert perf_count_end before first result - const auto& perf_count_end = std::make_shared(perf_count_begin->output(0)); - perf_count_end->set_friendly_name("last_parameter_to_first_result"); - // PerfCountEnd doesn't need PortConnector to PerfCountBegin - linear_ir.insert_node(perf_count_end, empty_inputs, perf_count_end_pos->get()->get_loop_ids(), false, perf_count_end_pos); - return true; } diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index d0bf43006d73a2..6524dcda3d9605 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -471,8 +471,8 @@ snippets::Schedule Subgraph::generate_from_linear_ir(const std::shared_ptr &in_i // iteration++ h->mov(h->rax, reinterpret_cast(&m_end_node->iteration)); - h->mov(h->rdx, qword[h->rax]); - h->add(h->rdx, 0x01); - h->mov(qword[h->rax], h->rdx); + h->inc(qword[h->rax]); h->pop(h->rdx); h->pop(h->rax); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp index e9c0f593cfddeb..5bd27172aea92d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp @@ -7,6 +7,7 @@ #include "openvino/op/op.hpp" #include "snippets/op/perf_count.hpp" +#include using namespace ov::snippets::op; @@ -38,8 +39,16 @@ class PerfCountRdtscEnd : public PerfCountEndBase { PerfCountRdtscEnd(const Output& pc_begin); PerfCountRdtscEnd() = default; ~PerfCountRdtscEnd() { - uint64_t avg = iteration == 0 ? 0 : accumulation / iteration; - std::cout << "accumulation:" << accumulation << " iteration:" << iteration << " avg:" << avg << std::endl; + double avg = 0; + if (iteration != 0) { + // Note: theoretically accumulation could be larger than 2^53, however + // iteration is unlikely to exceed this threshold. 
So here we derive an integral part first + // and cast only the remainder to double + const uint64_t integral = accumulation / iteration; + avg = integral + static_cast(accumulation - integral * iteration) / iteration; + } + std::cerr << "name : " << get_friendly_name() << " : acc : " << accumulation << " : num_hit : " << iteration + << std::fixed << std::setprecision(4) << " : avg : " << avg << std::endl; } std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; @@ -49,7 +58,7 @@ class PerfCountRdtscEnd : public PerfCountEndBase { // in destructor of PerfCountRdtscEnd, output the perf info // accumulation is cycle count uint64_t accumulation = 0ul; - uint32_t iteration = 0u; + uint64_t iteration = 0ul; }; } // namespace intel_cpu From 74909ddaacbcc756f9d6b2c388d519e72c22bb98 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 22 Jan 2024 10:47:19 +0100 Subject: [PATCH 115/122] [TESTS] BenchmarkLayerTest: call infer() inside the overridden validate() (#22266) --- .../include/shared_test_classes/base/benchmark.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp index 8f9d013be4610a..cd0e1375eda074 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp @@ -199,7 +199,7 @@ namespace test { template class BenchmarkLayerTest : public BaseLayerTest { static_assert(std::is_base_of::value, - "BaseLayerTest should inherit from LayerTestsUtils::LayerTestsCommon"); + "BaseLayerTest should inherit from ov::test::SubgraphBaseTest"); public: static constexpr int kDefaultNumberOfAttempts = 100; @@ -226,6 +226,7 @@ class BenchmarkLayerTest : public BaseLayerTest { } void validate() override { + infer(); for (const auto& res : curr_bench_results_) { const auto& node_type_name = res.first; const auto curr_time = static_cast(res.second); From 2d4e4402a60eb6f7a80117fc428905ce13ef3f2a Mon Sep 17 00:00:00 2001 From: Gorokhov Dmitriy Date: Mon, 22 Jan 2024 13:56:05 +0400 Subject: [PATCH 116/122] [CPU][ARM] Fixed debug caps build (#22296) --- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index 47889638b70247..b63def7f19d641 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -516,7 +516,7 @@ void Snippet::SnippetJitExecutor::update_ptrs(jit_snippets_call_args& call_args, } } -#ifdef SNIPPETS_DEBUG_CAPS +#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS) void Snippet::SnippetJitExecutor::segfault_detector() { const auto target = std::dynamic_pointer_cast(snippetAttrs.snippet->get_generator()->get_target_machine()); if (target && target->debug_config.enable_segfault_detector) { From cb6eb83cc16c7bcfbaebbd0f9eb87f7eb660ef21 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 22 Jan 2024 11:57:25 +0100 Subject: [PATCH 117/122] [core] ROI tensor can set new ROI shape (#22257) * Allow set_shape for ROI tensor * Change set ROI shape up to initial ROI size * Fix test value * Update ROI tensor expected error message in python tests * Minor changes in ROI tensor ctor --- .../python/tests/test_runtime/test_tensor.py | 2 +- 
src/core/tests/ov_tensor_test.cpp | 44 +++++++++++++ src/inference/src/dev/make_tensor.cpp | 63 +++++++++++++------ 3 files changed, 90 insertions(+), 19 deletions(-) diff --git a/src/bindings/python/tests/test_runtime/test_tensor.py b/src/bindings/python/tests/test_runtime/test_tensor.py index fca622c7654e7c..da38c93fa911ce 100644 --- a/src/bindings/python/tests/test_runtime/test_tensor.py +++ b/src/bindings/python/tests/test_runtime/test_tensor.py @@ -354,7 +354,7 @@ def test_cannot_create_roi_from_packed_tensor(ov_type): ov_tensor = ov.Tensor(ov_type, [1, 3, 48, 48]) with pytest.raises(RuntimeError) as e: ov.Tensor(ov_tensor, [0, 0, 24, 24], [1, 3, 48, 48]) - assert "ROI Tensor for types with bitwidths less then 8 bit is not implemented" in str(e.value) + assert "for types with bitwidths less then 8 bit" in str(e.value) @pytest.mark.parametrize( diff --git a/src/core/tests/ov_tensor_test.cpp b/src/core/tests/ov_tensor_test.cpp index 9dfbe2853bbdaf..2e64323477ab68 100644 --- a/src/core/tests/ov_tensor_test.cpp +++ b/src/core/tests/ov_tensor_test.cpp @@ -570,6 +570,50 @@ TEST_F(OVTensorTest, makeRangeRoiStringTensor) { ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type()); } +TEST_F(OVTensorTest, setSmallerShapeOnRoiTensor) { + ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; + ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 4}}; + const ov::Shape newShape({1, 1, 3, 2}); + + ASSERT_EQ(roi_tensor.get_shape(), ov::Shape({1, 2, 4, 2})); + + roi_tensor.set_shape(newShape); + ASSERT_EQ(roi_tensor.get_shape(), newShape); +} + +TEST_F(OVTensorTest, setMaxSizeShapeOnRoiTensor) { + ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; + ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}}; + const ov::Shape new_shape({1, 2, 1, 1}); + const ov::Shape roi_capacity({1, 2, 4, 3}); + + ASSERT_EQ(roi_tensor.get_shape(), roi_capacity); + + roi_tensor.set_shape(new_shape); + ASSERT_EQ(roi_tensor.get_shape(), new_shape); + + roi_tensor.set_shape(roi_capacity); + ASSERT_EQ(roi_tensor.get_shape(), roi_capacity); +} + +TEST_F(OVTensorTest, setShapeGtMaxOnRoiTensor) { + ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; + ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}}; + const ov::Shape newShape({0, 0, 0, 0}); + + roi_tensor.set_shape(newShape); + ASSERT_EQ(roi_tensor.get_shape(), newShape); +} + +TEST_F(OVTensorTest, setMinShapeOnRoiTensor) { + ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; + ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}}; + const ov::Shape newShape({1, 3, 6, 3}); // ROI coordinate begin + newShape[2] is bigger than t.shape[2] + + ASSERT_EQ(roi_tensor.get_shape(), ov::Shape({1, 2, 4, 3})); + ASSERT_THROW(roi_tensor.set_shape(newShape), ov::Exception); +} + TEST_F(OVTensorTest, cannotSetShapeOnRoiTensor) { ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; // RGBp picture of size (WxH) = 5x6 ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}}; diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 021ff0c15e312d..e91fccdbbf3c8e 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -17,6 +17,29 @@ namespace ov { +namespace { +Shape make_roi_shape(const Shape& tensor_shape, const Coordinate& begin, const Coordinate& end) { + OPENVINO_ASSERT(tensor_shape.size() == begin.size()); + OPENVINO_ASSERT(begin.size() == end.size()); + + auto roi_shape = Shape(begin.size()); + + auto roi_begin = begin.begin(); + auto roi_end = end.begin(); + auto roi_dim = roi_shape.begin(); + auto max_dim = tensor_shape.begin(); + + 
for (; max_dim != tensor_shape.end(); ++max_dim, ++roi_begin, ++roi_end, ++roi_dim) { + OPENVINO_ASSERT(*roi_begin <= *max_dim); + OPENVINO_ASSERT(*roi_end <= *max_dim); + *roi_dim = *roi_end - *roi_begin; + OPENVINO_ASSERT(*roi_dim <= *max_dim); + } + + return roi_shape; +} +} // namespace + /** * @brief View tensor to external memory * The tensor doesn't own the external memory @@ -156,7 +179,7 @@ class StridedViewTensor : public ViewTensor { * * @param element_type Tensor element type * @param shape Tensor shape - * @param ptr pointer to external memoty + * @param ptr pointer to external memory * @param byte_strides Tensor strides * * @return Shared pointer to tensor interface @@ -266,22 +289,14 @@ std::shared_ptr make_tensor(const element::Type element_type, const Sha */ class RoiTensor : public ITensor { public: - RoiTensor(const std::shared_ptr& owner, const Coordinate& begin, const Coordinate& end) : m_owner{owner} { - OPENVINO_ASSERT(owner->get_element_type().bitwidth() >= 8, + RoiTensor(const std::shared_ptr& owner, const Coordinate& begin, const Coordinate& end) + : m_owner{owner}, + m_shape{make_roi_shape(owner->get_shape(), begin, end)}, + m_capacity{m_shape}, + m_offset{std::inner_product(begin.begin(), begin.end(), get_strides().begin(), static_cast(0))} { + OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, "ROI Tensor for types with bitwidths less then 8 bit is not implemented. Tensor type: ", - owner->get_element_type()); - auto owner_shape = owner->get_shape(); - OPENVINO_ASSERT(owner_shape.size() == begin.size()); - OPENVINO_ASSERT(begin.size() == end.size()); - m_shape.resize(begin.size()); - for (size_t i = 0; i < begin.size(); ++i) { - OPENVINO_ASSERT(begin[i] <= owner_shape[i]); - OPENVINO_ASSERT(end[i] <= owner_shape[i]); - m_shape[i] = end[i] - begin[i]; - OPENVINO_ASSERT(m_shape[i] <= owner_shape[i]); - } - auto& strides = get_strides(); - m_offset = std::inner_product(begin.begin(), begin.end(), strides.begin(), static_cast(0)); + get_element_type()); } const element::Type& get_element_type() const override { @@ -297,7 +312,18 @@ class RoiTensor : public ITensor { } void set_shape(ov::Shape new_shape) override { - OPENVINO_THROW("Shapes cannot be changed for ROI Tensor"); + OPENVINO_ASSERT(new_shape.size() == m_shape.size()); + for (auto new_dim = new_shape.cbegin(), max_dim = m_capacity.cbegin(); new_dim != new_shape.cend(); + ++max_dim, ++new_dim) { + OPENVINO_ASSERT(*new_dim <= *max_dim, + "Cannot set new shape: ", + new_shape, + " for ROI tensor! Dimension: ", + std::distance(new_shape.cbegin(), new_dim), + " is not compatible."); + } + + m_shape = std::move(new_shape); } void* data(const element::Type& element_type) const override { @@ -307,8 +333,9 @@ class RoiTensor : public ITensor { private: std::shared_ptr m_owner; - size_t m_offset; Shape m_shape; + const Shape m_capacity; + const size_t m_offset; }; /** From 29e59809dfaae6141a3f4c602a6380a8cc69cd14 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 22 Jan 2024 12:01:14 +0100 Subject: [PATCH 118/122] Use second sync point in test to fix core dump (#22294) - clean-up Barrier test util class. 
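
For context, the intended synchronization pattern as a minimal hedged C++ sketch (Barrier is the reusable barrier class from the diff below; run_parallel, threads_num, iterations and do_iteration are illustrative stand-ins, not the test's actual helpers):

    #include <cstddef>
    #include <thread>
    #include <vector>

    // Two rendezvous points per iteration: without the second one, a fast
    // thread could start iteration i + 1 and mutate shared state (e.g. the
    // extensions vector) while a slow thread is still inside iteration i.
    void run_parallel(Barrier& sync_point, size_t threads_num, size_t iterations) {
        std::vector<std::thread> workers;
        for (size_t t = 0; t < threads_num; ++t) {
            workers.emplace_back([&]() {
                for (size_t i = 0; i < iterations; ++i) {
                    sync_point.arrive_and_wait();  // start iteration i together
                    do_iteration(i);               // e.g. core.read_model(modelName, weightsName)
                    sync_point.arrive_and_wait();  // finish before anyone starts i + 1
                }
            });
        }
        for (auto& w : workers)
            w.join();
    }
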
--- .../tests/functional/ov_core_threading.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/inference/tests/functional/ov_core_threading.cpp b/src/inference/tests/functional/ov_core_threading.cpp index 29675f21582182..09c88809084663 100644 --- a/src/inference/tests/functional/ov_core_threading.cpp +++ b/src/inference/tests/functional/ov_core_threading.cpp @@ -178,23 +178,23 @@ class Barrier { std::mutex m_mutex; std::condition_variable m_cv; size_t m_count; - size_t m_expected; - size_t m_gen; + const size_t m_expected; + size_t m_wait_id; public: - explicit Barrier(std::size_t count) : m_count{count}, m_expected{count}, m_gen{} {} + explicit Barrier(std::size_t count) : m_count{count}, m_expected{count}, m_wait_id{} {} void arrive_and_wait() { std::unique_lock lock(m_mutex); - auto gen = m_gen; - if (!--m_count) { - ++m_gen; + if (--m_count == 0) { + ++m_wait_id; m_count = m_expected; m_cv.notify_all(); } else { - m_cv.wait(lock, [this, gen] { - return gen != m_gen; + const auto wait_id = m_wait_id; + m_cv.wait(lock, [this, wait_id] { + return wait_id != m_wait_id; }); } } @@ -221,6 +221,9 @@ TEST_F(CoreThreadingTests, ReadModel) { // to be added in frontend. sync_point.arrive_and_wait(); std::ignore = core.read_model(modelName, weightsName); + + // sync before next iteration (modification of extensions vector) + sync_point.arrive_and_wait(); }, 100, threads_num); From 04e89b0182f80ed7442c1eecfb8925c7ef9d7769 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 22 Jan 2024 12:03:24 +0100 Subject: [PATCH 119/122] [core] Remove ngraph descriptor API (#22295) * Remove ngraph/descriptor API * Remove ngraph/runtime/tensor.hpp --- src/core/include/ngraph/descriptor/input.hpp | 30 -------- src/core/include/ngraph/descriptor/output.hpp | 34 --------- src/core/include/ngraph/descriptor/tensor.hpp | 35 --------- src/core/include/ngraph/ngraph.hpp | 6 +- src/core/include/ngraph/node.hpp | 6 +- src/core/include/ngraph/node_input.hpp | 2 +- src/core/include/ngraph/node_output.hpp | 2 +- src/core/include/ngraph/runtime/tensor.hpp | 72 ------------------- src/core/include/ngraph/util.hpp | 13 ++-- .../openvino/core/descriptor/tensor.hpp | 9 +-- src/core/reference/src/op/einsum.cpp | 2 +- src/core/src/op/constant.cpp | 2 +- src/core/src/runtime/tensor.cpp | 30 -------- src/core/src/util.cpp | 4 +- 14 files changed, 18 insertions(+), 229 deletions(-) delete mode 100644 src/core/include/ngraph/descriptor/input.hpp delete mode 100644 src/core/include/ngraph/descriptor/output.hpp delete mode 100644 src/core/include/ngraph/descriptor/tensor.hpp delete mode 100644 src/core/include/ngraph/runtime/tensor.hpp delete mode 100644 src/core/src/runtime/tensor.cpp diff --git a/src/core/include/ngraph/descriptor/input.hpp b/src/core/include/ngraph/descriptor/input.hpp deleted file mode 100644 index f4966373085105..00000000000000 --- a/src/core/include/ngraph/descriptor/input.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/descriptor/tensor.hpp" -#include "openvino/core/descriptor/input.hpp" - -namespace ngraph { -using ov::Node; -namespace descriptor { - -// Describes a tensor that is an input to an op, directly or indirectly via a tuple -using ov::descriptor::Input; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/descriptor/output.hpp b/src/core/include/ngraph/descriptor/output.hpp deleted file mode 100644 index 3e26c8941acf94..00000000000000 --- a/src/core/include/ngraph/descriptor/output.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/node_output.hpp" -#include "openvino/core/descriptor/output.hpp" - -namespace ngraph { -using ov::Node; -namespace descriptor { -// Describes an output tensor of an op -using ov::descriptor::Output; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/descriptor/tensor.hpp b/src/core/include/ngraph/descriptor/tensor.hpp deleted file mode 100644 index f26db08e34349c..00000000000000 --- a/src/core/include/ngraph/descriptor/tensor.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ngraph/partial_shape.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" -#include "openvino/core/descriptor/tensor.hpp" - -namespace ngraph { -using ov::TensorLabel; -using ov::TensorLabelVector; -namespace descriptor { -/// \brief Compile-time descriptor of a first-class value that is a tensor. 
-using ov::descriptor::Tensor; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/ngraph.hpp b/src/core/include/ngraph/ngraph.hpp index ad8c918d5229ee..735835feafd182 100644 --- a/src/core/include/ngraph/ngraph.hpp +++ b/src/core/include/ngraph/ngraph.hpp @@ -50,9 +50,6 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/attribute_visitor.hpp" -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/evaluator.hpp" #include "ngraph/except.hpp" #include "ngraph/factory.hpp" @@ -65,6 +62,9 @@ #include "ngraph/specialize_function.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/core/descriptor/input.hpp" +#include "openvino/core/descriptor/output.hpp" +#include "openvino/core/descriptor/tensor.hpp" // nGraph opsets #include "ngraph/opsets/opset.hpp" diff --git a/src/core/include/ngraph/node.hpp b/src/core/include/ngraph/node.hpp index 7983a214a04767..a98e068adf3eb9 100644 --- a/src/core/include/ngraph/node.hpp +++ b/src/core/include/ngraph/node.hpp @@ -32,15 +32,15 @@ #include "ngraph/check.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/deprecated.hpp" -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/node_input.hpp" #include "ngraph/node_output.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/output_vector.hpp" #include "ngraph/strides.hpp" #include "openvino/core/any.hpp" +#include "openvino/core/descriptor/input.hpp" +#include "openvino/core/descriptor/output.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node.hpp" #include "openvino/op/util/variable.hpp" #include "openvino/op/util/variable_value.hpp" diff --git a/src/core/include/ngraph/node_input.hpp b/src/core/include/ngraph/node_input.hpp index 37c6678de74760..bac74bc0d312be 100644 --- a/src/core/include/ngraph/node_input.hpp +++ b/src/core/include/ngraph/node_input.hpp @@ -17,10 +17,10 @@ #include #include -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node_input.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/node_output.hpp b/src/core/include/ngraph/node_output.hpp index 4786b52f34784c..f8f1da44b2eff9 100644 --- a/src/core/include/ngraph/node_output.hpp +++ b/src/core/include/ngraph/node_output.hpp @@ -18,10 +18,10 @@ #include #include -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node_output.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/runtime/tensor.hpp b/src/core/include/ngraph/runtime/tensor.hpp deleted file mode 100644 index 84cd45268c9bd9..00000000000000 --- a/src/core/include/ngraph/runtime/tensor.hpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/strides.hpp" -#include "ngraph/type/element_type.hpp" - -namespace ngraph { -namespace runtime { -NGRAPH_SUPPRESS_DEPRECATED_START -class NGRAPH_API NGRAPH_API_DEPRECATED Tensor { -protected: - Tensor(const std::shared_ptr& descriptor) : m_descriptor(descriptor), m_stale(true) {} - -public: - virtual ~Tensor() {} - Tensor& operator=(const Tensor&) = default; - - /// \brief Get tensor shape - /// \return const reference to a Shape - virtual const ngraph::Shape& get_shape() const; - - /// \brief Get tensor partial shape - /// \return const reference to a PartialShape - const ngraph::PartialShape& get_partial_shape() const; - - /// \brief Get tensor element type - /// \return element::Type - virtual const element::Type& get_element_type() const; - - /// \brief Get number of elements in the tensor - /// \return number of elements in the tensor - virtual size_t get_element_count() const; - - /// \brief Get the size in bytes of the tensor - /// \return number of bytes in tensor's allocation - virtual size_t get_size_in_bytes() const; - - /// \brief Write bytes directly into the tensor - /// \param p Pointer to source of data - /// \param n Number of bytes to write, must be integral number of elements. - virtual void write(const void* p, size_t n) = 0; - - /// \brief Read bytes directly from the tensor - /// \param p Pointer to destination for data - /// \param n Number of bytes to read, must be integral number of elements. 
- virtual void read(void* p, size_t n) const = 0; - -protected: - std::shared_ptr m_descriptor; - bool m_stale; -}; -NGRAPH_SUPPRESS_DEPRECATED_END -} // namespace runtime -} // namespace ngraph diff --git a/src/core/include/ngraph/util.hpp b/src/core/include/ngraph/util.hpp index 27df0f4cdd87da..f08b70233a0c2b 100644 --- a/src/core/include/ngraph/util.hpp +++ b/src/core/include/ngraph/util.hpp @@ -32,11 +32,11 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/node.hpp" -#include "ngraph/runtime/tensor.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type_traits.hpp" #include "openvino/core/enum_mask.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { class Node; @@ -45,10 +45,7 @@ namespace ngraph { using ov::EnumMask; using ov::Node; class stopwatch; - -namespace runtime { class Tensor; -} // namespace runtime NGRAPH_SUPPRESS_DEPRECATED_START template @@ -257,14 +254,14 @@ NGRAPH_API_DEPRECATED T double_to_int(double x, double float_to_int_converter(do } // end namespace ngraph template -NGRAPH_API_DEPRECATED std::vector read_vector(std::shared_ptr tv) { +NGRAPH_API_DEPRECATED std::vector read_vector(std::shared_ptr tv) { if (ngraph::element::from() != tv->get_element_type()) { OPENVINO_THROW("read_vector type must match Tensor type"); } size_t element_count = ngraph::shape_size(tv->get_shape()); size_t size = element_count * sizeof(T); std::vector rc(element_count); - tv->read(rc.data(), size); + std::memcpy(rc.data(), tv->data(), size); return rc; } @@ -279,10 +276,10 @@ NGRAPH_API_DEPRECATED std::vector array_2_vector(typename ngraph::element_typ } NGRAPH_API_DEPRECATED -std::vector NGRAPH_API read_float_vector(std::shared_ptr tv); +std::vector NGRAPH_API read_float_vector(std::shared_ptr tv); NGRAPH_API_DEPRECATED -std::vector NGRAPH_API read_index_vector(std::shared_ptr tv); +std::vector NGRAPH_API read_index_vector(std::shared_ptr tv); NGRAPH_API NGRAPH_API_DEPRECATED diff --git a/src/core/include/openvino/core/descriptor/tensor.hpp b/src/core/include/openvino/core/descriptor/tensor.hpp index 73b34a32ea53a0..d7be44f6e025b3 100644 --- a/src/core/include/openvino/core/descriptor/tensor.hpp +++ b/src/core/include/openvino/core/descriptor/tensor.hpp @@ -18,12 +18,6 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/tensor.hpp" -namespace ngraph { -namespace runtime { -class HostTensor; -} -} // namespace ngraph - namespace ov { class Node; /// \brief Alias for label tensor. 
@@ -98,7 +92,7 @@ class OPENVINO_API Tensor { TensorLabel get_value_label() const { return m_value_label; } - /// \brief checks if lower and upper bound are set and point to the same HostTensor + /// \brief checks if lower and upper bound are set and point to the same Tensor bool has_and_set_bound() const { return m_upper_value && m_lower_value && m_upper_value.data() == m_lower_value.data(); } @@ -144,7 +138,6 @@ class OPENVINO_API Tensor { friend OPENVINO_API std::string get_ov_tensor_legacy_name(const Tensor& tensor); friend OPENVINO_API void set_ov_tensor_legacy_name(Tensor& tensor, const std::string& tensor_name); friend class pass::ReverseShapeAndTypeInfer; - friend class ngraph::runtime::HostTensor; }; OPENVINO_API diff --git a/src/core/reference/src/op/einsum.cpp b/src/core/reference/src/op/einsum.cpp index e9f003f0ce748a..113b1c249303a6 100644 --- a/src/core/reference/src/op/einsum.cpp +++ b/src/core/reference/src/op/einsum.cpp @@ -461,7 +461,7 @@ void broadcast_input(ov::TensorVector& inputs, /// template ov::Tensor build_identity(const ov::Tensor& input, const ov::TensorLabel& repeated_label_dims) { - // allocate HostTensor for building identity tensor + // allocate Tensor for building identity tensor OPENVINO_ASSERT(repeated_label_dims.size() > 1); Shape input_shape = input.get_shape(); Shape identity_shape(input_shape.size(), 1); diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp index ef3b62265daa95..98cdfd9cc4104e 100644 --- a/src/core/src/op/constant.cpp +++ b/src/core/src/op/constant.cpp @@ -12,12 +12,12 @@ #include "compare.hpp" #include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/runtime/tensor.hpp" #include "openvino/core/type/float16.hpp" #include "openvino/core/type/nf4.hpp" #include "openvino/reference/utils/type_util.hpp" #include "openvino/runtime/shared_buffer.hpp" #include "openvino/runtime/string_aligned_buffer.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace op { diff --git a/src/core/src/runtime/tensor.cpp b/src/core/src/runtime/tensor.cpp deleted file mode 100644 index f7f587d1a95e9d..00000000000000 --- a/src/core/src/runtime/tensor.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/runtime/tensor.hpp" - -using namespace ngraph; -using namespace std; - -OPENVINO_SUPPRESS_DEPRECATED_START - -const Shape& runtime::Tensor::get_shape() const { - return m_descriptor->get_shape(); -} - -const PartialShape& runtime::Tensor::get_partial_shape() const { - return m_descriptor->get_partial_shape(); -} - -const element::Type& runtime::Tensor::get_element_type() const { - return m_descriptor->get_element_type(); -} - -size_t runtime::Tensor::get_element_count() const { - return shape_size(m_descriptor->get_shape()); -} - -size_t runtime::Tensor::get_size_in_bytes() const { - return m_descriptor->size(); -} diff --git a/src/core/src/util.cpp b/src/core/src/util.cpp index 49ae1575101e7b..cf94286b04116d 100644 --- a/src/core/src/util.cpp +++ b/src/core/src/util.cpp @@ -262,7 +262,7 @@ void parse_version_string(std::string version, size_t& major, size_t& minor, siz } } // namespace ngraph -std::vector read_float_vector(std::shared_ptr tv) { +std::vector read_float_vector(std::shared_ptr tv) { std::vector float_vec; ov::element::Type element_type = tv->get_element_type(); @@ -338,7 +338,7 @@ std::vector read_float_vector(std::shared_ptr tv return float_vec; } -std::vector read_index_vector(std::shared_ptr tv) { +std::vector 
read_index_vector(std::shared_ptr tv) { std::vector index_vec; ov::element::Type element_type = tv->get_element_type(); From d04a53ae376765e5f335e1aa270d9b2ab19a99ec Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Mon, 22 Jan 2024 15:06:41 +0400 Subject: [PATCH 120/122] [Snippets] Added RegType as part of PortDescriptor (#22161) --- .../snippets/include/snippets/emitter.hpp | 22 +- .../snippets/include/snippets/generator.hpp | 13 +- .../include/snippets/lowered/expression.hpp | 2 +- .../lowered/pass/assign_registers.hpp | 6 +- .../snippets/lowered/pass/init_loops.hpp | 1 + .../snippets/lowered/pass/insert_loops.hpp | 1 - .../snippets/lowered/port_descriptor.hpp | 9 +- src/common/snippets/src/emitter.cpp | 29 +++ src/common/snippets/src/generator.cpp | 27 +- .../snippets/src/lowered/expression.cpp | 2 +- src/common/snippets/src/lowered/linear_ir.cpp | 4 +- .../src/lowered/pass/assign_registers.cpp | 234 +++++++++--------- .../src/lowered/pass/cleanup_loop_offsets.cpp | 6 + .../snippets/src/lowered/pass/init_loops.cpp | 18 ++ .../src/lowered/pass/insert_loops.cpp | 29 --- .../src/lowered/pass/insert_tail_loop.cpp | 6 +- .../snippets/src/lowered/port_descriptor.cpp | 6 +- .../snippets/src/op/serialization_node.cpp | 27 +- .../snippets/tests/include/lowering_utils.hpp | 2 +- .../emitters/snippets/x64/cpu_generator.cpp | 8 +- .../emitters/snippets/x64/cpu_generator.hpp | 2 +- .../snippets/x64/jit_container_emitter.cpp | 49 ++-- .../snippets/x64/jit_kernel_emitter.cpp | 23 +- .../snippets/x64/jit_loop_emitters.cpp | 25 +- .../snippets/x64/jit_loop_emitters.hpp | 1 + 25 files changed, 306 insertions(+), 246 deletions(-) create mode 100644 src/common/snippets/src/emitter.cpp diff --git a/src/common/snippets/include/snippets/emitter.hpp b/src/common/snippets/include/snippets/emitter.hpp index a2aa4923c2eef4..f61e94f521a941 100644 --- a/src/common/snippets/include/snippets/emitter.hpp +++ b/src/common/snippets/include/snippets/emitter.hpp @@ -12,7 +12,27 @@ namespace ov { namespace snippets { -using RegInfo = std::pair, std::vector>; +/** + * @interface RegType + * @brief Register type of input and output operations + */ +enum class RegType { gpr, vec }; +/** + * @interface Reg + * @brief Register representation: type of register and index + */ +struct Reg { + Reg(RegType type_, size_t idx_) : type(type_), idx(idx_) {} + + RegType type = RegType::gpr; + size_t idx = 0; + + friend bool operator==(const Reg& lhs, const Reg& rhs); + friend bool operator!=(const Reg& lhs, const Reg& rhs); +}; +using RegInfo = std::pair, std::vector>; + +std::string regTypeToStr(const RegType& type); /** * @interface Emitter diff --git a/src/common/snippets/include/snippets/generator.hpp b/src/common/snippets/include/snippets/generator.hpp index 1647ccf1e771a0..a4541551553e19 100644 --- a/src/common/snippets/include/snippets/generator.hpp +++ b/src/common/snippets/include/snippets/generator.hpp @@ -97,21 +97,12 @@ class Generator { */ std::shared_ptr get_target_machine() const; - /** - * @interface opRegType - * @brief Register type of operations - * Note that currently there are 4 types of ops: - * gpr->gpr: (Parameter, Result, LoopBegin, LoopEnd etc) - * gpr->vec: or vec->gpr Load/LoadConvert, Store/StoreConvert, BroadcastLoad etc. - * vec->vec: all other "normal" operations that perform calculations on vector registers: Add, BroadcastMove, Power, etc. 
- */ - enum opRegType {gpr2gpr, gpr2vec, vec2gpr, vec2vec}; /** * @brief gets register type by op type * TODO: Should be static attribute of emitters * @return register type */ - opRegType get_op_reg_type(const std::shared_ptr& op) const; + virtual RegType get_op_out_reg_type(const ov::Output& out) const; virtual std::shared_ptr clone() const = 0; @@ -120,7 +111,7 @@ class Generator { * @brief gets register type by specific plugin op type * @return register type */ - virtual opRegType get_specific_op_reg_type(const std::shared_ptr& op) const; + virtual RegType get_specific_op_out_reg_type(const ov::Output& out) const; /** * @brief returns true if an emitter can use precompiled kernel. * @return bool diff --git a/src/common/snippets/include/snippets/lowered/expression.hpp b/src/common/snippets/include/snippets/lowered/expression.hpp index a8a87e2666f1df..0b619370ab47a5 100644 --- a/src/common/snippets/include/snippets/lowered/expression.hpp +++ b/src/common/snippets/include/snippets/lowered/expression.hpp @@ -32,7 +32,7 @@ class Expression : public std::enable_shared_from_this { std::shared_ptr get_emitter() const; RegInfo get_reg_info() const; - void set_reg_info(RegInfo rinfo); + void set_reg_info(const RegInfo& rinfo); const PortConnectorPtr& get_input_port_connector(size_t i) const; const PortConnectorPtr& get_output_port_connector(size_t i) const; diff --git a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp index 4425b4e59d8f77..132aaa935ceb70 100644 --- a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp @@ -21,11 +21,13 @@ namespace pass { class AssignRegisters : public Pass { public: OPENVINO_RTTI("AssignRegisters", "Pass") - explicit AssignRegisters(const std::function& op)>& mapper) : m_reg_type_mapper(mapper) {} + explicit AssignRegisters(const std::function& out)>& mapper) : m_reg_type_mapper(mapper) {} bool run(LinearIR& linear_ir) override; private: - std::function& op)> m_reg_type_mapper; + void set_reg_types(LinearIR& linear_ir); + + std::function& out)> m_reg_type_mapper; static constexpr size_t reg_count = 16lu; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp index e98c6caaafa49c..75fc3268e0b6c5 100644 --- a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp @@ -25,6 +25,7 @@ class InitLoops : public Pass { bool run(LinearIR& linear_ir) override; private: + static void init_is_incremented(const LinearIR::LoopManager::LoopInfoPtr& loop_info); static void init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& loop_info); static void init_finalization_offsets(const LinearIR::LoopManager::LoopInfoPtr& loop_info); static void init_element_type_sizes(const LinearIR::LoopManager::LoopInfoPtr& loop_info); diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp index 7b39182c6feda1..bcd5c9231e7441 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp @@ -25,7 +25,6 @@ class InsertLoops : public Pass { bool run(LinearIR& linear_ir) override; private: static void insertion(LinearIR& linear_ir, const 
LinearIR::LoopManagerPtr& loop_manager, size_t loop_id, bool has_outer_loop); - static void filter_ports(std::vector& loop_entries, std::vector& loop_exits); }; } // namespace pass diff --git a/src/common/snippets/include/snippets/lowered/port_descriptor.hpp b/src/common/snippets/include/snippets/lowered/port_descriptor.hpp index 2c74867d8436d6..dfdebbcafed47a 100644 --- a/src/common/snippets/include/snippets/lowered/port_descriptor.hpp +++ b/src/common/snippets/include/snippets/lowered/port_descriptor.hpp @@ -7,6 +7,7 @@ #include "openvino/core/node.hpp" #include "openvino/core/attribute_visitor.hpp" #include "snippets/shape_types.hpp" +#include "snippets/emitter.hpp" namespace ov { @@ -41,12 +42,14 @@ class PortDescriptor { const VectorDims& get_shape() const {return m_tensor_shape;} const VectorDims& get_subtensor() const {return m_subtensor_shape;} const std::vector& get_layout() const {return m_layout;} - size_t get_reg() const { return m_reg; } + const Reg& get_reg() const { return m_reg; } void set_shape(const VectorDims& tensor) { m_tensor_shape = tensor; } void set_layout(const std::vector& layout) { m_layout = layout; } void set_subtensor(const VectorDims& subtensor) { m_subtensor_shape = subtensor; } - void set_reg(size_t reg) { m_reg = reg; } + void set_reg(Reg reg) { m_reg = std::move(reg); } + void set_reg_type(RegType type) { m_reg.type = type; } + void set_reg_idx(size_t idx) { m_reg.idx = idx; } std::string serialize() const; bool empty() const { return m_layout.empty() && m_subtensor_shape.empty();} @@ -64,7 +67,7 @@ class PortDescriptor { /// \brief Minimal tensor size that could be processed in one call VectorDims m_subtensor_shape{}; /// \brief The corresponding abstract/physical register - size_t m_reg = 0; + Reg m_reg { RegType::gpr, 0 }; /// Notes: /// - `m_tensor_shape` is dense shape which is controlled by expression outputs. 
diff --git a/src/common/snippets/src/emitter.cpp b/src/common/snippets/src/emitter.cpp new file mode 100644 index 00000000000000..45f6657bfe3542 --- /dev/null +++ b/src/common/snippets/src/emitter.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/emitter.hpp" + +namespace ov { +namespace snippets { + +bool operator==(const Reg& lhs, const Reg& rhs) { + return lhs.type == rhs.type && lhs.idx == rhs.idx; +} +bool operator!=(const Reg& lhs, const Reg& rhs) { + return !(lhs == rhs); +} + +std::string regTypeToStr(const RegType& type) { + switch (type) { + case RegType::vec: + return "vec"; + case RegType::gpr: + return "gpr"; + default: + OPENVINO_THROW("Unexpected RegType"); + } +} + +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp index 5c4848c2535358..96972fce825c0c 100644 --- a/src/common/snippets/src/generator.cpp +++ b/src/common/snippets/src/generator.cpp @@ -23,8 +23,8 @@ void Generator::generate(lowered::LinearIR& linear_ir, LoweringResult& result, c if (!target->is_supported()) OPENVINO_THROW("unsupported architecture for code generation"); - std::function& op)> reg_type_mapper = [&](const std::shared_ptr& op) -> opRegType { - return get_op_reg_type(op); + std::function& out)> reg_type_mapper = [&](const ov::Output& out) -> RegType { + return get_op_out_reg_type(out); }; lowered::pass::PassPipeline lowered_pipeline; // Note: the order of all passes in this pipeline must not be changed since they have hard dependencies @@ -70,7 +70,8 @@ std::shared_ptr Generator::get_target_machine() const { return target; } -Generator::opRegType Generator::get_op_reg_type(const std::shared_ptr& op) const { +RegType Generator::get_op_out_reg_type(const ov::Output& out) const { + const auto op = out.get_node_shared_ptr(); if (std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || @@ -78,19 +79,17 @@ Generator::opRegType Generator::get_op_reg_type(const std::shared_ptr& op) std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || - std::dynamic_pointer_cast(op) + std::dynamic_pointer_cast(op) || + std::dynamic_pointer_cast(op) #ifdef SNIPPETS_DEBUG_CAPS || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) #endif ) - return gpr2gpr; + return RegType::gpr; else if (std::dynamic_pointer_cast(op) || - std::dynamic_pointer_cast(op)) - return gpr2vec; - else if (std::dynamic_pointer_cast(op)) - return vec2gpr; - else if (ov::op::util::is_unary_elementwise_arithmetic(op) || + std::dynamic_pointer_cast(op) || + ov::op::util::is_unary_elementwise_arithmetic(op) || ov::op::util::is_binary_elementwise_arithmetic(op) || ov::op::util::is_binary_elementwise_comparison(op) || ov::op::util::is_binary_elementwise_logical(op) || @@ -104,13 +103,13 @@ Generator::opRegType Generator::get_op_reg_type(const std::shared_ptr& op) std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) - return vec2vec; + return RegType::vec; else - return get_specific_op_reg_type(op); + return get_specific_op_out_reg_type(op); } -Generator::opRegType Generator::get_specific_op_reg_type(const std::shared_ptr& op) const { - OPENVINO_THROW("Register type of the operation " + std::string(op->get_type_name()) + " isn't determined!"); +RegType Generator::get_specific_op_out_reg_type(const ov::Output& out) const { + OPENVINO_THROW("Register type 
of the operation " + std::string(out.get_node()->get_type_name()) + " isn't determined!");
}

}// namespace snippets
diff --git a/src/common/snippets/src/lowered/expression.cpp b/src/common/snippets/src/lowered/expression.cpp
index dedfa0a75da291..f33f3aeef95fc3 100644
--- a/src/common/snippets/src/lowered/expression.cpp
+++ b/src/common/snippets/src/lowered/expression.cpp
@@ -82,7 +82,7 @@ RegInfo Expression::get_reg_info() const {
     return reg_info;
 }
 
-void Expression::set_reg_info(RegInfo rinfo) {
+void Expression::set_reg_info(const RegInfo& rinfo) {
     const auto& in = rinfo.first;
     const auto& out = rinfo.second;
     OPENVINO_ASSERT(m_input_port_descriptors.size() == in.size(), "Incorrect count of input physical registers");
diff --git a/src/common/snippets/src/lowered/linear_ir.cpp b/src/common/snippets/src/lowered/linear_ir.cpp
index a41a3b09f4c64f..a29d8d2045a6f1 100644
--- a/src/common/snippets/src/lowered/linear_ir.cpp
+++ b/src/common/snippets/src/lowered/linear_ir.cpp
@@ -133,10 +133,10 @@ void LinearIR::debug_print(bool tds_as_pointers) const {
     auto print_rinfo = [](const RegInfo& rinfo) {
         std::cerr << " : {";
         for (auto i : rinfo.first)
-            std::cerr << i << " ";
+            std::cerr << regTypeToStr(i.type) << "[" << i.idx << "] ";
         std::cerr << " => ";
         for (auto i : rinfo.second)
-            std::cerr << i << " ";
+            std::cerr << regTypeToStr(i.type) << "[" << i.idx << "] ";
         std::cerr << "}";
     };
     std::map td2int;
diff --git a/src/common/snippets/src/lowered/pass/assign_registers.cpp b/src/common/snippets/src/lowered/pass/assign_registers.cpp
index ee4ca207755896..3fe8b612d78c2f 100644
--- a/src/common/snippets/src/lowered/pass/assign_registers.cpp
+++ b/src/common/snippets/src/lowered/pass/assign_registers.cpp
@@ -16,26 +16,56 @@ namespace snippets {
 namespace lowered {
 namespace pass {
 
+void AssignRegisters::set_reg_types(LinearIR& linear_ir) {
+    for (const auto& expr : linear_ir) {
+        const auto op = expr->get_node();
+        if (ov::is_type(op) ||
+            ov::is_type(op)
+#ifdef SNIPPETS_DEBUG_CAPS
+            || ov::is_type(op)
+            || ov::is_type(op)
+#endif
+            )
+            continue;
+
+        OPENVINO_ASSERT(expr->get_output_count() == op->get_output_size(), "Incorrect count of output port descriptors!");
+        for (size_t i = 0; i < expr->get_output_count(); ++i) {
+            const auto reg_type = m_reg_type_mapper(op->output(i));
+            expr->get_output_port_descriptor(i)->set_reg_type(reg_type);
+            // propagate to consumers
+            for (const auto& consumer : expr->get_output_port_connector(i)->get_consumers()) {
+                consumer.get_descriptor_ptr()->set_reg_type(reg_type);
+            }
+        }
+    }
+}
+
 bool AssignRegisters::run(LinearIR& linear_ir) {
     OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::AssignRegisters")
     using Reg = size_t;
     using tensor = PortConnectorPtr;
-    const auto& expressions = linear_ir.get_ops();
 
-    std::vector> typed_ops;
-    NodeVector ops;
+    set_reg_types(linear_ir);
+    const auto& exprs = linear_ir.get_ops();
+    const auto& io_exprs = linear_ir.get_IO_ops();
+    Reg num_expressions = exprs.size();
     Reg num_parameters = 0;
     Reg num_results = 0;
-    Reg num_expressions = 0;
-    for (auto& expr : expressions) {
-        auto op = expr->get_node();
-        auto reg_type = m_reg_type_mapper(op);
-        typed_ops.emplace_back(reg_type, expr);
-        num_parameters += is_type(op);
-        num_results += is_type(op);
-        ops.push_back(op);
-        num_expressions++;
+    for (const auto& expr : io_exprs) {
+        switch (expr->get_type()) {
+            case snippets::lowered::IOExpression::io_type::INPUT: {
+                num_parameters++;
+                break;
+            }
+            case snippets::lowered::IOExpression::io_type::OUTPUT: {
num_results++; + break; + } default : { + OPENVINO_THROW("Kernel detected unsupported io_type"); + } + } } + size_t counter_vec = 0; size_t counter_gpr = 0; std::map regs_vec, regs_gpr; @@ -43,7 +73,7 @@ bool AssignRegisters::run(LinearIR& linear_ir) { std::map manually_assigned_gprs, manually_assigned_vecs; const auto IS_MANUALLY_ALLOCATED_REG = SIZE_MAX; auto accumulator_reg = 0lu; - for (const auto& expr : expressions) { + for (const auto& expr : exprs) { auto op = expr->get_node(); if (const auto io_expr = std::dynamic_pointer_cast(expr)) { if (io_expr->get_type() == IOExpression::io_type::INPUT) { @@ -80,67 +110,51 @@ bool AssignRegisters::run(LinearIR& linear_ir) { for (const auto& tensor : input_expr_input_tensors) { const auto parent_expr = tensor->get_source().get_expr(); if (ov::is_type(parent_expr->get_node())) { - manually_assigned_vecs[tensor] = static_cast(accumulator_reg); if (ov::is_type(parent_expr->get_input_port_connector(0)->get_source().get_expr()->get_node())) { + manually_assigned_vecs[tensor] = static_cast(accumulator_reg); manually_assigned_vecs[parent_expr->get_input_port_connector(0)] = static_cast(accumulator_reg); - } + } } } - const auto& output_tensor = expr->get_output_port_connector(0); manually_assigned_vecs[input_tensor] = static_cast(accumulator_reg); - manually_assigned_vecs[output_tensor] = static_cast(accumulator_reg); - for (const auto& child_expr_input : output_tensor->get_consumers()) { - if (ov::is_type(child_expr_input.get_expr()->get_node())) { - manually_assigned_vecs[child_expr_input.get_expr()->get_output_port_connector(0)] = - static_cast(accumulator_reg); - } - } - - // TODO: Fix via common pipeline using LoopEnd: - // All operations `outside loop` after Horizon ops should have the same register to avoid using it in the next Loop - const auto& current_loops_ids = expr->get_loop_ids(); - auto next_expr = output_tensor->get_consumers().begin()->get_expr(); - while (next_expr->get_loop_ids() == current_loops_ids) { - manually_assigned_vecs[next_expr->get_output_port_connector(0)] = - static_cast(accumulator_reg); - next_expr = next_expr->get_output_port_connector(0)->get_consumers().begin()->get_expr(); - } - accumulator_reg++; } } // Note: have to specify default capture "=" due to MSVC bug (it doesn't capture const expressions implicitly) // Otherwise WIN build fails with "IS_MANUALLY_ALLOCATED_REG cannot be implicitly captured because no default capture mode has been specified" // the same problem with all the other lambdas in this file - auto enumerate_out_tensors = [=] (const ExpressionPtr& expr, - decltype(regs_vec)& reg_map, - const std::map& manually_assigned_regs, - size_t& counter) { - for (const auto& out_tensor : expr->get_output_port_connectors()) { - // Note that some ops might have identical input&output tensors (Result and Tile* for ex.) - // so we have to check that the tensor has not been enumerated already - if (reg_map.count(out_tensor) == 0) { - reg_map[out_tensor] = manually_assigned_regs.count(out_tensor) == 0 ? counter++ : IS_MANUALLY_ALLOCATED_REG; - } + auto enumerate_out_tensor = [=] (const tensor& out_tensor, + decltype(regs_vec)& reg_map, + const std::map& manually_assigned_regs, + size_t& counter) { + // Note that some ops might have identical input&output tensors (Result and Tile* for ex.) + // so we have to check that the tensor has not been enumerated already + if (reg_map.count(out_tensor) == 0) { + reg_map[out_tensor] = manually_assigned_regs.count(out_tensor) == 0 ? 
counter++ : IS_MANUALLY_ALLOCATED_REG; } }; - for (const auto& t_op : typed_ops) { - switch (t_op.first) { - case Generator::opRegType::vec2vec: - case Generator::opRegType::gpr2vec: - enumerate_out_tensors(t_op.second, regs_vec, manually_assigned_vecs, counter_vec); - break; - case Generator::opRegType::gpr2gpr: - case Generator::opRegType::vec2gpr: - enumerate_out_tensors(t_op.second, regs_gpr, manually_assigned_gprs, counter_gpr); - break; + for (const auto& expr : exprs) { + for (size_t i = 0; i < expr->get_output_count(); ++i) { + const auto& out = expr->get_output_port(i); + switch (out.get_descriptor_ptr()->get_reg().type) { + case RegType::vec: + enumerate_out_tensor(out.get_port_connector_ptr(), regs_vec, manually_assigned_vecs, counter_vec); + break; + case RegType::gpr: + enumerate_out_tensor(out.get_port_connector_ptr(), regs_gpr, manually_assigned_gprs, counter_gpr); + break; + default: + OPENVINO_THROW("Unsupported reg type detected"); + } } } // todo: make one for gpr and one for vector - std::vector> used_gpr(num_expressions, std::set()); // used = used as an input - std::vector> defined_gpr(num_expressions, std::set()); // defined = used as output - std::vector> used_vec(num_expressions, std::set()); - std::vector> defined_vec(num_expressions, std::set()); + std::vector> used_gpr, used_vec; // used = used as an input + std::vector> defined_gpr, defined_vec; // defined = used as output + used_gpr.reserve(num_expressions); + used_vec.reserve(num_expressions); + defined_gpr.reserve(num_expressions); + defined_vec.reserve(num_expressions); auto tensor2reg = [=] (const std::vector& tensors, const std::map& reg_map) { std::set result; @@ -153,44 +167,52 @@ bool AssignRegisters::run(LinearIR& linear_ir) { } return result; }; - for (size_t i = 0; i < typed_ops.size(); i++) { - const auto& t_op = typed_ops[i]; - std::vector used_tensors, defined_tensors; - for (const auto& in : t_op.second->get_input_port_connectors()) - used_tensors.push_back(in); - for (const auto& out : t_op.second->get_output_port_connectors()) - defined_tensors.push_back(out); - switch (t_op.first) { - case Generator::opRegType::vec2vec: - used_vec[i] = tensor2reg(used_tensors, regs_vec); - defined_vec[i] = tensor2reg(defined_tensors, regs_vec); - break; - case Generator::opRegType::gpr2gpr: - used_gpr[i] = tensor2reg(used_tensors, regs_gpr); - defined_gpr[i] = tensor2reg(defined_tensors, regs_gpr); - break; - case Generator::opRegType::gpr2vec: - used_gpr[i] = tensor2reg(used_tensors, regs_gpr); - defined_vec[i] = tensor2reg(defined_tensors, regs_vec); - break; - case Generator::opRegType::vec2gpr: - used_vec[i] = tensor2reg(used_tensors, regs_vec); - defined_gpr[i] = tensor2reg(defined_tensors, regs_gpr); - break; + + for (const auto& expr : exprs) { + std::vector used_gpr_tensors, used_vec_tensors, defined_gpr_tensors, defined_vec_tensors; + for (size_t i = 0; i < expr->get_input_count(); ++i) { + const auto& in = expr->get_input_port(i); + switch (in.get_descriptor_ptr()->get_reg().type) { + case RegType::vec: + used_vec_tensors.push_back(in.get_port_connector_ptr()); + break; + case RegType::gpr: + used_gpr_tensors.push_back(in.get_port_connector_ptr()); + break; + default: + OPENVINO_THROW("Unsupported reg type detected"); + } } + for (size_t i = 0; i < expr->get_output_count(); ++i) { + const auto& out = expr->get_output_port(i); + switch (out.get_descriptor_ptr()->get_reg().type) { + case RegType::vec: + defined_vec_tensors.push_back(out.get_port_connector_ptr()); + break; + case RegType::gpr: + 
defined_gpr_tensors.push_back(out.get_port_connector_ptr()); + break; + default: + OPENVINO_THROW("Unsupported reg type detected"); + } + } + used_vec.emplace_back(tensor2reg(used_vec_tensors, regs_vec)); + used_gpr.emplace_back(tensor2reg(used_gpr_tensors, regs_gpr)); + defined_vec.emplace_back(tensor2reg(defined_vec_tensors, regs_vec)); + defined_gpr.emplace_back(tensor2reg(defined_gpr_tensors, regs_gpr)); } // define life intervals // liveOut[i] - regs that are live on exit from i-th (topologically ordered) operation // liveIn[i] - regs that are live on entering the i-th (topologically ordered) operation - std::vector> life_in_vec(std::move(used_vec)); - std::vector> life_out_vec(typed_ops.size(), std::set()); - std::vector> life_in_gpr(std::move(used_gpr)); - std::vector> life_out_gpr(typed_ops.size(), std::set()); + std::vector> life_in_vec(std::move(used_vec)), + life_in_gpr(std::move(used_gpr)); + std::vector> life_out_vec(num_expressions, std::set()), + life_out_gpr(num_expressions, std::set()); // todo: this part if O(N*N), so it's slow for large subgraphs. Can we simplify it? At least add an early stopping criteria - for (size_t i = 0; i < typed_ops.size(); i++) { - for (size_t n = 0; n < typed_ops.size(); n++) { + for (size_t i = 0; i < num_expressions; i++) { + for (size_t n = 0; n < num_expressions; n++) { // Regs that are live on entering the operation = regs used by the op + (all other regs alive - regs defined by the op) // copy regs from lifeOut to lifeIn while ignoring regs in def std::set_difference(life_out_gpr[n].begin(), life_out_gpr[n].end(), @@ -200,9 +222,9 @@ bool AssignRegisters::run(LinearIR& linear_ir) { defined_vec[n].begin(), defined_vec[n].end(), std::inserter(life_in_vec[n], life_in_vec[n].begin())); } - for (size_t n = 0; n < typed_ops.size(); n++) { - const auto& expr = typed_ops[n].second; - if (is_type(expr->get_node()) || is_type(expr->get_node())) + size_t n = 0; + for (const auto& expr : exprs) { + if (is_type(expr->get_node())) continue; for (const auto& out : expr->get_output_port_connectors()) { for (const auto& child_expr_input : out->get_consumers()) { @@ -214,20 +236,13 @@ bool AssignRegisters::run(LinearIR& linear_ir) { child_it++; k++; } - if (k == typed_ops.size()) + if (k == num_expressions) OPENVINO_THROW("assign registers can't find target op in the body"); - switch (typed_ops[k].first) { - case Generator::opRegType::vec2vec: - case Generator::opRegType::vec2gpr: - life_out_vec[n].insert(life_in_vec[k].begin(), life_in_vec[k].end()); - break; - case Generator::opRegType::gpr2gpr: - case Generator::opRegType::gpr2vec: - life_out_gpr[n].insert(life_in_gpr[k].begin(), life_in_gpr[k].end()); - break; - } + life_out_vec[n].insert(life_in_vec[k].begin(), life_in_vec[k].end()); + life_out_gpr[n].insert(life_in_gpr[k].begin(), life_in_gpr[k].end()); } } + n++; } } struct by_starting { @@ -257,7 +272,7 @@ bool AssignRegisters::run(LinearIR& linear_ir) { } return i; }; - for (int i = 0; i < static_cast(typed_ops.size()); i++) { + for (int i = 0; i < static_cast(num_expressions); i++) { for (const auto& def : defined_vec[i]) live_intervals_vec[std::make_pair(i, find_last_use(life_in_vec, static_cast(def)))] = def; for (const auto& def : defined_gpr[i]) @@ -329,16 +344,13 @@ bool AssignRegisters::run(LinearIR& linear_ir) { register_assigned_regs(regs_vec, unique2reused_map_vec); register_assigned_regs(regs_gpr, unique2reused_map_gpr); - for (auto& t_op : typed_ops) { - RegInfo rinfo; - const auto& expr = t_op.second; - for (const auto& in : 
expr->get_input_port_connectors()) { - rinfo.first.push_back(assigned_regs[in]); + for (const auto& expr : exprs) { + for (size_t i = 0; i < expr->get_input_count(); ++i) { + expr->get_input_port_descriptor(i)->set_reg_idx(assigned_regs[expr->get_input_port_connector(i)]); } - for (const auto& out : expr->get_output_port_connectors()) { - rinfo.second.push_back(assigned_regs[out]); + for (size_t i = 0; i < expr->get_output_count(); ++i) { + expr->get_output_port_descriptor(i)->set_reg_idx(assigned_regs[expr->get_output_port_connector(i)]); } - t_op.second->set_reg_info(rinfo); } return false; } diff --git a/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp b/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp index 79c9a115718c1f..5e5cc43b13c835 100644 --- a/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp +++ b/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp @@ -34,6 +34,7 @@ bool CleanupLoopOffsets::run(LinearIR& linear_ir) { is_modified = true; } if (auto outer_loop_end = as_type_ptr(next_node)) { + const auto& is_incremented = loop_end->get_is_incremented(); auto fin_offsets = loop_end->get_finalization_offsets(); std::unordered_map per_port_connector_offset; const auto& loop_inputs = expr_it->get()->get_input_port_connectors(); @@ -41,12 +42,17 @@ bool CleanupLoopOffsets::run(LinearIR& linear_ir) { per_port_connector_offset[loop_inputs[i]] = i; const auto outer_increment = static_cast(outer_loop_end->get_increment()); + const auto& outer_is_incremented = outer_loop_end->get_is_incremented(); auto outer_ptr_increments = outer_loop_end->get_ptr_increments(); const auto& outer_loop_inputs = next_expr_it->get()->get_input_port_connectors(); for (size_t i = 0; i < outer_ptr_increments.size(); i++) { + if (!outer_is_incremented[i]) + continue; const auto& managed_connector = outer_loop_inputs[i]; const auto& found = per_port_connector_offset.find(managed_connector); if (found != per_port_connector_offset.end()) { + if (!is_incremented[found->second]) + continue; // Since data ptr is incremented on [ptr_increment x increment], // we should guarantee proportionality of ptr shifts. 
// If the data ptr can't be proportionally shifted, the optimization is not applied diff --git a/src/common/snippets/src/lowered/pass/init_loops.cpp b/src/common/snippets/src/lowered/pass/init_loops.cpp index 0d6757eed88e4c..8272b7c3de2a81 100644 --- a/src/common/snippets/src/lowered/pass/init_loops.cpp +++ b/src/common/snippets/src/lowered/pass/init_loops.cpp @@ -6,6 +6,7 @@ #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/loop_manager.hpp" +#include "snippets/op/memory_access.hpp" #include "snippets/itt.hpp" namespace ov { @@ -37,6 +38,22 @@ int64_t get_output_stride(size_t dim, const VectorDims& shape) { InitLoops::InitLoops() : Pass() {} +void InitLoops::init_is_incremented(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = loop_info->get_exit_points(); + auto update = [](std::vector& ports) { + for (auto& port : ports) { + if (!ov::is_type(port.expr_port->get_expr()->get_node())) { + port.is_incremented = false; + } + } + }; + update(loop_entries); + update(loop_exits); + loop_info->set_entry_points(loop_entries); + loop_info->set_exit_points(loop_exits); +} + void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { const auto work_amount = loop_info->get_work_amount(); auto loop_entries = loop_info->get_entry_points(); @@ -115,6 +132,7 @@ bool InitLoops::run(LinearIR& linear_ir) { const auto& loops = loop_manager->get_map(); for (const auto& loop : loops) { const auto loop_info = loop.second; + init_is_incremented(loop_info); init_ptr_increments(loop_info); init_finalization_offsets(loop_info); init_element_type_sizes(loop_info); diff --git a/src/common/snippets/src/lowered/pass/insert_loops.cpp b/src/common/snippets/src/lowered/pass/insert_loops.cpp index 9e52387333a02d..58ecf6310e2de1 100644 --- a/src/common/snippets/src/lowered/pass/insert_loops.cpp +++ b/src/common/snippets/src/lowered/pass/insert_loops.cpp @@ -27,32 +27,6 @@ std::vector get_outer_loop_ids(const ExpressionPtr& expr, size_t loop_id InsertLoops::InsertLoops() : Pass() {} -void InsertLoops::filter_ports(std::vector& loop_entries, std::vector& loop_exits) { - std::vector new_loop_entries; - std::vector new_loop_exits; - new_loop_entries.reserve(loop_entries.size()); - new_loop_exits.reserve(loop_exits.size()); - - for (const auto& loop_entry_point : loop_entries) { - const auto& expr = loop_entry_point.expr_port->get_expr(); - const auto ma = ov::as_type_ptr(expr->get_node()); - if (ma && ma->is_memory_access_input_port(loop_entry_point.expr_port->get_index())) { - new_loop_entries.push_back(loop_entry_point); - } - } - - for (const auto& loop_exit_point : loop_exits) { - const auto& expr = loop_exit_point.expr_port->get_expr(); - const auto ma = ov::as_type_ptr(expr->get_node()); - if (ma && ma->is_memory_access_output_port(loop_exit_point.expr_port->get_index())) { - new_loop_exits.push_back(loop_exit_point); - } - } - - loop_entries = new_loop_entries; - loop_exits = new_loop_exits; -} - void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, size_t loop_id, bool has_outer_loop) { const auto loop_info = loop_manager->get_loop_info(loop_id); auto loop_entries = loop_info->get_entry_points(); @@ -63,9 +37,6 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& LinearIR::constExprIt loop_begin_pos, loop_end_pos; loop_manager->get_loop_bounds(linear_ir, loop_id, loop_begin_pos, loop_end_pos); - // Remove non MemoryAccess 
ports since Loop can have only GPR inputs - filter_ports(loop_entries, loop_exits); - const auto in_out_num = loop_entries.size() + loop_exits.size(); std::vector is_incremented; std::vector ptr_increments, finalization_offsets, io_data_sizes; diff --git a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp index 281c3eb281481b..2727d7efbd8f83 100644 --- a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp +++ b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp @@ -24,7 +24,8 @@ void InsertTailLoop::propagate_updated_subtensor_through_loop(const LinearIR& li // First step: set new dim value to the corresponding entry_points' dimensions if (new_dim_value != existing_subtensor_value) { for (const auto& port : loop_info->get_entry_points()) { - if (port.is_incremented) { + const auto& reg_type = port.expr_port->get_descriptor_ptr()->get_reg().type; + if ((port.is_incremented && reg_type == RegType::gpr) || (reg_type == RegType::vec)) { const auto& expr = port.expr_port->get_expr(); const auto node = expr->get_node(); auto desc = port.expr_port->get_descriptor_ptr(); @@ -48,7 +49,8 @@ void InsertTailLoop::propagate_updated_subtensor_through_loop(const LinearIR& li } auto update_only_dim_idx_with_subtensor_value = [&](const LinearIR::LoopManager::LoopPort& port) { - if (port.is_incremented) { + const auto& reg_type = port.expr_port->get_descriptor_ptr()->get_reg().type; + if ((port.is_incremented && reg_type == RegType::gpr) || (reg_type == RegType::vec)) { auto desc = port.expr_port->get_descriptor_ptr(); const auto expr = port.expr_port->get_expr(); const auto parent_desc = expr->get_input_port_connector(port.expr_port->get_index())->get_source().get_descriptor_ptr(); diff --git a/src/common/snippets/src/lowered/port_descriptor.cpp b/src/common/snippets/src/lowered/port_descriptor.cpp index e8c4bdd0626b47..63269fa013b1c8 100644 --- a/src/common/snippets/src/lowered/port_descriptor.cpp +++ b/src/common/snippets/src/lowered/port_descriptor.cpp @@ -43,7 +43,7 @@ PortDescriptorPtr PortDescriptor::clone() const { return desc; } -std::string PortDescriptor::serialize() const { +std::string PortDescriptor::serialize() const { std::stringstream ss; ss << m_tensor_shape.size() << " "; for (auto val : m_tensor_shape) @@ -54,12 +54,14 @@ std::string PortDescriptor::serialize() const { ss << m_layout.size() << " "; for (auto val : m_layout) ss << val << " "; + ss << regTypeToStr(m_reg.type) << "["<< m_reg.idx << "]"; return ss.str(); } bool operator==(const PortDescriptor& lhs, const PortDescriptor& rhs) { return lhs.m_tensor_shape == rhs.m_tensor_shape && lhs.m_layout == rhs.m_layout && - lhs.m_subtensor_shape == rhs.m_subtensor_shape; + lhs.m_subtensor_shape == rhs.m_subtensor_shape && + lhs.m_reg == rhs.m_reg; } void PortDescriptorUtils::init_default(std::vector& in_descs, diff --git a/src/common/snippets/src/op/serialization_node.cpp b/src/common/snippets/src/op/serialization_node.cpp index a91c63beb9402b..dde0f0ae6aa8e9 100644 --- a/src/common/snippets/src/op/serialization_node.cpp +++ b/src/common/snippets/src/op/serialization_node.cpp @@ -40,27 +40,38 @@ std::shared_ptr SerializationNode::clone_with_new_inputs(const OutputVecto } bool SerializationNode::visit_attributes(AttributeVisitor &visitor) { + std::vector in_regs, out_regs; + std::vector in_reg_types, out_reg_types; std::vector>> shapes; for (size_t i = 0; i < m_expr->get_input_count(); i++) { - const auto &shape = 
m_expr->get_input_port_descriptor(i)->get_shape(); + const auto& desc = m_expr->get_input_port_descriptor(i); + const auto &shape = desc->get_shape(); if (!shape.empty()) shapes.emplace_back("in_shape_" + std::to_string(i), shape); + in_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); + in_regs.emplace_back(desc->get_reg().idx); } for (size_t i = 0; i < m_expr->get_output_count(); i++) { - const auto &shape = m_expr->get_output_port_descriptor(i)->get_shape(); + const auto& desc = m_expr->get_output_port_descriptor(i); + const auto &shape = desc->get_shape(); if (!shape.empty()) shapes.emplace_back("out_shape_" + std::to_string(i), shape); + out_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); + out_regs.emplace_back(desc->get_reg().idx); } - auto loop_ids = m_expr->get_loop_ids(); - auto rinfo = m_expr->get_reg_info(); - if (!rinfo.first.empty()) - visitor.on_attribute("in_regs", rinfo.first); - if (!rinfo.second.empty()) - visitor.on_attribute("out_regs", rinfo.second); + if (!in_regs.empty()) { + visitor.on_attribute("in_regs", in_regs); + visitor.on_attribute("in_reg_types", in_reg_types); + } + if (!out_regs.empty()) { + visitor.on_attribute("out_regs", out_regs); + visitor.on_attribute("out_reg_types", out_reg_types); + } for (auto& s : shapes) visitor.on_attribute(s.first, s.second); + auto loop_ids = m_expr->get_loop_ids(); visitor.on_attribute("loop_ids", loop_ids); m_expr->get_node()->visit_attributes(visitor); return true; diff --git a/src/common/snippets/tests/include/lowering_utils.hpp b/src/common/snippets/tests/include/lowering_utils.hpp index fd9f7932ccb652..2ee6840e78618b 100644 --- a/src/common/snippets/tests/include/lowering_utils.hpp +++ b/src/common/snippets/tests/include/lowering_utils.hpp @@ -46,7 +46,7 @@ class DummyGenerator : public ov::snippets::Generator { std::shared_ptr clone() const override { return std::make_shared(target); } protected: - opRegType get_specific_op_reg_type(const std::shared_ptr& op) const override { return vec2vec; }; + ov::snippets::RegType get_op_out_reg_type(const ov::Output& out) const override { return ov::snippets::RegType::vec; }; }; class LoweringTests : public TransformationTestsF { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp index dfba703338f1a1..4a9f158d1e701e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp @@ -265,17 +265,19 @@ std::shared_ptr intel_cpu::CPUGenerator::clone() const { return std::make_shared(cpu_target_machine->get_isa()); } -snippets::Generator::opRegType intel_cpu::CPUGenerator::get_specific_op_reg_type(const std::shared_ptr& op) const { +ov::snippets::RegType intel_cpu::CPUGenerator::get_specific_op_out_reg_type(const ov::Output& out) const { + const auto op = out.get_node_shared_ptr(); if (std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) - return gpr2gpr; + return ov::snippets::RegType::gpr; else if ( std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) - return vec2vec; + return ov::snippets::RegType::vec; else OPENVINO_THROW("Register type of the operation " + std::string(op->get_type_name()) + " isn't determined!"); } + bool intel_cpu::CPUGenerator::uses_precompiled_kernel(const std::shared_ptr& e) const { bool need = std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp 
b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp index ed5da62771d12f..6eafd3cb04771c 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp @@ -48,8 +48,8 @@ class CPUGenerator : public snippets::Generator { std::shared_ptr clone() const override; protected: + ov::snippets::RegType get_specific_op_out_reg_type(const ov::Output& out) const override; bool uses_precompiled_kernel(const std::shared_ptr& emitter) const override; - opRegType get_specific_op_reg_type(const std::shared_ptr& op) const override; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp index 7181270f5a56aa..c39f7a7fb493ed 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp @@ -21,51 +21,36 @@ void jit_container_emitter::map_abstract_registers(mapping_info& gpr_map_pool, m if (expressions.empty()) OPENVINO_THROW("Cannot map registers when there is no allocated_emitters provided"); - auto map_regs = [](const std::vector& abstract_regs, mapping_info& mapping) { - auto& abstract_to_physical = mapping.first; - auto& regs_pool = mapping.second; - std::vector physical_regs(abstract_regs.size()); - for (size_t i = 0; i < abstract_regs.size(); i++) { - const auto abstract = abstract_regs[i]; + auto map_regs = [&](const std::vector& abstract_regs) { + std::vector physical_regs = abstract_regs; + for (size_t i = 0; i < abstract_regs.size(); ++i) { + const auto& abstract_reg = abstract_regs[i]; + const auto& type = abstract_reg.type; + const auto& abstract = abstract_reg.idx; + OPENVINO_ASSERT(one_of(type, snippets::RegType::gpr, snippets::RegType::vec), "Incorrect reg type detected!"); + auto& mapping = type == snippets::RegType::gpr ? 
gpr_map_pool : vec_map_pool; + auto& abstract_to_physical = mapping.first; + auto& regs_pool = mapping.second; auto& physical = physical_regs[i]; if (abstract_to_physical.count(abstract) == 0) { if (regs_pool.empty()) OPENVINO_THROW("Cannot map registers for jit_container_emitter: not enough regs in the pool"); - physical = regs_pool.back(); + physical.idx = regs_pool.back(); regs_pool.pop_back(); - abstract_to_physical[abstract] = physical; + abstract_to_physical[abstract] = physical.idx; } else { - physical = abstract_to_physical[abstract]; + physical.idx = abstract_to_physical[abstract]; } } return physical_regs; }; for (const auto& expression : expressions) { - const auto& emitter = expression->get_emitter(); - std::vector in_physical_regs, out_physical_regs; - std::vector in_abstract_regs, out_abstract_regs; + std::vector in_physical_regs, out_physical_regs; + std::vector in_abstract_regs, out_abstract_regs; std::tie(in_abstract_regs, out_abstract_regs) = expression->get_reg_info(); - switch (std::dynamic_pointer_cast(emitter)->get_in_out_type()) { - case gpr_to_gpr: - in_physical_regs = map_regs(in_abstract_regs, gpr_map_pool); - out_physical_regs = map_regs(out_abstract_regs, gpr_map_pool); - break; - case gpr_to_vec: - in_physical_regs = map_regs(in_abstract_regs, gpr_map_pool); - out_physical_regs = map_regs(out_abstract_regs, vec_map_pool); - break; - case vec_to_gpr: - in_physical_regs = map_regs(in_abstract_regs, vec_map_pool); - out_physical_regs = map_regs(out_abstract_regs, gpr_map_pool); - break; - case vec_to_vec: - in_physical_regs = map_regs(in_abstract_regs, vec_map_pool); - out_physical_regs = map_regs(out_abstract_regs, vec_map_pool); - break; - default: - OPENVINO_THROW("Unsupported type of jit emitter!"); - } + in_physical_regs = map_regs(in_abstract_regs); + out_physical_regs = map_regs(out_abstract_regs); expression->set_reg_info({in_physical_regs, out_physical_regs}); if (auto container = std::dynamic_pointer_cast(expression->get_emitter())) container->map_abstract_registers(gpr_map_pool, vec_map_pool, expressions); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp index 244beb5c3a6758..459dc158c7b54a 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp @@ -12,9 +12,16 @@ using namespace dnnl::impl::cpu::x64; namespace ov { namespace intel_cpu { -inline static void transform_idxs_to_regs(const std::vector& idxs, std::vector& regs) { - regs.resize(idxs.size()); +inline static std::vector transform_idxs_to_regs(const std::vector& idxs) { + std::vector regs(idxs.size()); std::transform(idxs.begin(), idxs.end(), regs.begin(), [](size_t idx){return Reg64(static_cast(idx));}); + return regs; +} + +inline static std::vector transform_snippets_regs_to_idxs(const std::vector& regs) { + std::vector idxs(regs.size()); + std::transform(regs.cbegin(), regs.cend(), idxs.begin(), [](const snippets::Reg& reg) { return reg.idx; }); + return idxs; } jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) @@ -228,16 +235,16 @@ void jit_kernel_emitter::init_data_pointers(const Xbyak::Reg64& reg_indexes, con void jit_kernel_emitter::emit_impl(const std::vector& in, const std::vector& out) const { h->preamble(); - Reg64 reg_indexes = Reg64(static_cast(reg_indexes_idx)); - Reg64 reg_const_params = 
Reg64(static_cast(reg_const_params_idx)); - std::vector data_ptr_regs; - transform_idxs_to_regs(data_ptr_regs_idx, data_ptr_regs); + auto reg_indexes = Reg64(static_cast(reg_indexes_idx)); + auto reg_const_params = Reg64(static_cast(reg_const_params_idx)); + auto data_ptr_regs = transform_idxs_to_regs(data_ptr_regs_idx); init_data_pointers(reg_indexes, reg_const_params, data_ptr_regs); for (const auto& expression : body) { + const auto reg_info = expression->get_reg_info(); + const auto in_regs = transform_snippets_regs_to_idxs(reg_info.first); + const auto out_regs = transform_snippets_regs_to_idxs(reg_info.second); const auto& emitter = expression->get_emitter(); - std::vector in_regs, out_regs; - std::tie(in_regs, out_regs) = expression->get_reg_info(); emitter->emit_code(in_regs, out_regs, vec_regs_pool, gp_regs_pool); } h->postamble(); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp index c7a37a71c9c8c3..054f78cb88b42e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp @@ -12,11 +12,6 @@ using namespace dnnl::impl::cpu::x64; namespace ov { namespace intel_cpu { -inline static void transform_idxs_to_regs(const std::vector& idxs, std::vector& regs) { - regs.resize(idxs.size()); - std::transform(idxs.begin(), idxs.end(), regs.begin(), [](size_t idx){return Reg64(static_cast(idx));}); -} - jit_loop_begin_emitter::jit_loop_begin_emitter(jit_generator* h, cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) : jit_emitter(h, isa) { loop_begin = ov::as_type_ptr(expr->get_node()); @@ -71,6 +66,7 @@ jit_loop_end_emitter::jit_loop_end_emitter(jit_generator* h, cpu_isa_t isa, cons num_outputs = expr->get_output_count(); wa_increment = static_cast(loop_end->get_increment()); work_amount = static_cast(loop_end->get_work_amount()); + is_incremented = loop_end->get_is_incremented(); ptr_increments = loop_end->get_ptr_increments(); finalization_offsets = loop_end->get_finalization_offsets(); evaluate_once = loop_end->get_evaluate_once(); @@ -98,22 +94,25 @@ void jit_loop_end_emitter::emit_impl(const std::vector& in, const std::v // the last input is actually a work_amount reg data_ptr_reg_idxs.reserve(num_inputs - 1); std::copy(in.begin(), in.end() - 1, std::back_inserter(data_ptr_reg_idxs)); - std::vector data_ptr_regs; - transform_idxs_to_regs(data_ptr_reg_idxs, data_ptr_regs); + Reg64 reg_work_amount = Reg64(in.back()); if (!evaluate_once) { - for (size_t idx = 0; idx < data_ptr_regs.size(); idx++) { - if (ptr_increments[idx] != 0) - h->add(data_ptr_regs[idx], ptr_increments[idx] * wa_increment * io_data_size[idx]); + for (size_t idx = 0; idx < data_ptr_reg_idxs.size(); idx++) { + if (!is_incremented[idx] || ptr_increments[idx] == 0) + continue; + Reg64 data_reg = Reg64(static_cast(data_ptr_reg_idxs[idx])); + h->add(data_reg, ptr_increments[idx] * wa_increment * io_data_size[idx]); } h->sub(reg_work_amount, wa_increment); h->cmp(reg_work_amount, wa_increment); h->jge(loop_begin->begin_address); } - for (size_t idx = 0; idx < data_ptr_regs.size(); idx++) { - if (finalization_offsets[idx] != 0) - h->add(data_ptr_regs[idx], finalization_offsets[idx] * io_data_size[idx]); + for (size_t idx = 0; idx < data_ptr_reg_idxs.size(); idx++) { + if (!is_incremented[idx] || finalization_offsets[idx] == 0) + continue; + Reg64 data_reg = Reg64(static_cast(data_ptr_reg_idxs[idx])); + 
h->add(data_reg, finalization_offsets[idx] * io_data_size[idx]);
     }
 }
 
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
index ac87436c3030f6..a71d253cdd286e 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
@@ -55,6 +55,7 @@ class jit_loop_end_emitter : public jit_emitter {
     int64_t wa_increment = 0;
     int64_t work_amount = 0;
     bool evaluate_once = false;
+    std::vector is_incremented;
     std::vector ptr_increments;
     std::vector finalization_offsets;
 };

From f0dbe96d985cbbf2f29dc1fc2ee15743cc299eb7 Mon Sep 17 00:00:00 2001
From: Mikhail Ryzhov 
Date: Mon, 22 Jan 2024 12:39:19 +0100
Subject: [PATCH 121/122] increased timeout (#22303)

---
 .github/workflows/job_cpu_functional_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml
index b1f2e6bbf08b59..7c1b29e085331f 100644
--- a/.github/workflows/job_cpu_functional_tests.yml
+++ b/.github/workflows/job_cpu_functional_tests.yml
@@ -98,7 +98,7 @@ jobs:
           fi
 
           python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
-        timeout-minutes: 20
+        timeout-minutes: 25
 
       - name: Save tests execution time
        uses: actions/cache/save@v3

From 92cc4f5d1502c704c769478d5ec338c0c2fbed4d Mon Sep 17 00:00:00 2001
From: Karol Blaszczak 
Date: Mon, 22 Jan 2024 13:25:56 +0100
Subject: [PATCH 122/122] [DOCS] Updated weight compression documentation
 (#22063) (#22310)

authored by alexander.kozlov@intel.com

port: https://github.com/openvinotoolkit/openvino/pull/22063
---
 .../weight_compression.rst | 20 ++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.rst b/docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.rst
index fda734d8303356..5982955ec248a9 100644
--- a/docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.rst
+++ b/docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.rst
@@ -40,7 +40,22 @@ Now, the model is ready for compilation and inference. It can be also saved into

 * ``ratio`` - controls the ratio between INT4 and INT8 compressed layers in the model.
   For example, 0.8 means that 80% of layers will be compressed to INT4, while the rest will be compressed to INT8 precision.

+ * ``dataset`` - calibration dataset for data-aware weight compression. It is required for some compression options, for example, some types of ``sensitivity_metric`` can use data for precision selection.
+
+ * ``sensitivity_metric`` - controls the metric to estimate the sensitivity of compressing layers in the bit-width selection algorithm. Some of the metrics require a dataset to be provided. The following types are supported:
+
+ * ``nncf.SensitivityMetric.WEIGHT_QUANTIZATION_ERROR`` - data-free metric computed as the inverted 8-bit quantization noise. Weights with the highest value of this metric can be accurately quantized channel-wise to 8-bit. The idea is to leave these weights in 8-bit, and quantize the rest of the layers to 4-bit group-wise.
Since group-wise quantization is more accurate than per-channel, accuracy should not degrade.
+
+ * ``nncf.SensitivityMetric.HESSIAN_INPUT_ACTIVATION`` - requires dataset. The average Hessian trace of weights with respect to the layer-wise quantization error multiplied by the L2 norm of 8-bit quantization noise.
+
+ * ``nncf.SensitivityMetric.MEAN_ACTIVATION_VARIANCE`` - requires dataset. The mean variance of the layers' inputs multiplied by the inverted 8-bit quantization noise.
+
+ * ``nncf.SensitivityMetric.MAX_ACTIVATION_VARIANCE`` - requires dataset. The maximum variance of the layers' inputs multiplied by the inverted 8-bit quantization noise.
+
+ * ``nncf.SensitivityMetric.MEAN_ACTIVATION_MAGNITUDE`` - requires dataset. The mean magnitude of the layers' inputs multiplied by the inverted 8-bit quantization noise.
+
+
-The example below shows 4-bit weight quantization applied on top of OpenVINO IR:
+The example below shows data-free 4-bit weight quantization applied on top of OpenVINO IR:

.. tab-set::

   .. tab-item:: OpenVINO

         :language: python
         :fragment: [compression_4bit]

+For data-aware weight compression, refer to the following `example `__.
+
.. note::

   OpenVINO also supports 4-bit models from Hugging Face `Transformers `__ library optimized

@@ -132,6 +149,7 @@ The table below shows examples of Text Generation models with different optimiza
Additional Resources
####################

+- `Data-aware weight compression `__
- :doc:`Post-training Quantization `
- :doc:`Training-time Optimization `
- `NNCF GitHub `__
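
As a rough, self-contained illustration of the data-aware options documented above, the sketch below combines ``dataset`` and ``sensitivity_metric`` in a single ``nncf.compress_weights()`` call. It is a minimal sketch rather than the official snippet referenced by the docs: the model path and the random calibration samples are placeholders, and it assumes an NNCF version that already supports data-aware weight compression.

.. code-block:: python

   import numpy as np
   import openvino as ov
   import nncf

   core = ov.Core()
   # Placeholder path: any OpenVINO IR model with weighted layers (e.g. MatMul).
   model = core.read_model("model.xml")

   # Placeholder calibration data; a real setup feeds representative model inputs.
   calibration_data = [np.random.rand(1, 128).astype(np.float32) for _ in range(8)]
   dataset = nncf.Dataset(calibration_data)

   compressed_model = nncf.compress_weights(
       model,
       mode=nncf.CompressWeightsMode.INT4_SYM,
       ratio=0.8,        # 80% of the layers are compressed to INT4, the rest to INT8
       group_size=128,
       dataset=dataset,  # enables data-aware precision selection
       sensitivity_metric=nncf.SensitivityMetric.HESSIAN_INPUT_ACTIVATION,
   )
   ov.save_model(compressed_model, "compressed_model.xml")

Any of the dataset-backed metrics listed above can be passed the same way; per those descriptions, ``WEIGHT_QUANTIZATION_ERROR`` is the only metric that works without a ``dataset``.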