From 010b225bba53e51f44c91d33451e0a2e274e04d9 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Thu, 1 Feb 2024 18:21:08 +0400 Subject: [PATCH 01/13] Remove blob_utils --- .../src/concat_const_inplace.cpp | 1 + .../concurrency/gpu_concurrency_tests.cpp | 12 +- .../gpu_remote_tensor_tests.cpp | 19 +- .../include/base/ov_behavior_test_utils.hpp | 1 - .../compiled_model/compiled_model_base.hpp | 4 +- .../infer_request_dynamic.hpp | 1 - .../convolution_qdq_transformation.cpp | 1 - .../convolution_transformation.cpp | 1 - .../convolution_with_incorrect_weights.cpp | 1 - .../depth_to_space_transformation.cpp | 1 - ...d_two_output_branches_with_convolution.cpp | 1 - .../fully_connected_transformation.cpp | 1 - .../fuse_convert_transformation.cpp | 1 - .../gemm_transformation.cpp | 1 - .../group_convolution_transformation.cpp | 1 - .../groupconvolution_qdq_transformation.cpp | 1 - .../mat_mul_with_optimized_constant_fq.cpp | 1 - .../move_fake_quantize_transformation.cpp | 1 - ...ly_to_group_convolution_transformation.cpp | 1 - .../mvn_transformation.cpp | 1 - .../normalize_transformation.cpp | 1 - .../output_layers_concat.cpp | 1 - .../output_layers_concat_multi_channel.cpp | 1 - ...put_layers_handling_in_transformations.cpp | 1 - ..._through_dequantization_transformation.cpp | 1 - .../recurrent_cell_transformation.cpp | 2 - .../transpose_after_matmul_transformation.cpp | 1 - .../base/layer_test_utils.hpp | 2 +- .../layer_transformation.cpp | 1 - .../common_test_utils/ov_tensor_utils.hpp | 3 + .../common_test_utils/src/ov_tensor_utils.cpp | 35 ++ .../functional_test_utils/blob_utils.hpp | 584 ------------------ 32 files changed, 55 insertions(+), 630 deletions(-) delete mode 100644 src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp index bb12b5de29b23d..0844ce4a2b85f8 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp @@ -5,6 +5,7 @@ #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "utils/cpu_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index e493792d58d2dc..443d673ce10cad 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -5,7 +5,6 @@ #include "common_test_utils/test_common.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "transformations/utils/utils.hpp" #include "common_test_utils/file_utils.hpp" @@ -242,20 +241,15 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) { // Modify tensor somehow and save as a reference values ov::test::utils::fill_tensor_random(output_tensor2); - std::vector ref_values; - ref_values.resize(output_tensor2.get_byte_size()); - std::memcpy(ref_values.data(), output_tensor2.data(), output_tensor2.get_byte_size()); + ov::Tensor 
ref_tensor(output_tensor2.get_element_type(), output_tensor2.get_shape()); + output_tensor2.copy_to(ref_tensor); // Perform second infer() call with a system host memory tensor infer_request1.set_output_tensor(output_tensor1); ASSERT_NO_THROW(infer_request1.infer()); // Expect that output_tensor2 will not change it's data after infer() call - FuncTestUtils::compareRawBuffers(ref_values.data(), - output_tensor2.data(), - ref_values.size(), - ov::shape_size(output_tensor2.get_shape()), - 1e-4f); + ov::test::utils::compare(ref_tensor, output_tensor2, 1e-4); } TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) { diff --git a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp index 300d88ec88240a..74fda0a8dc3263 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp @@ -9,6 +9,7 @@ #include "remote_tensor_tests/helpers.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/data_utils.hpp" #include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" @@ -1891,9 +1892,9 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) { ASSERT_EQ(output_tensor_regular.get_size() * num_batch, output_tensor_shared.get_size()); float thr = 0.1f; - FuncTestUtils::compareRawBuffers(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), + ov::test::utils::compare_raw_data(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), static_cast(output_tensor_regular.data()), - output_tensor_regular.get_size(), output_tensor_regular.get_size(), thr); + output_tensor_regular.get_size(), thr); } } @@ -2022,9 +2023,9 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) { ASSERT_EQ(output_tensor_regular.get_size() * num_batch, output_tensor_shared.get_size()); float thr = 0.1f; - FuncTestUtils::compareRawBuffers(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), + ov::test::utils::compare_raw_data(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), static_cast(output_tensor_regular.data()), - output_tensor_regular.get_size(), output_tensor_regular.get_size(), thr); + output_tensor_regular.get_size(), thr); } } @@ -2145,9 +2146,9 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) { ASSERT_EQ(output_tensor_regular.get_size() * num_batch, output_tensor_shared.get_size()); float thr = 0.1f; - FuncTestUtils::compareRawBuffers(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), + ov::test::utils::compare_raw_data(static_cast(output_tensor_shared.data()) + i * output_tensor_regular.get_size(), static_cast(output_tensor_regular.data()), - output_tensor_regular.get_size(), output_tensor_regular.get_size(), thr); + output_tensor_regular.get_size(), thr); } } @@ -2304,9 +2305,9 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) { ASSERT_EQ(output_tensor_regular.get_size() * num_batch, out_tensor_new.get_size()); float thr = 0.1f; - FuncTestUtils::compareRawBuffers(static_cast(out_tensor_new.data()) + i * output_tensor_regular.get_size(), - static_cast(output_tensor_regular.data()), - output_tensor_regular.get_size(), 
output_tensor_regular.get_size(), thr); + ov::test::utils::compare_raw_data(static_cast(out_tensor_new.data()) + i * output_tensor_regular.get_size(), + static_cast(output_tensor_regular.data()), + output_tensor_regular.get_size(), thr); } } diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index e8b3278a69c590..836aae7e614ed4 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -23,7 +23,6 @@ #include "functional_test_utils/plugin_cache.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/summary/api_summary.hpp" #include "openvino/util/file_util.hpp" #include "common_test_utils/subgraph_builders/split_conv_concat.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index 413d3cecebeb86..3bf66babd2efd3 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -362,7 +362,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoBeforeExecution) { auto getExecValue = [&rtInfo](const std::string& paramName) -> std::string { auto it = rtInfo.find(paramName); - IE_ASSERT(rtInfo.end() != it); + OPENVINO_ASSERT(rtInfo.end() != it); return it->second.as(); }; @@ -414,7 +414,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { auto getExecValue = [&rtInfo](const std::string& paramName) -> std::string { auto it = rtInfo.find(paramName); - IE_ASSERT(rtInfo.end() != it); + OPENVINO_ASSERT(rtInfo.end() != it); return it->second.as(); }; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp index 6c309643c99334..4398b4bea8c1f3 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp @@ -19,7 +19,6 @@ #include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" // TODO [mandrono]: move current test case inside CPU plug-in and return the original tests diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp index e92913b60189ff..c955bb16756b12 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include 
"ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index 02c366f8fc5298..410e6005d65087 100755 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index 14b49e5800bdf4..7f4f6582eb440b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index 713bc7d8439867..cf2813e77d2063 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -9,7 +9,6 @@ #include #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "low_precision_transformations/depth_to_space_transformation.hpp" #include "openvino/core/model.hpp" #include "openvino/op/depth_to_space.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 5aad24deae06bf..8e116e6621e0a8 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index 
74a31d244b20a6..7dac1a33b0699b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/mat_mul.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp index 85fdbd1bb57d7d..049c66a50167f6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fuse_convert.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp index ce67bff1d06dcd..b006a95c9c54b8 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_lpt_models/mat_mul.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp index 912c3437998ab7..bb6b5dc4166ec5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/group_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp index 6ad674ee9439d5..ddd3b750814094 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include 
"functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 6de4a9423ffc84..1fda036729df07 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp index 144ba7dd1b8a60..6ace6bde3d6fa2 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp @@ -12,7 +12,6 @@ #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_lpt_models/move_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp index 32fdb92de0ab8a..423fdf75ec195f 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/multiply_to_group_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index 3dfc7692486a6a..489806ae66b771 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/mvn.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp index 
d39091eb3cde7b..aef23abd0094cd 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/normalize_l2.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index 0beb3f72899172..12272374f8434b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp index 924f4ed77519b7..4744497e9aecfd 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index ff3399d346c4eb..4afacd52455960 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp index 5936bc75a8f215..f29b1c7c72103e 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp +++ 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp index 0abfbaec3eb81f..fdf815ba48f058 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp @@ -9,10 +9,8 @@ #include #include - #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_lpt_models/recurrent_cell.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp index fff95b82618a0f..8d99dc1dc4ba54 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" #include "ov_lpt_models/transpose_after_mat_mul.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index a23e1be0e84943..8a6237856bbbf1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -7,9 +7,9 @@ #include #include "common_test_utils/common_utils.hpp" +#include "common_test_utils/data_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "common_test_utils/data_utils.hpp" #include "functional_test_utils/crash_handler.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/summary/environment.hpp" diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp index 845874a8a6bb49..0a4d8ea5e8166a 100644 --- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp +++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp @@ -8,7 +8,6 @@ #include #include -#include "functional_test_utils/blob_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git 
a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp index 0bd6140e2133ad..03b23cedf53265 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp @@ -77,6 +77,9 @@ ov::Tensor create_and_fill_tensor_real_distribution(const ov::element::Type elem const float max, const int seed); +ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, + const ov::Shape& shape); + void compare(const ov::Tensor& expected, const ov::Tensor& actual, const double abs_threshold = std::numeric_limits::max(), diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp index ee85e0c88cebad..a21ce7a5d00f7b 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp @@ -302,6 +302,41 @@ ov::Tensor create_and_fill_tensor_consistently(const ov::element::Type element_t return tensor; } +template +void fill_data_sin_value(T* data, size_t size) { + for (size_t i = 0; i < size; i++) { + data[i] = static_cast(sin(i)); + } +} + +ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, + const ov::Shape& shape) { + auto tensor = ov::Tensor{element_type, shape}; +#define CASE(X) \ + case X: \ + fill_data_sin_value(tensor.data>(), \ + tensor.get_size()); \ + break; + switch (element_type) { + CASE(ov::element::Type_t::i8) + CASE(ov::element::Type_t::i16) + CASE(ov::element::Type_t::i32) + CASE(ov::element::Type_t::i64) + CASE(ov::element::Type_t::u8) + CASE(ov::element::Type_t::u16) + CASE(ov::element::Type_t::u32) + CASE(ov::element::Type_t::u64) + CASE(ov::element::Type_t::bf16) + CASE(ov::element::Type_t::f16) + CASE(ov::element::Type_t::f32) + CASE(ov::element::Type_t::f64) + default: + OPENVINO_THROW("Unsupported element type: ", element_type); + } +#undef CASE + return tensor; +} + constexpr double eps = std::numeric_limits::epsilon(); inline double less(double a, double b) { diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp deleted file mode 100644 index 2bf8fa2285d649..00000000000000 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include -#include -#include -#include -#include - -#include "blob_factory.hpp" -#include "common_test_utils/data_utils.hpp" -#include "common_test_utils/test_constants.hpp" -#include "ie_ngraph_utils.hpp" -#include "openvino/runtime/common.hpp" - -namespace FuncTestUtils { -namespace Bf16TestUtils { -inline short reducePrecisionBitwiseS(const float in); -} // namespace Bf16TestUtils - -enum CompareType { - ABS, - REL, - ABS_AND_REL // if absolute and relative differences are too high, an exception is thrown -}; -/** - * @brief Checks values of two blobs according to given algorithm and thresholds. 
- * In ABS and REL cases thr1 corresponds to the single threshold, - * In ABS_AND_REL case thr1 and thr2 mean absolute and relative threshold - * - * @tparam dType Type of blob data - * @param res Pointer to considered blob - * @param ref Pointer to reference blob - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param compareType Defines an algorithm of comparison - * @param thr1 First threshold of difference - * @param thr2 Second threshold of difference - * @param printData A flag if data printing is demanded - */ -template -inline void compareRawBuffers(const dType* res, - const dType* ref, - size_t resSize, - size_t refSize, - CompareType compareType, - float thr1 = 0.01, - float thr2 = 0.01, - bool printData = false) { - if (printData) { - std::cout << "Reference results: " << std::endl; - for (size_t i = 0; i < refSize; i++) { - std::cout << ref[i] << " "; - } - std::cout << std::endl; - std::cout << "Test results: " << std::endl; - for (size_t i = 0; i < resSize; i++) { - std::cout << res[i] << " "; - } - std::cout << std::endl; - } - - switch (compareType) { - case CompareType::ABS: - for (size_t i = 0; i < refSize; i++) { - float absDiff = std::abs(res[i] - ref[i]); - ASSERT_LE(absDiff, thr1) << "Relative comparison of values ref: " << ref[i] << " and res: " << res[i] - << " , index in blobs: " << i << " failed!"; - } - break; - case CompareType::REL: - for (size_t i = 0; i < refSize; i++) { - float absDiff = std::abs(res[i] - ref[i]); - float relDiff = absDiff / std::max(res[i], ref[i]); - ASSERT_LE(relDiff, thr2) << "Relative comparison of values ref: " << ref[i] << " and res: " << res[i] - << " , index in blobs: " << i << " failed!"; - } - break; - case CompareType::ABS_AND_REL: - for (size_t i = 0; i < refSize; i++) { - float absDiff = std::abs(res[i] - ref[i]); - if (absDiff > thr1) { - float relDiff = absDiff / std::max(res[i], ref[i]); - ASSERT_LE(relDiff, thr2) << "Comparison of values ref: " << ref[i] << " and res: " << res[i] - << " , index in blobs: " << i << " failed!"; - } - } - break; - } -} -/** - * @brief Checks absolute and relative difference of blob values according to given threshold. - * - * @tparam dType Type of blob data - * @param res Pointer to considered blob - * @param ref Pointer to reference blob - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param thr Threshold of difference, absolute and relative simultaneously - * @param printData Flag if data printing is demanded - */ -template -inline void compareRawBuffers(const dType* res, - const dType* ref, - size_t resSize, - size_t refSize, - float thr = 0.01, - bool printData = false) { - compareRawBuffers(res, ref, resSize, refSize, CompareType::ABS_AND_REL, thr, thr, printData); -} -/** - * @brief Checks values of two blobs according to given algorithm and thresholds. 
- * In ABS and REL cases thr1 corresponds to the single threshold, - * In ABS_AND_REL case thr1 and thr2 mean absolute and relative threshold - * - * @tparam dType Type of blob data - * @param res Vector of considered blob values - * @param ref Vector of reference blob values - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param compareType Defines an algorithm of comparision - * @param thr1 First threshold of difference - * @param thr2 Second threshold of difference - * @param printData A flag if data printing is demanded - */ -template -inline void compareRawBuffers(const std::vector res, - const std::vector ref, - const std::vector& resSizes, - const std::vector& refSizes, - CompareType compareType, - float thr1 = 0.01, - float thr2 = 0.01, - bool printData = false) { - ASSERT_TRUE(res.size() == ref.size()) << "Reference and Results vector have to be same length"; - ASSERT_TRUE(res.size() == resSizes.size()) << "Results vector and elements count vector have to be same length"; - ASSERT_TRUE(ref.size() == refSizes.size()) << "Reference vector and elements count vector have to be same length"; - for (size_t i = 0; i < res.size(); i++) { - if (printData) - std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl; - compareRawBuffers(res[i], ref[i], resSizes[i], refSizes[i], compareType, thr1, thr2, printData); - if (printData) - std::cout << "END CHECK BUFFER [" << i << "]" << std::endl; - } -} -/** - * @brief Checks absolute and relative difference of blob values according to given threshold. - * - * @tparam dType Type of blob data - * @param res Vector of considered blob values - * @param ref Vector of reference blob values - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param thr Threshold of difference, absolute and relative simultaneously - * @param printData A flag if data printing is demanded - */ -template -inline void compareRawBuffers(const std::vector res, - const std::vector ref, - const std::vector& resSizes, - const std::vector& refSizes, - float thr = 0.01, - bool printData = false) { - compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData); -} -/** - * @brief Checks values of two blobs according to given algorithm and thresholds. 
- * In ABS and REL cases thr1 corresponds to the single threshold, - * In ABS_AND_REL case thr1 and thr2 mean absolute and relative threshold - * - * @tparam dType Type of blob data - * @param res Vector of considered blob values - * @param ref Vector of reference blob values - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param compareType Defines an algorithm of comparision - * @param thr1 First threshold of difference - * @param thr2 Second threshold of difference - * @param printData A flag if data printing is demanded - */ -template -inline void compareRawBuffers(const std::vector res, - const std::vector> ref, - const std::vector& resSizes, - const std::vector& refSizes, - CompareType compareType, - float thr1 = 0.01, - float thr2 = 0.01, - bool printData = false) { - ASSERT_TRUE(res.size() == ref.size()) << "Reference and Results vector have to be same length"; - ASSERT_TRUE(res.size() == resSizes.size()) << "Results vector and elements count vector have to be same length"; - ASSERT_TRUE(ref.size() == refSizes.size()) << "Reference vector and elements count vector have to be same length"; - for (size_t i = 0; i < res.size(); i++) { - if (printData) - std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl; - compareRawBuffers(res[i], *ref[i], resSizes[i], refSizes[i], compareType, thr1, thr2, printData); - if (printData) - std::cout << "END CHECK BUFFER [" << i << "]" << std::endl; - } -} -/** - * @brief Checks absolute and relative difference of blob values according to given threshold. - * - * @tparam dType Type of blob data - * @param res Vector of considered blob values - * @param ref Vector of reference blob values - * @param resSize Size of considered blob - * @param refSize Size of reference blob - * @param thr Threshold of difference, absolute and relative simultaneously - * @param printData A flag if data printing is demanded - */ -template -inline void compareRawBuffers(const std::vector res, - const std::vector> ref, - const std::vector& resSizes, - const std::vector& refSizes, - float thr = 0.01, - bool printData = false) { - compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData); -} - -inline void GetComparisonThreshold(InferenceEngine::Precision prc, float& absoluteThreshold, float& relativeThreshold) { - switch (prc) { - case InferenceEngine::Precision::FP32: - absoluteThreshold = relativeThreshold = 1e-4f; - break; - case InferenceEngine::Precision::FP16: - absoluteThreshold = relativeThreshold = 1e-2f; - break; - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::U8: - absoluteThreshold = relativeThreshold = 1; - break; - default: - IE_THROW() << "Unhandled precision " << prc << " passed to the GetComparisonThreshold()"; - } -} - -inline float GetComparisonThreshold(InferenceEngine::Precision prc) { - float res; - GetComparisonThreshold(prc, res, res); - return res; -} - -// Copy from net_pass.h -template -inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait::value_type* dst, - const typename InferenceEngine::PrecisionTrait::value_type* src, - size_t nelem) { - using dst_type = typename InferenceEngine::PrecisionTrait::value_type; - - for (size_t i = 0; i < nelem; i++) { - dst[i] = static_cast(src[i]); - } -} - -template <> -inline void convertArrayPrecision(float* dst, - const short* src, - size_t nelem) { - auto srcBf16 = reinterpret_cast(src); - for (size_t i = 0; i < nelem; i++) { - dst[i] = 
static_cast(srcBf16[i]); - } -} - -template -inline InferenceEngine::Blob::Ptr convertBlobPrecision(const InferenceEngine::Blob::Ptr& blob) { - using from_d_type = typename InferenceEngine::PrecisionTrait::value_type; - using to_d_type = typename InferenceEngine::PrecisionTrait::value_type; - - auto tensor_desc = blob->getTensorDesc(); - InferenceEngine::Blob::Ptr new_blob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc{PREC_TO, tensor_desc.getDims(), tensor_desc.getLayout()}); - new_blob->allocate(); - auto target = new_blob->buffer().as(); - auto source = blob->buffer().as(); - convertArrayPrecision(target, source, blob->size()); - return new_blob; -} - -inline InferenceEngine::Blob::Ptr createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc& td, - const float mean, - const float stddev, - const int32_t seed = 1) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - blob->allocate(); - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_data_normal_random_float(blob, mean, stddev, seed); \ - break; - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::BIN) - CASE(InferenceEngine::Precision::I32) - CASE(InferenceEngine::Precision::BOOL) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::TensorDesc& td, - const uint32_t range = 10, - const int32_t start_from = 0, - const int32_t resolution = 1, - const int32_t seed = 1) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - - blob->allocate(); - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_data_random_float(blob, range, start_from, resolution, seed); \ - break; - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::BIN) - CASE(InferenceEngine::Precision::I32) - CASE(InferenceEngine::Precision::BOOL) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -template -inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc& td, - const T values[], - const int size) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - blob->allocate(); - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_data_float_array(blob, values, size); \ - break; - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::BIN) - CASE(InferenceEngine::Precision::I32) - CASE(InferenceEngine::Precision::BOOL) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -inline InferenceEngine::Blob::Ptr 
createAndFillBlob(const InferenceEngine::TensorDesc& td, - const uint32_t range = 10, - const int32_t start_from = 0, - const int32_t resolution = 1, - const int seed = 1) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - blob->allocate(); - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_data_random(blob, range, start_from, resolution, seed); \ - break; - CASE(InferenceEngine::Precision::FP64) - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::BF16) - CASE(InferenceEngine::Precision::U4) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U32) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::U64) - CASE(InferenceEngine::Precision::I4) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I32) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::BIN) - CASE(InferenceEngine::Precision::BOOL) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently(const InferenceEngine::TensorDesc& td, - const uint32_t range, - const int32_t start_from, - const int32_t resolution) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - blob->allocate(); - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_data_consistently(blob, range, start_from, resolution); \ - break; - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::BIN) - CASE(InferenceEngine::Precision::I32) - CASE(InferenceEngine::Precision::BOOL) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -inline InferenceEngine::Blob::Ptr createAndFillBlobUniqueSequence(const InferenceEngine::TensorDesc& td, - const int32_t start_from = 0, - const int32_t resolution = 1, - const int32_t seed = 1) { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); - blob->allocate(); - auto shape = td.getDims(); - auto range = std::accumulate(begin(shape), end(shape), uint64_t(1), std::multiplies()) * 2; - switch (td.getPrecision()) { -#define CASE(X) \ - case X: \ - ov::test::utils::fill_random_unique_sequence(blob, range, start_from, resolution, seed); \ - break; - CASE(InferenceEngine::Precision::FP32) - CASE(InferenceEngine::Precision::FP16) - CASE(InferenceEngine::Precision::U8) - CASE(InferenceEngine::Precision::U16) - CASE(InferenceEngine::Precision::I8) - CASE(InferenceEngine::Precision::I16) - CASE(InferenceEngine::Precision::I64) - CASE(InferenceEngine::Precision::I32) -#undef CASE - default: - IE_THROW() << "Wrong precision specified: " << td.getPrecision().name(); - } - return blob; -} - -template -inline void fillInputsBySinValues(dType* data, size_t size) { - if (std::is_same::value) { - for (size_t i = 0; i < size; i++) { - data[i] = sin(static_cast(i)); - } - } else if (std::is_same::value) { - for (size_t i = 0; i < size; i++) { - data[i] = FuncTestUtils::Bf16TestUtils::reducePrecisionBitwiseS(sin(static_cast(i))); - } - } -} - -inline int fillInputsBySinValues(InferenceEngine::Blob::Ptr 
blob) { - InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as(blob); - if (!mblob) { - return -1; - } - if (mblob->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP32) { - return -2; - } - auto lm = mblob->rwmap(); - fillInputsBySinValues(lm.as(), mblob->size()); - return 0; -} - -namespace Bf16TestUtils { - -#if defined __GNUC__ -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wstrict-aliasing" -# pragma GCC diagnostic ignored "-Wuninitialized" -#endif - -inline float reducePrecisionBitwise(const float in) { - float f = in; - int* i = reinterpret_cast(&f); - int t2 = *i & 0xFFFF0000; - float ft1; - memcpy(&ft1, &t2, sizeof(float)); - if ((*i & 0x8000) && (*i & 0x007F0000) != 0x007F0000) { - t2 += 0x10000; - memcpy(&ft1, &t2, sizeof(float)); - } - return ft1; -} - -inline short reducePrecisionBitwiseS(const float in) { - float f = reducePrecisionBitwise(in); - int intf = *reinterpret_cast(&f); - intf = intf >> 16; - short s = intf; - return s; -} - -#if defined __GNUC__ -# pragma GCC diagnostic pop -#endif - -} // namespace Bf16TestUtils - -enum class BlobType { - Memory, - Compound, - Remote, -}; - -inline std::ostream& operator<<(std::ostream& os, BlobType type) { - switch (type) { - case BlobType::Memory: - return os << "Memory"; - case BlobType::Remote: - return os << "Remote"; - default: - IE_THROW() << "Not supported blob type"; - } -} - -inline bool checkLayout(InferenceEngine::Layout layout, const std::vector& inputShapes) { - bool check = false; - switch (layout) { - case InferenceEngine::Layout::SCALAR: - check = inputShapes.size() == 0; - break; - case InferenceEngine::Layout::C: - check = 1 == inputShapes.size(); - break; - case InferenceEngine::Layout::BLOCKED: - case InferenceEngine::Layout::ANY: - check = true; - break; - case InferenceEngine::Layout::GOIDHW: - check = 6 == inputShapes.size(); - break; - case InferenceEngine::Layout::NCDHW: - case InferenceEngine::Layout::NDHWC: - case InferenceEngine::Layout::OIDHW: - case InferenceEngine::Layout::GOIHW: - check = 5 == inputShapes.size(); - break; - case InferenceEngine::Layout::OIHW: - case InferenceEngine::Layout::NCHW: - case InferenceEngine::Layout::NHWC: - check = 4 == inputShapes.size(); - break; - case InferenceEngine::Layout::CHW: - case InferenceEngine::Layout::HWC: - check = 3 == inputShapes.size(); - break; - case InferenceEngine::Layout::CN: - case InferenceEngine::Layout::NC: - case InferenceEngine::Layout::HW: - check = 2 == inputShapes.size(); - break; - default: - break; - } - return check; -} -} // namespace FuncTestUtils From 8bba5bcb486c344ce1b3aecf49688d36b827af9b Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 01:05:37 +0400 Subject: [PATCH 02/13] Cleaning func test utils --- .../fq_mul_fusion_test.cpp | 1 - .../execution_graph_tests/add_output.cpp | 1 - .../concurrency/gpu_concurrency_tests.cpp | 1 - .../include/base/behavior_test_utils.hpp | 188 ------------------ .../include/base/ov_behavior_test_utils.hpp | 3 +- .../compiled_model/compiled_model_base.hpp | 3 +- .../ov_infer_request/batched_tensors.hpp | 2 +- .../infer_request_dynamic.hpp | 3 +- .../behavior/ov_infer_request/inference.hpp | 2 +- .../ov_infer_request/inference_chaining.hpp | 2 +- .../ov_infer_request/iteration_chaining.hpp | 2 +- .../ov_infer_request/memory_states.hpp | 2 +- .../ov_infer_request/properties_tests.hpp | 4 +- .../ov_plugin/auto_batching_tests.hpp | 1 - .../behavior/ov_plugin/caching_tests.hpp | 1 - .../include/behavior/ov_plugin/version.hpp | 2 +- 
.../ov_infer_request/batched_tensors.cpp | 2 +- .../ov_infer_request/memory_states.cpp | 2 - .../convolution_qdq_transformation.cpp | 1 - .../convolution_transformation.cpp | 1 - .../convolution_with_incorrect_weights.cpp | 1 - ...d_two_output_branches_with_convolution.cpp | 1 - .../fully_connected_transformation.cpp | 1 - .../fuse_convert_transformation.cpp | 1 - .../gemm_transformation.cpp | 1 - .../group_convolution_transformation.cpp | 1 - .../groupconvolution_qdq_transformation.cpp | 1 - .../mat_mul_with_optimized_constant_fq.cpp | 1 - ...ly_to_group_convolution_transformation.cpp | 1 - .../mvn_transformation.cpp | 1 - .../normalize_transformation.cpp | 1 - .../output_layers_concat.cpp | 1 - .../output_layers_concat_multi_channel.cpp | 1 - ...put_layers_handling_in_transformations.cpp | 1 - ..._through_dequantization_transformation.cpp | 1 - .../shuffle_channels_transformation.cpp | 1 - .../transpose_after_matmul_transformation.cpp | 1 - .../functional_test_utils/plugin_cache.hpp | 34 ---- .../functional_test_utils/precision_utils.hpp | 62 ------ .../src/plugin_cache.cpp | 109 ---------- 40 files changed, 12 insertions(+), 434 deletions(-) delete mode 100644 src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp delete mode 100644 src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp delete mode 100644 src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp delete mode 100644 src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp diff --git a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp index 7dcf6a9c44b3c3..f9036fb5eadda2 100644 --- a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp @@ -12,7 +12,6 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset4.hpp" #include "openvino/pass/manager.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index 786a204488c5ee..f01cfa5db36cc2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -4,7 +4,6 @@ #include #include "execution_graph_tests/add_output.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/sigmoid.hpp" #include "openvino/op/constant.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index 443d673ce10cad..32418b94b52e66 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -4,7 +4,6 @@ #include "common_test_utils/test_common.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "transformations/utils/utils.hpp" #include 
"common_test_utils/file_utils.hpp" diff --git a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp deleted file mode 100644 index 5d446bb5043d80..00000000000000 --- a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ov_behavior_test_utils.hpp" - -#include "ie_core.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "common_test_utils/file_utils.hpp" -#include "openvino/util/file_util.hpp" -#include "functional_test_utils/summary/api_summary.hpp" -#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" - -namespace BehaviorTestsUtils { - -class IEInferRequestTestBase : public ov::test::behavior::APIBaseTest { -private: - void set_api_entity() override { - api_entity = ov::test::utils::ov_entity::ie_infer_request; - }; -}; - -class IEExecutableNetworkTestBase : public ov::test::behavior::APIBaseTest { -private: - void set_api_entity() override { - api_entity = ov::test::utils::ov_entity::ie_executable_network; - }; -}; - -class IEPluginTestBase : public ov::test::behavior::APIBaseTest { -private: - void set_api_entity() override { - api_entity = ov::test::utils::ov_entity::ie_plugin; - }; -}; - -typedef std::tuple< - std::string, // Device name - std::map // Config -> InferRequestParams; - -class InferRequestTests : public testing::WithParamInterface, - public IEInferRequestTestBase { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - std::string targetDevice; - std::map configuration; - std::tie(targetDevice, configuration) = obj.param; - std::ostringstream result; - std::replace(targetDevice.begin(), targetDevice.end(), ':', '.'); - result << "targetDevice=" << targetDevice << "_"; - if (!configuration.empty()) { - for (auto &configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; - } - } - return result.str(); - } - - void SetUp() override { - std::tie(target_device, configuration) = this->GetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - APIBaseTest::SetUp(); - function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(); - cnnNet = InferenceEngine::CNNNetwork(function); - // Load CNNNetwork to target plugins - execNet = ie->LoadNetwork(cnnNet, target_device, configuration); - } - - void TearDown() override { - if (!configuration.empty()) { - PluginCache::get().reset(); - } - APIBaseTest::TearDown(); - } - -protected: - InferenceEngine::CNNNetwork cnnNet; - InferenceEngine::ExecutableNetwork execNet; - std::shared_ptr ie = PluginCache::get().ie(); - std::shared_ptr function; - std::map configuration;; -}; - -inline InferenceEngine::Core createIECoreWithTemplate() { - PluginCache::get().reset(); - InferenceEngine::Core ie; -#ifndef OPENVINO_STATIC_LIBRARY - std::string pluginName = "openvino_template_plugin" OV_BUILD_POSTFIX; - ie.RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), - ov::test::utils::DEVICE_TEMPLATE); -#endif // !OPENVINO_STATIC_LIBRARY - return ie; -} - -class IEClassNetworkTest : public ov::test::behavior::OVClassNetworkTest { -public: - InferenceEngine::CNNNetwork actualCnnNetwork, simpleCnnNetwork, multinputCnnNetwork, ksoCnnNetwork; - - void SetUp() { - 
SKIP_IF_CURRENT_TEST_IS_DISABLED(); - OVClassNetworkTest::SetUp(); - // Generic network - ASSERT_NO_THROW(actualCnnNetwork = InferenceEngine::CNNNetwork(actualNetwork)); - // Quite simple network - ASSERT_NO_THROW(simpleCnnNetwork = InferenceEngine::CNNNetwork(simpleNetwork)); - // Multinput to substruct network - ASSERT_NO_THROW(multinputCnnNetwork = InferenceEngine::CNNNetwork(multinputNetwork)); - // Network with KSO - ASSERT_NO_THROW(ksoCnnNetwork = InferenceEngine::CNNNetwork(ksoNetwork)); - } -}; - -class IEClassBaseTestP : public IEClassNetworkTest, - public ::testing::WithParamInterface, - public IEPluginTestBase { -public: - void SetUp() override { - target_device = GetParam(); - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - APIBaseTest::SetUp(); - IEClassNetworkTest::SetUp(); - } -}; - -class IEExecNetClassBaseTestP : public IEClassNetworkTest, - public ::testing::WithParamInterface, - public IEExecutableNetworkTestBase { -public: - void SetUp() override { - target_device = GetParam(); - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - APIBaseTest::SetUp(); - IEClassNetworkTest::SetUp(); - } -}; - -typedef std::tuple< - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Config -> BehaviorBasicParams; - -class BehaviorTestsBasicBase : public testing::WithParamInterface { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - using namespace ov::test::utils; - - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - std::replace(targetDevice.begin(), targetDevice.end(), ':', '_'); - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - if (!configuration.empty()) { - result << "config=" << configuration; - } - return result.str(); - } - - std::shared_ptr ie = PluginCache::get().ie(); - std::shared_ptr function; - InferenceEngine::Precision netPrecision; - std::map configuration; -}; - -class BehaviorTestsBasic : public BehaviorTestsBasicBase, - public IEPluginTestBase { -protected: - void SetUp() override { - std::tie(netPrecision, target_device, configuration) = this->GetParam(); - SKIP_IF_CURRENT_TEST_IS_DISABLED() - APIBaseTest::SetUp(); - function = ov::test::utils::make_conv_pool_relu(); - } - void TearDown() override { - if (!configuration.empty()) { - PluginCache::get().reset(); - } - APIBaseTest::TearDown(); - } -}; -} // namespace BehaviorTestsUtils diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index 836aae7e614ed4..b7f27d1819022e 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -20,7 +20,6 @@ #include "functional_test_utils/crash_handler.hpp" #include "common_test_utils/file_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/summary/api_summary.hpp" @@ -146,7 +145,7 @@ class OVInferRequestTests : public testing::WithParamInterface #include #include "functional_test_utils/ov_plugin_cache.hpp" -#include +#include "base/ov_behavior_test_utils.hpp" namespace ov { namespace test { diff --git 
a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp index 4398b4bea8c1f3..89d0f092274aaf 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp @@ -16,9 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include #include -#include +#include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" // TODO [mandrono]: move current test case inside CPU plug-in and return the original tests diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp index 929ce1472c5746..636b164c96912e 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp @@ -7,7 +7,7 @@ #include #include #include "functional_test_utils/ov_plugin_cache.hpp" -#include +#include "base/ov_behavior_test_utils.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp index 9ff9f9144f8bc2..4965ac87ee8fe6 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference_chaining.hpp @@ -11,7 +11,7 @@ #include #include -#include "base/behavior_test_utils.hpp" +#include "base/ov_behavior_test_utils.hpp" #include "openvino/core/attribute_visitor.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/iteration_chaining.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/iteration_chaining.hpp index 725899cd075c44..966632f62c73bb 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/iteration_chaining.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/iteration_chaining.hpp @@ -11,7 +11,7 @@ #include #include -#include "base/behavior_test_utils.hpp" +#include "base/ov_behavior_test_utils.hpp" #include "openvino/core/attribute_visitor.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp index d9b93c2f8352f5..ca971cfa02e2d9 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp @@ -4,7 +4,7 @@ #pragma once -#include "base/behavior_test_utils.hpp" +#include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/test_common.hpp" namespace ov { diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp index 
22435559e792bf..b79b7e5c498e38 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp @@ -4,7 +4,7 @@ #pragma once -#include "base/behavior_test_utils.hpp" +#include "base/ov_behavior_test_utils.hpp" #include "openvino/runtime/threading/executor_manager.hpp" namespace ov { @@ -31,7 +31,7 @@ class InferRequestPropertiesTest : public testing::WithParamInterface #include "shared_test_classes/base/ov_subgraph.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "common_test_utils/unicode_utils.hpp" #include "openvino/util/common_util.hpp" #include "base/ov_behavior_test_utils.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/version.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/version.hpp index 437261507d435d..19cbdf7154c031 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/version.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/version.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "base/behavior_test_utils.hpp" +#include "base/ov_behavior_test_utils.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index 45df311d407153..590c7ab16b912c 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -39,7 +39,7 @@ void OVInferRequestBatchedTests::TearDown() { if (m_need_reset_core) { ie->set_property({ov::cache_dir()}); ie.reset(); - PluginCache::get().reset(); + ov::test::utils::PluginCache::get().reset(); ov::test::utils::removeFilesWithExt(m_cache_dir, "blob"); ov::test::utils::removeDir(m_cache_dir); } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp index 9ed8d89bad7afc..bac86ba34646f1 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp @@ -4,9 +4,7 @@ #include "behavior/ov_infer_request/memory_states.hpp" -#include "base/behavior_test_utils.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/sigmoid.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp index c955bb16756b12..f0eaa0b10b1131 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index 410e6005d65087..4c87d697a6bdcc 100755 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index 7f4f6582eb440b..fc74c3d260c635 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 8e116e6621e0a8..11c5a55d6e2f39 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index 7dac1a33b0699b..27773b7b8f5d4c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/mat_mul.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp index 049c66a50167f6..d93f82b3a9aa38 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp @@ -11,7 +11,6 @@ #include 
"common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp index b006a95c9c54b8..af7f587f6d6700 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_lpt_models/mat_mul.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp index bb6b5dc4166ec5..e31378c0bca1ef 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/group_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp index ddd3b750814094..e288a75d361144 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 1fda036729df07..68edab74b7c4f1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp index 423fdf75ec195f..2827bc00ec6834 100644 --- 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index 489806ae66b771..9c39f710965a52 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp index aef23abd0094cd..993ddc5adbbfe3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index 12272374f8434b..28aa6b8cf7b179 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp index 4744497e9aecfd..c2653a93b3c2d7 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index 4afacd52455960..676f69d53c9458 100644 --- 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/node_builders/fake_quantize.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp index f29b1c7c72103e..6bdf5f6bc3939c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_lpt_models/fake_quantize_and_convolution.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp index cef478d164a97b..d92e622dd7b34d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_lpt_models/shuffle_channels.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp index 8d99dc1dc4ba54..ffb56c1cb8b79e 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ov_lpt_models/transpose_after_mat_mul.hpp" diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp deleted file mode 100644 index 20ebb92ed0d10e..00000000000000 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -namespace InferenceEngine { - -class Core; - -} // namespace InferenceEngine - -class PluginCache { -public: - std::shared_ptr ie(const std::string& deviceToCheck = std::string()); - - static PluginCache& get(); - - void reset(); - - PluginCache(const PluginCache&) = 
delete; - PluginCache& operator=(const PluginCache&) = delete; - -private: - PluginCache(); - ~PluginCache() = default; - - std::mutex g_mtx; - std::shared_ptr ie_core; -}; diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp deleted file mode 100644 index 38ba8b8ea515f3..00000000000000 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "ie_precision.hpp" -#include "openvino/core/type/element_type.hpp" - -namespace FuncTestUtils { -namespace PrecisionUtils { - -// Copied from inference-engine/src/inference_engine/src/ie_ngraph_utils.hpp -inline ::ov::element::Type convertIE2nGraphPrc(const InferenceEngine::Precision& precision) { - InferenceEngine::Precision::ePrecision pType = precision; - switch (pType) { - case InferenceEngine::Precision::UNSPECIFIED: - return ::ov::element::Type(::ov::element::Type_t::undefined); - case InferenceEngine::Precision::FP64: - return ::ov::element::Type(::ov::element::Type_t::f64); - case InferenceEngine::Precision::FP32: - return ::ov::element::Type(::ov::element::Type_t::f32); - case InferenceEngine::Precision::FP16: - return ::ov::element::Type(::ov::element::Type_t::f16); - case InferenceEngine::Precision::BF16: - return ::ov::element::Type(::ov::element::Type_t::bf16); - case InferenceEngine::Precision::U4: - return ::ov::element::Type(::ov::element::Type_t::u4); - case InferenceEngine::Precision::I4: - return ::ov::element::Type(::ov::element::Type_t::i4); - case InferenceEngine::Precision::U8: - return ::ov::element::Type(::ov::element::Type_t::u8); - case InferenceEngine::Precision::I8: - return ::ov::element::Type(::ov::element::Type_t::i8); - case InferenceEngine::Precision::U16: - return ::ov::element::Type(::ov::element::Type_t::u16); - case InferenceEngine::Precision::I16: - return ::ov::element::Type(::ov::element::Type_t::i16); - case InferenceEngine::Precision::U32: - return ::ov::element::Type(::ov::element::Type_t::u32); - case InferenceEngine::Precision::I32: - return ::ov::element::Type(::ov::element::Type_t::i32); - case InferenceEngine::Precision::I64: - return ::ov::element::Type(::ov::element::Type_t::i64); - case InferenceEngine::Precision::U64: - return ::ov::element::Type(::ov::element::Type_t::u64); - case InferenceEngine::Precision::BOOL: - return ::ov::element::Type(::ov::element::Type_t::boolean); - case InferenceEngine::Precision::BIN: - return ::ov::element::Type(::ov::element::Type_t::u1); - case InferenceEngine::Precision::Q78: - case InferenceEngine::Precision::MIXED: - case InferenceEngine::Precision::CUSTOM: - default: - IE_THROW() << "Incorrect precision!"; - } -} - -} // namespace PrecisionUtils -} // namespace FuncTestUtils \ No newline at end of file diff --git a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp deleted file mode 100644 index e809f9d260ca89..00000000000000 --- a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "functional_test_utils/plugin_cache.hpp" - -#include - -#include -#include - -#include 
"common_test_utils/file_utils.hpp" -#include "common_test_utils/test_constants.hpp" -#include "functional_test_utils/ov_plugin_cache.hpp" -#include "ie_core.hpp" -#include "openvino/util/file_util.hpp" - -namespace { -class TestListener : public testing::EmptyTestEventListener { -public: - void OnTestEnd(const testing::TestInfo& testInfo) override { - if (auto testResult = testInfo.result()) { - if (testResult->Failed()) { - PluginCache::get().reset(); - } - } - } -}; -} // namespace - -PluginCache& PluginCache::get() { - static PluginCache instance; - return instance; -} - -std::shared_ptr PluginCache::ie(const std::string& deviceToCheck) { - std::lock_guard lock(g_mtx); - if (std::getenv("DISABLE_PLUGIN_CACHE") != nullptr) { -#ifndef NDEBUG - std::cout << "'DISABLE_PLUGIN_CACHE' environment variable is set. New Core object will be created!" - << std::endl; -#endif - return std::make_shared(); - } -#ifndef NDEBUG - std::cout << "Access PluginCache ie core. IE Core use count: " << ie_core.use_count() << std::endl; -#endif - - if (!ie_core) { -#ifndef NDEBUG - std::cout << "Created ie core." << std::endl; -#endif - ie_core = std::make_shared(); - } - assert(0 != ie_core.use_count()); - - // register template plugin if it is needed - try { - std::string pluginName = "openvino_template_plugin"; - pluginName += OV_BUILD_POSTFIX; - ie_core->RegisterPlugin( - ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), - "TEMPLATE"); - } catch (...) { - } - - if (!deviceToCheck.empty()) { - std::vector metrics; - if (deviceToCheck.find(':') != std::string::npos) { - std::string realDevice = deviceToCheck.substr(0, deviceToCheck.find(':')); - metrics = {ie_core->GetMetric(realDevice, ov::supported_properties.name()).as()}; - } else { - metrics = {ie_core->GetMetric(deviceToCheck, ov::supported_properties.name()).as()}; - } - if (std::find(metrics.begin(), metrics.end(), ov::supported_properties.name()) != metrics.end()) { - auto availableDevices = - ie_core->GetMetric(deviceToCheck, ov::supported_properties.name()).as>(); - - if (availableDevices.empty()) { - std::cerr << "No available devices for " << deviceToCheck << std::endl; - std::exit(EXIT_FAILURE); - } - -#ifndef NDEBUG - std::cout << "Available devices for " << deviceToCheck << ":" << std::endl; - - for (const auto& device : availableDevices) { - std::cout << " " << device << std::endl; - } -#endif - } - } - return ie_core; -} - -void PluginCache::reset() { - std::lock_guard lock(g_mtx); - -#ifndef NDEBUG - std::cout << "Reset PluginCache. 
IE Core use count: " << ie_core.use_count() << std::endl; -#endif - - ie_core.reset(); -} - -PluginCache::PluginCache() { - auto& listeners = testing::UnitTest::GetInstance()->listeners(); - listeners.Append(new TestListener); -} From 0c4be26747021a1747273e6f0c48ea54e5c6099e Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 01:22:15 +0400 Subject: [PATCH 03/13] Creaning unit test utils --- .../auto_batch/tests/unit/async_infer_request_test.cpp | 2 +- .../unit_test_utils/mocks/mock_engine/mock_plugin.cpp | 2 -- .../unit_test_utils/mocks/mock_engine/mock_plugin.hpp | 4 ---- .../unit_test_utils/mocks/openvino/runtime/mock_icore.hpp | 1 - 4 files changed, 1 insertion(+), 8 deletions(-) diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index e7c337a10d5648..a78d71f79b4c58 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -207,7 +207,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam t; for (int n = 0; n < sz; n++) { - IE_ASSERT(workerRequestPtr->_tasks.try_pop(t)); + OPENVINO_ASSERT(workerRequestPtr->_tasks.try_pop(t)); t.first->m_sync_request->m_batched_request_status = SyncInferRequest::eExecutionFlavor::TIMEOUT_EXECUTED; t.first->m_request_without_batch->start_async(); diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp index 6ca6a9e5caab65..ae619f66f2a2a4 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp @@ -12,9 +12,7 @@ #include #include -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "description_buffer.hpp" -#include "ie_icore.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/common.hpp" #include "openvino/runtime/icore.hpp" diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp index 505eeca3a32ee0..f06422af2a46a2 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp @@ -9,10 +9,6 @@ #include "openvino/runtime/iplugin.hpp" -namespace InferenceEngine { -class IInferencePlugin; -} - class MockPlugin : public ov::IPlugin { std::shared_ptr m_plugin; void set_parameters_if_need() const; diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp index 013fb91ae791b5..1a90ddd73fba8a 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp @@ -6,7 +6,6 @@ #include -#include "ie_icore.hpp" #include "openvino/runtime/icompiled_model.hpp" namespace ov { From 2820230a7e2e10c59a570ae966ca0e906973d7d2 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 01:46:55 +0400 Subject: [PATCH 04/13] Cleaning common test utils --- .../src/read_ir/read_ir.cpp | 2 +- .../num_inputs_fusing_bin_conv.cpp | 2 +- .../src/subgraph/perm_conv_perm_concat.cpp | 2 +- .../include/common_test_utils/data_utils.hpp | 228 +----------------- .../common_test_utils/src/data_utils.cpp | 200 +-------------- 
.../src/graph_comparator.cpp | 5 +- 6 files changed, 11 insertions(+), 428 deletions(-) diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index 10db7aebd9d195..b289ebe5549b6b 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -301,7 +301,7 @@ std::vector ReadIRTest::calculate_refs() { std::ifstream ref_data_ifstream(path_to_ref_tensor, std::ifstream::binary); ref_data_ifstream.open(path_to_ref_tensor, std::ios::binary); if (!ref_data_ifstream.is_open()) - IE_THROW() << "Weights file " << path_to_ref_tensor << " cannot be opened!"; + OPENVINO_THROW("Weights file ", path_to_ref_tensor, " cannot be opened!"); size_t buf_size = 0; for (const auto& output : functionRefs->outputs()) { diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp index ddfd51d9b7f271..cc52f0157cfa79 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp @@ -58,7 +58,7 @@ TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) { const auto & rtInfo = op->get_rt_info(); auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string { auto it = rtInfo.find(paramName); - IE_ASSERT(rtInfo.end() != it); + OPENVINO_ASSERT(rtInfo.end() != it); return it->second.as(); }; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp index 0e2a53737bdaac..7cc5f2a4f5fe73 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp @@ -91,7 +91,7 @@ void PermConvPermConcat::SetUp() { auto reshape_out_pattern = std::make_shared( ov::element::i64, ov::Shape{2}, - InferenceEngine::SizeVector({1, (permute_out_shape[2] + 1) * permute_out_shape[3]})); + std::vector({1, (permute_out_shape[2] + 1) * permute_out_shape[3]})); auto reshape_out = std::make_shared(concat, reshape_out_pattern, false); function = std::make_shared(reshape_out, input_parameter, "perm_conv_perm_concat"); diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index a41bbd4e61b2ff..f8f8e6017d6106 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -11,7 +11,6 @@ #include "common_test_utils/common_utils.hpp" #include "gtest/gtest.h" -#include "ie_blob.h" #include "openvino/core/type/element_type_traits.hpp" #include "openvino/runtime/tensor.hpp" @@ -45,51 +44,22 @@ inline std::vector generate_float_numbers(std::size_t vec_len, float min, } /** - * Fill blob with value data blob. Broadcast semantic is included. + * Fill tensor with value data. Broadcast semantic is included. * Broadcasting with alignment through last dimension. 
* - * @param blob tensor to fill in + * @param tensor tensor to fill in * @param values src tensor which should be broadcast */ -OPENVINO_SUPPRESS_DEPRECATED_START -void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine::Blob::Ptr& values); -OPENVINO_SUPPRESS_DEPRECATED_END void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values); /** * Wrapper on top of fill_data_with_broadcast with simplified signature * - * @param blob the destination blob to fill in + * @param tensor tensor to fill in * @param axis Axis to apply values * @param values data to broadcast */ -OPENVINO_SUPPRESS_DEPRECATED_START -void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, size_t axis, std::vector values); -OPENVINO_SUPPRESS_DEPRECATED_END void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector values); -/** - * Make a view blob with new shape. It will reinterpret original tensor data as a tensor with new shape. - * - * NB! Limitation: the nwe one blob will no have ownership of data buffer. The original blob should be alive - * while view is in use. - * - * @param tensor original source tensor - * @param new_shape new one shape for view blob - * @return new one blob view - */ -OPENVINO_SUPPRESS_DEPRECATED_START -InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr& blob, - InferenceEngine::SizeVector new_shape); -OPENVINO_SUPPRESS_DEPRECATED_END - -/** - * Calculate size of buffer required for provided tensor descriptor. - * @param tdesc provided tensor descriptor - * @return size in bytes - */ -OPENVINO_SUPPRESS_DEPRECATED_START -size_t byte_size(const InferenceEngine::TensorDesc& tdesc); -OPENVINO_SUPPRESS_DEPRECATED_END ov::Tensor make_tensor_with_precision_convert(const ov::Tensor& tensor, ov::element::Type prc); @@ -139,27 +109,6 @@ inline void fill_roi_raw_ptr(T* data, } } -OPENVINO_SUPPRESS_DEPRECATED_START -template -inline void fill_data_roi(InferenceEngine::Blob::Ptr& blob, - const uint32_t range, - const int height, - const int width, - const float omega, - const bool is_roi_max_mode, - const int seed = 1, - void (*propGenerator)(InferenceEngine::Blob::Ptr&) = nullptr) { - if (propGenerator != nullptr) { - propGenerator(blob); - return; - } - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* data = blob->buffer().as(); - fill_roi_raw_ptr(data, blob->size(), range, height, width, omega, is_roi_max_mode, seed); -} - -OPENVINO_SUPPRESS_DEPRECATED_END - void fill_psroi(ov::Tensor& tensor, int batchSize, int height, @@ -308,82 +257,6 @@ void fill_tensor_random(ov::Tensor& tensor, const int32_t k = 1, const int seed = 1); -/** @brief Fill blob with random data. - * - * @param blob Target blob - * @param range Values range - * @param start_from Value from which range should start - * @param k Resolution of floating point numbers. - * - With k = 1 every random number will be basically integer number. - * - With k = 2 numbers resolution will 1/2 so outputs only .0 or .50 - * - With k = 4 numbers resolution will 1/4 so outputs only .0 .25 .50 0.75 and etc. 
- */ -OPENVINO_SUPPRESS_DEPRECATED_START -template -void inline fill_data_random(InferenceEngine::Blob::Ptr& blob, - const uint32_t range = 10, - int32_t start_from = 0, - const int32_t k = 1, - const int seed = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* rawBlobDataPtr = blob->buffer().as(); - if (PRC == InferenceEngine::Precision::U4 || PRC == InferenceEngine::Precision::I4 || - PRC == InferenceEngine::Precision::BIN) { - fill_data_random(rawBlobDataPtr, blob->byteSize(), range, start_from, k, seed); - } else { - fill_data_random(rawBlobDataPtr, blob->size(), range, start_from, k, seed); - } -} -OPENVINO_SUPPRESS_DEPRECATED_END - -/** @brief Fill blob with a sorted sequence of unique elements randomly generated. - * - * This function generates and fills a blob of a certain precision, with a - * sorted sequence of unique elements. - * - * @param blob Target blob - * @param range Values range - * @param start_from Value from which range should start - * @param k Resolution of floating point numbers. - * - With k = 1 every random number will be basically integer number. - * - With k = 2 numbers resolution will 1/2 so outputs only .0 or .50 - * - With k = 4 numbers resolution will 1/4 so outputs only .0 .25 .50 0.75 and etc. - */ -OPENVINO_SUPPRESS_DEPRECATED_START -template -void inline fill_random_unique_sequence(InferenceEngine::Blob::Ptr& blob, - uint64_t range, - int64_t start_from = 0, - const int64_t k = 1, - const int32_t seed = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* rawBlobDataPtr = blob->buffer().as(); - - if (start_from < 0 && !std::is_signed::value) { - start_from = 0; - } - - if (range < blob->size()) { - range = blob->size() * 2; - } - - std::mt19937 generator(seed); - std::uniform_int_distribution dist(k * start_from, k * (start_from + range)); - - std::set elems; - while (elems.size() != blob->size()) { - auto value = static_cast(dist(generator)); - value /= static_cast(k); - if (PRC == InferenceEngine::Precision::FP16) { - elems.insert(static_cast(ov::float16(value).to_bits())); - } else { - elems.insert(static_cast(value)); - } - } - std::copy(elems.begin(), elems.end(), rawBlobDataPtr); -} -OPENVINO_SUPPRESS_DEPRECATED_END - template void inline fill_data_ptr_consistently(T* data, size_t size, @@ -402,45 +275,6 @@ void inline fill_data_ptr_consistently(T* data, } } -OPENVINO_SUPPRESS_DEPRECATED_START -template -void inline fill_data_consistently(InferenceEngine::Blob::Ptr& blob, - const uint32_t range = 10, - int32_t start_from = 0, - const int32_t k = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* rawBlobDataPtr = blob->buffer().as(); - if (start_from < 0 && !std::is_signed::value) { - start_from = 0; - } - fill_data_ptr_consistently(rawBlobDataPtr, blob->size(), range, start_from, k); -} - -template -void inline fill_data_random_float(InferenceEngine::Blob::Ptr& blob, - const uint32_t range, - int32_t start_from, - const int32_t k, - const int seed = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - std::default_random_engine random(seed); - // 1/k is the resolution of the floating point numbers - std::uniform_int_distribution distribution(k * start_from, k * (start_from + range)); - - auto* rawBlobDataPtr = blob->buffer().as(); - for (size_t i = 0; i < blob->size(); i++) { - auto value = static_cast(distribution(random)); - value /= static_cast(k); - if (PRC == InferenceEngine::Precision::FP16) { - rawBlobDataPtr[i] = 
static_cast(ov::float16(value).to_bits()); - } else if (PRC == InferenceEngine::Precision::BF16) { - rawBlobDataPtr[i] = static_cast(ov::bfloat16(value).to_bits()); - } else { - rawBlobDataPtr[i] = static_cast(value); - } - } -} - template void inline fill_data_ptr_normal_random_float(T* data, size_t size, @@ -452,7 +286,7 @@ void inline fill_data_ptr_normal_random_float(T* data, for (size_t i = 0; i < size; i++) { auto value = static_cast(normal_d(random)); if (typeid(T) == - typeid(typename InferenceEngine::PrecisionTrait::value_type)) { + typeid(typename ov::fundamental_type_for)) { data[i] = static_cast(ov::float16(value).to_bits()); } else { data[i] = static_cast(value); @@ -460,60 +294,6 @@ void inline fill_data_ptr_normal_random_float(T* data, } } -template -void inline fill_data_normal_random_float(InferenceEngine::Blob::Ptr& blob, - const float mean, - const float stddev, - const int seed = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* rawBlobDataPtr = blob->buffer().as(); - fill_data_ptr_normal_random_float(rawBlobDataPtr, blob->size(), mean, stddev, seed); -} - -template -void inline fill_data_float_array(InferenceEngine::Blob::Ptr& blob, const T values[], const size_t size) { - using Type = typename InferenceEngine::PrecisionTrait::value_type; - - auto* rawBlobDataPtr = blob->buffer().as(); - for (size_t i = 0; i < std::min(size, blob->size()); i++) { - auto value = values[i]; - if (typeid(Type) == - typeid(typename InferenceEngine::PrecisionTrait::value_type)) { - rawBlobDataPtr[i] = static_cast(ov::float16(value).to_bits()); - - } else { - rawBlobDataPtr[i] = static_cast(value); - } - } -} - -template <> -void inline fill_data_random(InferenceEngine::Blob::Ptr& blob, - const uint32_t range, - int32_t start_from, - const int32_t k, - const int seed) { - fill_data_random_float(blob, range, start_from, k, seed); -} - -template <> -void inline fill_data_random(InferenceEngine::Blob::Ptr& blob, - const uint32_t range, - int32_t start_from, - const int32_t k, - const int seed) { - fill_data_random_float(blob, range, start_from, k, seed); -} - -template <> -void inline fill_data_random(InferenceEngine::Blob::Ptr& blob, - const uint32_t range, - int32_t start_from, - const int32_t k, - const int seed) { - fill_data_random_float(blob, range, start_from, k, seed); -} -OPENVINO_SUPPRESS_DEPRECATED_END void fill_random_string(std::string* dst, const size_t size, diff --git a/src/tests/test_utils/common_test_utils/src/data_utils.cpp b/src/tests/test_utils/common_test_utils/src/data_utils.cpp index c9c25f82235cec..9766fb5ae1da5b 100644 --- a/src/tests/test_utils/common_test_utils/src/data_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/data_utils.cpp @@ -4,42 +4,14 @@ #include "common_test_utils/data_utils.hpp" -#include "blob_factory.hpp" -#include "ie_blob.h" -#include "openvino/core/deprecated.hpp" #include "openvino/core/type/element_type_traits.hpp" #include "openvino/runtime/tensor.hpp" #include "precomp.hpp" -using namespace InferenceEngine::details; - namespace ov { namespace test { namespace utils { - -OPENVINO_SUPPRESS_DEPRECATED_START - -bool isDenseBlob(const InferenceEngine::Blob::Ptr& blob) { - auto blk_desc = blob->getTensorDesc().getBlockingDesc(); - auto dims = blk_desc.getBlockDims(); - auto strs = blk_desc.getStrides(); - - IE_ASSERT(dims.size() == strs.size()) << " isDenseBlob: inconsistent tensor descriptor"; - - auto size = dims.size(); - if (size == 0) - return true; - if (size == 1) - return strs[0] == 1; - - for (auto i = 
size - 1; i > 0; i--) { - if (strs[i - 1] != strs[i - 1] * dims[i]) - return false; - } - - return true; -} - +namespace { template void copy_7D(void* src_raw_ptr, std::vector& src_str, @@ -79,175 +51,7 @@ void copy_7D(void* src_raw_ptr, } } } - -void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine::Blob::Ptr& values) { - using InferenceEngine::SizeVector; - constexpr size_t MAX_N_DIMS = 7; // Suppose it's enough - - IE_ASSERT(blob->getTensorDesc().getPrecision() == values->getTensorDesc().getPrecision()); - - auto values_dims = values->getTensorDesc().getDims(); - auto blob_dims = blob->getTensorDesc().getDims(); - auto n_dims = blob_dims.size(); - IE_ASSERT(values_dims.size() <= n_dims); - IE_ASSERT(n_dims <= MAX_N_DIMS); - - ov::Shape src_dims(MAX_N_DIMS, 1); - std::copy(values_dims.rbegin(), values_dims.rend(), src_dims.rbegin()); - - ov::Shape dst_dims(MAX_N_DIMS, 1); - std::copy(blob_dims.rbegin(), blob_dims.rend(), dst_dims.rbegin()); - - bool compatible = true; - for (int i = 0; i < MAX_N_DIMS; i++) { - if (src_dims[i] != dst_dims[i] && src_dims[i] != 1) - compatible = false; - } - - IE_ASSERT(compatible); - - auto fill_strides_like_plain = [](ov::Shape dims) { - ov::Shape str(dims.size()); - if (str.empty()) - return str; - else - str.back() = 1; - - // stride[i] = stride[i+1]*d[i+1] - std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1, [](size_t d, size_t s) { - return d * s; - }); - - // zeroing broadcast dimension equal 1 - std::transform(str.begin(), str.end(), dims.begin(), str.begin(), [](size_t s, size_t d) { - return d == 1 ? 0 : s; - }); - - return str; - }; - - SizeVector src_strides = fill_strides_like_plain(src_dims); - SizeVector dst_strides = fill_strides_like_plain(dst_dims); - - auto get_data = [](InferenceEngine::Blob::Ptr& blob) { - auto mem_blob = dynamic_cast(blob.get()); - auto mem = mem_blob->rwmap(); - return mem.as(); - }; - - auto dst_ptr = get_data(blob); - auto src_ptr = get_data(values); - - switch (blob->getTensorDesc().getPrecision()) { - case InferenceEngine::Precision::U64: - case InferenceEngine::Precision::I64: - copy_7D(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims); - break; - case InferenceEngine::Precision::FP32: - case InferenceEngine::Precision::I32: - copy_7D(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims); - break; - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::U16: - case InferenceEngine::Precision::FP16: - case InferenceEngine::Precision::BF16: - copy_7D(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims); - break; - case InferenceEngine::Precision::U8: - case InferenceEngine::Precision::I8: - copy_7D(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims); - break; - default: - IE_THROW() << "Unsupported precision by fill_data_with_broadcast function"; - } -} - -template -void copy_with_convert(InferenceEngine::Blob::Ptr& src_blob, InferenceEngine::Blob::Ptr& dst_blob) { - using SRC_TYPE = typename InferenceEngine::PrecisionTrait::value_type; - using DST_TYPE = typename InferenceEngine::PrecisionTrait::value_type; - - auto src_lock_m = src_blob->as()->rwmap(); - auto src_ptr = src_lock_m.as(); - auto src_size = src_blob->size(); - - auto dst_lock_m = dst_blob->as()->rwmap(); - auto dst_ptr = dst_lock_m.as(); - - std::copy(src_ptr, src_ptr + src_size, dst_ptr); -} - -InferenceEngine::Blob::Ptr make_with_precision_convert(InferenceEngine::Blob::Ptr& blob, - InferenceEngine::Precision prc) { - IE_ASSERT(isDenseBlob(blob)); - auto td = 
blob->getTensorDesc(); - td.setPrecision(prc); - - auto new_blob = make_blob_with_precision(td); - new_blob->allocate(); - -#define CASE(_PRC) \ - case InferenceEngine::Precision::_PRC: \ - copy_with_convert(blob, new_blob); \ - break - switch (prc) { - CASE(FP32); - CASE(I64); - CASE(U64); - CASE(I32); - CASE(U32); - CASE(I16); - CASE(U16); - CASE(I8); - CASE(U8); - default: - IE_THROW() << "Unsupported precision case"; - } -#undef CASE - - return new_blob; -} - -void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, size_t axis, std::vector values) { - InferenceEngine::SizeVector value_dims(blob->getTensorDesc().getDims().size() - axis, 1); - value_dims.front() = values.size(); - auto prc = blob->getTensorDesc().getPrecision(); - auto layout = InferenceEngine::TensorDesc::getLayoutByDims(value_dims); - InferenceEngine::TensorDesc value_tdesc(prc, value_dims, layout); - - InferenceEngine::Blob::Ptr values_blob; - if (prc == InferenceEngine::Precision::FP32) { - values_blob = make_blob_with_precision(value_tdesc, values.data()); - } else { - values_blob = make_blob_with_precision(value_tdesc, values.data()); - values_blob = make_with_precision_convert(values_blob, prc); - } - - fill_data_with_broadcast(blob, values_blob); -} - -InferenceEngine::Blob::Ptr make_reshape_view(const InferenceEngine::Blob::Ptr& blob, - InferenceEngine::SizeVector new_shape) { - using InferenceEngine::TensorDesc; - auto new_size = std::accumulate(new_shape.begin(), new_shape.end(), 1, std::multiplies()); - IE_ASSERT(new_size == blob->size()); - - auto orig_mem_blob = dynamic_cast(blob.get()); - auto orig_mem = orig_mem_blob->rwmap(); - auto orig_ptr = orig_mem.as(); - - auto new_tdesc = - TensorDesc(blob->getTensorDesc().getPrecision(), new_shape, TensorDesc::getLayoutByDims(new_shape)); - auto new_blob = make_blob_with_precision(new_tdesc, orig_ptr); - return new_blob; -} - -size_t byte_size(const InferenceEngine::TensorDesc& tdesc) { - auto prc = tdesc.getPrecision(); - auto dims = tdesc.getDims(); - return prc.size() * std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies()); -} -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace namespace { static int randInt(int low, int high) { diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp index 5b4fa456fb7e5c..b5e4c2d116b528 100644 --- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp +++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/ov_tensor_utils.hpp" #include "gtest/gtest.h" -#include "ie_common.h" #include "openvino/op/constant.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/result.hpp" @@ -1024,7 +1023,7 @@ AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_functio return AccuracyCheckResult{true, ""}; } try { - IE_ASSERT(ref_function->get_parameters().size() == cur_function->get_parameters().size()); + OPENVINO_ASSERT(ref_function->get_parameters().size() == cur_function->get_parameters().size()); std::map, ov::Tensor> ref_input_data; std::map, ov::Tensor> cur_input_data; @@ -1038,7 +1037,7 @@ AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_functio auto ref_outputs = ngraph::helpers::interpretFunction(ref_function, ref_input_data); auto outputs = ngraph::helpers::interpretFunction(cur_function, cur_input_data); - IE_ASSERT(ref_outputs.size() == outputs.size()); + OPENVINO_ASSERT(ref_outputs.size() == outputs.size()); for (int i 
= 0; i < ref_outputs.size(); i++) { ov::test::utils::compare(ref_outputs[i], outputs[i], abs_threshold, rel_threshold); From e3c1480b0aa3ca0a1a5a456746e931bab495ab15 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 12:41:03 +0400 Subject: [PATCH 05/13] Fix clang format --- .../include/common_test_utils/data_utils.hpp | 4 +--- .../include/common_test_utils/ov_tensor_utils.hpp | 3 +-- .../test_utils/common_test_utils/src/data_utils.cpp | 2 +- .../common_test_utils/src/ov_tensor_utils.cpp | 10 ++++------ .../test_utils/common_test_utils/tests/utils_tests.cpp | 3 +-- 5 files changed, 8 insertions(+), 14 deletions(-) diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index f8f8e6017d6106..1c4a169e896033 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -285,8 +285,7 @@ void inline fill_data_ptr_normal_random_float(T* data, std::normal_distribution<> normal_d{mean, stddev}; for (size_t i = 0; i < size; i++) { auto value = static_cast(normal_d(random)); - if (typeid(T) == - typeid(typename ov::fundamental_type_for)) { + if (typeid(T) == typeid(typename ov::fundamental_type_for)) { data[i] = static_cast(ov::float16(value).to_bits()); } else { data[i] = static_cast(value); @@ -294,7 +293,6 @@ void inline fill_data_ptr_normal_random_float(T* data, } } - void fill_random_string(std::string* dst, const size_t size, const size_t len_range = 10lu, diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp index 03b23cedf53265..a6bd0561324615 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp @@ -77,8 +77,7 @@ ov::Tensor create_and_fill_tensor_real_distribution(const ov::element::Type elem const float max, const int seed); -ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, - const ov::Shape& shape); +ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, const ov::Shape& shape); void compare(const ov::Tensor& expected, const ov::Tensor& actual, diff --git a/src/tests/test_utils/common_test_utils/src/data_utils.cpp b/src/tests/test_utils/common_test_utils/src/data_utils.cpp index 9766fb5ae1da5b..8d2f91aab3a69d 100644 --- a/src/tests/test_utils/common_test_utils/src/data_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/data_utils.cpp @@ -51,7 +51,7 @@ void copy_7D(void* src_raw_ptr, } } } -} // namespace +} // namespace namespace { static int randInt(int low, int high) { diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp index a21ce7a5d00f7b..c2a26b5705ae18 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp @@ -309,13 +309,11 @@ void fill_data_sin_value(T* data, size_t size) { } } -ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, - const ov::Shape& shape) { +ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, const ov::Shape& shape) { auto tensor = 
ov::Tensor{element_type, shape}; -#define CASE(X) \ - case X: \ - fill_data_sin_value(tensor.data>(), \ - tensor.get_size()); \ +#define CASE(X) \ + case X: \ + fill_data_sin_value(tensor.data>(), tensor.get_size()); \ break; switch (element_type) { CASE(ov::element::Type_t::i8) diff --git a/src/tests/test_utils/common_test_utils/tests/utils_tests.cpp b/src/tests/test_utils/common_test_utils/tests/utils_tests.cpp index 141a1dee3f82ea..9bab74bcb8ea05 100644 --- a/src/tests/test_utils/common_test_utils/tests/utils_tests.cpp +++ b/src/tests/test_utils/common_test_utils/tests/utils_tests.cpp @@ -4,8 +4,8 @@ #include -#include "openvino/util/file_util.hpp" #include "openvino/util/common_util.hpp" +#include "openvino/util/file_util.hpp" using namespace testing; using namespace ov::util; @@ -14,7 +14,6 @@ TEST(UtilsTests, get_directory_returns_root) { ASSERT_EQ(get_directory("/test"), "/"); } - TEST(UtilsTests, filter_lines_by_prefix) { auto lines = "abc\nkkb\nabpp\n"; auto res = filter_lines_by_prefix(lines, "ab"); From a330815a869d88bc575772900b9f612422bb53ce Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 14:00:01 +0400 Subject: [PATCH 06/13] Apply comments --- .../common_test_utils/ov_tensor_utils.hpp | 2 -- .../common_test_utils/src/ov_tensor_utils.cpp | 33 ------------------- 2 files changed, 35 deletions(-) diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp index a6bd0561324615..0bd6140e2133ad 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp @@ -77,8 +77,6 @@ ov::Tensor create_and_fill_tensor_real_distribution(const ov::element::Type elem const float max, const int seed); -ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, const ov::Shape& shape); - void compare(const ov::Tensor& expected, const ov::Tensor& actual, const double abs_threshold = std::numeric_limits::max(), diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp index c2a26b5705ae18..ee85e0c88cebad 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp @@ -302,39 +302,6 @@ ov::Tensor create_and_fill_tensor_consistently(const ov::element::Type element_t return tensor; } -template -void fill_data_sin_value(T* data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = static_cast(sin(i)); - } -} - -ov::Tensor create_and_fill_tensor_sin_values(const ov::element::Type element_type, const ov::Shape& shape) { - auto tensor = ov::Tensor{element_type, shape}; -#define CASE(X) \ - case X: \ - fill_data_sin_value(tensor.data>(), tensor.get_size()); \ - break; - switch (element_type) { - CASE(ov::element::Type_t::i8) - CASE(ov::element::Type_t::i16) - CASE(ov::element::Type_t::i32) - CASE(ov::element::Type_t::i64) - CASE(ov::element::Type_t::u8) - CASE(ov::element::Type_t::u16) - CASE(ov::element::Type_t::u32) - CASE(ov::element::Type_t::u64) - CASE(ov::element::Type_t::bf16) - CASE(ov::element::Type_t::f16) - CASE(ov::element::Type_t::f32) - CASE(ov::element::Type_t::f64) - default: - OPENVINO_THROW("Unsupported element type: ", element_type); - } -#undef CASE - return tensor; -} - constexpr double eps = std::numeric_limits::epsilon(); 
inline double less(double a, double b) { From 42ea11b9e5a6aede9f57c46ee962006e36ba3f90 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Mon, 5 Feb 2024 19:58:31 +0400 Subject: [PATCH 07/13] Disable failing test for template plugin --- src/plugins/template/tests/functional/skip_tests_config.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 0743b5837f2dab..5d9e369cde953a 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -121,6 +121,8 @@ std::vector disabledTestPatterns() { R"(.*eltwiseOpType=Mod_secondaryInputType=PARAMETER_opType=VECTOR_NetType=(f16|f32).*)", // Interpreter backend doesn't implement evaluate method for OP Multiply (by GroupNormalizationDecomposition) R"(.*ReferenceGroupNormalization.*_f64*)", + // CVS-131733 + R"(.*smoke_BehaviorTests/InferRequestPropertiesTest.ReusableCPUStreamsExecutor/*)", }; #ifdef _WIN32 From 6f567ba72f0ddbd3ea1336adc53e36a55d86e195 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 04:22:34 +0800 Subject: [PATCH 08/13] Removed Inference Engine API 1.0 --- src/cmake/openvino.cmake | 6 +- ...ise_transformation_is_broadcasted_test.cpp | 2 - .../tests/precision_details_test.cpp | 2 - .../tests/unit/calclulate_levels_test.cpp | 1 - .../tests/unit/data_precision_check.cpp | 1 - ...ayer_transformation_get_data_precision.cpp | 1 - .../tests/unit/reshape_test.cpp | 1 - .../tests/unit/update_reshape_values.cpp | 1 - .../offline_transformations/pruning_test.cpp | 1 - src/core/CMakeLists.txt | 2 +- src/core/include/openvino/core/any.hpp | 5 - src/core/src/runtime/allocator.cpp | 3 - src/core/src/runtime/blob_allocator.hpp | 84 -- .../src/runtime/{ov_tensor.cpp => tensor.cpp} | 3 +- src/core/tests/type_relaxed_copy.cpp | 1 - .../ir/tests/rt_info_deserialization.cpp | 364 ------- src/frontends/onnx/tests/onnx_import.in.cpp | 2 - .../paddle/tests/read_paddle_model_test.cpp | 2 - src/inference/CMakeLists.txt | 6 +- src/inference/dev_api/blob_factory.hpp | 136 --- .../ie_iexecutable_network_internal.hpp | 220 ----- .../interface/ie_iinfer_request_internal.hpp | 288 ------ .../interface/ie_iplugin_internal.hpp | 353 ------- .../interface/ie_ivariable_state_internal.hpp | 83 -- .../dev_api/cpp_interfaces/plugin_itt.hpp | 21 - src/inference/dev_api/description_buffer.hpp | 124 --- src/inference/dev_api/ie_icore.hpp | 178 ---- src/inference/dev_api/ie_ngraph_utils.hpp | 128 --- .../openvino/runtime/icompiled_model.hpp | 7 - .../dev_api/openvino/runtime/icore.hpp | 2 - .../dev_api/openvino/runtime/iplugin.hpp | 16 - .../dev_api/openvino/runtime/make_tensor.hpp | 6 - src/inference/include/ie/cpp/ie_cnn_network.h | 243 ----- .../include/ie/cpp/ie_executable_network.hpp | 210 ---- .../include/ie/cpp/ie_infer_request.hpp | 273 ------ .../include/ie/cpp/ie_memory_state.hpp | 111 --- .../include/ie/details/ie_pre_allocator.hpp | 87 -- src/inference/include/ie/ie_allocator.hpp | 87 -- src/inference/include/ie/ie_api.h | 138 --- src/inference/include/ie/ie_blob.h | 894 ------------------ src/inference/include/ie/ie_common.h | 545 ----------- src/inference/include/ie/ie_core.hpp | 320 ------- src/inference/include/ie/ie_data.h | 192 ---- src/inference/include/ie/ie_icnn_network.hpp | 278 ------ .../include/ie/ie_iexecutable_network.hpp | 165 ---- .../include/ie/ie_iinfer_request.hpp | 184 ---- src/inference/include/ie/ie_input_info.hpp | 183 
---- src/inference/include/ie/ie_layouts.h | 408 -------- src/inference/include/ie/ie_locked_memory.hpp | 428 --------- src/inference/include/ie/ie_precision.hpp | 546 ----------- src/inference/include/ie/inference_engine.hpp | 24 - .../include/openvino/runtime/core.hpp | 4 - .../openvino/runtime/variable_state.hpp | 6 - src/inference/src/any_copy.cpp | 27 - src/inference/src/any_copy.hpp | 18 - src/inference/src/blob_factory.cpp | 22 - src/inference/src/cache_guard.cpp | 2 - ...ie_cache_manager.hpp => cache_manager.hpp} | 23 +- src/inference/src/check_network_batchable.hpp | 3 +- src/inference/src/cnn_network_ngraph_impl.cpp | 631 ------------ src/inference/src/cnn_network_ngraph_impl.hpp | 119 --- src/inference/src/compilation_context.cpp | 35 +- .../src/{ => cpp}/compiled_model.cpp | 0 src/inference/src/{ => cpp}/core.cpp | 25 +- src/inference/src/cpp/exception2status.hpp | 84 -- src/inference/src/cpp/ie_cnn_network.cpp | 148 --- .../src/cpp/ie_executable_network.cpp | 101 -- .../src/cpp/ie_executable_network_base.hpp | 89 -- .../src/cpp/ie_infer_async_request_base.hpp | 179 ---- src/inference/src/cpp/ie_infer_request.cpp | 195 ---- src/inference/src/{ => cpp}/infer_request.cpp | 20 +- .../src/{ => cpp}/remote_context.cpp | 1 - src/inference/src/{ => cpp}/remote_tensor.cpp | 0 ..._variable_state.cpp => variable_state.cpp} | 47 +- .../ie_iexecutable_network_internal.cpp | 130 --- .../interface/ie_iinfer_request_internal.cpp | 361 ------- .../interface/ie_iplugin_internal.cpp | 388 -------- .../interface/ie_ivariable_state_internal.cpp | 27 - src/inference/src/dev/converter_utils.cpp | 707 -------------- src/inference/src/dev/converter_utils.hpp | 40 - src/inference/src/dev/core_impl.cpp | 108 ++- src/inference/src/dev/core_impl.hpp | 82 +- src/inference/src/dev/core_impl_ie.cpp | 240 ----- src/inference/src/dev/icompiled_model.cpp | 8 +- .../src/dev/icompiled_model_wrapper.cpp | 62 -- .../src/dev/icompiled_model_wrapper.hpp | 37 - src/inference/src/dev/iplugin.cpp | 8 +- src/inference/src/dev/iplugin_wrapper.cpp | 117 --- src/inference/src/dev/iplugin_wrapper.hpp | 166 ---- src/inference/src/dev/make_tensor.cpp | 228 ----- src/inference/src/dev/plugin.cpp | 14 +- src/inference/src/ie_blob_common.cpp | 108 --- src/inference/src/ie_common.cpp | 100 -- src/inference/src/ie_core.cpp | 352 ------- src/inference/src/ie_data.cpp | 185 ---- src/inference/src/ie_layouts.cpp | 539 ----------- src/inference/src/ie_network_reader.cpp | 195 ---- src/inference/src/ie_network_reader.hpp | 45 - src/inference/src/ie_ngraph_utils.cpp | 26 - src/inference/src/model_reader.cpp | 20 +- src/inference/src/openvino_shutdown.cpp | 15 - src/inference/src/os/lin/lin_system_conf.cpp | 1 - src/inference/src/system_allocator.cpp | 18 - src/inference/src/system_allocator.hpp | 38 - .../tests/functional/matmul_sr_tests.cpp | 2 - .../functional/ov_infer_request_test.cpp | 1 - .../tests/functional/ov_remote_tensor.cpp | 1 - .../functional/ov_shared_object_test.cpp | 11 +- .../tests/unit/compilation_context_test.cpp | 1 - src/inference/tests/unit/core.cpp | 2 +- src/inference/tests/unit/query_model_test.cpp | 3 +- .../tests/unit/compile_model_metric_test.cpp | 3 - src/plugins/intel_cpu/src/compiled_model.cpp | 1 - src/plugins/intel_cpu/src/config.h | 2 - src/plugins/intel_cpu/src/graph.cpp | 9 +- src/plugins/intel_cpu/src/infer_request.cpp | 18 +- .../src/memory_desc/blocked_memory_desc.cpp | 2 + .../src/memory_desc/cpu_memory_desc_utils.cpp | 2 - src/plugins/intel_cpu/src/node.cpp | 1 - src/plugins/intel_cpu/src/node.h | 
1 - .../fullyconnected_implementations.cpp | 2 - .../nodes/executors/precision_translation.cpp | 2 - src/plugins/intel_cpu/src/nodes/eye.cpp | 1 - src/plugins/intel_cpu/src/plugin.cpp | 17 +- src/plugins/intel_cpu/src/plugin.h | 2 - src/plugins/intel_cpu/src/serialize.h | 8 +- .../transformation_pipeline.cpp | 8 +- .../transformations/transformation_pipeline.h | 5 +- src/plugins/intel_cpu/src/utils/blob_dump.cpp | 1 - .../intel_cpu/src/utils/general_utils.h | 3 +- .../intel_cpu/src/utils/ngraph_utils.hpp | 3 +- .../intel_cpu/tests/unit/dnnl_memory_test.cpp | 1 + .../tests/unit/nodes/eltwise_node_test.cpp | 1 - .../tests/unit/nodes/reorder_node_test.cpp | 1 - .../transformations/state_concat_sdpa.cpp | 1 - .../x64/convert_to_interaction.cpp | 1 - .../intel_gpu/plugin/compiled_model.hpp | 3 - .../include/intel_gpu/plugin/custom_layer.hpp | 1 - .../dynamic/ctc_greedy_decoder_seq_len.cpp | 1 - src/plugins/proxy/src/plugin.cpp | 15 +- src/plugins/proxy/src/remote_context.cpp | 27 +- src/plugins/proxy/src/remote_context.hpp | 13 +- .../tests/functional/op_reference/if.cpp | 1 - .../include/base/ov_behavior_test_utils.hpp | 8 - .../ov_infer_request/infer_consistency.hpp | 2 +- .../disable_lowering_precision.cpp | 1 - .../depth_to_space_transformation.cpp | 3 +- .../mat_mul_transformation.cpp | 1 - .../mat_mul_with_constant_transformation.cpp | 1 - .../squeeze_transformation.cpp | 1 - .../unsqueeze_transformation.cpp | 1 - .../shared_test_classes/base/benchmark.hpp | 2 +- .../base/utils/compare_results.hpp | 1 - .../layer_transformation.cpp | 2 - .../src/base/ov_subgraph.cpp | 14 +- .../subgraph/get_output_before_activation.cpp | 7 - .../quantized_convolution_backprop_data.cpp | 3 +- .../src/subgraph/split_conv_concat.cpp | 1 - .../mocks/mock_engine/mock_plugin.cpp | 1 - .../mocks/openvino/runtime/mock_icore.hpp | 2 +- 160 files changed, 200 insertions(+), 13539 deletions(-) delete mode 100644 src/core/src/runtime/blob_allocator.hpp rename src/core/src/runtime/{ov_tensor.cpp => tensor.cpp} (99%) delete mode 100644 src/inference/dev_api/blob_factory.hpp delete mode 100644 src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp delete mode 100644 src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp delete mode 100644 src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp delete mode 100644 src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp delete mode 100644 src/inference/dev_api/cpp_interfaces/plugin_itt.hpp delete mode 100644 src/inference/dev_api/description_buffer.hpp delete mode 100644 src/inference/dev_api/ie_icore.hpp delete mode 100644 src/inference/dev_api/ie_ngraph_utils.hpp delete mode 100644 src/inference/include/ie/cpp/ie_cnn_network.h delete mode 100644 src/inference/include/ie/cpp/ie_executable_network.hpp delete mode 100644 src/inference/include/ie/cpp/ie_infer_request.hpp delete mode 100644 src/inference/include/ie/cpp/ie_memory_state.hpp delete mode 100644 src/inference/include/ie/details/ie_pre_allocator.hpp delete mode 100644 src/inference/include/ie/ie_allocator.hpp delete mode 100644 src/inference/include/ie/ie_api.h delete mode 100644 src/inference/include/ie/ie_blob.h delete mode 100644 src/inference/include/ie/ie_common.h delete mode 100644 src/inference/include/ie/ie_core.hpp delete mode 100644 src/inference/include/ie/ie_data.h delete mode 100644 src/inference/include/ie/ie_icnn_network.hpp delete mode 100644 src/inference/include/ie/ie_iexecutable_network.hpp delete mode 100644 
src/inference/include/ie/ie_iinfer_request.hpp delete mode 100644 src/inference/include/ie/ie_input_info.hpp delete mode 100644 src/inference/include/ie/ie_layouts.h delete mode 100644 src/inference/include/ie/ie_locked_memory.hpp delete mode 100644 src/inference/include/ie/ie_precision.hpp delete mode 100644 src/inference/include/ie/inference_engine.hpp delete mode 100644 src/inference/src/any_copy.cpp delete mode 100644 src/inference/src/any_copy.hpp delete mode 100644 src/inference/src/blob_factory.cpp rename src/inference/src/{ie_cache_manager.hpp => cache_manager.hpp} (80%) delete mode 100644 src/inference/src/cnn_network_ngraph_impl.cpp delete mode 100644 src/inference/src/cnn_network_ngraph_impl.hpp rename src/inference/src/{ => cpp}/compiled_model.cpp (100%) rename src/inference/src/{ => cpp}/core.cpp (95%) delete mode 100644 src/inference/src/cpp/exception2status.hpp delete mode 100644 src/inference/src/cpp/ie_cnn_network.cpp delete mode 100644 src/inference/src/cpp/ie_executable_network.cpp delete mode 100644 src/inference/src/cpp/ie_executable_network_base.hpp delete mode 100644 src/inference/src/cpp/ie_infer_async_request_base.hpp delete mode 100644 src/inference/src/cpp/ie_infer_request.cpp rename src/inference/src/{ => cpp}/infer_request.cpp (92%) rename src/inference/src/{ => cpp}/remote_context.cpp (99%) rename src/inference/src/{ => cpp}/remote_tensor.cpp (100%) rename src/inference/src/cpp/{ie_variable_state.cpp => variable_state.cpp} (54%) delete mode 100644 src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp delete mode 100644 src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp delete mode 100644 src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp delete mode 100644 src/inference/src/cpp_interfaces/interface/ie_ivariable_state_internal.cpp delete mode 100644 src/inference/src/dev/converter_utils.cpp delete mode 100644 src/inference/src/dev/converter_utils.hpp delete mode 100644 src/inference/src/dev/core_impl_ie.cpp delete mode 100644 src/inference/src/dev/icompiled_model_wrapper.cpp delete mode 100644 src/inference/src/dev/icompiled_model_wrapper.hpp delete mode 100644 src/inference/src/dev/iplugin_wrapper.cpp delete mode 100644 src/inference/src/dev/iplugin_wrapper.hpp delete mode 100644 src/inference/src/ie_blob_common.cpp delete mode 100644 src/inference/src/ie_common.cpp delete mode 100644 src/inference/src/ie_core.cpp delete mode 100644 src/inference/src/ie_data.cpp delete mode 100644 src/inference/src/ie_layouts.cpp delete mode 100644 src/inference/src/ie_network_reader.cpp delete mode 100644 src/inference/src/ie_network_reader.hpp delete mode 100644 src/inference/src/ie_ngraph_utils.cpp delete mode 100644 src/inference/src/openvino_shutdown.cpp delete mode 100644 src/inference/src/system_allocator.cpp delete mode 100644 src/inference/src/system_allocator.hpp diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index 770615d9365010..ed772ad0810734 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -37,8 +37,7 @@ ov_add_vs_version_file(NAME ${TARGET_NAME} FILEDESCRIPTION "OpenVINO runtime lib target_include_directories(${TARGET_NAME} PUBLIC $ $ - $ - $) + $) target_link_libraries(${TARGET_NAME} PRIVATE openvino::reference openvino::shape_inference @@ -92,8 +91,7 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE} ${OV_CPACK_COMP_CORE_EXCLUDE_ALL} LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} 
COMPONENT ${OV_CPACK_COMP_CORE} ${OV_CPACK_COMP_CORE_EXCLUDE_ALL} NAMELINK_COMPONENT ${OV_CPACK_COMP_CORE_DEV} - INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR} - ${OV_CPACK_INCLUDEDIR}/ie) + INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR}) # OpenVINO runtime library dev diff --git a/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp b/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp index b953ea7b14c9e5..0a3c75649376b0 100644 --- a/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp +++ b/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp @@ -5,8 +5,6 @@ #include #include "low_precision/eltwise_base_transformation.hpp" -#include - using namespace ::testing; using namespace std; diff --git a/src/common/low_precision_transformations/tests/precision_details_test.cpp b/src/common/low_precision_transformations/tests/precision_details_test.cpp index f0c158f4d00c64..e8a81f200cd1fa 100644 --- a/src/common/low_precision_transformations/tests/precision_details_test.cpp +++ b/src/common/low_precision_transformations/tests/precision_details_test.cpp @@ -7,8 +7,6 @@ #include "low_precision/layer_transformation.hpp" #include "low_precision/fake_quantize.hpp" -#include - using namespace ::testing; using namespace std; using namespace ov::pass::low_precision; diff --git a/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp b/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp index 6344f22f359796..35917c62491015 100644 --- a/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp +++ b/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp @@ -3,7 +3,6 @@ // #include -#include #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp index f9f447a6222e24..4d6b83c9908046 100644 --- a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp +++ b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp @@ -4,7 +4,6 @@ #include #include -#include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp index a9d353c7440610..2002a5fa5327f3 100644 --- a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp +++ b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp @@ -4,7 +4,6 @@ #include #include -#include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/reshape_test.cpp b/src/common/low_precision_transformations/tests/unit/reshape_test.cpp index 74e0e8224d75b8..045e9714166226 100644 --- a/src/common/low_precision_transformations/tests/unit/reshape_test.cpp +++ b/src/common/low_precision_transformations/tests/unit/reshape_test.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "low_precision/reshape.hpp" diff --git 
a/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp b/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp index 1b29f8f18a8c6f..56c605fda53ed4 100644 --- a/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp +++ b/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "low_precision/network_helper.hpp" diff --git a/src/common/transformations/tests/offline_transformations/pruning_test.cpp b/src/common/transformations/tests/offline_transformations/pruning_test.cpp index 9dab20582be727..cad91bb59bd41d 100644 --- a/src/common/transformations/tests/offline_transformations/pruning_test.cpp +++ b/src/common/transformations/tests/offline_transformations/pruning_test.cpp @@ -11,7 +11,6 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "inference_engine.hpp" #include "mask_attribute.hpp" #include "openvino/core/model.hpp" #include "openvino/op/util/attr_types.hpp" diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 680bf15a5b037d..8a21637307e621 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -23,7 +23,7 @@ if(ON) set(MIXED_SRC "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/allocator.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/itensor.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/ov_tensor.cpp") + "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/tensor.cpp") set_property(SOURCE ${MIXED_SRC} APPEND PROPERTY INCLUDE_DIRECTORIES diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp index 622013b2b1f8b0..d5f45a9f4791e2 100644 --- a/src/core/include/openvino/core/any.hpp +++ b/src/core/include/openvino/core/any.hpp @@ -20,10 +20,6 @@ #include "openvino/core/except.hpp" #include "openvino/core/runtime_attribute.hpp" -namespace InferenceEngine { -class ExecutableNetwork; -} // namespace InferenceEngine - namespace ov { class Plugin; /** @cond INTERNAL */ @@ -662,7 +658,6 @@ class OPENVINO_API Any { }; friend class ::ov::RuntimeAttribute; - friend class ::InferenceEngine::ExecutableNetwork; friend class ::ov::CompiledModel; friend class ::ov::proxy::CompiledModel; friend class ::ov::RemoteContext; diff --git a/src/core/src/runtime/allocator.cpp b/src/core/src/runtime/allocator.cpp index 0abb440396cec5..01c43e4acefc06 100644 --- a/src/core/src/runtime/allocator.cpp +++ b/src/core/src/runtime/allocator.cpp @@ -4,9 +4,6 @@ #include "openvino/runtime/allocator.hpp" -#include "blob_allocator.hpp" -#include "ie_allocator.hpp" -#include "ie_common.h" #include "openvino/core/except.hpp" namespace ov { diff --git a/src/core/src/runtime/blob_allocator.hpp b/src/core/src/runtime/blob_allocator.hpp deleted file mode 100644 index 0513fdfa02d085..00000000000000 --- a/src/core/src/runtime/blob_allocator.hpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "ie_allocator.hpp" // IE public header -#include "openvino/core/except.hpp" -#include "openvino/runtime/allocator.hpp" -#include "openvino/runtime/common.hpp" -#include "system_allocator.hpp" // IE private header - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -struct BlobAllocator : public IAllocator { - BlobAllocator(const ov::Allocator& impl) : _impl{impl} {} - - void* lock(void* handle, LockOp) noexcept override { - return handle; - } - - void unlock(void*) noexcept override {} - - void* 
alloc(const size_t size) noexcept override { - try { - return size_map.emplace(_impl.allocate(size), size).first->first; - } catch (...) { - return nullptr; - } - } - - bool free(void* handle) noexcept override { - try { - auto size = size_map.at(handle); - size_map.erase(handle); - _impl.deallocate(handle, size); - return true; - } catch (...) { - return false; - } - } - - ov::Allocator _impl; - std::unordered_map size_map; -}; -} // namespace InferenceEngine - -namespace ov { -struct BlobAllocator { - BlobAllocator() : _impl{std::make_shared()} {} - - void* allocate(const size_t bytes, const size_t alignment) { - OPENVINO_ASSERT(alignment == alignof(max_align_t), - "Aligned deallocation is not implemented. alignment: ", - alignment); - auto handle = _impl->alloc(bytes); - OPENVINO_ASSERT(handle != nullptr, "Can not allocate storage for at least ", bytes, " bytes"); - return handle; - } - - void deallocate(void* handle, const size_t bytes, const size_t alignment) { - OPENVINO_ASSERT(bytes == 0, "Sized deallocation is not implemented. bytes: ", bytes); - OPENVINO_ASSERT(alignment == alignof(max_align_t), - "Aligned deallocation is not implemented. alignment: ", - alignment); - auto res = _impl->free(handle); - OPENVINO_ASSERT(res != false, "Can not deallocate storage"); - } - - bool is_equal(const BlobAllocator& other) const { - if (other._impl == _impl) - return true; - auto other_system_memory_allocator = - dynamic_cast(other._impl.get()); - auto system_allocator = dynamic_cast(_impl.get()); - if (system_allocator != nullptr && other_system_memory_allocator != nullptr) - return true; - return false; - } - - std::shared_ptr _impl; -}; -} // namespace ov -IE_SUPPRESS_DEPRECATED_END diff --git a/src/core/src/runtime/ov_tensor.cpp b/src/core/src/runtime/tensor.cpp similarity index 99% rename from src/core/src/runtime/ov_tensor.cpp rename to src/core/src/runtime/tensor.cpp index f9182f5ea6b770..f3e89472dc8832 100644 --- a/src/core/src/runtime/ov_tensor.cpp +++ b/src/core/src/runtime/tensor.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/runtime/tensor.hpp" + #include #include "openvino/core/except.hpp" @@ -12,7 +14,6 @@ #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/remote_tensor.hpp" -#include "openvino/runtime/tensor.hpp" namespace ov { diff --git a/src/core/tests/type_relaxed_copy.cpp b/src/core/tests/type_relaxed_copy.cpp index bbda062e23271c..a6796ff5a466c2 100644 --- a/src/core/tests/type_relaxed_copy.cpp +++ b/src/core/tests/type_relaxed_copy.cpp @@ -7,7 +7,6 @@ #include #include -#include "ie_common.h" #include "openvino/op/matmul.hpp" #include "ov_ops/type_relaxed.hpp" diff --git a/src/frontends/ir/tests/rt_info_deserialization.cpp b/src/frontends/ir/tests/rt_info_deserialization.cpp index ae43bb01e54921..4aa769f932faa0 100644 --- a/src/frontends/ir/tests/rt_info_deserialization.cpp +++ b/src/frontends/ir/tests/rt_info_deserialization.cpp @@ -9,7 +9,6 @@ #include #include "common_test_utils/graph_comparator.hpp" -#include "ie/ie_core.hpp" #include "openvino/core/preprocess/input_tensor_info.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/op/add.hpp" @@ -137,19 +136,6 @@ TEST_F(RTInfoDeserialization, node_v10) { auto round = result->get_input_node_ptr(0); check_rt_info(round->get_rt_info()); - // read IR v10 with old API - { - InferenceEngine::Core core; - auto f_10 = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - ASSERT_NE(nullptr, f_10.getFunction()); - - 
auto res = compare_functions(f, f_10.getFunction()); - EXPECT_TRUE(res.first) << res.second; - - EXPECT_EQ(InferenceEngine::Precision::FP32, f_10.getInputsInfo()["in1"]->getPrecision()); - EXPECT_EQ(InferenceEngine::Precision::FP32, f_10.getOutputsInfo()["Round"]->getPrecision()); - } - // read IR v10 with new API and check that CNNNetwork precision conversions are applied { ov::Shape shape{1, 3, 22, 22}; @@ -263,16 +249,6 @@ TEST_F(RTInfoDeserialization, names_collision_v10) { }; check_version(f, 10); - // read IR v10 with old API - { - InferenceEngine::Core core; - auto f_10 = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - ASSERT_NE(nullptr, f_10.getFunction()); - - auto res = compare_functions(f, f_10.getFunction()); - EXPECT_TRUE(res.first) << res.second; - } - // read IR v10 with new API { ov::Core core; @@ -382,19 +358,6 @@ TEST_F(RTInfoDeserialization, input_and_output_v10) { check_rt_info(add->input(1).get_rt_info()); check_rt_info(add->output(0).get_rt_info()); - // read IR v10 with old API - { - InferenceEngine::Core core; - auto f_10 = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - ASSERT_NE(nullptr, f_10.getFunction()); - - auto res = compare_functions(f, f_10.getFunction()); - EXPECT_TRUE(res.first) << res.second; - - EXPECT_EQ(InferenceEngine::Precision::I64, f_10.getInputsInfo()["in1"]->getPrecision()); - EXPECT_EQ(InferenceEngine::Precision::I32, f_10.getOutputsInfo()["sum"]->getPrecision()); - } - // read IR v10 with new API and check that CNNNetwork precision conversions are applied { const ov::Shape shape{1, 3, 22, 22}; @@ -555,217 +518,6 @@ TEST_F(RTInfoDeserialization, node_v11) { check_version(f_11, 11); } - - // read IR v11 with old API and check that old_api_map is applied - { - const ov::PartialShape shape{1, 3, 22, 22}; - auto type = ov::element::f16; - auto param = std::make_shared(type, shape); - param->set_friendly_name("in1"); - param->get_output_tensor(0).set_names({"input_tensor"}); - - // TODO: No guarantee that Transpose will use exactly 'uint64_t' constant - auto constant_param = - std::make_shared(ov::element::u64, ov::Shape{4}, std::vector{0, 2, 3, 1}); - auto transpose_param = std::make_shared(param, constant_param); - - // TODO: No guarantee that only 'convert' will be added by implicit pre-processing - auto convert_param = std::make_shared(transpose_param, ov::element::f32); - - auto round = std::make_shared(convert_param, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - // TODO: runtime information should migrate as well? 
- round->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("Round1,Round2"); - - // TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit post-processing - auto constant_result = - std::make_shared(ov::element::u64, ov::Shape{4}, std::vector{0, 3, 1, 2}); - auto transpose_result = std::make_shared(round, constant_result); - - transpose_result->set_friendly_name("Round"); - transpose_result->get_output_tensor(0).set_names({"output_tensor"}); - - auto result = std::make_shared(transpose_result); - result->set_friendly_name("output"); - - auto f_10_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); - f_10_ref->set_friendly_name("Network"); - - InferenceEngine::Core core; - auto cnn_core = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - auto f_10_core = cnn_core.getFunction(); - ASSERT_NE(nullptr, f_10_core); - - check_version(f_10_core, 10); - - ASSERT_GT(cnn_core.getInputsInfo().count("in1"), 0); - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getInputsInfo()["in1"]->getPrecision()); - ASSERT_GT(cnn_core.getOutputsInfo().count("Round"), 0); - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getOutputsInfo()["Round"]->getPrecision()); - - const auto fc = FunctionsComparator::with_default() - .enable(FunctionsComparator::ATTRIBUTES) - .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::RUNTIME_KEYS) - .enable(FunctionsComparator::NAMES) - .enable(FunctionsComparator::CONST_VALUES); - auto res = fc.compare(f_10_core, f_10_ref); - EXPECT_TRUE(res.valid) << res.message; - - EXPECT_EQ(shape, f_10_ref->input().get_partial_shape()); - EXPECT_EQ(shape, f_10_core->input().get_partial_shape()); - EXPECT_EQ(shape, f_10_ref->get_output_partial_shape(0)); - EXPECT_EQ(shape, f_10_core->get_output_partial_shape(0)); - - // check that old api map is removed once applied - auto check_old_api_rt_info = [](const ov::RTMap& info) { - const std::string& key_order = ov::OldApiMapOrder::get_type_info_static(); - EXPECT_EQ(0, info.count(key_order)); - const std::string& key_type = ov::OldApiMapElementType::get_type_info_static(); - EXPECT_EQ(0, info.count(key_type)); - }; - - check_old_api_rt_info(f_10_core->get_parameters()[0]->get_rt_info()); - check_old_api_rt_info(f_10_core->get_result()->get_rt_info()); - - // check information about layout - EXPECT_EQ(f_10_core->get_parameters()[0]->get_layout(), ov::Layout("NCHW")) - << f_10_core->get_parameters()[0]->get_layout().to_string(); - EXPECT_EQ(f_10_core->get_results()[0]->get_layout(), ov::Layout("NCHW")) - << f_10_core->get_results()[0]->get_layout().to_string(); - } -} - -TEST_F(RTInfoDeserialization, node_v11_uint8) { - std::string model = R"V0G0N( - - - - - - - - - - - - 1 - 22 - 22 - 3 - - - - - - - - - - - 1 - 22 - 22 - 3 - - - - - 1 - 22 - 22 - 3 - - - - - - - - - - 1 - 22 - 22 - 3 - - - - - - - - - -)V0G0N"; - auto f = getWithIRFrontend(model); - ASSERT_NE(nullptr, f); - - // read IR v11 with old API and check that old_api_map is applied - const ov::PartialShape shape{1, 3, 22, 22}; - auto type = ov::element::f16; - auto param = std::make_shared(type, shape); - param->set_friendly_name("in1"); - param->get_output_tensor(0).set_names({"input_tensor"}); - - auto constant_param = - std::make_shared(ov::element::u64, ov::Shape{4}, std::vector{0, 2, 3, 1}); - auto transpose_param = std::make_shared(param, constant_param); - - auto round = std::make_shared(transpose_param, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - 
round->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("Round1,Round2"); - auto constant_result = - std::make_shared(ov::element::u64, ov::Shape{4}, std::vector{0, 3, 1, 2}); - auto transpose_result = std::make_shared(round, constant_result); - - transpose_result->set_friendly_name("Round"); - transpose_result->get_output_tensor(0).set_names({"output_tensor"}); - - auto result = std::make_shared(transpose_result); - result->set_friendly_name("output"); - - auto f_10_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); - f_10_ref->set_friendly_name("Network"); - - InferenceEngine::Core core; - auto cnn_core = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - auto f_10_core = cnn_core.getFunction(); - ASSERT_NE(nullptr, f_10_core); - - ASSERT_GT(cnn_core.getInputsInfo().count("in1"), 0); - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getInputsInfo()["in1"]->getPrecision()); - ASSERT_GT(cnn_core.getOutputsInfo().count("Round"), 0); - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getOutputsInfo()["Round"]->getPrecision()); - - const auto fc = FunctionsComparator::with_default() - .enable(FunctionsComparator::ATTRIBUTES) - .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::RUNTIME_KEYS) - .enable(FunctionsComparator::NAMES) - .enable(FunctionsComparator::CONST_VALUES); - auto res = fc.compare(f_10_core, f_10_ref); - EXPECT_TRUE(res.valid) << res.message; - - EXPECT_EQ(shape, f_10_ref->input().get_partial_shape()); - EXPECT_EQ(shape, f_10_core->input().get_partial_shape()); - EXPECT_EQ(shape, f_10_ref->get_output_partial_shape(0)); - EXPECT_EQ(shape, f_10_core->get_output_partial_shape(0)); - - // check that old api map is removed once applied - auto check_old_api_rt_info = [](const ov::RTMap& info) { - const std::string& key_order = ov::OldApiMapOrder::get_type_info_static(); - EXPECT_EQ(0, info.count(key_order)); - const std::string& key_type = ov::OldApiMapElementType::get_type_info_static(); - EXPECT_EQ(0, info.count(key_type)); - }; - - check_old_api_rt_info(f_10_core->get_parameters()[0]->get_rt_info()); - check_old_api_rt_info(f_10_core->get_result()->get_rt_info()); - - // check information about layout - EXPECT_TRUE(f_10_core->get_parameters()[0]->get_layout().empty()) - << f_10_core->get_parameters()[0]->get_layout().to_string(); - EXPECT_TRUE(f_10_core->get_results()[0]->get_layout().empty()) - << f_10_core->get_results()[0]->get_layout().to_string(); } TEST_F(RTInfoDeserialization, node_v11_multiple_rt_keys) { @@ -948,33 +700,6 @@ TEST_F(RTInfoDeserialization, input_and_output_v11) { check_fused_names(add->input(0).get_rt_info(), "test2,test3"); check_fused_names(add->input(1).get_rt_info(), "test3,test4"); check_fused_names(add->output(0).get_rt_info(), "test4,test5"); - - // read IR v11 with old API - the function is the same since no old_api_map is applied - { - InferenceEngine::Core core; - auto cnn = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - auto f_10 = cnn.getFunction(); - ASSERT_NE(nullptr, f_10); - - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn.getInputsInfo()["in1"]->getPrecision()); - EXPECT_EQ(InferenceEngine::Precision::FP32, cnn.getOutputsInfo()["sum"]->getPrecision()); - - // check that old api map is removed once applied - auto check_old_api_rt_info = [](const ov::RTMap& info) { - const std::string& key_type = ov::OldApiMapElementType::get_type_info_static(); - EXPECT_FALSE(info.count(key_type)); - const std::string& key_order = 
ov::OldApiMapElementType::get_type_info_static(); - EXPECT_FALSE(info.count(key_order)); - }; - - check_old_api_rt_info(f_10->get_parameters()[0]->get_rt_info()); - check_old_api_rt_info(f_10->get_result()->get_rt_info()); - - auto res = compare_functions(f, f_10); - EXPECT_TRUE(res.first) << res.second; - - check_version(f_10, 10); - } } TEST_F(RTInfoDeserialization, indexes_input_and_output_v11) { @@ -1094,92 +819,3 @@ TEST_F(RTInfoDeserialization, indexes_input_and_output_v11) { ASSERT_EQ(f->get_results()[0]->get_friendly_name(), "output2"); ASSERT_EQ(f->get_results()[1]->get_friendly_name(), "output1"); } - -TEST_F(RTInfoDeserialization, v11_to_v10_without_rt_info) { - std::string model = R"V0G0N( - - - - - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - - 1 - 3 - 22 - 22 - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - - -)V0G0N"; - - auto check_version = [](const std::shared_ptr& f, int ref_version) { - auto& rt_info = f->get_rt_info(); - ASSERT_TRUE(rt_info.count("version")); - ASSERT_TRUE(rt_info.at("version").is()); - ASSERT_EQ(rt_info.at("version").as(), ref_version); - }; - InferenceEngine::Core core; - auto cnn = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()); - auto f_10 = cnn.getFunction(); - ASSERT_NE(nullptr, f_10); - - check_version(f_10, 10); -} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 227f45aaea9774..3df40f8ecc38d5 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include #include #include diff --git a/src/frontends/paddle/tests/read_paddle_model_test.cpp b/src/frontends/paddle/tests/read_paddle_model_test.cpp index 521e14b79e7408..d321e1e0e820c3 100644 --- a/src/frontends/paddle/tests/read_paddle_model_test.cpp +++ b/src/frontends/paddle/tests/read_paddle_model_test.cpp @@ -3,10 +3,8 @@ // #include -#include #include -#include #include #include #include diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt index 25e4c590b68799..0bdce768dc0753 100644 --- a/src/inference/CMakeLists.txt +++ b/src/inference/CMakeLists.txt @@ -88,7 +88,6 @@ target_include_directories(${TARGET_NAME}_obj SYSTEM PRIVATE target_include_directories(${TARGET_NAME}_obj PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" $ - "${CMAKE_CURRENT_SOURCE_DIR}/include/ie" # TODO: remove in 2024.0 $<$:$> # for ov_plugins.hpp $,$>,${CMAKE_CURRENT_BINARY_DIR}/$,${CMAKE_CURRENT_BINARY_DIR}>) @@ -96,7 +95,7 @@ target_include_directories(${TARGET_NAME}_obj PRIVATE target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::itt openvino::util openvino::core::dev) ov_mark_target_as_cc(${TARGET_NAME}_obj) -# IE is public API => need to mark this library as important for ABI free +# OpenVINO Runtime is public API => need to mark this library as important for ABI free ov_abi_free_target(${TARGET_NAME}_obj) ov_set_threading_interface_for(${TARGET_NAME}_obj) @@ -108,8 +107,7 @@ endif() add_library(${TARGET_NAME} INTERFACE) target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) -target_include_directories(${TARGET_NAME} INTERFACE $ - $) +target_include_directories(${TARGET_NAME} INTERFACE $) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) diff --git a/src/inference/dev_api/blob_factory.hpp b/src/inference/dev_api/blob_factory.hpp deleted file mode 100644 index 
1c5a6801d654f6..00000000000000 --- a/src/inference/dev_api/blob_factory.hpp +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A file with helper functions to uniformly create Blob objects - * @file blob_factory.hpp - */ - -#pragma once - -#include -#include -#include - -#include "ie_blob.h" -#include "ie_data.h" -#include "openvino/runtime/itensor.hpp" -#include "openvino/runtime/so_ptr.hpp" - -IE_SUPPRESS_DEPRECATED_START -/** - * @private - */ -template -class BlobFactory { -public: - using BlobType = typename InferenceEngine::PrecisionTrait::value_type; - - static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc) { - return InferenceEngine::make_shared_blob(desc); - } - static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr) { - return InferenceEngine::make_shared_blob(desc, reinterpret_cast(ptr)); - } - static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr, const std::size_t size) { - return InferenceEngine::make_shared_blob(desc, reinterpret_cast(ptr), size); - } - static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, - const std::shared_ptr& alloc) { - return InferenceEngine::make_shared_blob(desc, alloc); - } -}; - -/** - * @private - */ -template -InferenceEngine::Blob::Ptr make_shared_blob2(Args&&... args) { - return BlobFactory::make(std::forward(args)...); -} - -/** - * @brief Creates Blob::Ptr with precision. - * @ingroup ie_dev_api_memory - * - * @param[in] desc The TensorDesc object - * @return A Blob::Ptr pointer - */ -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) -make_blob_with_precision(const InferenceEngine::TensorDesc& desc); - -/** - * @brief Makes a blob with precision. - * @ingroup ie_dev_api_memory - * - * @param[in] desc The TensorDesc object - * @param ptr The pointer to a raw memory - * @return A Blob::Ptr pointer - */ -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) -make_blob_with_precision(const InferenceEngine::TensorDesc& desc, void* ptr); - -/** - * @brief Makes a blob with precision. - * @ingroup ie_dev_api_memory - * - * @param[in] desc The description - * @param[in] alloc The IAllocator object - * @return A Blob::Ptr pointer - */ -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) -make_blob_with_precision(const InferenceEngine::TensorDesc& desc, - const std::shared_ptr& alloc); - -/** - * @brief Creates Blob::Ptr with precision - * @ingroup ie_dev_api_memory - * - * @param[in] precision The precision - * @param args The arguments - * @tparam Args Variadic template arguments - * @return A Blob::Ptr pointer - */ -template -InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision precision, Args&&... 
args) { -#define USE_FACTORY(precision) \ - case InferenceEngine::Precision::precision: \ - return make_shared_blob2(std::forward(args)...); - - switch (precision) { - USE_FACTORY(FP32); - USE_FACTORY(FP64); - USE_FACTORY(FP16); - USE_FACTORY(Q78); - USE_FACTORY(I4); - USE_FACTORY(I8); - USE_FACTORY(I16); - USE_FACTORY(I32); - USE_FACTORY(I64); - USE_FACTORY(U4); - USE_FACTORY(U8); - USE_FACTORY(U16); - USE_FACTORY(U32); - USE_FACTORY(U64); - USE_FACTORY(BIN); - USE_FACTORY(BF16); - USE_FACTORY(BOOL); - USE_FACTORY(STRING); - default: - IE_THROW() << "cannot locate blob for precision: " << precision; - } -#undef USE_FACTORY -} - -namespace ov { - -ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); - -OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, - bool unwrap = true, - InferenceEngine::TensorDesc desc = {}); - -} // namespace ov - -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp deleted file mode 100644 index 4341677a3bd4ea..00000000000000 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" -#include "openvino/runtime/so_ptr.hpp" - -namespace ov { -class Model; -namespace op { -namespace v0 { -class Parameter; -class Result; -} // namespace v0 -} // namespace op -} // namespace ov - -namespace InferenceEngine { - -class IInferencePlugin; -class IPluginWrapper; -class IInferRequestInternal; -class IVariableStateInternal; -class ICompiledModelWrapper; - -/** - * @interface IExecutableNetworkInternal - * @brief An internal API of executable network to be implemented by plugin, - * @ingroup ie_dev_api_exec_network_api - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IExecutableNetworkInternal) - : public std::enable_shared_from_this { -public: - /** - * @brief A shared pointer to IExecutableNetworkInternal interface - */ - using Ptr = std::shared_ptr; - - /** - * @brief Sets the network inputs info. - * @param[in] networkInputs The network inputs info - */ - virtual void setNetworkInputs(const InputsDataMap& networkInputs); - - /** - * @brief Sets the network outputs data. - * @param[in] networkOutputs The network outputs - */ - virtual void setNetworkOutputs(const OutputsDataMap& networkOutputs); - - /** - * @brief Sets the network parameters - * @param[in] params The network parameters - */ - virtual void setInputs(const std::vector>& params); - /** - * @brief Returns the network parameters - */ - virtual const std::vector>& getInputs() const; - /** - * @brief Sets the network results - * @param[in] results The network results - */ - virtual void setOutputs(const std::vector>& results); - /** - * @brief Returns the network results - */ - virtual const std::vector>& getOutputs() const; - - /** - * @brief Gets the Executable network output Data node information. The received info is stored in the given Data - * node. - * @return out Reference to the ConstOutputsDataMap object - */ - virtual ConstOutputsDataMap GetOutputsInfo() const; - - /** - * @brief Gets the Executable network input Data node information. 
The received info is stored in the given - * InputsDataMap object. - * @return inputs Reference to ConstInputsDataMap object. - */ - virtual ConstInputsDataMap GetInputsInfo() const; - - /** - * @brief Create an inference request object used to infer the network - * Note: the returned request will have allocated input and output blobs (that can be changed later) - * @return shared_ptr for the created request - */ - virtual std::shared_ptr CreateInferRequest(); - - /** - * @deprecated Use IExecutableNetworkInternal::Export(std::ostream& networkModel) - * @brief Export the current created executable network so it can be used later in the Import() main API - * @param modelFileName - path to the location of the exported file - */ - virtual void Export(const std::string& modelFileName); - - /** - * @brief Export the current created executable network so it can be used later in the Import() main API - * @param networkModel - Reference to network model output stream - */ - virtual void Export(std::ostream& networkModel); - - /** - * @brief Get executable graph information from a device - * @return A network object to store executable graph information - */ - virtual std::shared_ptr GetExecGraphInfo(); - - /** - * @brief Sets the pointer to plugin internal. - * @param[in] plugin The plugin - * @note Needed to correctly handle ownership between objects. - */ - virtual void SetPointerToPlugin(const std::shared_ptr& plugin); - - /** - * @brief Gets the pointer to plugin so. - * @note Needed to correctly handle ownership between objects. - * @return A shared pointer to the plugin so - */ - virtual std::shared_ptr GetPointerToSo(); - - /** - * @brief Sets configuration for current executable network - * @param config Map of pairs: (config name, config ov::Any value) - */ - virtual void SetConfig(const ov::AnyMap& config); - - /** - * @brief Gets configuration dedicated to plugin behaviour - * @param name A config key, can be found in properties.hpp - * @return A value of config corresponding to config key - */ - virtual ov::Any GetConfig(const std::string& name) const; - - /** - * @brief Gets general runtime metric for dedicated hardware - * @param name A metric name to request - * @return A metric value corresponding to metric key - */ - virtual ov::Any GetMetric(const std::string& name) const; - - /** - * @brief Raises the flag that model was loaded from cache - */ - void loadedFromCache(); - - /** - * @brief Provides an information how model was loaded - * - * @return true if model was loaded from cache - */ - bool isLoadedFromCache() const; - -protected: - virtual ~IExecutableNetworkInternal() = default; - - /** - * @brief Creates an inference request internal implementation. - * @note The method is called by IExecutableNetworkInternal::CreateInferRequest as - * plugin-specific implementation. - * @param[in] networkInputs The network inputs - * @param[in] networkOutputs The network outputs - * @return A shared pointer to inference request object. - */ - virtual std::shared_ptr CreateInferRequestImpl(InputsDataMap networkInputs, - OutputsDataMap networkOutputs); - /** - * @brief Creates an inference request internal implementation. - * @note The method is called by IExecutableNetworkInternal::CreateInferRequest as - * plugin-specific implementation. - * @param[in] inputs The function inputs - * @param[in] outputs The function outputs - * @return A shared pointer to inference request object. 
- */ - virtual std::shared_ptr CreateInferRequestImpl( - const std::vector>& inputs, - const std::vector>& outputs); - - InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info - InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data - std::vector> _parameters; - std::vector> _results; - - /** - * @brief A pointer to a IInferencePlugin interface. - * @note Needed to correctly handle ownership between objects. - */ - std::shared_ptr _plugin; - - /** - * @brief A pointer to a plugin library. - * @note Needed to correctly handle ownership between objects. - */ - std::shared_ptr _so; - - /** - * @brief If true, it means that model was loaded from cache - */ - bool _loadedFromCache = false; - - friend InferenceEngine::ICompiledModelWrapper; - friend InferenceEngine::IPluginWrapper; -}; - -/** - * @brief SoPtr to IExecutableNetworkInternal. - */ -using SoExecutableNetworkInternal = ov::SoPtr; - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp deleted file mode 100644 index 438b94acb65c2d..00000000000000 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "cpp/ie_infer_request.hpp" -#include "ie_common.h" -#include "ie_input_info.hpp" -#include "openvino/core/node_output.hpp" -#include "openvino/runtime/so_ptr.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START -class IExecutableNetworkInternal; -class IVariableStateInternal; - -/** - * @interface IInferRequestInternal - * @brief An internal API of synchronous inference request to be implemented by plugin, - * which is used in InferRequestBase forwarding mechanism - * @ingroup ie_dev_api_infer_request_api - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestInternal) - : public std::enable_shared_from_this { -public: - /** - * @brief A shared pointer to a IInferRequestInternal interface - */ - using Ptr = std::shared_ptr; - - IInferRequestInternal() = default; - - /** - * @brief Constructs a new instance. - * @param[in] networkInputs The network inputs info - * @param[in] networkOutputs The network outputs data - */ - IInferRequestInternal(const InputsDataMap& networkInputs, const OutputsDataMap& networkOutputs); - - /** - * @brief Constructs a new instance. - * @param[in] inputs The network inputs - * @param[in] outputs The network outputs - */ - IInferRequestInternal(const std::vector>& networkInputs, - const std::vector>& networkOutputs); - - /** - * @brief Infers specified input(s) in synchronous mode - * @note blocks all method of InferRequest while request is ongoing (running or waiting in queue) - */ - virtual void Infer(); - - /** - * @brief The minimal infer function to be implemented by plugins. It infers specified input(s) in synchronous mode - * @note - * * This method is used in IInferRequestInternal::Infer, which calls the common code first and after uses this - * plugin dependent implementation. 
- * * Blocks all method of InferRequest while request is ongoing (running or waiting in queue) - */ - virtual void InferImpl(); - - /** - * @brief Cancel current inference request execution - */ - virtual void Cancel(); - - /** - * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer. - * Note: not all plugins may provide meaningful data - * @return - a map of layer names to profiling information for that layer. - */ - virtual std::map GetPerformanceCounts() const; - - /** - * @brief Set input/output data to infer - * @note Memory allocation doesn't happen - * @param name - a name of input or output blob. - * @param data - a reference to input or output blob. The type of Blob must correspond to the network input - * precision and size. - */ - virtual void SetBlob(const std::string& name, const Blob::Ptr& data); - - /** - * @brief Get input/output data to infer - * @note Memory allocation doesn't happen - * @param name - a name of input or output blob. - * @param data - a reference to input or output blob. The type of Blob must correspond to the network input - * precision and size. - */ - virtual Blob::Ptr GetBlob(const std::string& name); - - /** - * @brief Queries memory states. - * @return Returns memory states - */ - virtual std::vector> QueryState(); - - /** - * @brief Start inference of specified input(s) in asynchronous mode - * @note The method returns immediately. Inference starts also immediately. - */ - virtual void StartAsync(); - - /** - * @brief The minimal asynchronous inference function to be implemented by plugins. - * It starts inference of specified input(s) in asynchronous mode - * @note - * * The methos is used in AsyncInferRequestInternal::StartAsync which performs common steps first and - * calls plugin dependent implementation of this method after. - * * It returns immediately. Inference starts also immediately. - */ - virtual void StartAsyncImpl(); - - /** - * @brief Waits for the result to become available. Blocks until specified millis_timeout has elapsed or the result - * becomes available, whichever comes first. - * @param millis_timeout - maximum duration in milliseconds to block for - * @note There are special cases when millis_timeout is equal some value of WaitMode enum: - * * STATUS_ONLY - immediately returns request status (InferRequest::StatusCode). It doesn't block or interrupt - * current thread. - * * RESULT_READY - waits until inference result becomes available - * @return A status code - */ - virtual StatusCode Wait(int64_t millis_timeout); - - /** - * @brief Alias for callback type - */ - using Callback = std::function; - - /** - * @brief Set callback function which will be called on success or failure of asynchronous request - * @param callback - function to be called with the following description: - */ - virtual void SetCallback(Callback callback); - - /** - * @brief Check that @p blob is valid. Throws an exception if it's not. - * - * @param[in] blob The blob to check - * @param[in] name The name of input or output depending of if the @p blob is input or output - * @param[in] isInput Indicates if @p is input - * @param[in] refDims The reference dims, empty if not specified - */ - void checkBlob(const Blob::Ptr& blob, const std::string& name, bool isInput, const SizeVector& refDims = {}) const; - - /** - * @brief Check that all of the blobs is valid. Throws an exception if it's not. - */ - virtual void checkBlobs(); - - /** - * @brief Sets the pointer to executable network internal. 
- * @note Needed to correctly handle ownership between objects. - * @param[in] exeNetwork The executable network - */ - void setPointerToExecutableNetworkInternal(const std::shared_ptr& exeNetwork); - - /** - * @brief Returns the pointer to executable network internal. - * @returns The executable network - */ - std::shared_ptr getPointerToExecutableNetworkInternal() const; - - /** - * @brief Sets the pointer to so when needed. - * @note Needed to correctly handle ownership between objects. - * @param[in] so The library so - */ - void setPointerToSo(const std::shared_ptr& so); - - /** - * @brief Returns the pointer to so. - * @returns The library - */ - std::shared_ptr getPointerToSo() const; - /** - * @brief Gets the pointer to userData. - * @return Pointer to user data - */ - INFERENCE_ENGINE_DEPRECATED("The method will be removed") - void* GetUserData() noexcept; - - /** - * @brief Sets the pointer to userData. - * @param[in] Pointer to user data - */ - INFERENCE_ENGINE_DEPRECATED("The method will be removed") - void SetUserData(void* userData) noexcept; - - const std::vector>& GetInputs() const; - const std::vector>& GetOutputs() const; - - /** - * @brief Sets inputs/outputs from ov::Model - */ - virtual void setModelInputsOutputs(const std::vector>& inputs, - const std::vector>& outputs); - -protected: - /** - * @brief Destroys the object. - */ - virtual ~IInferRequestInternal(); - - /** - * @brief Checks and executes input data pre-processing if needed. - * @param inputs Inputs blobs to perform preprocessing on - * @param serial Whether to use multiple threads to execute the step - */ - void execDataPreprocessing(InferenceEngine::BlobMap& preprocessedBlobs, bool serial = false); - - /** - * @brief Helper function to find input or output blob by name - * @param name A name of input or output blob. - * @param foundInput A pointer to input information if found. - * @param foundOutput A pointer to output DataPtr if found. 
- * @return `True` - if loaded network has input with provided name, - * `false` - if loaded network has output with provided name - * @throws [not_found] exception if there is no input and output layers with given name - */ - bool findInputAndOutputBlobByName(const std::string& name, InputInfo::Ptr& foundInput, DataPtr& foundOutput) const; - - /** - * @brief Helper function to find input node by legacy blob name - * @param name A legacy name of input blob - * @return shared pointer to OpenVINO input node - */ - std::shared_ptr findInputByNodeName(const std::string& name) const; - /** - * @brief Helper function to find output node by legacy blob name - * @param name A legacy name of output blob - * @return shared pointer to OpenVINO output node - */ - std::shared_ptr findOutputByNodeName(const std::string& name) const; - - /** - * @brief Checks whether pre-processing step is required for a given input - * @param info InputInfo corresponding to input blob - * @param userBlob Input Blob object corresponding to input info - * @param deviceBlob Blob object in plugin's desired format - * @return `True` if pre-processing is required, `false` otherwise - */ - bool preProcessingRequired(const InputInfo::Ptr& info, - const Blob::Ptr& userBlob, - const Blob::Ptr& deviceBlob = nullptr); - - void addInputPreProcessingFor(const std::string& name, const Blob::Ptr& from, const Blob::Ptr& to); - - InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info - InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data - InferenceEngine::BlobMap _inputs; //!< A map of user passed blobs for network inputs - InferenceEngine::BlobMap _deviceInputs; //!< A map of actual network inputs, in plugin specific format - InferenceEngine::BlobMap _outputs; //!< A map of user passed blobs for network outputs - std::vector> _parameters; //!< A vector of function inputs - std::vector> _results; //!< A vector of function outputs - - /** - * @brief A shared pointer to IInferRequestInternal - * @note Needed to correctly handle ownership between objects. - */ - std::shared_ptr _exeNetwork; - /** - * @brief A shared pointer to loaded library - * @note Needed to correctly handle ownership between objects. - */ - std::shared_ptr _so; - Callback _callback; //!< A callback - -private: - void* _userData = nullptr; -}; - -/** - * @brief SoPtr to IInferRequestInternal. 
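(Editorial reference, not part of this patch: the blob pre-processing helpers removed above, execDataPreprocessing/preProcessingRequired/addInputPreProcessingFor, have no one-to-one replacement; the closest 2.0 counterpart is declaring pre-processing on the model itself with ov::preprocess::PrePostProcessor. A hedged sketch, with the layouts and element types as placeholder assumptions.)

#include <memory>
#include <openvino/core/preprocess/pre_post_process.hpp>
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> add_input_preprocessing(const std::shared_ptr<ov::Model>& model) {
    ov::preprocess::PrePostProcessor ppp(model);
    // Describe what the application will actually feed in (placeholder assumptions).
    ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC");
    // Describe what the model expects.
    ppp.input().model().set_layout("NCHW");
    // Insert the conversion steps into the graph itself.
    ppp.input().preprocess().convert_element_type(ov::element::f32).convert_layout();
    return ppp.build();
}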
- */ -using SoIInferRequestInternal = ov::SoPtr; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp deleted file mode 100644 index 8480eef310b5be..00000000000000 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Inference Engine plugin API wrapper, to be used by particular implementors - * @file ie_iplugin_internal.hpp - */ - -#pragma once - -#include -#include -#include -#include - -#include "blob_factory.hpp" -#include "cpp/ie_cnn_network.h" -#include "ie_input_info.hpp" -#include "openvino/core/extension.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/so_ptr.hpp" -#include "openvino/util/pp.hpp" - -using namespace ov::threading; - -namespace InferenceEngine { - -class ExecutorManager; -class IExecutableNetworkInternal; -class IExtension; -class ICore; - -/** - * @brief Copies the values of `std::string` indexed map and apply const cast - * - * @param[in] map map to copy - * @return map that contains pointers to constant values - */ -template -std::map> constMapCast(const std::map>& map) { - std::map> res; - for (auto&& v : map) - res.emplace(v.first, std::const_pointer_cast(v.second)); - return res; -} - -/** - * @brief Copies the values of `std::string` indexed map and apply const cast - * - * @param[in] map map to copy - * @return map that contains pointers to values - */ -template -std::map> constMapCast(const std::map>& map) { - std::map> res; - for (auto&& v : map) - res.emplace(v.first, std::const_pointer_cast(v.second)); - return res; -} - -/** - * @brief Copies InputInfo - * - * @param[in] networkInputs The network inputs to copy from - * @return copy of network inputs - */ -INFERENCE_ENGINE_API_CPP(InputsDataMap) copyInfo(const InputsDataMap& networkInputs); - -/** - * @brief Copies OutputsData - * - * @param[in] networkInputs network outputs to copy from - * @return copy of network outputs - */ -INFERENCE_ENGINE_API_CPP(OutputsDataMap) copyInfo(const OutputsDataMap& networkOutputs); - -/** - * @brief Set input and output information to executable network. This method is used to - * set additional information to InferenceEngine::IExecutableNetworkInternal created by device plugin. - * @param exeNetwork Executable network object - * @param function Model with initial execution info - */ -INFERENCE_ENGINE_API_CPP(void) -SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const std::shared_ptr& function, - bool new_api); - -/** - * @brief Returns set of nodes which were removed after transformation. - * If originalFunction contains node1 and transformedFunction does not - * contains node1 in ops list, node1 will be returned. - * @param originalFunction Original network - * @param transformedFunction Transformed network - * @return Set of strings which contains removed node names - */ -INFERENCE_ENGINE_API_CPP(std::unordered_set) -GetRemovedNodes(const std::shared_ptr& originalFunction, - const std::shared_ptr& transformedFunction); - -/** - * @brief Returns set of nodes from original model which are - * determined as supported after applied transformation pipeline. 
- * @param model Original model - * @param transform Transformation pipeline function - * @param is_node_supported Function returning whether node is supported or not - * @return Set of strings which contains supported node names - */ -INFERENCE_ENGINE_API_CPP(std::unordered_set) -GetSupportedNodes(const std::shared_ptr& model, - std::function&)> transform, - std::function)> is_node_supported); - -/** - * @interface IInferencePlugin - * @brief An API of plugin to be implemented by a plugin - * @ingroup ie_dev_api_plugin_api - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugin) - : public std::enable_shared_from_this { - class VersionStore : public ov::Version { - void copyFrom(const Version& v); - - public: - VersionStore() = default; - - explicit VersionStore(const Version& v); - - VersionStore& operator=(const VersionStore& v); - } _version; - -public: - /** - * @brief A shared pointer to IInferencePlugin interface - */ - using Ptr = std::shared_ptr; - - /** - * @brief Sets a plugin version - * @param version A version to set - */ - void SetVersion(const ov::Version& version); - - /** - * @brief Gets a plugin version - * @return A const ov::Version object - */ - const ov::Version& GetVersion() const; - - /** - * @brief Provides a name of a plugin - * @return The name. - */ - virtual std::string GetName() const noexcept; - - /** - * @brief Sets a name for a plugin - * @param[in] name The name - */ - virtual void SetName(const std::string& name) noexcept; - - /** - * @brief Creates an executable network from an pares network object, users can create as many networks as they need - * and use them simultaneously (up to the limitation of the HW resources) - * @param network A network object acquired from InferenceEngine::Core::ReadNetwork - * @param config A string-string map of config parameters relevant only for this load operation - * @return Created Executable Network object - */ - virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, - const std::map& config); - - /** - * @brief Creates an executable network from model file path - * @param modelPath A path to model - * @param config A string-string map of config parameters relevant only for this load operation - * @return Created Executable Network object - */ - virtual ov::SoPtr LoadNetwork(const std::string& modelPath, - const std::map& config); - - /** - * @brief Registers extension within plugin - * @param extension - pointer to already loaded extension - */ - virtual void AddExtension(const std::shared_ptr& extension); - - /** - * @brief Sets configuration for plugin, acceptable keys can be found in properties.hpp - * @param config string-string map of config parameters - */ - virtual void SetConfig(const std::map& config); - - /** - * @brief Sets configuration for plugin, acceptable keys can be found in openvino/runtime/properties.hpp - * @param config ov::AnyMap of config parameters - */ - virtual void SetProperties(const ov::AnyMap& config); - - /** - * @brief Gets configuration dedicated to plugin behaviour - * @param name - value of config corresponding to config key - * @param options - configuration details for config - * @return Value of config corresponding to config key - */ - virtual ov::Any GetConfig(const std::string& name, const ov::AnyMap& options) const; - - /** - * @brief Gets general runtime metric for dedicated hardware - * @param name - metric name to request - * @param options - configuration details for metric - * @return Metric value corresponding to metric key - */ 
- virtual ov::Any GetMetric(const std::string& name, const ov::AnyMap& options) const; - - /** - * @deprecated Use ImportNetwork(std::istream& networkModel, const std::map& config) - * @brief Creates an executable network from an previously exported network - * @param modelFileName - path to the location of the exported file - * @param config A string -> string map of parameters - * @return An Executable network - */ - virtual std::shared_ptr ImportNetwork(const std::string& modelFileName, - const std::map& config); - - /** - * @brief Creates an executable network from an previously exported network using plugin implementation - * and removes Inference Engine magic and plugin name - * @param networkModel Reference to network model output stream - * @param config A string -> string map of parameters - * @return An Executable network - */ - virtual std::shared_ptr ImportNetwork(std::istream& networkModel, - const std::map& config); - - /** - * @brief Sets pointer to ICore interface - * @param core Pointer to Core interface - */ - virtual void SetCore(std::weak_ptr core); - - /** - * @brief Gets reference to ICore interface - * @return Reference to ICore interface - */ - virtual std::shared_ptr GetCore() const noexcept; - - /** - * @brief Provides an information about used API - * @return true if new API is used - */ - bool IsNewAPI() const noexcept; - - /** - * @brief Gets reference to tasks execution manager - * @return Reference to ExecutorManager interface - */ - const std::shared_ptr& executorManager() const; - - /** - * @brief Queries a plugin about supported layers in network - * @param[in] network The network object to query - * @param[in] config The map of configuration parameters - * @return The result of query operator containing supported layers map - */ - virtual QueryNetworkResult QueryNetwork(const CNNNetwork& network, - const std::map& config) const; - -protected: - IInferencePlugin(); - virtual ~IInferencePlugin() = default; - - /** - * @brief Creates an executable network from a parsed network object, users can create as many networks as they need - * and use them simultaneously (up to the limitation of the HW resources) - * @note The function is used in - * InferencePluginInternal::LoadNetwork(const CNNNetwork&, const std::map&) - * which performs common steps first and calls this plugin-dependent method implementation after. - * @param network A network object - * @param config string-string map of config parameters relevant only for this load operation - * @return Shared pointer to the ExecutableNetwork object - */ - virtual std::shared_ptr LoadExeNetworkImpl( - const CNNNetwork& network, - const std::map& config); - - /** - * @brief Set input and output information to executable network. This method is used to - * set addtional information to InferenceEngine::IExecutableNetworkInternal create by device plugin. - * @param exeNetwork An executable network object to set information to - * @param inputs An input information to set - * @param outputs An output information to set - * @param function Function with initial execution info - */ - void SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const ConstInputsDataMap& inputs, - const ConstOutputsDataMap& outputs); - - /** - * @brief Set input and output information to executable network. This method is used to - * set additional information to InferenceEngine::IExecutableNetworkInternal create by device plugin. 
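(Editorial reference, not part of this patch: the plugin entry points above, LoadNetwork/ImportNetwork/QueryNetwork/SetConfig/GetConfig/GetMetric, are exercised through ov::Core in the 2.0 API. A minimal sketch; "CPU", the property values and the blob file name are placeholders.)

#include <fstream>
#include <memory>
#include <string>
#include <openvino/openvino.hpp>

void core_counterparts(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;

    // SetConfig / SetProperties and GetConfig / GetMetric
    core.set_property("CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
    std::string device = core.get_property("CPU", ov::device::full_name);

    // QueryNetwork
    ov::SupportedOpsMap supported = core.query_model(model, "CPU");

    // LoadNetwork
    ov::CompiledModel compiled = core.compile_model(model, "CPU");

    // ImportNetwork
    std::ifstream blob("exported.blob", std::ios::binary);
    ov::CompiledModel imported = core.import_model(blob, "CPU");

    (void)device; (void)supported; (void)compiled; (void)imported;
}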
- * @param function Function with initial execution info - */ - void SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const std::shared_ptr& function); - - std::string _pluginName; //!< A device name that plugins enables - std::map _config; //!< A map config keys -> values - std::weak_ptr _core; //!< A pointer to ICore interface - std::shared_ptr _executorManager; //!< A tasks execution manager - bool _isNewAPI; //!< A flag which shows used API -}; - -/** - * @def IE_CREATE_PLUGIN - * @brief Defines a name of a function creating plugin instance - * @ingroup ie_dev_api_plugin_api - */ -#ifndef IE_CREATE_PLUGIN -# define IE_CREATE_PLUGIN CreatePluginEngine -#endif - -/** - * @private - */ -constexpr static const auto create_plugin_function = OV_PP_TOSTRING(IE_CREATE_PLUGIN); - -INFERENCE_ENGINE_API_CPP(std::shared_ptr<::ov::IPlugin>) -convert_plugin(const std::shared_ptr& from); - -} // namespace InferenceEngine - -/** - * @def IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version) - * @brief Defines the exported `IE_CREATE_PLUGIN` function which is used to create a plugin instance - * @ingroup ie_dev_api_plugin_api - */ -#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...) \ - INFERENCE_PLUGIN_API(void) \ - IE_CREATE_PLUGIN(::std::shared_ptr<::ov::IPlugin>& plugin) noexcept(false); \ - void IE_CREATE_PLUGIN(::std::shared_ptr<::ov::IPlugin>& plugin) noexcept(false) { \ - std::shared_ptr<::InferenceEngine::IInferencePlugin> ie_plugin; \ - try { \ - ie_plugin = ::std::make_shared(__VA_ARGS__); \ - } catch (const InferenceEngine::Exception&) { \ - throw; \ - } catch (const std::exception& ex) { \ - IE_THROW() << ex.what(); \ - } catch (...) { \ - IE_THROW(Unexpected); \ - } \ - ie_plugin->SetVersion(version); \ - plugin = convert_plugin(ie_plugin); \ - } diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp deleted file mode 100644 index f04a34bf841e63..00000000000000 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "ie_blob.h" -#include "openvino/runtime/so_ptr.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -/** - * @interface IVariableStateInternal - * @brief Minimal interface for variable state implementation - * @ingroup ie_dev_api_variable_state_api - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IVariableStateInternal) - : public std::enable_shared_from_this { -public: - /** - * @brief A shared pointer to a IVariableStateInternal interface - */ - using Ptr = std::shared_ptr; - - explicit IVariableStateInternal(const std::string& name); - - /** - * @brief Gets a variable state name - * @return A string representing variable state name - */ - virtual std::string GetName() const; - - /** - * @brief Reset internal variable state for relevant infer request, to a value specified as - * default for according `ReadValue` node - */ - virtual void Reset(); - - /** - * @brief Sets the new state for the next inference - * @param newState A new state - */ - virtual void SetState(const Blob::Ptr& newState); - - /** - * @brief Returns the value of the variable state. 
- * @return The value of the variable state - */ - virtual Blob::CPtr GetState() const; - -protected: - /** - * @brief A default dtor - */ - virtual ~IVariableStateInternal() = default; - - std::string name; - Blob::Ptr state; -}; - -/** - * @brief For compatibility reasons. - */ -using IMemoryStateInternal = IVariableStateInternal; - -/** - * @brief SoPtr to IVariableStateInternal. - */ -using SoIVariableStateInternal = ov::SoPtr; - -/** - * @brief For compatibility reasons. - */ -using MemoryStateInternal = IVariableStateInternal; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/cpp_interfaces/plugin_itt.hpp b/src/inference/dev_api/cpp_interfaces/plugin_itt.hpp deleted file mode 100644 index b498cd5eed3f44..00000000000000 --- a/src/inference/dev_api/cpp_interfaces/plugin_itt.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Defines openvino domains for tracing - * @file plugin_itt.hpp - */ - -#pragma once - -#include - -namespace InferenceEngine { -namespace itt { -namespace domains { -OV_ITT_DOMAIN(Plugin) -OV_ITT_DOMAIN(Plugin_LT) -} // namespace domains -} // namespace itt -} // namespace InferenceEngine diff --git a/src/inference/dev_api/description_buffer.hpp b/src/inference/dev_api/description_buffer.hpp deleted file mode 100644 index 517bd2f13808a4..00000000000000 --- a/src/inference/dev_api/description_buffer.hpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Defines Description buffer to conviniently works with StatusCode and ResponseDesc - * @file description_buffer.hpp - */ - -#pragma once - -#include -#include -#include - -#include "ie_common.h" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief A description buffer wrapping StatusCode and ResponseDesc - * @ingroup ie_dev_api_error_debug - */ -struct INFERENCE_ENGINE_1_0_DEPRECATED DescriptionBuffer : public std::basic_streambuf> { - /** - * @brief Creeates a description buffer with parameters - * - * @param[in] err The error code - * @param desc The response desc to write an error message to - */ - DescriptionBuffer(StatusCode err, ResponseDesc* desc) : err(err) { - init(desc); - } - - /** - * @brief Constructs with StatusCode - * - * @param[in] err The StatusCode value - */ - explicit DescriptionBuffer(StatusCode err) : err(err) {} - - /** - * @brief Constructs with ResponseDesc - * - * @param desc The ResponseDesc pointer - */ - explicit DescriptionBuffer(ResponseDesc* desc) { - init(desc); - } - - /** - * @brief Constructs with parameters - * - * @param pBuffer The buffer to wrtie to. - * @param[in] len The length of `pBuffer` - */ - DescriptionBuffer(char* pBuffer, size_t len) { - init(pBuffer, len); - } - - /** - * @brief Constructs with parameters - * - * @param[in] err The StatusCode value - * @param pBuffer The buffer to wrtie to. 
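(Editorial reference, not part of this patch: the removed IVariableStateInternal surface, GetName/Reset/SetState/GetState, maps directly onto ov::VariableState obtained from an infer request. A minimal sketch.)

#include <string>
#include <openvino/openvino.hpp>

void reset_all_states(ov::InferRequest& request) {
    for (ov::VariableState& state : request.query_state()) {
        std::string name = state.get_name();    // GetName()
        ov::Tensor value = state.get_state();   // GetState(), an ov::Tensor instead of a Blob
        state.reset();                          // Reset()
        // SetState(blob) becomes state.set_state(some_tensor);
        (void)name;
        (void)value;
    }
}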
- * @param[in] len The length of `pBuffer` - */ - DescriptionBuffer(StatusCode err, char* pBuffer, size_t len) : err(err) { - init(pBuffer, len); - } - - /** - * @brief Writes to ResponseDesc stream - * - * @param[in] obj The object to write to stream - * @tparam T An object type - * - * @return A reference to itself - */ - template - DescriptionBuffer& operator<<(const T& obj) { - if (!stream) - return *this; - (*stream.get()) << obj; - - return *this; - } - - /** - * @brief Converts to StatusCode - * @return A StatusCode value - */ - operator StatusCode() const { - if (stream) - stream->flush(); - return err; - } - -private: - std::unique_ptr stream; - StatusCode err = GENERAL_ERROR; - - void init(ResponseDesc* desc) { - if (desc == nullptr) - return; - init(desc->msg, sizeof(desc->msg) / sizeof(desc->msg[0])); - } - - void init(char* ptr, size_t len) { - if (nullptr != ptr && len > 0) { - // set the "put" pointer the start of the buffer and record it's length. - setp(ptr, ptr + len - 1); - } - stream.reset(new std::ostream(this)); - - if (nullptr != ptr && len > 0) { - ptr[len - 1] = 0; - (*stream.get()) << ptr; - } - } -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_icore.hpp b/src/inference/dev_api/ie_icore.hpp deleted file mode 100644 index 4461c580a4515d..00000000000000 --- a/src/inference/dev_api/ie_icore.hpp +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for ICore interface - * @file ie_icore.hpp - */ - -#pragma once - -#include -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "openvino/runtime/icore.hpp" -#include "openvino/runtime/properties.hpp" - -namespace InferenceEngine { - -class ICore : public ov::ICore { -public: - /** - * @brief Reads IR xml and bin (with the same name) files - * @param model string with IR - * @param weights shared pointer to constant blob with weights - * @param frontendMode read network without post-processing or other transformations - * @return CNNNetwork - */ - virtual CNNNetwork ReadNetwork(const std::string& model, - const Blob::CPtr& weights, - bool frontendMode = false) const = 0; - - /** - * @brief Reads IR xml and bin files - * @param modelPath path to IR file - * @param binPath path to bin file, if path is empty, will try to read bin file with the same name as xml and - * if bin file with the same name was not found, will load IR without weights. - * @return CNNNetwork - */ - virtual CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const = 0; - - /** - * @brief Creates an executable network from a network object. - * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param deviceName Name of device to load network to - * @param config Optional map of pairs: (config name, config value) relevant only for this load - * operation - * @return An executable network reference - */ - virtual SoExecutableNetworkInternal LoadNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config = {}) = 0; - - /** - * @brief Creates an executable network from a model memory. 
- * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param modelStr String data of model - * @param weights Model's weights - * @param deviceName Name of device to load network to - * @param config Optional map of pairs: (config name, config value) relevant only for this load - * operation - * @param val Optional callback to perform validation of loaded CNNNetwork, if ReadNetwork is triggered - * @return An executable network reference - */ - virtual SoExecutableNetworkInternal LoadNetwork( - const std::string& modelStr, - const InferenceEngine::Blob::CPtr& weights, - const std::string& deviceName, - const std::map& config, - const std::function& val = nullptr) = 0; - - /** - * @brief Creates an executable network from a model file. - * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param modelPath Path to model - * @param deviceName Name of device to load network to - * @param config Optional map of pairs: (config name, config value) relevant only for this load - * operation - * @param val Optional callback to perform validation of loaded CNNNetwork, if ReadNetwork is triggered - * @return An executable network reference - */ - virtual SoExecutableNetworkInternal LoadNetwork(const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - const std::function& val = nullptr) = 0; - - /** - * @brief Creates an executable network from a previously exported network - * @param networkModel network model stream - * @param deviceName Name of device load executable network on - * @param config Optional map of pairs: (config name, config value) relevant only for this load - * operation* - * @return An executable network reference - */ - virtual SoExecutableNetworkInternal ImportNetwork(std::istream& networkModel, - const std::string& deviceName = {}, - const std::map& config = {}) = 0; - - /** - * @brief Query device if it supports specified network with specified configuration - * - * @param deviceName A name of a device to query - * @param network Network object to query - * @param config Optional map of pairs: (config name, config value) - * @return An object containing a map of pairs a layer name -> a device name supporting this layer. - */ - virtual QueryNetworkResult QueryNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config) const = 0; - - /** - * @brief Gets general runtime metric for dedicated hardware. - * - * The method is needed to request common device properties - * which are executable network agnostic. It can be device name, temperature, other devices-specific values. - * - * @param deviceName - A name of a device to get a metric value. - * @param name - metric name to request. - * @return Metric value corresponding to metric key. - */ - virtual ov::Any GetMetric(const std::string& deviceName, - const std::string& name, - const ov::AnyMap& options = {}) const = 0; - - /** - * @brief Gets configuration dedicated to device behaviour. - * - * The method is targeted to extract information which can be set via SetConfig method. - * - * @param deviceName - A name of a device to get a configuration value. - * @param name - config key. - * @return Value of config corresponding to config key. 
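(Editorial reference, not part of this patch: the ReadNetwork/LoadNetwork overloads described above correspond to ov::Core::read_model followed by ov::Core::compile_model. A short sketch; the paths, device name and weights tensor are placeholders.)

#include <memory>
#include <string>
#include <openvino/openvino.hpp>

void load_counterparts(const std::string& xml_path, const std::string& model_str, const ov::Tensor& weights) {
    ov::Core core;

    // ReadNetwork(modelPath, binPath) followed by LoadNetwork(network, deviceName)
    std::shared_ptr<ov::Model> model = core.read_model(xml_path);
    ov::CompiledModel compiled = core.compile_model(model, "GPU");

    // ReadNetwork(model, weights) / LoadNetwork(modelStr, weights, deviceName)
    std::shared_ptr<ov::Model> in_memory = core.read_model(model_str, weights);
    ov::CompiledModel compiled_from_memory = core.compile_model(in_memory, "GPU");

    (void)compiled;
    (void)compiled_from_memory;
}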
- */ - virtual ov::Any GetConfig(const std::string& deviceName, const std::string& name) const = 0; - - /** - * @brief Returns devices available for neural networks inference - * - * @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1 } - * If there more than one device of specific type, they are enumerated with .# suffix. - */ - virtual std::vector GetAvailableDevices() const = 0; - - /** - * @brief Checks whether device supports model caching feature - * - * @param deviceName - A name of a device to get a metric value. - * @return True if device has IMPORT_EXPORT_SUPPORT and CACHING_PROPERTIES metric in SUPPORTED_PROPERTIES and - * this metric returns 'true', False otherwise. - */ - virtual bool DeviceSupportsModelCaching(const std::string& deviceName) const = 0; - - /** - * @brief Get only configs that are supported by device - * @param deviceName Name of a device - * @param config Map of configs that can contains configs that are not supported by device - * @return map of configs that are supported by device - */ - virtual std::map GetSupportedConfig(const std::string& deviceName, - const std::map& config) = 0; - - virtual bool isNewAPI() const = 0; -}; - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_ngraph_utils.hpp b/src/inference/dev_api/ie_ngraph_utils.hpp deleted file mode 100644 index 6a2647037aa63d..00000000000000 --- a/src/inference/dev_api/ie_ngraph_utils.hpp +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "ie_precision.hpp" -#include "openvino/core/type/element_type.hpp" -#include "openvino/runtime/common.hpp" - -namespace InferenceEngine { -namespace details { - -INFERENCE_ENGINE_1_0_DEPRECATED inline ::ov::element::Type convertPrecision(const Precision& precision) { - Precision::ePrecision pType = precision; - switch (pType) { - case Precision::UNSPECIFIED: - return ::ov::element::Type(::ov::element::Type_t::undefined); - case Precision::FP32: - return ::ov::element::Type(::ov::element::Type_t::f32); - case Precision::FP64: - return ::ov::element::Type(::ov::element::Type_t::f64); - case Precision::FP16: - return ::ov::element::Type(::ov::element::Type_t::f16); - case Precision::BF16: - return ::ov::element::Type(::ov::element::Type_t::bf16); - case Precision::U4: - return ::ov::element::Type(::ov::element::Type_t::u4); - case Precision::U8: - return ::ov::element::Type(::ov::element::Type_t::u8); - case Precision::I4: - return ::ov::element::Type(::ov::element::Type_t::i4); - case Precision::I8: - return ::ov::element::Type(::ov::element::Type_t::i8); - case Precision::U16: - return ::ov::element::Type(::ov::element::Type_t::u16); - case Precision::I16: - return ::ov::element::Type(::ov::element::Type_t::i16); - case Precision::I32: - return ::ov::element::Type(::ov::element::Type_t::i32); - case Precision::U32: - return ::ov::element::Type(::ov::element::Type_t::u32); - case Precision::I64: - return ::ov::element::Type(::ov::element::Type_t::i64); - case Precision::U64: - return ::ov::element::Type(::ov::element::Type_t::u64); - case Precision::BOOL: - return ::ov::element::Type(::ov::element::Type_t::boolean); - case Precision::BIN: - return ::ov::element::Type(::ov::element::Type_t::u1); - case Precision::NF4: - return ::ov::element::Type(::ov::element::Type_t::nf4); - case Precision::STRING: - return ::ov::element::Type(::ov::element::Type_t::string); - case 
Precision::Q78: - case Precision::MIXED: - case Precision::CUSTOM: - default: - IE_THROW() << "Incorrect precision!"; - } -} - -INFERENCE_ENGINE_1_0_DEPRECATED inline ::ov::element::Type convertPrecision(const std::string& precision) { - return ::ov::element::Type(precision); -} - -INFERENCE_ENGINE_1_0_DEPRECATED inline Precision convertPrecision(const ::ov::element::Type& precision) { - switch (precision) { - case ::ov::element::Type_t::undefined: - return Precision(Precision::UNSPECIFIED); - case ::ov::element::Type_t::f16: - return Precision(Precision::FP16); - case ::ov::element::Type_t::f32: - return Precision(Precision::FP32); - case ::ov::element::Type_t::f64: - return Precision(Precision::FP64); - case ::ov::element::Type_t::bf16: - return Precision(Precision::BF16); - case ::ov::element::Type_t::i4: - return Precision(Precision::I4); - case ::ov::element::Type_t::i8: - return Precision(Precision::I8); - case ::ov::element::Type_t::i16: - return Precision(Precision::I16); - case ::ov::element::Type_t::i32: - return Precision(Precision::I32); - case ::ov::element::Type_t::i64: - return Precision(Precision::I64); - case ::ov::element::Type_t::u4: - return Precision(Precision::U4); - case ::ov::element::Type_t::u8: - return Precision(Precision::U8); - case ::ov::element::Type_t::u16: - return Precision(Precision::U16); - case ::ov::element::Type_t::u32: - return Precision(Precision::U32); - case ::ov::element::Type_t::u64: - return Precision(Precision::U64); - case ::ov::element::Type_t::u1: - return Precision(Precision::BIN); - case ::ov::element::Type_t::boolean: - return Precision(Precision::BOOL); - case ::ov::element::Type_t::nf4: - return Precision(Precision::NF4); - case ::ov::element::Type_t::string: - return Precision(Precision::STRING); - case ::ov::element::Type_t::dynamic: - return Precision(Precision::UNSPECIFIED); - default: - IE_THROW() << "Incorrect precision " << precision.to_string() << "!"; - return {}; - } -} - -/** - * @brief Clones input network including all layers and internal data objects - * @note Blobs inside layers are reused - * @param network A network to clone - * @return A cloned object - */ -INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(CNNNetwork) cloneNetwork(const CNNNetwork& network); - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp index 9cbe08b7d546c0..c214de6c36f6b0 100644 --- a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp +++ b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp @@ -22,15 +22,10 @@ #include "openvino/runtime/threading/cpu_streams_executor.hpp" #include "openvino/runtime/threading/itask_executor.hpp" -namespace InferenceEngine { -class ICompiledModelWrapper; -} // namespace InferenceEngine - namespace ov { class CoreImpl; class IPlugin; -class IExecutableNetworkWrapper; class IAsyncInferRequest; /** @@ -150,8 +145,6 @@ class OPENVINO_RUNTIME_API ICompiledModel : public std::enable_shared_from_this< std::shared_ptr m_callback_executor = nullptr; //!< Holds a callback executor friend ov::CoreImpl; - friend ov::IExecutableNetworkWrapper; - friend InferenceEngine::ICompiledModelWrapper; // FIXME: Remove after removing IE API std::vector> _parameters; diff --git a/src/inference/dev_api/openvino/runtime/icore.hpp b/src/inference/dev_api/openvino/runtime/icore.hpp index bf4da34721d152..fd75171f8fd69e 100644 --- a/src/inference/dev_api/openvino/runtime/icore.hpp +++ 
b/src/inference/dev_api/openvino/runtime/icore.hpp @@ -170,8 +170,6 @@ class OPENVINO_RUNTIME_API ICore { */ virtual ov::SoPtr create_context(const std::string& device_name, const AnyMap& args) const = 0; - virtual bool is_new_api() const = 0; - /** * @brief Get a pointer to default shared context object for the specified device. * @param device_name - A name of a device to get create shared context from. diff --git a/src/inference/dev_api/openvino/runtime/iplugin.hpp b/src/inference/dev_api/openvino/runtime/iplugin.hpp index 92b9ad4e75d7a3..1ad03ff4b05ddf 100644 --- a/src/inference/dev_api/openvino/runtime/iplugin.hpp +++ b/src/inference/dev_api/openvino/runtime/iplugin.hpp @@ -22,13 +22,6 @@ #include "openvino/runtime/threading/executor_manager.hpp" #include "openvino/util/pp.hpp" -namespace InferenceEngine { - -class IPluginWrapper; -class IExtension; - -} // namespace InferenceEngine - namespace ov { /** @@ -211,12 +204,6 @@ class OPENVINO_RUNTIME_API IPlugin : public std::enable_shared_from_this get_core() const; - /** - * @brief Provides an information about used API - * @return true if new API is used - */ - bool is_new_api() const; - /** * @brief Gets reference to tasks execution manager * @return Reference to ExecutorManager interface @@ -229,13 +216,10 @@ class OPENVINO_RUNTIME_API IPlugin : public std::enable_shared_from_this m_core; //!< A pointer to ICore interface std::shared_ptr m_executor_manager; //!< A tasks execution manager ov::Version m_version; //!< Member contains plugin version - bool m_is_new_api; //!< A flag which shows used API }; /** diff --git a/src/inference/dev_api/openvino/runtime/make_tensor.hpp b/src/inference/dev_api/openvino/runtime/make_tensor.hpp index 7433b22a7fc38c..f728021c7e12ef 100644 --- a/src/inference/dev_api/openvino/runtime/make_tensor.hpp +++ b/src/inference/dev_api/openvino/runtime/make_tensor.hpp @@ -8,12 +8,6 @@ #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/so_ptr.hpp" -namespace InferenceEngine { - -class Blob; - -} // namespace InferenceEngine - namespace ov { /** diff --git a/src/inference/include/ie/cpp/ie_cnn_network.h b/src/inference/include/ie/cpp/ie_cnn_network.h deleted file mode 100644 index 616fd763c523de..00000000000000 --- a/src/inference/include/ie/cpp/ie_cnn_network.h +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides wrapper for ICNNNetwork object - * - * @file ie_cnn_network.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_data.h" -#include "ie_icnn_network.hpp" -#include "openvino/core/model.hpp" - -namespace InferenceEngine { - -class IExtension; - -/** - * @brief This class contains all the information about the Neural Network and the related binary information - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(CNNNetwork) { -public: - /** - * @brief A default constructor - */ - CNNNetwork(); - - IE_SUPPRESS_DEPRECATED_START - /** - * @deprecated Don't use this constructor. It will be removed soon - * @brief Allows helper class to manage lifetime of network object - * - * @param network Pointer to the network object - */ - INFERENCE_ENGINE_DEPRECATED("Don't use this constructor. It will be removed soon") - explicit CNNNetwork(std::shared_ptr network); - - /** - * @brief A constructor from ngraph::Function object - * This constructor wraps existing ngraph::Function - * If you want to avoid modification of original Function, please create a copy - * @param network Pointer to the ngraph::Function object - * @param exts Vector of pointers to IE extension objects - */ - explicit CNNNetwork(const std::shared_ptr& network); - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Gets the network output Data node information. The received info is stored in the given Data node. - * - * For single and multiple outputs networks. - * - * This method need to be called to find out OpenVINO output names for using them later - * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob - * - * If you want to use framework names, you can use InferenceEngine::CNNNetwork::getOVNameForTensor - * method to map framework names to OpenVINO names - * - * @return the InferenceEngine::OutputsDataMap object - */ - OutputsDataMap getOutputsInfo() const; - - /** - * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap - * object. - * - * For single and multiple inputs networks. - * This method need to be called to find out OpenVINO input names for using them later - * when calling InferenceEngine::InferRequest::SetBlob - * - * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor - * method to map framework names to OpenVINO names - * - * @return The InferenceEngine::InputsDataMap object. - */ - InputsDataMap getInputsInfo() const; - - /** - * @brief Returns the number of layers in the network as an integer value - * @return The number of layers as an integer value - */ - size_t layerCount() const; - - /** - * @brief Returns the network name. - * @return Network name - */ - const std::string& getName() const; - - /** - * @brief Changes the inference batch size. - * - * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call - * InferenceEngine::CNNNetwork::reshape. - * - * @param size Size of batch to set - * - * @note Current implementation of the function sets batch size to the first dimension of all layers in the - * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the - * method works incorrectly. This limitation is resolved via shape inference feature by using - * InferenceEngine::ICNNNetwork::reshape method. 
To read more refer to the Shape Inference section in documentation - * - * @note Current implementation of the function sets batch size to the first dimension of all layers in the - * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the - * method works incorrectly. This limitation is resolved via shape inference feature by using - * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation - */ - void setBatchSize(const size_t size); - - /** - * @brief Gets the inference batch size - * @return The size of batch as a size_t value - */ - size_t getBatchSize() const; - - IE_SUPPRESS_DEPRECATED_START - /** - * @deprecated InferenceEngine::ICNNNetwork interface is deprecated - * @brief An overloaded operator cast to get pointer on current network - * - * @return A shared pointer of the current network - */ - INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") - operator ICNNNetwork::Ptr(); - - /** - * @deprecated InferenceEngine::ICNNNetwork interface is deprecated - * @brief An overloaded operator & to get current network - * - * @return An instance of the current network - */ - INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") - operator ICNNNetwork&(); - - /** - * @deprecated InferenceEngine::ICNNNetwork interface is deprecated - * @brief An overloaded operator & to get current network - * - * @return A const reference of the current network - */ - INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") - operator const ICNNNetwork&() const; - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Returns constant nGraph function - * @return constant nGraph function - */ - std::shared_ptr getFunction(); - - /** - * @brief Returns constant nGraph function - * @return constant nGraph function - */ - std::shared_ptr getFunction() const; - - /** - * @brief Adds output to the layer - * @param layerName Name of the layer - * @param outputIndex Index of the output - */ - void addOutput(const std::string& layerName, size_t outputIndex = 0); - - IE_SUPPRESS_DEPRECATED_START - /** - * @brief Helper method to get collect all input shapes with names of corresponding Data objects - * @return Map of pairs: input name and its dimension. - */ - ICNNNetwork::InputShapes getInputShapes() const; - - /** - * @brief Run shape inference with new input shapes for the network - * @param inputShapes A map of pairs: name of corresponding data and its dimension. - */ - void reshape(const ICNNNetwork::InputShapes& inputShapes); - - /** - * @brief Serialize network to IR and weights files. - * - * @param xmlPath Path to output IR file. - * @param binPath Path to output weights file. The parameter is skipped in case - * of executable graph info serialization. - */ - void serialize(const std::string& xmlPath, const std::string& binPath = {}) const; - - /** - * @brief Serialize network to IR and weights streams. - * - * @param xmlBuf output IR stream. - * @param binBuf output weights stream. - */ - void serialize(std::ostream& xmlBuf, std::ostream& binBuf) const; - - /** - * @brief Serialize network to IR stream and weights Blob::Ptr. - * - * @param xmlBuf output IR stream. - * @param binBlob output weights Blob::Ptr. 
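(Editorial reference, not part of this patch: the common CNNNetwork operations above are expressed directly on ov::Model in the 2.0 API. A sketch under the assumption of a single-input model; the shape and output file names are placeholders.)

#include <iostream>
#include <memory>
#include <openvino/openvino.hpp>

void model_counterparts(const std::shared_ptr<ov::Model>& model) {
    // getInputsInfo / getOutputsInfo
    for (const ov::Output<ov::Node>& input : model->inputs())
        std::cout << input.get_any_name() << " " << input.get_partial_shape() << std::endl;
    for (const ov::Output<ov::Node>& output : model->outputs())
        std::cout << output.get_any_name() << std::endl;

    // reshape / setBatchSize (single-input model assumed)
    model->reshape(ov::PartialShape{1, 3, 224, 224});

    // serialize(xmlPath, binPath)
    ov::serialize(model, "model.xml", "model.bin");
}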
- */ - void serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob) const; - - /** - * @brief Method maps framework tensor name to OpenVINO name - * @param orig_name Framework tensor name - * @return OpenVINO name - */ - std::string getOVNameForTensor(const std::string& orig_name) const; - -private: - IE_SUPPRESS_DEPRECATED_START - /** - * @brief Network extra interface, might be nullptr - */ - std::shared_ptr network; - - /** - * @brief A pointer to the current network - */ - ICNNNetwork* actual = nullptr; - IE_SUPPRESS_DEPRECATED_END -}; - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/cpp/ie_executable_network.hpp b/src/inference/include/ie/cpp/ie_executable_network.hpp deleted file mode 100644 index 10ab6a66c3d12e..00000000000000 --- a/src/inference/include/ie/cpp/ie_executable_network.hpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides ExecutableNetwork class - * - * @file ie_executable_network.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "cpp/ie_infer_request.hpp" -#include "ie_iexecutable_network.hpp" - -namespace ov { -class Core; -} // namespace ov - -namespace InferenceEngine { -class IExecutableNetworkInternal; - -/** - * @brief This is an interface of an executable network - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) { - std::shared_ptr _impl; - std::shared_ptr _so; - - /** - * @brief Constructs ExecutableNetwork from the initialized std::shared_ptr - * @param impl Initialized shared pointer - * @param so Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin - * object is destroyed. 
- */ - ExecutableNetwork(const std::shared_ptr& impl, const std::shared_ptr& so); - friend class Core; - friend class ov::Core; - -public: - /// @brief Default constructor - ExecutableNetwork() = default; - - /// @brief Default copy constructor - /// @param other other ExecutableNetwork object - ExecutableNetwork(const ExecutableNetwork& other) = default; - - /// @brief Default copy assignment operator - /// @param other other ExecutableNetwork object - /// @return reference to the current object - ExecutableNetwork& operator=(const ExecutableNetwork& other) = default; - - /// @brief Default move constructor - /// @param other other ExecutableNetwork object - ExecutableNetwork(ExecutableNetwork&& other) = default; - - /// @brief Default move assignment operator - /// @param other other ExecutableNetwork object - /// @return reference to the current object - ExecutableNetwork& operator=(ExecutableNetwork&& other) = default; - - /** - * @brief Destructor preserves unloading order of implementation object and reference to library - */ - ~ExecutableNetwork(); - - /** - * @brief Gets the Executable network output Data node information. - * - * The received info is stored in the given InferenceEngine::ConstOutputsDataMap node. - * This method need to be called to find output names for using them later - * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob - * - * @return A collection that contains string as key, and const Data smart pointer as value - */ - ConstOutputsDataMap GetOutputsInfo() const; - - /** - * @brief Gets the executable network input Data node information. - * - * The received info is stored in the given InferenceEngine::ConstInputsDataMap object. - * This method need to be called to find out input names for using them later - * when calling InferenceEngine::InferRequest::SetBlob - * - * @return A collection that contains string as key, and const InputInfo smart pointer as value - */ - ConstInputsDataMap GetInputsInfo() const; - - /** - * @brief Creates an inference request object used to infer the network. - * - * The created request has allocated input and output blobs (that can be changed later). - * - * @return InferRequest object - */ - InferRequest CreateInferRequest(); - - /** - * @brief Exports the current executable network. - * - * @see Core::ImportNetwork - * - * @param modelFileName Full path to the location of the exported file - */ - void Export(const std::string& modelFileName); - - /** - * @brief Exports the current executable network. - * - * @see Core::ImportNetwork - * - * @param networkModel Network model output stream - */ - void Export(std::ostream& networkModel); - - /** - * @copybrief IExecutableNetwork::GetExecGraphInfo - * - * Wraps IExecutableNetwork::GetExecGraphInfo. - * @return CNNetwork containing Executable Graph Info - */ - CNNNetwork GetExecGraphInfo(); - - /** - * @brief Sets configuration for current executable network - * - * @param config Map of pairs: (config name, config value) - */ - void SetConfig(const ov::AnyMap& config); - - /** @brief Gets configuration for current executable network. - * - * The method is responsible to extract information - * which affects executable network execution. The list of supported configuration values can be extracted via - * ExecutableNetwork::GetMetric with the SUPPORTED_CONFIG_KEYS key, but some of these keys cannot be changed - * dynamically, e.g. DEVICE_ID cannot changed if an executable network has already been compiled for particular - * device. 
- * - * @param name config key, can be found in properties.hpp - * @return Configuration ov::Any value - */ - ov::Any GetConfig(const std::string& name) const; - - /** - * @brief Gets general runtime metric for an executable network. - * - * It can be network name, actual device ID on - * which executable network is running or all other properties which cannot be changed dynamically. - * - * @param name metric name to request - * @return Metric ov::Any value - */ - ov::Any GetMetric(const std::string& name) const; - - /** - * @brief Checks if current ExecutableNetwork object is not initialized - * @return true if current ExecutableNetwork object is not initialized, false - otherwise - */ - bool operator!() const noexcept; - - /** - * @brief Checks if current ExecutableNetwork object is initialized - * @return true if current ExecutableNetwork object is initialized, false - otherwise - */ - explicit operator bool() const noexcept; - - /** - * @deprecated The method Will be removed - * @brief reset owned object to new pointer. - * - * Essential for cases when simultaneously loaded networks not expected. - * @param newActual actual pointed object - */ - void reset(std::shared_ptr newActual); - - /** - * @deprecated Will be removed. Use operator bool - * @brief cast operator is used when this wrapper initialized by LoadNetwork - * @return A shared pointer to IExecutableNetwork interface. - */ - operator std::shared_ptr(); - - /** - * @deprecated Use ExecutableNetwork::CreateInferRequest - * @copybrief IExecutableNetwork::CreateInferRequest - * - * Wraps IExecutableNetwork::CreateInferRequest. - * @return shared pointer on InferenceEngine::InferRequest object - */ - InferRequest::Ptr CreateInferRequestPtr(); -}; - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/cpp/ie_infer_request.hpp b/src/inference/include/ie/cpp/ie_infer_request.hpp deleted file mode 100644 index 69702bb5cccd1f..00000000000000 --- a/src/inference/include/ie/cpp/ie_infer_request.hpp +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides wrapper classes for infer requests and callbacks. - * - * @file ie_infer_request.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "cpp/ie_memory_state.hpp" -#include "ie_blob.h" -#include "ie_iinfer_request.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -class IInferRequestInternal; - -namespace details { -class ICompletionCallbackWrapper; -} // namespace details - -/** - * @copybrief IInferRequest - * - * Wraps IInferRequest - * It can throw exceptions safely for the application, where it is properly handled. 
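(Editorial reference, not part of this patch: the removed ExecutableNetwork wrapper corresponds to ov::CompiledModel. A minimal sketch; the export file name is a placeholder.)

#include <fstream>
#include <memory>
#include <string>
#include <openvino/openvino.hpp>

void compiled_model_counterparts(ov::CompiledModel& compiled) {
    // CreateInferRequest()
    ov::InferRequest request = compiled.create_infer_request();

    // Export(networkModel)
    std::ofstream blob_file("exported.blob", std::ios::binary);
    compiled.export_model(blob_file);

    // GetExecGraphInfo()
    std::shared_ptr<const ov::Model> runtime_graph = compiled.get_runtime_model();

    // GetConfig(name) / GetMetric(name)
    std::string name = compiled.get_property(ov::model_name);

    (void)request;
    (void)runtime_graph;
    (void)name;
}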
- */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) { - std::shared_ptr _impl; - std::shared_ptr _so; - - /** - * @brief Constructs InferRequest from the initialized std::shared_ptr - * @param impl Initialized shared pointer - * @param so Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is - * destroyed. - */ - InferRequest(const std::shared_ptr& impl, const std::shared_ptr& so); - friend class ExecutableNetwork; - -public: - /** - * @enum WaitMode - * @brief Enumeration to hold wait mode for IInferRequest - */ - enum INFERENCE_ENGINE_1_0_DEPRECATED WaitMode : int64_t { - /** Wait until inference result becomes available */ - RESULT_READY = -1, - /** IInferRequest doesn't block or interrupt current thread and immediately returns inference status */ - STATUS_ONLY = 0, - }; - - /** - * @brief A smart pointer to the InferRequest object - */ - using Ptr = std::shared_ptr; - - /// @brief Default constructor - InferRequest() = default; - - /// @brief Default copy constructor - /// @param other other InferRequest object - InferRequest(const InferRequest& other) = default; - - /// @brief Default copy assignment operator - /// @param other other InferRequest object - /// @return reference to the current object - InferRequest& operator=(const InferRequest& other) = default; - - /// @brief Default move constructor - /// @param other other InferRequest object - InferRequest(InferRequest&& other) = default; - - /// @brief Default move assignment operator - /// @param other other InferRequest object - /// @return reference to the current object - InferRequest& operator=(InferRequest&& other) = default; - - /** - * @brief Destructor preserves unloading order of implementation object and reference to library - */ - ~InferRequest(); - - /** - * @brief Sets input/output data to infer - * - * @note Memory allocation does not happen - * @param name Name of input or output blob. - * @param data Reference to input or output blob. The type of a blob must match the network input precision and - * size. - */ - void SetBlob(const std::string& name, const Blob::Ptr& data); - - /** - * @brief Gets input/output data for inference - * - * @note Memory allocation does not happen - * @param name A name of Blob to get - * @return A shared pointer to a Blob with a name @p name. If a blob is not found, an exception is thrown. - */ - Blob::Ptr GetBlob(const std::string& name); - - /** - * @brief Infers specified input(s) in synchronous mode - * - * @note blocks all methods of InferRequest while request is ongoing (running or waiting in queue) - * - */ - void Infer(); - - /** - * @brief Cancels inference request - */ - void Cancel(); - - /** - * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer - * - * @note not all plugins provide meaningful data - * @return Map of layer names to profiling information for that layer - */ - std::map GetPerformanceCounts() const; - - /** - * @brief Sets input data to infer - * - * @note Memory allocation doesn't happen - * @param inputs A reference to a map of input blobs accessed by input names. - * The type of Blob must correspond to the network input precision and size. - */ - void SetInput(const BlobMap& inputs); - - /** - * @brief Sets data that will contain result of the inference - * - * @note Memory allocation doesn't happen - * @param results - a reference to a map of result blobs accessed by output names. 
- * The type of Blob must correspond to the network output precision and size. - */ - void SetOutput(const BlobMap& results); - - /** - * @brief Start inference of specified input(s) in asynchronous mode - * - * @note It returns immediately. Inference starts also immediately. - */ - void StartAsync(); - - /** - * @brief Waits for the result to become available. Blocks until specified millis_timeout has elapsed or the result - * becomes available, whichever comes first. - * - * - * @param millis_timeout Maximum duration in milliseconds to block for - * @note There are special cases when millis_timeout is equal some value of the WaitMode enum: - * * STATUS_ONLY - immediately returns inference status (IInferRequest::RequestStatus). It does not block or - * interrupt current thread - * * RESULT_READY - waits until inference result becomes available - * @return A status code of operation - */ - StatusCode Wait(int64_t millis_timeout = RESULT_READY); - -private: - void SetCompletionCallbackImpl(std::function); - void SetCompletionCallbackImpl(std::function); - IE_SUPPRESS_DEPRECATED_START - void SetCompletionCallbackImpl(IInferRequest::CompletionCallback); - IE_SUPPRESS_DEPRECATED_END - - template - struct SetCallback { - void operator()(std::function f) { - _this.SetCompletionCallbackImpl(std::move(f)); - } - InferRequest& _this; - }; - -public: - /** - * @brief Sets a callback function that will be called on success or failure of asynchronous request - * - * @param callbackToSet callback object which will be called on when inference finish. - */ - template - void SetCompletionCallback(F callbackToSet) { - SetCallback{*this}(std::move(callbackToSet)); - } - - IE_SUPPRESS_DEPRECATED_START - /** - * @brief Gets state control interface for given infer request. 
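// The asynchronous counterpart documented above, as a sketch: a completion
// callback plus StartAsync()/Wait(); the no-argument callback overload is used here:
request.SetCompletionCallback([]() {
    // invoked when inference finishes, successfully or not
});
request.StartAsync();
InferenceEngine::StatusCode rc = request.Wait(InferenceEngine::InferRequest::RESULT_READY);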
- * - * State control essential for recurrent networks - * @return A vector of Memory State objects - */ - std::vector QueryState(); - - /** - * @brief IInferRequest pointer to be used directly in CreateInferRequest functions - * @return A shared pointer to IInferRequest interface - */ - INFERENCE_ENGINE_DEPRECATED("Will be removed") - operator std::shared_ptr(); - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Checks if current InferRequest object is not initialized - * @return true if current InferRequest object is not initialized, false - otherwise - */ - bool operator!() const noexcept; - - /** - * @brief Checks if current InferRequest object is initialized - * @return true if current InferRequest object is initialized, false - otherwise - */ - explicit operator bool() const noexcept; - - /** - * @brief Compares whether this request wraps the same impl underneath - * @return true if current InferRequest object doesn't wrap the same impl as the operator's arg - */ - bool operator!=(const InferRequest&) const noexcept; - - /** - * @brief Compares whether this request wraps the same impl underneath - * @return true if current InferRequest object wraps the same impl as the operator's arg - */ - bool operator==(const InferRequest&) const noexcept; -}; - -/** - * @private - */ -template <> -struct InferRequest::SetCallback> { - void operator()(std::function f) { - _this.SetCompletionCallbackImpl(std::move(f)); - } - InferRequest& _this; -}; - -/** - * @private - */ -template <> -struct InferRequest::SetCallback { - void operator()(IInferRequest::CompletionCallback f) { - _this.SetCompletionCallbackImpl(std::move(f)); - } - InferRequest& _this; -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/cpp/ie_memory_state.hpp b/src/inference/include/ie/cpp/ie_memory_state.hpp deleted file mode 100644 index bef706632d4353..00000000000000 --- a/src/inference/include/ie/cpp/ie_memory_state.hpp +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides VariableState - * - * @file ie_memory_state.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ie_api.h" -#include "ie_blob.h" - -namespace InferenceEngine { - -class IVariableStateInternal; -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief VariableState class - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(VariableState) { - std::shared_ptr _impl; - std::shared_ptr _so; - - /** - * @brief Constructs VariableState from the initialized std::shared_ptr - * @param impl Initialized shared pointer - * @param so Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin - * object is destroyed. 
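// Sketch of stateful-model handling through the VariableState wrapper declared
// below; `request` is assumed to run a model containing ReadValue/Assign pairs:
auto states = request.QueryState();
for (auto& state : states) {
    std::string variable_id = state.GetName();  // variable_id of the ReadValue node
    state.Reset();                              // restore the default initial value
}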
- */ - VariableState(const std::shared_ptr& impl, const std::shared_ptr& so); - friend class InferRequest; - friend class ExecutableNetwork; - -public: - /// @brief Default constructor - VariableState() = default; - - /// @brief Default copy constructor - /// @param other other VariableState object - VariableState(const VariableState& other) = default; - - /// @brief Default copy assignment operator - /// @param other other VariableState object - /// @return reference to the current object - VariableState& operator=(const VariableState& other) = default; - - /// @brief Default move constructor - /// @param other other VariableState object - VariableState(VariableState&& other) = default; - - /// @brief Default move assignment operator - /// @param other other VariableState object - /// @return reference to the current object - VariableState& operator=(VariableState&& other) = default; - - /** - * @brief Destructor preserves unloading order of implementation object and reference to library - */ - ~VariableState(); - - /** - * @brief Reset internal variable state for relevant infer request, - * to a value specified as default for according ReadValue node - */ - void Reset(); - - /** - * @brief Gets name of current variable state, if length of array is not enough name is truncated by len, null - * terminator is inserted as well. As variable state name `variable_id` from according `ReadValue` used. - * @return A string representing a state name - */ - std::string GetName() const; - - /** - * @brief Returns the value of the variable state. - * @return A blob representing a state - */ - Blob::CPtr GetState() const; - - /** - * @brief Sets the new state for the next inference. - * @param state The current state to set - */ - void SetState(Blob::Ptr state); -}; - -/** - * @brief For compatibility reasons. - */ -using MemoryState = VariableState; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/details/ie_pre_allocator.hpp b/src/inference/include/ie/details/ie_pre_allocator.hpp deleted file mode 100644 index b9e55d4bb98216..00000000000000 --- a/src/inference/include/ie/details/ie_pre_allocator.hpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief The header file defines utility PreAllocator class - * - * @file ie_pre_allocator.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_allocator.hpp" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -namespace details { -/* - * @brief This is a helper class to wrap external memory - */ -class INFERENCE_ENGINE_1_0_DEPRECATED PreAllocator final : public IAllocator { - void* _actualData; - size_t _sizeInBytes; - -public: - PreAllocator(void* ptr, size_t bytes_size) : _actualData(ptr), _sizeInBytes(bytes_size) {} - /** - * @brief Locks a handle to heap memory accessible by any memory manipulation routines - * @return The generic pointer to a memory buffer - */ - void* lock(void* handle, LockOp = LOCK_FOR_WRITE) noexcept override { - if (handle != _actualData) { - return nullptr; - } - return handle; - } - /** - * @brief The PreAllocator class does not utilize this function - */ - void unlock(void*) noexcept override {} - - /** - * @brief Returns a pointer to preallocated memory - * @param size Size in bytes - * @return A handle to the preallocated memory or nullptr - */ - void* alloc(size_t size) noexcept override { - if (size <= _sizeInBytes) { - return _actualData; - } - - return nullptr; - } - /** - * @brief The PreAllocator class cannot release the handle - * @return false - */ - bool free(void*) noexcept override { - return false; - } -}; - -/** - * @brief Creates a special allocator that only works on external memory - * @param ptr Pointer to preallocated memory - * @param size Number of elements allocated - * @return A new allocator - */ -template -std::shared_ptr INFERENCE_ENGINE_1_0_DEPRECATED make_pre_allocator(T* ptr, size_t size) { - return std::make_shared(ptr, size * sizeof(T)); -} - -} // namespace details -} // namespace InferenceEngine -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/ie_allocator.hpp b/src/inference/include/ie/ie_allocator.hpp deleted file mode 100644 index bd01531526a7c7..00000000000000 --- a/src/inference/include/ie/ie_allocator.hpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides Allocator interface - * - * @file ie_allocator.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { - -/** - * @brief Allocator handle mapping type - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED LockOp { - LOCK_FOR_READ = 0, //!< A flag to lock data for read - LOCK_FOR_WRITE //!< A flag to lock data for write -}; - -/** - * @interface IAllocator - * @brief Allocator concept to be used for memory management and is used as part of the Blob. 
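// Wrapping caller-owned memory with the PreAllocator helper above (a sketch;
// `data` must outlive any blob built on top of the returned allocator):
std::vector<float> data(1 * 3 * 224 * 224);
auto allocator = InferenceEngine::details::make_pre_allocator(data.data(), data.size());
// `allocator` can then be handed to a TBlob constructor or a make_shared_blob() overload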
- */ -class INFERENCE_ENGINE_1_0_DEPRECATED IAllocator : public std::enable_shared_from_this { -public: - /** - * @brief Maps handle to heap memory accessible by any memory manipulation routines. - * - * @param handle Handle to the allocated memory to be locked - * @param op Operation to lock memory for - * @return Generic pointer to memory - */ - virtual void* lock(void* handle, LockOp op = LOCK_FOR_WRITE) noexcept = 0; - /** - * @brief Unmaps memory by handle with multiple sequential mappings of the same handle. - * - * The multiple sequential mappings of the same handle are suppose to get the same - * result while there isn't a ref counter supported. - * - * @param handle Handle to the locked memory to unlock - */ - virtual void unlock(void* handle) noexcept = 0; - /** - * @brief Allocates memory - * - * @param size The size in bytes to allocate - * @return Handle to the allocated resource - */ - virtual void* alloc(size_t size) noexcept = 0; - /** - * @brief Releases the handle and all associated memory resources which invalidates the handle. - * @param handle The handle to free - * @return `false` if handle cannot be released, otherwise - `true`. - */ - virtual bool free(void* handle) noexcept = 0; - -protected: - virtual ~IAllocator() = default; -}; - -/** - * @brief Creates the default implementation of the Inference Engine allocator per plugin. - * - * @return The Inference Engine IAllocator* instance - */ -INFERENCE_ENGINE_API_CPP(std::shared_ptr) -INFERENCE_ENGINE_1_0_DEPRECATED CreateDefaultAllocator() noexcept; - -} // namespace InferenceEngine -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/ie_api.h b/src/inference/include/ie/ie_api.h deleted file mode 100644 index e43d7a377a9863..00000000000000 --- a/src/inference/include/ie/ie_api.h +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief The macro defines a symbol import/export mechanism essential for Microsoft Windows(R) OS. - * @file ie_api.h - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#if defined(OPENVINO_STATIC_LIBRARY) || defined(USE_STATIC_IE) || (defined(__GNUC__) && (__GNUC__ < 4)) -# define INFERENCE_ENGINE_API(...) extern "C" __VA_ARGS__ -# define INFERENCE_ENGINE_API_CPP(...) __VA_ARGS__ -# define INFERENCE_ENGINE_API_CLASS(...) __VA_ARGS__ -#else -# if defined(_WIN32) || defined(__CYGWIN__) -# ifdef IMPLEMENT_INFERENCE_ENGINE_API -# define INFERENCE_ENGINE_API(...) extern "C" __declspec(dllexport) __VA_ARGS__ __cdecl -# define INFERENCE_ENGINE_API_CPP(...) __declspec(dllexport) __VA_ARGS__ -# define INFERENCE_ENGINE_API_CLASS(...) __declspec(dllexport) __VA_ARGS__ -# else -# define INFERENCE_ENGINE_API(...) extern "C" __declspec(dllimport) __VA_ARGS__ __cdecl -# define INFERENCE_ENGINE_API_CPP(...) __declspec(dllimport) __VA_ARGS__ -# define INFERENCE_ENGINE_API_CLASS(...) 
__declspec(dllimport) __VA_ARGS__ -# endif -# else -# define INFERENCE_ENGINE_API(...) extern "C" __attribute__((visibility("default"))) __VA_ARGS__ -# define INFERENCE_ENGINE_API_CPP(...) __attribute__((visibility("default"))) __VA_ARGS__ -# define INFERENCE_ENGINE_API_CLASS(...) __attribute__((visibility("default"))) __VA_ARGS__ -# endif -#endif - -#if defined(__GNUC__) -# define INFERENCE_ENGINE_DEPRECATED(msg) __attribute__((deprecated(msg))) -# if __GNUC__ >= 6 || defined(__clang__) -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) INFERENCE_ENGINE_DEPRECATED(msg) -# else -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) -# endif -#elif defined(_MSC_VER) -# define INFERENCE_ENGINE_DEPRECATED(msg) __declspec(deprecated(msg)) -# if _MSC_VER >= 1900 /* VS2015 */ -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) [[deprecated(msg)]] -# else -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) -# endif -#elif defined(__INTEL_COMPILER) -# define INFERENCE_ENGINE_DEPRECATED(msg) __attribute__((deprecated(msg))) -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) INFERENCE_ENGINE_DEPRECATED(msg) -#else -# define INFERENCE_ENGINE_DEPRECATED(msg) -# define INFERENCE_ENGINE_ENUM_DEPRECATED(msg) -#endif - -#define INFERENCE_ENGINE_1_0_DEPRECATED \ - INFERENCE_ENGINE_DEPRECATED("The Inference Engine API is deprecated and will be removed in the 2024.0 release. " \ - "For instructions on transitioning to the new API, please refer to " \ - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") - -// Suppress warning "-Wdeprecated-declarations" / C4996 -#if defined(__GNUC__) -# define IE_DO_PRAGMA(x) _Pragma(# x) -#elif defined(_MSC_VER) -# define IE_DO_PRAGMA(x) __pragma(x) -#else -# define IE_DO_PRAGMA(x) -#endif - -#if (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ > 405)) || defined(__clang__) -# define IE_SUPPRESS_DEPRECATED_START \ - IE_DO_PRAGMA(GCC diagnostic push) \ - IE_DO_PRAGMA(GCC diagnostic ignored "-Wdeprecated-declarations") -# define IE_SUPPRESS_DEPRECATED_END IE_DO_PRAGMA(GCC diagnostic pop) -#elif defined(_MSC_VER) -# define IE_SUPPRESS_DEPRECATED_START \ - IE_DO_PRAGMA(warning(push)) \ - IE_DO_PRAGMA(warning(disable : 4996)) -# define IE_SUPPRESS_DEPRECATED_END IE_DO_PRAGMA(warning(pop)) -#elif defined(__INTEL_COMPILER) -# define IE_SUPPRESS_DEPRECATED_START \ - IE_DO_PRAGMA(warning(push)) \ - IE_DO_PRAGMA(warning(disable : 1478)) \ - IE_DO_PRAGMA(warning(disable : 1786)) -# define IE_SUPPRESS_DEPRECATED_END IE_DO_PRAGMA(warning(pop)) -#else -# define IE_SUPPRESS_DEPRECATED_START -# define IE_SUPPRESS_DEPRECATED_END -#endif - -#if defined __GNUC__ && (__GNUC__ <= 4 || (__GNUC__ == 5 && __GNUC_MINOR__ <= 5) || \ - (defined __i386__ || defined __arm__ || defined __aarch64__)) -# define _IE_SUPPRESS_DEPRECATED_START_GCC IE_SUPPRESS_DEPRECATED_START -# define _IE_SUPPRESS_DEPRECATED_END_GCC IE_SUPPRESS_DEPRECATED_END -#else -# define _IE_SUPPRESS_DEPRECATED_START_GCC -# define _IE_SUPPRESS_DEPRECATED_END_GCC -#endif - -#ifndef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# ifdef _WIN32 -# if defined(__INTEL_COMPILER) || defined(_MSC_VER) || defined(__GNUC__) -# define OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# endif -# elif defined(__clang__) -# define OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# elif defined(__GNUC__) && (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ > 2)) -# define OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# endif -#endif - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# ifndef ENABLE_UNICODE_PATH_SUPPORT -# define ENABLE_UNICODE_PATH_SUPPORT -# endif -#endif - -/** - * @def 
INFERENCE_PLUGIN_API(type) - * @brief Defines Inference Engine Plugin API method - * @param type A plugin type - */ - -#if (defined(_WIN32) || defined(__CYGWIN__)) && defined(IMPLEMENT_INFERENCE_ENGINE_PLUGIN) -# define INFERENCE_PLUGIN_API(type) extern "C" __declspec(dllexport) type -#else -# define INFERENCE_PLUGIN_API(type) INFERENCE_ENGINE_API(type) -#endif diff --git a/src/inference/include/ie/ie_blob.h b/src/inference/include/ie/ie_blob.h deleted file mode 100644 index 4436136c5832e8..00000000000000 --- a/src/inference/include/ie/ie_blob.h +++ /dev/null @@ -1,894 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for Blob and generic TBlob<> - * - * @file ie_blob.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "details/ie_pre_allocator.hpp" -#include "ie_allocator.hpp" -#include "ie_common.h" -#include "ie_layouts.h" -#include "ie_locked_memory.hpp" -#include "ie_precision.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This class represents a universal container in the Inference Engine - * - * @note Each Blob implementation must be derived from this Blob class directly or indirectly - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { -public: - /** - * @brief A smart pointer containing Blob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const Blob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief Creates a TBlob<> object from a Data node - * - * @param data A reference to a smart pointer of the Data node - * @return Smart pointer to TBlob<> with the relevant C type to the precision of the data node - */ - static Ptr CreateFromData(const DataPtr& data); - - /** - * @brief Blob virtual destructor - */ - virtual ~Blob(); - - /** - * @brief Checks if the Blob object can be cast to the type T* - * - * @tparam T Type to be checked. Must represent a class derived from the Blob - * @return true if this object can be dynamically cast to the type T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() noexcept { - return dynamic_cast(this) != nullptr; - } - - /** - * @brief Checks if the Blob object can be cast to the type const T* - * - * @tparam T Type to be checked. Must represent a class derived from the Blob - * @return true if this object can be dynamically cast to the type const T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() const noexcept { - return dynamic_cast(this) != nullptr; - } - - /** - * @brief Casts this Blob object to the type T*. 
- * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. Must represent a class derived from the Blob - * @return Raw pointer to the object of the type T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - T* as() noexcept { - return dynamic_cast(this); - } - - /** - * @brief Casts this Blob object to the type const T*. - * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. Must represent a class derived from the Blob - * @return Raw pointer to the object of the type const T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - const T* as() const noexcept { - return dynamic_cast(this); - } - - /** - * @brief Constructor. Creates an empty Blob object with the specified precision. - * - * @param tensorDesc Defines the layout and dims of the blob - */ - explicit Blob(const TensorDesc& tensorDesc) : tensorDesc(tensorDesc) {} - - /** - * @brief Returns the tensor description - * @return A const reference to a tensor descriptor - */ - virtual const TensorDesc& getTensorDesc() const noexcept { - return tensorDesc; - } - - /** - * @brief Returns the tensor description - * @return A reference to a tensor descriptor - */ - virtual TensorDesc& getTensorDesc() noexcept { - return tensorDesc; - } - - /** - * @brief By default, returns the total number of elements (a product of all the dims or 1 for scalar) - * - * Return value and its interpretation heavily depend on the blob type - * - * @return The total number of elements - */ - virtual size_t size() const noexcept { - if (tensorDesc.getLayout() == Layout::SCALAR) - return 1; - return product(tensorDesc.getDims()); - } - - /** - * @brief Returns the size of the current Blob in bytes. - * @return Blob's size in bytes - */ - virtual size_t byteSize() const { - return size() * element_size(); - } - - /** - * @deprecated Cast to MemoryBlob and use its API instead. - * Blob class can represent compound blob, which do not refer to the only solid memory. - * - * @brief Provides the number of bytes per element. - * - * The overall Blob capacity is size() * element_size(). Abstract method. - * - * @return Returns the number of bytes per element - */ - virtual size_t element_size() const = 0; - - /** - * @brief Allocates memory to store the data. - * - * Abstract method. - */ - virtual void allocate() noexcept = 0; - - /** - * @brief Releases previously allocated data. - * - * Abstract method. - * - * @return `True` if deallocation happens successfully, `false` otherwise. - */ - virtual bool deallocate() noexcept = 0; - - /** - * @brief Set new shape for blob, deallocate/allocate if new total size is bigger than previous one. - * - * @param dims new shape - */ - virtual void setShape(const SizeVector& dims); - - /** - * @deprecated Cast to MemoryBlob and use new wlock/rwlock API instead. - * Blob class can represent compound blob, which do not refer to the only solid memory. - * @brief Gets access to the allocated memory. - * - * Abstract method. - * - * @return A LockedMemory object - */ - virtual LockedMemory buffer() noexcept = 0; - - /** - * @deprecated Cast to MemoryBlob and use new MemoryBlob::rmap() function instead. - * Blob class can represent compound blob, which do not refer to the only solid memory. 
- * @brief Gets read-only access to the allocated memory. - * - * Abstract method. - * - * @return A LockedMemory object - */ - virtual LockedMemory cbuffer() const noexcept = 0; - - /** - * @brief Creates a blob describing given ROI object based on the current blob with memory sharing. - * - * Note: default implementation throws "not implemented" exception. - * - * @param roi A ROI object inside of the current blob. - * - * @return A shared pointer to the newly created ROI blob. - */ - virtual Blob::Ptr createROI(const ROI& roi) const; - - /** - * @brief Creates a blob describing given ROI object based on the current blob with memory sharing. - * - * Note: default implementation may throws "not implemented" exception. - * - * @param begin A ROI start coordinate - * @param end A ROI end coordinate - * - * @return A shared pointer to the newly created ROI blob. - */ - virtual Blob::Ptr createROI(const std::vector& begin, const std::vector& end) const; - -protected: - /** - * @brief The tensor descriptor of the given blob. - */ - TensorDesc tensorDesc; - - /** - * @deprecated Cast to MemoryBlob and use its API instead. - * @brief Multiplies the dimension vector values. - * - * @param dims Reference to a vector with dimension values of type size_t - * @return Result of multiplication - */ - static size_t product(const SizeVector& dims) noexcept { - if (dims.empty()) - return 0; - return std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies()); - } - - /** - * @deprecated Cast to MemoryBlob and use its API instead. - * @brief Multiplies the dimension vector values. Size of a scalar is 1 instead of 0 as for product. - * - * @param dims Reference to a vector with dimension values of type size_t - * @return Result of multiplication - */ - static size_t properProduct(const SizeVector& dims) noexcept { - return std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies()); - } - - /** - * @brief Gets an allocator for allocator-based blobs - * - * @return The allocator for allocator-based blobs or nullptr if there is none - */ - virtual const std::shared_ptr& getAllocator() const noexcept = 0; -}; - -/** - * @brief Helper cast function to work with shared Blob objects - * @param blob A blob to cast - * @return shared_ptr to the type T. Returned shared_ptr shares ownership of the object with the - * input Blob::Ptr - */ -template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> -INFERENCE_ENGINE_1_0_DEPRECATED std::shared_ptr as(const Blob::Ptr& blob) noexcept { - return std::dynamic_pointer_cast(blob); -} - -/** - * @brief Helper cast function to work with shared Blob objects - * @param blob A blob to cast - * @return shared_ptr to the type const T. 
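// The usual way to reach MemoryBlob functionality from a generic Blob::Ptr,
// combining the member is<T>() check with the free as<T>() helper (a sketch;
// `blob` is assumed to be a valid Blob::Ptr):
if (blob->is<InferenceEngine::MemoryBlob>()) {
    auto mem = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    // `mem` shares ownership with `blob`; a failed cast would yield nullptr
}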
Returned shared_ptr shares ownership of the object with - * the input Blob::Ptr - */ -template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> -INFERENCE_ENGINE_1_0_DEPRECATED std::shared_ptr as(const Blob::CPtr& blob) noexcept { - return std::dynamic_pointer_cast(blob); -} - -/** - * @brief This class implements a container object that represents a tensor in memory (host and - * remote/accelerated) - * - * @note Any Blob implementation that represents a concept of a tensor in memory (for example, - * TBlob) must be a subclass of MemoryBlob instead of Blob - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(MemoryBlob) : public Blob { -public: - /** - * @brief A smart pointer to the MemoryBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const MemoryBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief MemoryBlob virtual destructor - */ - virtual ~MemoryBlob(); - - /** - * @brief Constructor. Creates an empty MemoryBlob object with the specified precision. - * - * @param tensorDesc Defines the layout and dims of the blob - */ - explicit MemoryBlob(const TensorDesc& tensorDesc) : Blob(tensorDesc) {} - - /** - * @brief Returns the tensor description - * @return A tensor description - */ - const TensorDesc& getTensorDesc() const noexcept override { - return tensorDesc; - } - - /** - * @brief Returns the tensor description - * @return A tensor description - */ - TensorDesc& getTensorDesc() noexcept override { - return tensorDesc; - } - - /** - * @brief Returns the total number of elements, which is a product of all the dimensions - * @return The total number of elements - */ - size_t size() const noexcept override { - if (tensorDesc.getLayout() == Layout::SCALAR) - return 1; - return product(tensorDesc.getDims()); - } - - /** - * @brief Returns the size of the current Blob in bytes calculated as `size() * element_size()`. - * @return Blob's size in bytes - */ - size_t byteSize() const override { - return (size() * tensorDesc.getPrecision().bitsSize() + 7) >> 3; - } - - size_t element_size() const override { - return tensorDesc.getPrecision().size(); - } - - /** - * @brief Allocates memory to store the data. - * - * Abstract method. - */ - void allocate() noexcept override = 0; - - /** - * @brief Releases previously allocated data. - * - * Abstract method. - * @return `True` if deallocation happens successfully, `false` otherwise. - */ - bool deallocate() noexcept override = 0; - - /** - * @deprecated Use wmap() or rwmap() API instead. - * @brief Gets access to the allocated memory. - * - * Abstract method. - * - * @return A LockedMemory object - */ - LockedMemory buffer() noexcept override = 0; - - /** - * @deprecated Use rmap() function instead. - * @brief Gets read-only access to the allocated memory. - * - * Abstract method. - * - * @return A LockedMemory object - */ - LockedMemory cbuffer() const noexcept override = 0; - - /** - * @brief Gets read/write access to the memory in virtual space of the process. - * The function returns object which retains mapped memory. - * The memory been addressed in the MemoryBlob in general case can be allocated on remote device. - * This function maps remote memory to the memory in the virtual process space and after destruction - * of the LockedMemory will upload changed content to the accelerator. - * - * To avoid extra copy of data, you can use rmap() and wmap() functions. 
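// Read-only mapping through the MemoryBlob interface described here (a sketch;
// the float element type is an assumption). The LockedMemory object keeps the
// mapping valid only for its own lifetime:
{
    InferenceEngine::LockedMemory<const void> mapped = mem->rmap();
    const float* src = mapped.as<const float*>();
    // ... read from src while `mapped` is alive ...
}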
- * - * In case of memory originally allocated on the host, this function returns LockedMemory which will - * transparently refer to original memory address. No extra copy will happen - * - * In general case, pointer received from that LockedMemory becomes invalid just after - * destruction of LockedMemory instance. Keep Locked memory alive while you need to address memory - * in the process on the host. - * - * Abstract method. - * - * @return A LockedMemory object - */ - virtual LockedMemory rwmap() noexcept = 0; - - /** - * @brief Gets read only access to the memory in virtual space of the process. - * The function returns object which retains mapped memory. - * - * The memory been addressed in the MemoryBlob in general case can be allocated on remote device. - * This function copies remote memory to the memory in the virtual process space and after - * destruction of the LockedMemory it will not upload host memory back, because it is expected that - * content is not changed. - * - * To have an ability change content, you can use rwmap() and wmap() functions. - * - * In case of memory originally allocated on the host, this function returns LockedMemory which will - * transparently refer to original memory address. No extra copy will happen - * - * In general case, pointer received from that LockedMemory becomes invalid just after destruction - * of LockedMemory instance. Keep Locked memory alive while you need to address memory in the - * process on the host. - * - * Abstract method. - * - * @return A LockedMemory object - */ - virtual LockedMemory rmap() const noexcept = 0; - - /** - * @brief Gets "write only direction" access to the memory in virtual space of the process. - * The function returns object which retains memory to be uploaded on device. - * - * The memory been addressed in the MemoryBlob in general case can be allocated on remote device. - * This function does not copy of the content from the device to the memory in the virtual process - * space, the content of the memory just after calling of this function is not specified. After - * destruction of the LockedMemory, content will be upload host memory. - * In the same time there is no abilities to restrict reading from the memory, you need to care of - * reading from memory got by wmap(), it might have sense in some cases like filling of content and - * before uploading to device - * - * To access data stored in the blob, you can use rwmap() and rmap() functions. - * - * In case of memory originally allocated on the host, this function returns LockedMemory which will - * transparently refer to original memory address. No extra copy will happen - * - * In general case, pointer received from that LockedMemory becomes invalid just after destruction - * of LockedMemory instance. Keep Locked memory alive while you need to address memory in the - * process on the host. - * - * Abstract method. - * - * @return A LockedMemory object - */ - virtual LockedMemory wmap() noexcept = 0; - -protected: - /** - * @brief Gets the allocator for allocator-based blobs. - * - * @return The allocator for allocator-based blobs or if there is none then a nullptr. - */ - const std::shared_ptr& getAllocator() const noexcept override = 0; - - /** - * @brief Gets the handle to allocated memory. - * - * @return The handle to allocated memory for allocator-based blobs or if there is none then a nullptr. 
- */ - virtual void* getHandle() const noexcept = 0; - - /// private - template - friend class TBlobProxy; -}; - -/** - * @brief This is a convenient type for working with a map containing pairs(string, pointer to a Blob instance). - */ -using BlobMap = std::map; - -/** - * @brief Represents real host memory allocated for a Tensor/Blob per C type. - */ -template ::value && std::is_trivial::value>> -class INFERENCE_ENGINE_1_0_DEPRECATED TBlob : public MemoryBlob { - template - friend class TBlob; - -public: - /** - * @brief Smart Pointer to this TBlob object. - */ - using Ptr = std::shared_ptr>; - - /** - * @brief Creates a TBlob object with the specified dimensions and layout but does not allocate the memory. - * - * Use the allocate() method to allocate memory. - * - * @param tensorDesc Tensor description - */ - explicit TBlob(const TensorDesc& tensorDesc) : MemoryBlob(tensorDesc) {} - - /** - * @brief The constructor creates a TBlob object with the specified dimensions and layout - * on the pre-allocated memory. - * - * The allocate() call is not required. - * - * @param tensorDesc Tensor description - * @param ptr Pointer to the pre-allocated memory - * @param data_size Length of the pre-allocated array. If not set, size is assumed equal - * to the dot product of dims. - */ - TBlob(const TensorDesc& tensorDesc, T* ptr, size_t data_size = 0) : MemoryBlob(tensorDesc) { - if (data_size == 0) { - data_size = size(); - } - - if (data_size != 0 && ptr == nullptr) { - IE_THROW() << "Using Blob on external nullptr memory"; - } - - _allocator = details::make_pre_allocator(ptr, data_size); - // blob on attached memory is always allocated, so we are not forcing the user to call allocate() - allocate(); - } - - /** - * @brief Creates a TBlob object with the specified dimensions, layout and custom memory allocator but does not - * allocate the memory. - * - * @param tensorDesc Tensor description - * @param alloc An allocator - */ - TBlob(const TensorDesc& tensorDesc, const std::shared_ptr& alloc) - : MemoryBlob(tensorDesc), - _allocator(alloc) { - if (_allocator == nullptr) - IE_THROW() << "TBlob allocator was not initialized."; - } - - /** - * @brief The copy constructor data is reallocated and copied from the source to the target blob. - * - * @param blob Source blob - */ - TBlob(const TBlob& blob) : MemoryBlob(blob.getTensorDesc()) { - copyFrom(blob); - } - - /** - * @brief A move constructor. - * - * @param blob rvalue to make a move from - */ - TBlob(TBlob&& blob) : MemoryBlob(blob.getTensorDesc()) { - moveFrom(blob); - } - - /** - * @brief Copy operator for the TBlob object. - * - * @param blob object reference to copy from - * @return Newly copied object - */ - TBlob& operator=(const TBlob& blob) { - copyFrom(blob); - return *this; - } - - /** - *@brief Virtual destructor. - */ - virtual ~TBlob() { - deallocate(); - } - - /** - * @brief Creates an new empty rvalue LockedMemory object. - * - * @return rvalue for the empty locked object of type T - */ - virtual LockedMemory data() noexcept { - return lockme(); - } - - /** - * @brief Creates a new empty rvalue read-only LockedMemory object. - * - * @return rvalue for the empty locked const object of type T. 
- */ - virtual LockedMemory readOnly() const noexcept { - return lockme(); - } - - void allocate() noexcept override { - const auto allocator = getAllocator(); - const auto rawHandle = allocator->alloc(byteSize()); - - if (rawHandle == nullptr) { - return; - } - - _handle.reset(rawHandle, [allocator](void* rawHandle) { - allocator->free(rawHandle); - }); - } - - bool deallocate() noexcept override { - return free(); - } - - LockedMemory buffer() noexcept override { - return lockme(); - } - - LockedMemory cbuffer() const noexcept override { - return lockme(); - } - - LockedMemory rwmap() noexcept override { - return lockme(); - } - - LockedMemory rmap() const noexcept override { - return lockme(); - } - LockedMemory wmap() noexcept override { - return lockme(); - } - - Blob::Ptr createROI(const ROI& roi) const override { - return Blob::Ptr(new TBlob(*this, roi)); - } - - Blob::Ptr createROI(const std::vector& begin, const std::vector& end) const override { - return Blob::Ptr(new TBlob(*this, begin, end)); - } - -protected: - /** - * @brief Local instance of IAllocator to manipulate memory. - */ - mutable std::shared_ptr _allocator; - - /** - * @brief A handle for the stored memory returned from _allocator.alloc(). - */ - std::shared_ptr _handle; - - /** - * @brief Copies dimensions and data from the TBlob object. - * - * @param blob object reference to copy from - */ - void copyFrom(const TBlob& blob) { - tensorDesc = blob.tensorDesc; - this->allocate(); - auto memptr = data(); - memcpy(memptr, blob.readOnly(), byteSize()); - } - - /** - * @brief Swaps memory handlers between the current blob and the given one. - * - * @tparam U Type of the blob to move from - * @param blob TBlob instance to move from - */ - template - void moveFrom(TBlob& blob) { - tensorDesc = blob.tensorDesc; - this->_allocator = std::move(blob._allocator); - std::swap(this->_handle, blob._handle); - } - - /** - * @brief Frees handler and cleans up the stored data. - * @return `true` if memory was freed - */ - virtual bool free() { - bool bCanRelease = _handle != nullptr; - _handle.reset(); - return bCanRelease; - } - - /** - * @brief Creates a LockedMemory instance. 
- * - * @tparam S Type of the LockedMemory to be created - * @return A created instance of LockedMemory - */ - template - LockedMemory lockme() const { - return LockedMemory(_allocator.get(), getHandle(), 0); - // getTensorDesc().getBlockingDesc().getOffsetPadding()); - } - - const std::shared_ptr& getAllocator() const noexcept override { - // in case when constructor without allocator was used - if (!_allocator) { - _allocator = CreateDefaultAllocator(); - } - - return _allocator; - } - - void* getHandle() const noexcept override { - return _handle.get(); - } - - /** - * @brief Creates a blob from the existing blob with a given ROI - * @param origBlob An original blob - * @param roi A ROI object - */ - TBlob(const TBlob& origBlob, const ROI& roi) - : MemoryBlob(make_roi_desc(origBlob.getTensorDesc(), roi, true)), - _allocator(origBlob._allocator) { - IE_ASSERT(origBlob._handle != nullptr) << "Original Blob must be allocated before ROI creation"; - - _handle = origBlob._handle; - } - - /** - * @brief Creates a blob from the existing blob with a given ROI - * @param origBlob An original blob - * @param begin ROI start coordinate - * @param end ROI end coordinate - */ - TBlob(const TBlob& origBlob, const std::vector& begin, const std::vector& end) - : MemoryBlob(make_roi_desc(origBlob.getTensorDesc(), begin, end, true)), - _allocator(origBlob._allocator) { - IE_ASSERT(origBlob._handle != nullptr) << "Original Blob must be allocated before ROI creation"; - - _handle = origBlob._handle; - } -}; - -// These should not be exported for WIN32 to avoid usage of '_handle' and '_allocator' across CRT bounaries -#ifndef _WIN32 -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -#endif - -/** - * @brief Creates a blob with the given tensor descriptor. - * - * @tparam Type Type of the shared pointer to be created - * @param tensorDesc Tensor descriptor for Blob creation - * @return A shared pointer to the newly created blob of the given type - */ -template -inline INFERENCE_ENGINE_1_0_DEPRECATED typename InferenceEngine::TBlob::Ptr make_shared_blob( - const TensorDesc& tensorDesc) { - if (!tensorDesc.getPrecision().hasStorageType()) - IE_THROW() << "Cannot make shared blob! " - << "The blob type cannot be used to store objects of current precision"; - return std::make_shared>(tensorDesc); -} - -/** - * @brief Creates a blob with the given tensor descriptor from the pointer to the pre-allocated memory. 
- * - * @tparam Type Type of the shared pointer to be created - * @param tensorDesc TensorDesc for Blob creation - * @param ptr Pointer to the pre-allocated memory - * @param size Length of the pre-allocated array - * @return A shared pointer to the newly created blob of the given type - */ -template -inline INFERENCE_ENGINE_1_0_DEPRECATED typename InferenceEngine::TBlob::Ptr -make_shared_blob(const TensorDesc& tensorDesc, Type* ptr, size_t size = 0) { - if (!tensorDesc.getPrecision().hasStorageType()) - IE_THROW() << "Cannot make shared blob! " - << "The blob type cannot be used to store objects of current precision"; - return std::make_shared>(tensorDesc, ptr, size); -} - -/** - * @brief Creates a blob with the given tensor descriptor and allocator. - * - * @tparam Type Type of the shared pointer to be created - * @param tensorDesc Tensor descriptor for Blob creation - * @param alloc Shared pointer to IAllocator to use in the blob - * @return A shared pointer to the newly created blob of the given type - */ -template -inline INFERENCE_ENGINE_1_0_DEPRECATED typename InferenceEngine::TBlob::Ptr make_shared_blob( - const TensorDesc& tensorDesc, - const std::shared_ptr& alloc) { - if (!tensorDesc.getPrecision().hasStorageType()) - IE_THROW() << "Cannot make shared blob! " - << "The blob type cannot be used to store objects of current precision"; - return std::make_shared>(tensorDesc, alloc); -} - -/** - * @brief Creates a copy of given TBlob instance. - * - * @tparam TypeTo Type of the shared pointer to be created - * @param arg given pointer to blob - * @return A shared pointer to the newly created blob of the given type - */ -template -inline INFERENCE_ENGINE_1_0_DEPRECATED typename InferenceEngine::TBlob::Ptr make_shared_blob( - const TBlob& arg) { - return std::make_shared>(arg); -} - -/** - * @brief Creates a Blob object of the specified type - * - * @param args Constructor arguments for the Blob object - * @return A shared pointer to the newly created Blob object - */ -template ::value, int>::type = 0> -INFERENCE_ENGINE_1_0_DEPRECATED std::shared_ptr make_shared_blob(Args&&... args) { - return std::make_shared(std::forward(args)...); -} - -/** - * @brief Creates a blob describing given ROI object based on the given blob with pre-allocated memory. - * - * @param inputBlob original blob with pre-allocated memory. - * @param roi A ROI object inside of the original blob. - * @return A shared pointer to the newly created blob. - */ -INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(Blob::Ptr) - make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi); - -/** - * @brief Creates a blob describing given ROI object based on the given blob with pre-allocated memory. - * - * @param inputBlob original blob with pre-allocated memory. - * @param begin A ROI object start coordinate inside of the original blob. - * @param end A ROI object end coordinate inside of the original blob. - * @return A shared pointer to the newly created blob. 
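// Typical creation paths for the factory functions above (shape, precision and
// the external buffer are illustrative values):
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                 InferenceEngine::SizeVector{1, 3, 224, 224},
                                 InferenceEngine::Layout::NCHW);
auto owned = InferenceEngine::make_shared_blob<float>(desc);
owned->allocate();                                       // blob owns its memory
std::vector<float> external(owned->size());
auto wrapped = InferenceEngine::make_shared_blob<float>(desc, external.data(), external.size());
// `wrapped` reuses the caller's buffer through a PreAllocator; no copy is made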
- */ -INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(Blob::Ptr) - make_shared_blob(const Blob::Ptr& inputBlob, const std::vector& begin, const std::vector& end); - -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_common.h b/src/inference/include/ie/ie_common.h deleted file mode 100644 index c933f3fab14c60..00000000000000 --- a/src/inference/include/ie/ie_common.h +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file with common inference engine definitions. - * - * @file ie_common.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ie_api.h" - -IE_SUPPRESS_DEPRECATED_START -#ifndef NDEBUG -# include -#endif -namespace InferenceEngine { -/** - * @brief Represents tensor size. - * - * The order is opposite to the order in Caffe*: (w,h,n,b) where the most frequently changing element in memory is - * first. - */ -using SizeVector = std::vector; - -/** - * @brief The main data representation node - */ -class Data; - -/** - * @brief Smart pointer to Data - */ -using DataPtr = std::shared_ptr; - -/** - * @brief Smart pointer to constant Data - */ -using CDataPtr = std::shared_ptr; - -/** - * @brief Smart weak pointer to Data - */ -using DataWeakPtr = std::weak_ptr; - -/** - * @union UserValue - * @brief The method holds the user values to enable binding of data per graph node. - */ -union INFERENCE_ENGINE_1_0_DEPRECATED UserValue { - int v_int; //!< An integer value - float v_float; //!< A floating point value - void* v_ptr; //!< A pointer to a void -}; - -/** - * @enum Layout - * @brief Layouts that the inference engine supports - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED Layout : uint8_t { - ANY = 0, //!< "any" layout - - // I/O data layouts - NCHW = 1, //!< NCHW layout for input / output blobs - NHWC = 2, //!< NHWC layout for input / output blobs - NCDHW = 3, //!< NCDHW layout for input / output blobs - NDHWC = 4, //!< NDHWC layout for input / output blobs - - // weight layouts - OIHW = 64, //!< OIHW layout for operation weights - GOIHW = 65, //!< GOIHW layout for operation weights - OIDHW = 66, //!< OIDHW layout for operation weights - GOIDHW = 67, //!< GOIDHW layout for operation weights - - // Scalar - SCALAR = 95, //!< A scalar layout - - // bias layouts - C = 96, //!< A bias layout for operation - - // Single image layouts - CHW = 128, //!< A single image layout (e.g. for mean image) - HWC = 129, //!< A single image layout (e.g. 
for mean image) - - // 2D - HW = 192, //!< HW 2D layout - NC = 193, //!< NC 2D layout - CN = 194, //!< CN 2D layout - - BLOCKED = 200, //!< A blocked layout -}; - -/** - * @brief Prints a string representation of InferenceEngine::Layout to a stream - * @param out An output stream to send to - * @param p A layout value to print to a stream - * @return A reference to the `out` stream - */ -INFERENCE_ENGINE_1_0_DEPRECATED inline std::ostream& operator<<(std::ostream& out, const Layout& p) { - switch (p) { -#define PRINT_LAYOUT(name) \ - case name: \ - out << #name; \ - break; - - PRINT_LAYOUT(ANY); - PRINT_LAYOUT(NCHW); - PRINT_LAYOUT(NHWC); - PRINT_LAYOUT(NCDHW); - PRINT_LAYOUT(NDHWC); - PRINT_LAYOUT(OIHW); - PRINT_LAYOUT(GOIHW); - PRINT_LAYOUT(OIDHW); - PRINT_LAYOUT(GOIDHW); - PRINT_LAYOUT(SCALAR); - PRINT_LAYOUT(C); - PRINT_LAYOUT(CHW); - PRINT_LAYOUT(HWC); - PRINT_LAYOUT(HW); - PRINT_LAYOUT(NC); - PRINT_LAYOUT(CN); - PRINT_LAYOUT(BLOCKED); -#undef PRINT_LAYOUT - default: - out << static_cast(p); - break; - } - return out; -} - -/** - * @enum ColorFormat - * @brief Extra information about input color format for preprocessing - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED ColorFormat : uint32_t { - RAW = 0u, ///< Plain blob (default), no extra color processing required - RGB, ///< RGB color format - BGR, ///< BGR color format, default in OpenVINO - RGBX, ///< RGBX color format with X ignored during inference - BGRX, ///< BGRX color format with X ignored during inference -}; - -/** - * @brief Prints a string representation of InferenceEngine::ColorFormat to a stream - * @param out An output stream to send to - * @param fmt A color format value to print to a stream - * @return A reference to the `out` stream - */ -INFERENCE_ENGINE_1_0_DEPRECATED inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) { - switch (fmt) { -#define PRINT_COLOR_FORMAT(name) \ - case name: \ - out << #name; \ - break; - - PRINT_COLOR_FORMAT(RAW); - PRINT_COLOR_FORMAT(RGB); - PRINT_COLOR_FORMAT(BGR); - PRINT_COLOR_FORMAT(RGBX); - PRINT_COLOR_FORMAT(BGRX); -#undef PRINT_COLOR_FORMAT - - default: - out << static_cast(fmt); - break; - } - return out; -} - -/** - * @struct InferenceEngineProfileInfo - * @brief Represents basic inference profiling information per layer. - * - * If the layer is executed using tiling, the sum time per each tile is indicated as the total execution time. - * Due to parallel execution, the total execution time for all layers might be greater than the total inference time. 
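// Sketch of consuming the per-layer profiling data described above; the map is
// produced by InferRequest::GetPerformanceCounts() from the wrapper removed
// earlier in this patch:
auto counts = request.GetPerformanceCounts();
for (const auto& item : counts) {
    if (item.second.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED)
        std::cout << item.first << ": " << item.second.realTime_uSec << " us\n";
}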
- */ -struct INFERENCE_ENGINE_1_0_DEPRECATED InferenceEngineProfileInfo { - /** - * @brief Defines the general status of the layer - */ - enum INFERENCE_ENGINE_1_0_DEPRECATED LayerStatus { - NOT_RUN, //!< A layer is not executed - OPTIMIZED_OUT, //!< A layer is optimized out during graph optimization phase - EXECUTED //!< A layer is executed - }; - - /** - * @brief Defines a layer status - */ - LayerStatus status; - - /** - * @brief The absolute time in microseconds that the layer ran (in total) - */ - long long realTime_uSec; - /** - * @brief The net host cpu time that the layer ran - */ - long long cpu_uSec; - - /** - * @brief An execution type of unit - */ - char exec_type[256] = {}; - - /** - * @brief A layer type - */ - char layer_type[256] = {}; - - /** - * @brief An execution index of the unit - */ - unsigned execution_index; -}; - -/** - * @enum StatusCode - * @brief This enum contains codes for all possible return values of the interface functions - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED StatusCode : int { - OK = 0, - GENERAL_ERROR = -1, - NOT_IMPLEMENTED = -2, - NETWORK_NOT_LOADED = -3, - PARAMETER_MISMATCH = -4, - NOT_FOUND = -5, - OUT_OF_BOUNDS = -6, - /* - * @brief exception not of std::exception derived type was thrown - */ - UNEXPECTED = -7, - REQUEST_BUSY = -8, - RESULT_NOT_READY = -9, - NOT_ALLOCATED = -10, - INFER_NOT_STARTED = -11, - NETWORK_NOT_READ = -12, - INFER_CANCELLED = -13 -}; - -/** - * @struct ResponseDesc - * @brief Represents detailed information for an error - */ -struct INFERENCE_ENGINE_1_0_DEPRECATED ResponseDesc { - /** - * @brief A character buffer that holds the detailed information for an error. - */ - char msg[4096] = {}; -}; - -/** - * @brief Response structure encapsulating information about supported layer - */ -struct INFERENCE_ENGINE_1_0_DEPRECATED QueryNetworkResult { - /** - * @brief A map of supported layers: - * - key - a layer name - * - value - a device name on which layer is assigned - */ - std::map supportedLayersMap; - - /** - * @brief A status code - */ - StatusCode rc = OK; - - /** - * @brief Response message - */ - ResponseDesc resp; -}; - -/** - * @brief A collection that contains string as key, and const Data smart pointer as value - */ -using ConstOutputsDataMap = std::map; - -/** - * @brief A collection that contains string as key, and Data smart pointer as value - */ -using OutputsDataMap = std::map; - -namespace details { -struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferenceEngineException) - : public std::runtime_error { - using std::runtime_error::runtime_error; - bool hasStatus() const { - return true; - } - StatusCode getStatus() const; -}; -} // namespace details - -/** - * @brief Base Inference Engine exception class - */ -IE_SUPPRESS_DEPRECATED_START -struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Exception) - : public details::InferenceEngineException { - using InferenceEngineException::InferenceEngineException; -}; -IE_SUPPRESS_DEPRECATED_END - -/// @cond -namespace details { -template -struct ExceptionTraits; - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED ExceptionTraits { - static const char* string() { - return ""; - } -}; -} // namespace details - -#define INFERENCE_ENGINE_DECLARE_EXCEPTION(ExceptionType, statusCode) \ - struct INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExceptionType) final \ - : public InferenceEngine::Exception { \ - using Exception::Exception; \ - }; \ - namespace details { \ - template <> \ - struct 
INFERENCE_ENGINE_1_0_DEPRECATED ExceptionTraits { \ - static const char* string() { \ - return "[ " #statusCode " ]"; \ - } \ - }; \ - } -/// @endcond - -/** @brief This class represents StatusCode::GENERAL_ERROR exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(GeneralError, GENERAL_ERROR) - -/** @brief This class represents StatusCode::NOT_IMPLEMENTED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(NotImplemented, NOT_IMPLEMENTED) - -/** @brief This class represents StatusCode::NETWORK_NOT_LOADED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(NetworkNotLoaded, NETWORK_NOT_LOADED) - -/** @brief This class represents StatusCode::PARAMETER_MISMATCH exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(ParameterMismatch, PARAMETER_MISMATCH) - -/** @brief This class represents StatusCode::NOT_FOUND exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(NotFound, NOT_FOUND) - -/** @brief This class represents StatusCode::OUT_OF_BOUNDS exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(OutOfBounds, OUT_OF_BOUNDS) - -/** @brief This class represents StatusCode::UNEXPECTED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(Unexpected, UNEXPECTED) - -/** @brief This class represents StatusCode::REQUEST_BUSY exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(RequestBusy, REQUEST_BUSY) - -/** @brief This class represents StatusCode::RESULT_NOT_READY exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(ResultNotReady, RESULT_NOT_READY) - -/** @brief This class represents StatusCode::NOT_ALLOCATED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(NotAllocated, NOT_ALLOCATED) - -/** @brief This class represents StatusCode::INFER_NOT_STARTED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(InferNotStarted, INFER_NOT_STARTED) - -/** @brief This class represents StatusCode::NETWORK_NOT_READ exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(NetworkNotRead, NETWORK_NOT_READ) - -/** @brief This class represents StatusCode::INFER_CANCELLED exception */ -INFERENCE_ENGINE_DECLARE_EXCEPTION(InferCancelled, INFER_CANCELLED) - -/** - * @private - */ -#undef INFERENCE_ENGINE_DECLARE_EXCEPTION - -// TODO: Move this section out of public API -namespace details { - -/** - * @brief Rethrow a copy of exception. UShould be used in catch blocks - */ -[[noreturn]] INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(void) Rethrow(); - -/** - * @brief Tag struct used to throw exception - */ -#ifndef NDEBUG -template -struct INFERENCE_ENGINE_1_0_DEPRECATED ThrowNow final { - const char* const file; - const int line; - - [[noreturn]] static void create(const std::ostream& ostream, const char* file, int line) { - std::stringstream stream; - stream << '\n' << file << ':' << line << ' '; - stream << ExceptionTraits::string() << ' ' << ostream.rdbuf(); - throw ExceptionType{stream.str()}; - } - - [[noreturn]] void operator<<=(const std::ostream& ostream) { - create(ostream, file, line); - } -}; -#else -template -struct INFERENCE_ENGINE_1_0_DEPRECATED ThrowNow final { - [[noreturn]] static void create(const std::ostream& ostream) { - std::stringstream stream; - stream << ExceptionTraits::string() << ' ' << ostream.rdbuf(); - throw ExceptionType{stream.str()}; - } - - [[noreturn]] void operator<<=(const std::ostream& ostream) { - create(ostream); - } -}; -#endif - -/// @cond -#ifndef NDEBUG -# define IE_LOCATION '\n' << __FILE__ << ':' << __LINE__ << ' ' -# define IE_LOCATION_PARAM __FILE__, __LINE__ -#else -# define IE_LOCATION "" -# define IE_LOCATION_PARAM -#endif // NDEBUG - -// WARNING: DO NOT USE THIS MACRO! 
Use openvino/util/pp.hpp macro library -#define IE_PP_EXPAND(X) X -#define IE_PP_NARG(...) IE_PP_EXPAND(IE_PP_NARG_(__VA_ARGS__, IE_PP_RSEQ_N())) -#define IE_PP_NARG_(...) IE_PP_EXPAND(IE_PP_ARG_N(__VA_ARGS__)) -#define IE_PP_ARG_N(_0, _1, N, ...) N -#define IE_PP_RSEQ_N() 0, 1, 0 -#define IE_PP_NO_ARGS(NAME) , -#define IE_PP_CAT3_(x, y, z) x##y##z -#define IE_PP_CAT3(x, y, z) IE_PP_CAT3_(x, y, z) -#define IE_PP_OVERLOAD(NAME, ...) \ - IE_PP_EXPAND(IE_PP_CAT3(NAME, _, IE_PP_EXPAND(IE_PP_NARG(IE_PP_NO_ARGS __VA_ARGS__(NAME))))(__VA_ARGS__)) -// ENDWARNING - -#define IE_THROW_0() \ - (InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM}) <<= std::stringstream {} - -#define IE_THROW_1(ExceptionType) \ - (InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM}) <<= std::stringstream {} -/// @endcond - -/** - * @def IE_THROW - * @brief A macro used to throw a specified exception with a description - */ -#define IE_THROW(...) IE_PP_OVERLOAD(IE_THROW, __VA_ARGS__) - -/** - * @def IE_ASSERT - * @brief Uses the assert() function if NDEBUG is not defined, an InferenceEngine exception otherwise - */ -#ifdef NDEBUG -# define IE_ASSERT(EXPRESSION) \ - if (!(EXPRESSION)) \ - IE_THROW(GeneralError) << " AssertionError " #EXPRESSION -#else -/** - * @private - */ -struct NullStream { - template - NullStream& operator<<(const T&) noexcept { - return *this; - } -}; - -# define IE_ASSERT(EXPRESSION) \ - assert((EXPRESSION)); \ - InferenceEngine::details::NullStream() -#endif // NDEBUG - -/// @cond -#define THROW_IE_EXCEPTION \ - (InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM}) <<= \ - std::stringstream {} - -#define IE_EXCEPTION_CASE(TYPE_ALIAS, STATUS_CODE, EXCEPTION_TYPE, ...) \ - case InferenceEngine::STATUS_CODE: { \ - using InferenceEngine::EXCEPTION_TYPE; \ - using TYPE_ALIAS = EXCEPTION_TYPE; \ - __VA_ARGS__; \ - } break; -/// @endcond - -/** - * @def IE_EXCEPTION_SWITCH - * @brief Generates a switch statement over error codes and maps them to corresponding exception types - */ -#define IE_EXCEPTION_SWITCH(STATUS, TYPE_ALIAS, ...)
\ - switch (STATUS) { \ - IE_EXCEPTION_CASE(TYPE_ALIAS, GENERAL_ERROR, GeneralError, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, NOT_IMPLEMENTED, NotImplemented, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, NETWORK_NOT_LOADED, NetworkNotLoaded, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, PARAMETER_MISMATCH, ParameterMismatch, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, NOT_FOUND, NotFound, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, OUT_OF_BOUNDS, OutOfBounds, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, UNEXPECTED, Unexpected, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, REQUEST_BUSY, RequestBusy, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, RESULT_NOT_READY, ResultNotReady, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, NOT_ALLOCATED, NotAllocated, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, INFER_NOT_STARTED, InferNotStarted, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, NETWORK_NOT_READ, NetworkNotRead, __VA_ARGS__) \ - IE_EXCEPTION_CASE(TYPE_ALIAS, INFER_CANCELLED, InferCancelled, __VA_ARGS__) \ - default: \ - IE_ASSERT(!"Unreachable"); \ - } - -} // namespace details -} // namespace InferenceEngine - -#if defined(_WIN32) && !defined(__GNUC__) -# define __PRETTY_FUNCTION__ __FUNCSIG__ -#else -# define __PRETTY_FUNCTION__ __PRETTY_FUNCTION__ -#endif -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/ie_core.hpp b/src/inference/include/ie/ie_core.hpp deleted file mode 100644 index a18cba26cbca7f..00000000000000 --- a/src/inference/include/ie/ie_core.hpp +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the Inference Engine Core class C++ API - * - * @file ie_core.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "cpp/ie_executable_network.hpp" -#include "openvino/core/version.hpp" - -namespace InferenceEngine { - -/** - * @ingroup ie_cpp - * @brief This class represents Inference Engine Core entity. - * - * It can throw exceptions safely for the application, where it is properly handled. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { - class Impl; - std::shared_ptr _impl; - -public: - /** @brief Constructs an OpenVINO Core instance with devices - * and their plugins description. - * - * There are two ways how to configure device plugins: - * 1. (default) Use XML configuration file in case of dynamic libraries build; - * 2. Use strictly defined configuration in case of static libraries build. - * - * @param xmlConfigFile Path to the .xml file with plugins to load from. If the XML configuration file is not - * specified, default OpenVINO Runtime plugins are loaded from: - * 1. (dynamic build) default `plugins.xml` file located in the same folder as OpenVINO runtime shared library; - * 2. (static build) statically defined configuration. 
In this case path to the .xml file is ignored. - */ - explicit Core(const std::string& xmlConfigFile = {}); - - /** - * @brief Returns plugins version information - * - * @param deviceName Device name to identify plugin - * @return A vector of versions - */ - std::map GetVersions(const std::string& deviceName) const; - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - /** - * @brief Reads models from IR and ONNX formats - * @param modelPath path to model - * @param binPath path to data file - * For IR format (*.bin): - * * if path is empty, will try to read bin file with the same name as xml and - * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx): - * * binPath parameter is not used. - * @return CNNNetwork - */ - CNNNetwork ReadNetwork(const std::wstring& modelPath, const std::wstring& binPath = {}) const; -#endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - - /** - * @brief Reads models from IR and ONNX formats - * @param modelPath path to model - * @param binPath path to data file - * For IR format (*.bin): - * * if path is empty, will try to read bin file with the same name as xml and - * * if bin file with the same name was not found, will load IR without weights. - * For ONNX format (*.onnx): - * * binPath parameter is not used. - * @return CNNNetwork - */ - CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath = {}) const; - - /** - * @brief Reads models from IR and ONNX formats - * @param model string with model in IR or ONNX format - * @param weights shared pointer to constant blob with weights - * Reading ONNX models doesn't support loading weights from data blobs. - * If you are using an ONNX model with external data files, please use the - * `InferenceEngine::Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights) const` - * function overload which takes a filesystem path to the model. - * For ONNX case the second parameter should contain empty blob. - * @note Created InferenceEngine::CNNNetwork object shares the weights with `weights` object. - * So, do not create `weights` on temporary data which can be later freed, since the network - * constant data becomes to point to invalid memory. - * @return CNNNetwork - */ - CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const; - - /** - * @brief Creates an executable network from a network object and uses AUTO plugin as the default device to load - * executable network. - * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network reference - */ - ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config = {}); - - /** - * @brief Creates an executable network from a network object. 
- * - * Users can create as many networks as they need and use - * them simultaneously (up to the limitation of the hardware resources) - * - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param deviceName Name of device to load network to - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network reference - */ - ExecutableNetwork LoadNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config = {}); - - /** - * @brief Reads model and creates an executable network from IR or ONNX file and uses AUTO plugin as the default - * device to load executable network. - * - * This can be more efficient than using ReadNetwork + LoadNetwork(CNNNetwork) flow - * especially for cases when caching is enabled and cached model is available - * - * @param modelPath path to model - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation/ - * - * @return An executable network reference - */ - ExecutableNetwork LoadNetwork(const std::string& modelPath, const std::map& config = {}); - - /** - * @brief Reads model and creates an executable network from IR or ONNX file - * - * This can be more efficient than using ReadNetwork + LoadNetwork(CNNNetwork) flow - * especially for cases when caching is enabled and cached model is available - * - * @param modelPath path to model - * @param deviceName Name of device to load network to - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation/ - * - * @return An executable network reference - */ - ExecutableNetwork LoadNetwork(const std::string& modelPath, - const std::string& deviceName, - const std::map& config = {}); - - /** - * @brief Creates an executable network from a previously exported network - * - * @param modelFileName Path to the location of the exported file - * @param deviceName Name of device load executable network on - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation* - * @return An executable network reference - */ - ExecutableNetwork ImportNetwork(const std::string& modelFileName, - const std::string& deviceName, - const std::map& config = {}); - - /** - * @brief Creates an executable network from a previously exported network - * @param networkModel network model stream - * @param deviceName Name of device load executable network on - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation* - * @return An executable network reference - */ - ExecutableNetwork ImportNetwork(std::istream& networkModel, - const std::string& deviceName, - const std::map& config = {}); - - /** - * @deprecated Use Core::ImportNetwork with explicit device name - * @brief Creates an executable network from a previously exported network - * @param networkModel network model stream - * @return An executable network reference - */ - INFERENCE_ENGINE_DEPRECATED("Use Core::ImportNetwork with explicit device name") - ExecutableNetwork ImportNetwork(std::istream& networkModel); - - /** - * @brief Query device if it supports specified network with specified configuration - * - * @param deviceName A name of a device to query - * @param network Network object to query - * @param config Optional map of pairs: (config parameter name, 
config parameter value) - * @return An object containing a map of pairs a layer name -> a device name supporting this layer. - */ - QueryNetworkResult QueryNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config = {}) const; - - /** - * @brief Sets configuration for device, acceptable keys can be found in properties.hpp - * - * @param deviceName An optional name of a device. If device name is not specified, the config is set for all the - * registered devices. - * - * @param config Map of pairs: (config parameter name, config parameter value) - */ - void SetConfig(const std::map& config, const std::string& deviceName = {}); - - /** - * @brief Gets configuration dedicated to device behaviour. - * - * The method is targeted to extract information which can be set via SetConfig method. - * - * @param deviceName - A name of a device to get a configuration value. - * @param name - config key. - * @return Value of config corresponding to config key. - */ - ov::Any GetConfig(const std::string& deviceName, const std::string& name) const; - - /** - * @brief Gets general runtime metric for dedicated hardware. - * - * The method is needed to request common device properties - * which are executable network agnostic. It can be device name, temperature, other devices-specific values. - * - * @param deviceName - A name of a device to get a metric value. - * @param name - metric name to request. - * @param options - optional parameters to get a metric value - * @return Metric value corresponding to metric key. - */ - ov::Any GetMetric(const std::string& deviceName, const std::string& name, const ov::AnyMap& options = {}) const; - - /** - * @brief Returns devices available for neural networks inference - * - * @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, NPU } - * If there more than one device of specific type, they are enumerated with .# suffix. - */ - std::vector GetAvailableDevices() const; - - /** - * @brief Register new device and plugin which implement this device inside Inference Engine. - * - * @param plugin Path (absolute or relative) or name of a plugin. Depending on platform, `plugin` is wrapped with - * shared library suffix and prefix to identify library full name - * - * @param deviceName A device name to register plugin for - */ - void RegisterPlugin(const std::string& plugin, const std::string& deviceName); - - /** - * @brief Unloads previously loaded plugin with a specified name from Inference Engine - * The method is needed to remove plugin instance and free its resources. If plugin for a - * specified device has not been created before, the method throws an exception. - * - * @param deviceName Device name identifying plugin to remove from Inference Engine - */ - void UnregisterPlugin(const std::string& deviceName); - - /** @brief Registers plugin to Inference Engine Core instance using XML configuration file with - * plugins description. - * - * XML file has the following structure: - * - * ```xml - * - * - * - * - * - * - * - * - * - * - * - * - * ``` - * - * - `name` identifies name of device enabled by plugin - * - `location` specifies absolute path to dynamic library with plugin. A path can also be relative to inference - * engine shared library. It allows to have common config for different systems with different configurations. - * - Properties are set to plugin via the `SetConfig` method. - * - Extensions are set to plugin via the `AddExtension` method. 
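Since this patch removes the legacy Core registration and configuration flow entirely, a minimal sketch of the corresponding 2.0 `ov::Core` calls may help readers migrating away from it; the `custom_plugins.xml` path, the CPU device, and the chosen properties are illustrative assumptions, not anything defined by this patch.

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // ~ Core::RegisterPlugins: the XML path is a hypothetical placeholder.
    core.register_plugins("custom_plugins.xml");
    // ~ Core::SetConfig / GetMetric / GetAvailableDevices, expressed as properties.
    core.set_property("CPU", ov::num_streams(4));
    auto full_name = core.get_property("CPU", ov::device::full_name);
    auto devices = core.get_available_devices();
    return 0;
}
```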
- * - * @param xmlConfigFile A path to .xml file with plugins to register. - */ - void RegisterPlugins(const std::string& xmlConfigFile); -}; - -/** - * @brief Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing - * dependent resources - * - * @note This function should be used by advanced user to control unload the resources. - * - * You might want to use this function if you are developing a dynamically-loaded library which should clean up all - * resources after itself when the library is unloaded. - */ -INFERENCE_ENGINE_API_CPP(void) INFERENCE_ENGINE_1_0_DEPRECATED shutdown(); -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_data.h b/src/inference/include/ie/ie_data.h deleted file mode 100644 index 66bd61d4639c95..00000000000000 --- a/src/inference/include/ie/ie_data.h +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This header file defines the main Data representation node. - * - * @file ie_data.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include - -#include "ie_api.h" -#include "ie_common.h" -#include "ie_layouts.h" -#include "ie_precision.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @deprecated The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on - * transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - * @brief This class represents the main Data representation node. - * - * The NN graphs are di-graphs consisting of data nodes and layer nodes. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Data) { - class Impl; - -public: - /** - * @brief An empty constructor (dimensionless) - * - * @param name Name of the data node - * @param _precision Precision of the data - * @param layout Data layout - */ - Data(const std::string& name, Precision _precision, Layout layout = NCHW); - - /** - * @brief A constructor with tensor descriptor - * - * @param name Name of the data node - * @param desc Tensor descriptor - */ - Data(const std::string& name, const TensorDesc& desc); - - /** - * @brief A copy constructor - * - * @param data A data object to copy from - */ - Data(const Data& data); - - /** - * @brief An assignment operator - * - * @param data A data object to copy from - * @return An assigned object - */ - Data& operator=(const Data& data); - - /** - * @brief Checks if the current node is resolved - * - * @return true if resolved, false otherwise. - */ - bool isInitialized() const; - - /** - * @brief Sets the data dimensions. - * - * After the current node is marked as resolved. 
- * - * @param a_dims Tensor dimensions to set - */ - void setDims(const SizeVector& a_dims); - - /** - * @brief Sets the layout value for this Data instance - * - * @param layout Layout value to set - */ - void setLayout(Layout layout); - - /** - * @brief changes dims and layout at same time - * - * @param dims new dimensions - * @param layout new layout - */ - void reshape(const SizeVector& dims, Layout layout); - - /** - * @deprecated Use InferenceEngine::Data::reshape(const SizeVector&, Layout) - * @brief changes dims and layout at same time - * - * @param dims new dimensions - * @param layout new layout - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Data::reshape(const SizeVector&, Layout)") - void reshape(const std::initializer_list& dims, Layout layout); - - /** - * @brief Gets the layout value for this Data instance - * @return Layout - */ - Layout getLayout() const; - - /** - * @brief Gets Tensor descriptor reference - * - * @return reference to TensorDesc - */ - const TensorDesc& getTensorDesc() const; - - /** - * @brief Gets a precision type of this Data instance - * - * @return Precision type - */ - const Precision& getPrecision() const; - - /** - * @brief Sets a precision type of this Data instance - * - * @param precision Precision of the data - */ - void setPrecision(const Precision& precision); - - /** - * @return data dimensions - */ - const SizeVector& getDims() const; - - /** - * @return name of the data object - */ - const std::string& getName() const; - - /** - * @brief Sets a name the Data object - * - * @param newName Name of the data node - */ - - void setName(const std::string& newName); - - /** - * @return convenient arbitrary user data holder - */ - const UserValue& getUserObject() const; - - /** - * @private - * @brief Don't touch this field. An implementation details for Data object. - */ - std::shared_ptr _impl; - -private: - /** - * @brief A unique name that identifies this data node - */ - std::string name; - - /** - * @brief A user utility place holder - */ - UserValue userObject; - - /** - * @brief A tensor descriptor - */ - mutable TensorDesc tensorDesc; -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_icnn_network.hpp b/src/inference/include/ie/ie_icnn_network.hpp deleted file mode 100644 index 038f1ec8b63d2d..00000000000000 --- a/src/inference/include/ie/ie_icnn_network.hpp +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the ICNNNetwork class - * - * @file ie_icnn_network.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_data.h" -#include "ie_input_info.hpp" -#include "openvino/core/model.hpp" -#include "openvino/core/partial_shape.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -/** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @interface ICNNNetwork - * @brief This is the main interface to describe the NN topology - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ICNNNetwork) - : public std::enable_shared_from_this { -public: - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief A shared pointer to a ICNNNetwork interface - */ - using Ptr = std::shared_ptr; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Returns nGraph function - * @return nGraph function - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual std::shared_ptr getFunction() noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Returns constant nGraph function - * @return constant nGraph function - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual std::shared_ptr getFunction() const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Gets the network output Data node information. The received info is stored in the given Data node. - * - * For single and multiple outputs networks. - * - * This method need to be called to find out OpenVINO output names for using them later - * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob - * - * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor - * method to map framework names to OpenVINO names - * - * @param out Reference to the OutputsDataMap object - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap - * object. - * - * For single and multiple inputs networks. - * This method need to be called to find out OpenVINO input names for using them later - * when calling InferenceEngine::InferRequest::SetBlob - * - * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor - * method to map framework names to OpenVINO names - * - * @param inputs Reference to InputsDataMap object. - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Returns information on certain input pointed by inputName - * - * @param inputName Name of input layer to get info on - * @return A smart pointer to the input information - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Returns the network name. 
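For orientation, a rough sketch of how the input/output queries above (getInputsInfo, getOutputsInfo) look against `ov::Model` in the 2.0 API; the `model.xml` path is a hypothetical placeholder.

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical IR path

    // Tensor names and shapes are queried directly from the model,
    // instead of going through InputsDataMap / OutputsDataMap.
    for (const auto& input : model->inputs())
        std::cout << input.get_any_name() << " : " << input.get_partial_shape() << std::endl;
    for (const auto& output : model->outputs())
        std::cout << output.get_any_name() << " : " << output.get_partial_shape() << std::endl;
    return 0;
}
```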
- * - * @return Network name - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual const std::string& getName() const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Returns the number of layers in the network as an integer value - * - * @return The number of layers as an integer value - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual size_t layerCount() const = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Adds output to the layer - * - * @param layerName Name of the layer - * @param outputIndex Index of the output - * @param resp Response message - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode addOutput(const std::string& layerName, - size_t outputIndex = 0, - ResponseDesc* resp = nullptr) noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Changes the inference batch size. - * - * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call - * ICNNNetwork::reshape. - * - * @param size Size of batch to set - * @param responseDesc Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - * @note Current implementation of the function sets batch size to the first dimension of all layers in the - * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the - * method works incorrectly. This limitation is resolved via shape inference feature by using - * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation - * - * @note Current implementation of the function sets batch size to the first dimension of all layers in the - * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the - * method works incorrectly. This limitation is resolved via shape inference feature by using - * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Gets the inference batch size - * - * @return The size of batch as a size_t value - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual size_t getBatchSize() const = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Map of pairs: name of corresponding data and its dimension. - */ - using InputShapes = std::map; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Run shape inference with new input shapes for the network - * - * @param inputShapes - map of pairs: name of corresponding data and its dimension. 
- * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept { - (void)inputShapes; - (void)resp; - return NOT_IMPLEMENTED; - }; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Run shape inference with new input shapes for the network - * - * @param partialShapes - map of pairs: name of corresponding data and its dimension. - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode reshape(const std::map& partialShapes, - ResponseDesc* resp) noexcept { - (void)partialShapes; - (void)resp; - return NOT_IMPLEMENTED; - }; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Serialize network to IR and weights files. - * - * @param xmlPath Path to output IR file. - * @param binPath Path to output weights file. - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const - noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Serialize network to IR and weights files. - * - * @param xmlStream A stream for xml content (.xml file) - * @param binStream A stream for weights content (.bin file) - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode serialize(std::ostream& xmlStream, std::ostream& binStream, ResponseDesc* resp) const - noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Serialize network to IR and weights files. - * - * @param xmlStream A stream for xml content (.xml file) - * @param binData A blob for weights content (.bin file) - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode serialize(std::ostream& xmlStream, Blob::Ptr& binData, ResponseDesc* resp) const noexcept = 0; - - /** - * @deprecated Use InferenceEngine::CNNNetwork wrapper instead - * @brief Methods maps framework tensor name to OpenVINO name - * - * @param ov_name OpenVINO name - * @param orig_name Framework tensor name - * @param resp Pointer to the response message that holds a description of an error if any occurred - * - * @return Status code of the operation - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") - virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const - noexcept { - (void)ov_name; - (void)orig_name; - (void)resp; - return NOT_IMPLEMENTED; - } - -protected: - /** - * @brief Default destructor. 
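A hedged sketch of the 2.0 counterparts of the reshape and serialize calls described above, using `ov::Model::reshape` and the `ov::serialize` helper; the "data" input name, the target shape, and the output file names are assumptions, and `ov::serialize` is assumed to be reachable through the umbrella `openvino.hpp` header.

```cpp
#include <openvino/openvino.hpp>

#include <map>
#include <string>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical IR path

    // ~ ICNNNetwork::reshape: "data" is an assumed input tensor name.
    std::map<std::string, ov::PartialShape> new_shapes = {{"data", ov::PartialShape{1, 3, 224, 224}}};
    model->reshape(new_shapes);

    // ~ ICNNNetwork::serialize to .xml/.bin files.
    ov::serialize(model, "reshaped.xml", "reshaped.bin");
    return 0;
}
```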
- */ - ~ICNNNetwork() = default; -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_iexecutable_network.hpp b/src/inference/include/ie/ie_iexecutable_network.hpp deleted file mode 100644 index c3ffc38cb5e26c..00000000000000 --- a/src/inference/include/ie/ie_iexecutable_network.hpp +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief a header file for IExecutableNetwork interface - * - * @file ie_iexecutable_network.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ie_common.h" -#include "ie_icnn_network.hpp" -#include "ie_iinfer_request.hpp" -#include "ie_input_info.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This is an interface of an executable network - */ -class INFERENCE_ENGINE_1_0_DEPRECATED IExecutableNetwork : public std::enable_shared_from_this { -public: - IE_SUPPRESS_DEPRECATED_START - /** - * @brief A smart pointer to the current IExecutableNetwork object - */ - using Ptr = std::shared_ptr; - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Gets the Executable network output Data node information. - * - * The received info is stored in the given InferenceEngine::ConstOutputsDataMap node. - * This method need to be called to find output names for using them later - * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob - * - * @param out Reference to the InferenceEngine::ConstOutputsDataMap object - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode GetOutputsInfo(ConstOutputsDataMap& out, ResponseDesc* resp) const noexcept = 0; - - /** - * @brief Gets the executable network input Data node information. - * - * The received info is stored in the given InferenceEngine::ConstInputsDataMap object. - * This method need to be called to find out input names for using them later - * when calling InferenceEngine::InferRequest::SetBlob - * - * @param inputs Reference to InferenceEngine::ConstInputsDataMap object. - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode GetInputsInfo(ConstInputsDataMap& inputs, ResponseDesc* resp) const noexcept = 0; - - IE_SUPPRESS_DEPRECATED_START - /** - * @brief Creates an inference request object used to infer the network. - * - * The created request has allocated input and output blobs (that can be changed later). 
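The request-creation flow above maps onto `ov::CompiledModel` in the 2.0 API; a minimal sketch follows, assuming a hypothetical `model.xml` and the CPU device, with exceptions replacing the status-code returns.

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");                // hypothetical IR path
    auto compiled_model = core.compile_model(model, "CPU");   // ~ Core::LoadNetwork

    // ~ IExecutableNetwork::CreateInferRequest: no StatusCode, errors are thrown.
    ov::InferRequest request = compiled_model.create_infer_request();

    // ~ GetInputsInfo / GetOutputsInfo on the compiled model.
    auto inputs = compiled_model.inputs();
    auto outputs = compiled_model.outputs();
    return 0;
}
```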
- * - * @param req Shared pointer to the created request object - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode CreateInferRequest(IInferRequest::Ptr& req, ResponseDesc* resp) noexcept = 0; - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Exports the current executable network. - * - * @see Core::ImportNetwork - * - * @param modelFileName Full path to the location of the exported file - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode Export(const std::string& modelFileName, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Exports the current executable network. - * - * @see Core::ImportNetwork - * - * @param networkModel Network model output stream - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode Export(std::ostream& networkModel, ResponseDesc* resp) noexcept = 0; - - IE_SUPPRESS_DEPRECATED_START - /** - * @deprecated Use InferenceEngine::ExecutableNetwork::GetExecGraphInfo instead - * @brief Get executable graph information from a device - * - * @param graphPtr network ptr to store executable graph information - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::ExecutableNetwork::GetExecGraphInfo instead") - virtual StatusCode GetExecGraphInfo(ICNNNetwork::Ptr& graphPtr, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Sets configuration for current executable network - * - * @param config Map of pairs: (config name, config value) - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return code of the operation. InferenceEngine::OK if succeeded - */ - virtual StatusCode SetConfig(const ov::AnyMap& config, ResponseDesc* resp) noexcept = 0; - - /** @brief Gets configuration for current executable network. - * - * The method is responsible to extract information - * which affects executable network execution. The list of supported configuration values can be extracted via - * ExecutableNetwork::GetMetric with the SUPPORTED_CONFIG_KEYS key, but some of these keys cannot be changed - * dynamically, e.g. DEVICE_ID cannot changed if an executable network has already been compiled for particular - * device. - * - * @param name config key, can be found in properties.hpp - * @param result value of config corresponding to config key - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return code of the operation. InferenceEngine::OK if succeeded - */ - virtual StatusCode GetConfig(const std::string& name, ov::Any& result, ResponseDesc* resp) const noexcept = 0; - - /** - * @brief Gets general runtime metric for an executable network. - * - * It can be network name, actual device ID on - * which executable network is running or all other properties which cannot be changed dynamically. 
- * - * @param name metric name to request - * @param result metric value corresponding to metric key - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return code of the operation. InferenceEngine::OK if succeeded - */ - virtual StatusCode GetMetric(const std::string& name, ov::Any& result, ResponseDesc* resp) const noexcept = 0; - -protected: - virtual ~IExecutableNetwork() = default; -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_iinfer_request.hpp b/src/inference/include/ie/ie_iinfer_request.hpp deleted file mode 100644 index 73c7570abf6d76..00000000000000 --- a/src/inference/include/ie/ie_iinfer_request.hpp +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief a header file for IInferRequest interface - * - * @file ie_iinfer_request.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" -#include "ie_common.h" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -/** - * @deprecated Use InferenceEngine::InferRequest C++ wrapper - * @brief This is an interface of asynchronous infer request - */ -class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_from_this { -public: - /** - * @enum WaitMode - * @brief Enumeration to hold wait mode for IInferRequest - */ - enum WaitMode : int64_t { - /** Wait until inference result becomes available */ - RESULT_READY = -1, - /** IInferRequest doesn't block or interrupt current thread and immediately returns inference status */ - STATUS_ONLY = 0, - }; - - IE_SUPPRESS_DEPRECATED_START - - /** - * @brief A shared pointer to the IInferRequest object - */ - using Ptr = std::shared_ptr; - /** - * @brief A smart pointer to the IInferRequest object - */ - using WeakPtr = std::weak_ptr; - - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Sets input/output data to infer - * - * @note Memory allocation does not happen - * @param name Name of input or output blob. - * @param data Reference to input or output blob. The type of a blob must match the network input precision and - * size. - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode SetBlob(const char* name, const Blob::Ptr& data, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Gets input/output data for inference - * - * @note Memory allocation does not happen - * @param name Name of input or output blob. - * @param data Reference to input or output blob. The type of Blob must match the network input precision and size. 
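A small sketch of how the blob set/get and execution calls of IInferRequest translate to `ov::InferRequest` tensors; the "data" tensor name, its element type, and its shape are illustrative assumptions.

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto compiled_model = core.compile_model("model.xml", "CPU");  // hypothetical IR path
    auto request = compiled_model.create_infer_request();

    // ~ IInferRequest::SetBlob: a tensor is bound to an input by name.
    ov::Tensor input(ov::element::f32, ov::Shape{1, 3, 224, 224});
    request.set_tensor("data", input);

    // ~ IInferRequest::Infer: synchronous run, failures reported as exceptions.
    request.infer();
    ov::Tensor output = request.get_output_tensor();  // ~ GetBlob for the first output

    // ~ StartAsync / Wait without status codes.
    request.start_async();
    request.wait();
    return 0;
}
```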
- * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Infers specified input(s) in synchronous mode - * - * @note blocks all methods of IInferRequest while request is ongoing (running or waiting in queue) - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode Infer(ResponseDesc* resp) noexcept = 0; - /** - * @brief Cancels current async inference request - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode Cancel(ResponseDesc* resp) noexcept = 0; - - /** - * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer - * - * @note not all plugins provide meaningful data - * @param perfMap Map of layer names to profiling information for that layer - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: InferenceEngine::OK (0) for success - */ - virtual StatusCode GetPerformanceCounts(std::map& perfMap, - ResponseDesc* resp) const noexcept = 0; - - /** - * @brief Waits for the result to become available. Blocks until specified millis_timeout has elapsed or the result - * becomes available, whichever comes first. - * - * @param millis_timeout Maximum duration in milliseconds to block for - * @note There are special cases when millis_timeout is equal some value of the WaitMode enum: - * * STATUS_ONLY - immediately returns inference status (IInferRequest::RequestStatus). It does not block or - * interrupt current thread - * * RESULT_READY - waits until inference result becomes available - * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if - * occurred) - * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success - */ - virtual InferenceEngine::StatusCode Wait(int64_t millis_timeout, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Starts inference of specified input(s) in asynchronous mode - * - * @note It returns immediately. 
Inference starts also immediately - * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if - * occurred) - * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success - */ - virtual StatusCode StartAsync(ResponseDesc* resp) noexcept = 0; - - IE_SUPPRESS_DEPRECATED_START - - /** - * @brief Completion callback definition as pointer to a function - * - * @param context Pointer to request for providing context inside callback - * @param code Completion result status: InferenceEngine::OK (0) for success - */ - typedef void (*CompletionCallback)(InferenceEngine::IInferRequest::Ptr context, InferenceEngine::StatusCode code); - - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Sets a callback function that will be called on success or failure of asynchronous request - * - * @param callback A function to be called - * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success - */ - virtual StatusCode SetCompletionCallback(CompletionCallback callback) noexcept = 0; - - /** - * @brief Gets arbitrary data for the request and stores a pointer to a pointer to the obtained data - * - * @param data Pointer to a pointer to the gotten arbitrary data - * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if - * occurred) - * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success - */ - virtual StatusCode GetUserData(void** data, ResponseDesc* resp) noexcept = 0; - - /** - * @brief Sets arbitrary data for the request - * - * @param data Pointer to a pointer to arbitrary data to set - * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if - * occurred) - * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success - */ - virtual StatusCode SetUserData(void* data, ResponseDesc* resp) noexcept = 0; - -protected: - virtual ~IInferRequest() = default; -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_input_info.hpp b/src/inference/include/ie/ie_input_info.hpp deleted file mode 100644 index ec7092d42e5c62..00000000000000 --- a/src/inference/include/ie/ie_input_info.hpp +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief a header file for InputInfo class - * - * @file ie_input_info.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_api.h" -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_data.h" -#include "ie_precision.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START -/** - * @brief This class contains information about each input of the network - */ -class INFERENCE_ENGINE_1_0_DEPRECATED InputInfo { -public: - /** @brief A smart pointer to the InputInfo instance */ - using Ptr = std::shared_ptr; - /** @brief A smart pointer to the constant InputInfo instance */ - using CPtr = std::shared_ptr; - - /** - * @brief Gets a precision of the input data provided by user - * - * By default it matches the layers precision, but there are exceptions of this rule - * For Q78 precision networks the input is expected in I16 by default - * For FP16 precision networks the input is expected in FP32 by default - * The default input precision might be changed preferred one using InputInfo::setPrecision() - * function. - * For example, for a Q78 precision network you can pass FP32 input data - * @return The precision used for input blob creation - */ - Precision getPrecision() const { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - return _inputData->getPrecision(); - } - - /** - * @brief Changes the precision of the input data provided by the user. - * - * This function should be called before loading the network to the plugin - * @param p A new precision of the input data to set - */ - void setPrecision(Precision p) { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - _inputData->setPrecision(p); - } - - /** - * @brief Gets a layout of the input data provided by user - * - * @details By default it matches the layers precision and depends on number of its dimensions: - * C - for 1-dimensional, - * NC - for 2-dimensional, - * CHW - for 3-dimensional, - * NCHW - for 4-dimensional - * NCDHW - for 5-dimensional - * The default input layout might be changed preferred one using setLayout() function. - * @return The precision used for input blob creation - */ - Layout getLayout() { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - return _inputData->getLayout(); - } - - /** - * @brief Changes the layout of the input data provided by the user. - * - * This function should be called before loading the network to the plugin - * @param l A new layout of the input data to set - */ - void setLayout(Layout l) { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - _inputData->setLayout(l); - } - - /** - * @brief Gets the name of the input - * - * @return A string - the name of the input - */ - const std::string& name() const { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - return _inputData->getName(); - } - - /** - * @brief Changes the name of the input data provided by the user. 
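The precision and layout setters of InputInfo shown above correspond roughly to pre/post-processing configuration in the 2.0 API; a sketch with `ov::preprocess::PrePostProcessor` follows, assuming a single-input model and u8/NHWC as example values.

```cpp
#include <openvino/openvino.hpp>
#include <openvino/core/preprocess/pre_post_process.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical IR path

    // ~ InputInfo::setPrecision / setLayout, applied before compilation.
    ov::preprocess::PrePostProcessor ppp(model);
    ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC");
    ppp.input().model().set_layout("NCHW");
    model = ppp.build();

    auto compiled_model = core.compile_model(model, "CPU");
    return 0;
}
```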
- * - * @param newName A new name of the input data to set - */ - void setName(const std::string& newName) { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - _inputData->setName(newName); - } - - /** - * @brief Gets the input data - * - * @return A smart pointer to the input data - */ - DataPtr getInputData() const { - return _inputData; - } - - /** - * @brief Initializes the pointer to the input data that stores the main input parameters like dims, - * etc - * - * This method initializes the precision with the information from the inputPtr if it was not set - * explicitly through InputInfo::setPrecision. If InputInfo::setPrecision is called, this method does - * not overwrite the precision. - * @param inputPtr Pointer to the input data to set - */ - void setInputData(DataPtr inputPtr) { - _inputData = inputPtr; - } - - /** - * @brief Returns the tensor descriptor - * @return A const reference to a tensor descriptor - */ - const TensorDesc& getTensorDesc() const { - if (!_inputData) { - IE_THROW() << "Data is empty!"; - } - return _inputData->getTensorDesc(); - } - -protected: - /** - * @brief A smart pointer to the input data - */ - DataPtr _inputData; -}; - -/** - * @brief A collection that contains string as key, and InputInfo smart pointer as value - */ -using InputsDataMap = std::map; - -/** - * @brief A collection that contains string as key, and const InputInfo smart pointer as value - */ -using ConstInputsDataMap = std::map; -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_layouts.h b/src/inference/include/ie/ie_layouts.h deleted file mode 100644 index a14b8dbd803bac..00000000000000 --- a/src/inference/include/ie/ie_layouts.h +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for data layouts and conversion between them - * - * @file ie_layouts.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" -#include "ie_common.h" -#include "ie_precision.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This class describes blocking layouts - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(BlockingDesc) { -public: - /** - * @brief The default constructor which creates empty blocking descriptor - */ - BlockingDesc(); - /** - * @brief The constructor which allows to create blocking descriptors for standard layouts - * - * @param dims real dimensions - * @param layout memory layout - */ - BlockingDesc(const SizeVector& dims, Layout layout); - /** - * @brief The constructor allows to create blocking descriptors for blocked memory - * - * @param blocked_dims blocked dimensions - * @param order the order of dimensions - */ - BlockingDesc(const SizeVector& blocked_dims, const SizeVector& order); - /** - * @brief The constructor allows to create blocking descriptors for blocked memory - * - * @param blocked_dims blocked dimensions - * @param order the order of dimensions - * @param offset offset to the current memory block - */ - BlockingDesc(const SizeVector& blocked_dims, const SizeVector& order, size_t offset); - /** - * @brief The constructor allows to create blocking descriptors for blocked memory - * - * @param blocked_dims blocked dimensions - * @param order the order of dimensions - * @param offset offset to the current memory block - * @param dimOffsets per-dimension offset from the padding to actual data, - */ - BlockingDesc(const SizeVector& blocked_dims, const SizeVector& order, size_t offset, const SizeVector& dimOffsets); - /** - * @brief The constructor allows to create blocking descriptors for blocked memory - * - * @param blocked_dims blocked dimensions - * @param order the order of dimensions - * @param offset offset to the current memory block - * @param dimOffsets per-dimension offset from the padding to actual data, - * @param strides strides for each dimension - */ - BlockingDesc(const SizeVector& blocked_dims, - const SizeVector& order, - size_t offset, - const SizeVector& dimOffsets, - const SizeVector& strides); - - /** - * @brief Returns the blocked dimensions vector - * - * @return blocked dimensions - */ - const SizeVector& getBlockDims() const { - return blockedDims; - } - - /** - * @brief Returns the vector of order - * - * @return order of dimensions - */ - const SizeVector& getOrder() const { - return order; - } - - /** - * @brief Returns the per-dimension offset vector - * - * @return offsets in elements - */ - const SizeVector& getOffsetPaddingToData() const { - return offsetPaddingToData; - } - - /** - * @brief Returns the offset to the current memory block - * - * @return offset in elements - */ - size_t getOffsetPadding() const { - return offsetPadding; - } - - /** - * @brief Returns strides for each dimension - * - * @return strides in elements - */ - const SizeVector& getStrides() const { - return strides; - } - - /** - * @brief The comparison operator for the BlockingDesc - * - * @param rhs object to compare - * @return true if objects are equal - */ - bool operator==(const BlockingDesc& rhs) const; - /** - * @brief The comparison operator for the BlockingDesc - * - * @param rhs object to compare - * @return true if objects aren't equal - */ - bool operator!=(const BlockingDesc& rhs) const; - -protected: - /** - * @brief 
Fills tensor descriptor based on blocking dimensions and specific order - * @param blocked_dims A vector representing blocking dimensions - * @param order A vector with specific dims order - */ - void fillDesc(const SizeVector& blocked_dims, const SizeVector& order); - -private: - /** Blocked dimensions. */ - SizeVector blockedDims; - /** Strides for blocked dimensions */ - SizeVector strides; - /** The order of blocked dimensions **/ - SizeVector order; - /** Per-dimension offset from the padding to actual data, the top-level - * tensor with offsets applied must lie within the padding area. */ - SizeVector offsetPaddingToData; - /** Offset from memory origin to the current block, non-zero only in - * a description of a memory sub-block. */ - size_t offsetPadding; -}; - -/** - * @brief This class defines Tensor description - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(TensorDesc) { -public: - /** - * @brief The constructor creates the tensor descriptor using blocking descriptor - * - * @param precision memory precision - * @param dims memory dimensions - * @param blockDesc blocking descriptor - */ - TensorDesc(const Precision& precision, const SizeVector& dims, const BlockingDesc& blockDesc); - /** - * @brief The constructor creates the tensor descriptor using standard layout - * - * @param precision memory precision - * @param dims memory dimensions - * @param layout memory layout - */ - TensorDesc(const Precision& precision, const SizeVector& dims, Layout layout); - /** - * @brief The constructor creates the empty tensor descriptor with precision and layout - * - * @param precision memory precision - * @param layout memory layout - */ - TensorDesc(const Precision& precision, Layout layout); - /** - * @brief The default constructor which creates empty tensor descriptor - */ - TensorDesc(); - - /** - * @brief Reshapes the tensor descriptor - * - * @param dims new dimensions - * @param layout new layout if it is necessary - */ - void reshape(const SizeVector& dims, Layout layout = Layout::ANY); - /** - * @brief Reshapes the tensor descriptor - * - * @param dims new dimensions - * @param blockDesc new blocking descriptor - */ - void reshape(const SizeVector& dims, const BlockingDesc& blockDesc); - - /** - * @brief Returns the vector of dimensions - * - * @return dimensions - */ - SizeVector& getDims() { - return dims; - } - /** - * @brief Returns the constant vector of dimensions - * - * @return dimensions - */ - const SizeVector& getDims() const noexcept { - return dims; - } - /** - * @brief Sets dimensions - * - * @param dims new dimensions - */ - void setDims(const SizeVector& dims); - - /** - * @brief Returns the memory layout - * - * @return layout - */ - Layout getLayout() const { - return layout; - } - - /** - * @brief Sets the layout - * - * @param l memory layout - */ - void setLayout(Layout l); - - /** - * @brief Returns the memory precision - * - * @return precision - */ - const Precision& getPrecision() const { - return precision; - } - - /** - * @brief Sets the memory precision - * - * @param p precision - */ - void setPrecision(const Precision& p) { - precision = p; - } - - /** - * @brief Returns the blocking descriptor - * - * @return blocking descriptor - */ - const BlockingDesc& getBlockingDesc() const { - return blockingDesc; - } - - /** - * @brief The comparison operator for the TensorDesc - * - * @param rhs object to compare - * @return true if objects are equal - */ - bool operator==(const TensorDesc& rhs) const; - /** - * @brief The comparison 
operator for the TensorDesc - * - * @param rhs object to compare - * @return true if objects aren't equal - */ - bool operator!=(const TensorDesc& rhs) const; - - /** - * @brief Calculates offset for the vector of dimensions - * - * @param v vector of dimensions - * @return offset - */ - size_t offset(const SizeVector& v) const; - /** - * @brief Calculates offset for the local offset - * - * @param l local offset - * @return offset - */ - size_t offset(size_t l) const; - - /** - * @brief Returns the standard layout for dimensions - * - * @param dims the vector of dimensions - * @return the standard memory layout - */ - static Layout getLayoutByDims(const SizeVector& dims); - - /** - * @brief Returns the standard layout for the specified tensor rank - * - * @param rank of the requested layout - * @return the standard memory layout - */ - static Layout getLayoutByRank(size_t rank); - -private: - /** - * Memory layout - */ - Layout layout; - /** - * @brief blob's dimensions - */ - SizeVector dims; - /** - * @brief memory precision - */ - Precision precision; - /** - * Detailed information about layout construction - */ - BlockingDesc blockingDesc; -}; - -/** - * @brief This structure describes ROI data for image-like tensors. - */ -struct INFERENCE_ENGINE_1_0_DEPRECATED ROI { - size_t id = 0; //!< ID of a ROI (offset over batch dimension) - size_t posX = 0; //!< W upper left coordinate of ROI - size_t posY = 0; //!< H upper left coordinate of ROI - size_t sizeX = 0; //!< W size of ROI - size_t sizeY = 0; //!< H size of ROI - - ROI() = default; - - /** - * @brief Creates a ROI objects with given parameters - * @param id ID of a ROI (offset over batch dimension) - * @param posX W upper left coordinate of ROI - * @param posY H upper left coordinate of ROI - * @param sizeX W size of ROI - * @param sizeY H size of ROI - */ - ROI(size_t id, size_t posX, size_t posY, size_t sizeX, size_t sizeY) - : id(id), - posX(posX), - posY(posY), - sizeX(sizeX), - sizeY(sizeY) {} -}; - -/** - * @brief Creates a TensorDesc object for ROI. - * - * @param origDesc original TensorDesc object. - * @param roi An image ROI object inside of the original object. - * @param useOrigMemDesc Flag to use original memory description (strides/offset). - * Should be set if the new TensorDesc describes shared memory. - * - * @return A newly created TensorDesc object representing ROI. - */ -INFERENCE_ENGINE_API_CPP(TensorDesc) -INFERENCE_ENGINE_1_0_DEPRECATED make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc); - -/** - * @brief Creates a TensorDesc object for ROI. - * - * @param origDesc original TensorDesc object. - * @param begin start coordinate of ROI object inside of the original object. - * @param end end coordinate of ROI object inside of the original object. - * @param useOrigMemDesc Flag to use original memory description (strides/offset). - * Should be set if the new TensorDesc describes shared memory. - * - * @return A newly created TensorDesc object representing ROI. 
- */ -INFERENCE_ENGINE_API_CPP(TensorDesc) -INFERENCE_ENGINE_1_0_DEPRECATED make_roi_desc(const TensorDesc& origDesc, - const std::vector& begin, - const std::vector& end, - bool useOrigMemDesc); - -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_locked_memory.hpp b/src/inference/include/ie/ie_locked_memory.hpp deleted file mode 100644 index d334745be4224e..00000000000000 --- a/src/inference/include/ie/ie_locked_memory.hpp +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for generic LockedMemory<> and different variations of locks - * - * @file ie_locked_memory.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_allocator.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -namespace details { -/** - * @brief This class is a LockedMemory concept for hardware memory - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemoryBase { - IAllocator* _allocator = nullptr; - void* _handle = nullptr; - mutable T* _locked = nullptr; - LockOp _lockFlag = LOCK_FOR_WRITE; - -protected: - /** - * @brief An offset size - * - * The default value is 0. - */ - size_t _offset = 0; - -public: - /** - * @brief A constructor - * - * @param ptr Pointer to an IAllocator object - * @param handle Handle provided by allocator->Alloc() - * @param lockFlag Read/Write type of mapping - * @param offsetInBytes Offset in originally locked region - */ - LockedMemoryBase(IAllocator* ptr, void* handle, LockOp lockFlag, size_t offsetInBytes) - : _allocator(ptr), - _handle(handle), - _lockFlag(lockFlag), - _offset(offsetInBytes) {} - - /** - * @brief A copy constructor - * - * @param that An rvalue reference for the other LockedMemoryBase instance - */ - LockedMemoryBase(LockedMemoryBase&& that) noexcept - : _allocator(that._allocator), - _handle(that._handle), - _lockFlag(that._lockFlag), - _offset(that._offset) { - that._locked = nullptr; - } - - /** - * @brief A virtual destructor - */ - virtual ~LockedMemoryBase() { - if (_locked != nullptr) { - _allocator->unlock(_handle); - } - } - -protected: - /** - * @brief Compares referenced values - * - * @param pointer Pointer to the object to compare with - * @return True if all handlers are nullptr or referenced values are equal, false otherwise - */ - bool isEqualTo(const T* pointer) const { - if (pointer == nullptr && (_allocator == nullptr || _handle == nullptr)) { - return true; - } - return dereference() == pointer; - } - - /** - * @brief Gets the locked object. - * - * Locks the handler and casts memory to the object of the given template type. 
- * - * @return The pointer to the locked object, nullptr otherwise - */ - virtual T* dereference() const { - if (_locked != nullptr) - return _locked; - - if (_allocator == nullptr) { - return nullptr; - } - - if (_handle == nullptr) { - return nullptr; - } - - uint8_t* pBytes = reinterpret_cast(_allocator->lock(_handle, _lockFlag)); - - return _locked = reinterpret_cast(pBytes + _offset); - } -}; -} // namespace details - -/** - * @brief This class represents locked memory for read/write memory - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory : public details::LockedMemoryBase { - using base = details::LockedMemoryBase; - -public: - /** - * @brief A constructor - * - * @param ptr Pointer to IAllocator object - * @param handle Handle provided by allocator - * @param offsetInBytes Offset in originally locked region - */ - LockedMemory(IAllocator* ptr, void* handle, size_t offsetInBytes = 0) - : base(ptr, handle, LOCK_FOR_WRITE, offsetInBytes) {} - - /** - * @brief A default copy constructor, accepting rvalue - */ - LockedMemory(LockedMemory&&) = default; - - /** - * @brief A default copy constructor that accepts rvalue - * - * Also sets the offset value for the new memory object - * - * @param that Rvalue reference for the other LockedMemoryBase instance - * @param offset Offset value - */ - LockedMemory(LockedMemory&& that, size_t offset) : base(std::move(that)) { - base::_offset = offset; - } - - /** - * @brief A disabled copy constructor for lvalue - */ - LockedMemory(const LockedMemory&) = delete; - - /** - * @brief Gets a pointer to the stored object - * - * Dereferences from the base class. - * - * @return The pointer to the object of the given template type - */ - operator T*() { - return base::dereference(); - } - - /** - * @brief Gets the const pointer to the stored object - * - * Dereferences from the base class. - * @return The const pointer object of the given template type. - */ - operator const T*() const { - return base::dereference(); - } - - /** - * @brief Compares stored object with the given one - * @param pointer An pointer to compare with. - * @return `true` if objects are equal, `false` otherwise - */ - bool operator==(const T* pointer) const { - // special case with nullptr - return base::isEqualTo(pointer); - } - - /** - * @brief Compares the object with the one stored in the memory. - * @param pointer A pointer to compare with - * @param lm A compared LockedMemory object - * @return `true` if objects are equal, `false` otherwise - */ - friend bool operator==(const T* pointer, const LockedMemory& lm) { - return lm.operator==(pointer); - } - - /** - * @brief Casts stored object to any provided type. - * - * Uses reinterpret_cast. - * - * @tparam S Type to be casted to - * @return Casted to the given type object - */ - template ::value>> - S as() { - return reinterpret_cast(base::dereference()); - } - - /** - * @brief Casts stored object to any provided type. - * - * Uses reinterpret_cast. 
- * - * @tparam S Type to be casted to - * @return Casted to the given type const object - */ - template ::value>> - const S as() const { - return reinterpret_cast(base::dereference()); - } -}; - -/** - * @brief This class is for data and allows casting to any pointers - */ -template <> -class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory : public details::LockedMemoryBase { - using base = details::LockedMemoryBase; - -public: - /** - * @brief A constructor - * - * @param ptr Pointer to IAllocator object - * @param handle Handle provided by allocator - * @param offsetInBytes Offset in originally locked region - */ - LockedMemory(IAllocator* ptr, void* handle, size_t offsetInBytes) - : base(ptr, handle, LOCK_FOR_WRITE, offsetInBytes) {} - - /** - * @brief A default copy constructor that accepts rvalue - */ - LockedMemory(LockedMemory&&) = default; - - /** - * @brief A default copy constructor that accepts rvalue - * - * Also sets the offset value for the new memory object - * - * @param that Rvalue reference for the other LockedMemoryBase instance - * @param offset Offset value - */ - LockedMemory(LockedMemory&& that, size_t offset) : base(std::move(that)) { - base::_offset = offset; - } - - /** - * @brief A disabled copy constructor for lvalue - */ - LockedMemory(const LockedMemory&) = delete; - - /** - * @brief Gets the pointer to the stored object of the given template type - * - * Dereferences from the base class. - * - * @tparam S Type to be casted to - * @return The pointer to the object of the given template type - */ - template - operator S*() { - return reinterpret_cast(base::dereference()); - } - - /** - * @brief Compares stored object with the given one - * @param pointer A pointer to compare with - * @return `true` if objects are equal, `false` otherwise - */ - bool operator==(const void* pointer) const { - // special case with nullptr - return base::isEqualTo(pointer); - } - - /** - * @brief Compares stored object with the given one - * @param pointer A LockedMemory to compare with - * @return `true` if objects are equal, `false` otherwise - */ - bool operator==(const LockedMemory& lm) const { - // special case with nullptr - return base::isEqualTo(lm.as()); - } - - IE_SUPPRESS_DEPRECATED_START - /** - * @brief Compares the object with the one stored in the memory - * @param pointer A pointer to compare with - * @param lm A compared LockedMemory object - * @return `true` if objects are equal, `false` otherwise - */ - friend bool operator==(const void* pointer, const LockedMemory& lm) { - return lm.operator==(pointer); - } - IE_SUPPRESS_DEPRECATED_END - - /** - * @brief Casts stored object to any given type - * - * Uses reinterpret_cast. - * - * @tparam S Type to be casted to - * @return Casted to the given type object - */ - template ::value>> - S as() { - return reinterpret_cast(dereference()); - } - - /** - * @brief Casts stored object to any given type - * - * Uses reinterpret_cast. 
- * - * @tparam S Type to be casted to - * @return Casted to the given type const object - */ - template ::value>> - const S as() const { - return reinterpret_cast(dereference()); - } -}; - -/** - * @brief This class is for read-only segments - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory : public details::LockedMemoryBase { - using base = details::LockedMemoryBase; - -public: - /** - * @brief A constructor - * - * @param ptr Pointer to IAllocator object - * @param handle Handle provided by allocator - * @param offset Offset in bytes in originally locked region - */ - LockedMemory(IAllocator* ptr, void* handle, size_t offset) : base(ptr, handle, LOCK_FOR_READ, offset) {} - - /** - * @brief A default copy constructor that accepts rvalue - */ - LockedMemory(LockedMemory&&) = default; - - /** - * @brief A default copy constructor that accepts rvalue. - * - * Also sets the offset value for the new memory object - * - * @param that Rvalue reference for the other LockedMemoryBase instance - * @param offset Offset value - */ - LockedMemory(LockedMemory&& that, size_t offset) : base(std::move(that)) { - base::_offset = offset; - } - - /** - * @brief A disabled copy constructor for lvalue - */ - LockedMemory(const LockedMemory&) = delete; - - /** - * @brief Gets the const pointer to the stored object - * - * Dereferences from the base class. - * - * @return The pointer to the object. - */ - operator const T*() const { - return base::dereference(); - } - - /** - * @brief Compares stored object with the given one - * @param pointer A pointer to compare with - * @return `true` if objects are equal, `false` otherwise - */ - bool operator==(const T* pointer) const { - // special case with nullptr - return base::isEqualTo(pointer); - } - - /** - * @brief Compares the object with the one stored in the memory - * @param pointer A pointer to compare with - * @param lm A compared LockedMemory object - * @return `true` if objects are equal, `false` otherwise - */ - friend bool operator==(const T* pointer, const LockedMemory& lm) { - return lm.operator==(pointer); - } - - /** - * @brief Casts stored object to any given type. - * - * Uses reinterpret_cast. - * - * @tparam S Type to be casted to - * @return Casted to the given type object - */ - template ::value && std::is_const::value>> - S as() const { - return reinterpret_cast(base::dereference()); - } -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_precision.hpp b/src/inference/include/ie/ie_precision.hpp deleted file mode 100644 index f967e2dd645c58..00000000000000 --- a/src/inference/include/ie/ie_precision.hpp +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides class for describing precision of data - * - * @file ie_precision.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_common.h" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This class holds precision value and provides precision related operations - */ -class INFERENCE_ENGINE_1_0_DEPRECATED Precision { -public: - /** Enum to specify of different */ - enum ePrecision : uint8_t { - UNSPECIFIED = 255, /**< Unspecified value. Used by default */ - MIXED = 0, /**< Mixed value. Can be received from network. No applicable for tensors */ - FP32 = 10, /**< 32bit floating point value */ - FP16 = 11, /**< 16bit floating point value, 5 bit for exponent, 10 bit for mantisa */ - BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/ - FP64 = 13, /**< 64bit floating point value */ - NF4 = 14, /**< 4bit normalized float value */ - Q78 = 20, /**< 16bit specific signed fixed point precision */ - I16 = 30, /**< 16bit signed integer value */ - U4 = 39, /**< 4bit unsigned integer value */ - U8 = 40, /**< 8bit unsigned integer value */ - I4 = 49, /**< 4bit signed integer value */ - I8 = 50, /**< 8bit signed integer value */ - U16 = 60, /**< 16bit unsigned integer value */ - I32 = 70, /**< 32bit signed integer value */ - U32 = 74, /**< 32bit unsigned integer value */ - I64 = 72, /**< 64bit signed integer value */ - U64 = 73, /**< 64bit unsigned integer value */ - BIN = 71, /**< 1bit integer value */ - BOOL = 41, /**< 8bit bool type */ - STRING = 79, /**< string type, std::string in C++ */ - CUSTOM = 80 /**< custom precision has it's own name and size of elements */ - }; - -private: - struct PrecisionInfo { - /** @brief Size of underlined element */ - size_t bitsSize = 0; - - /** @brief Null terminated string with precision name */ - const char* name = "UNSPECIFIED"; - - bool isFloat = false; - ePrecision value = Precision::UNSPECIFIED; - }; - PrecisionInfo precisionInfo; - -public: - /** @brief Default constructor */ - Precision() = default; - - /** - * @brief Constructor with specified precision - * @param value A value of ePrecision to create an object from - */ - Precision(const Precision::ePrecision value) { - precisionInfo = getPrecisionInfo(value); - } - - /** - * @brief Custom precision constructor - * - * @param bitsSize size of elements - * @param name optional: name string, used in serialisation - */ - explicit Precision(size_t bitsSize, const char* name = nullptr) { - if (bitsSize == 0) { - IE_THROW() << "Precision with 0 elements size not supported"; - } - precisionInfo.bitsSize = bitsSize; - if (name == nullptr) { - precisionInfo.name = "CUSTOM"; - } else { - precisionInfo.name = name; - } - precisionInfo.value = CUSTOM; - } - - /** - * @brief Creates custom precision with specific underlined type - * @param typeName A string name of precision - * @return Precision converted from string name - */ - template - static Precision fromType(const char* typeName = nullptr) { - return Precision(8 * sizeof(T), typeName == nullptr ? 
typeid(T).name() : typeName); - } - - /** - * @brief checks whether given storage class T can be used to store objects of current precision - * @param typeName A string name of precision - * @return `true` if `typeName` has underlaying storage type - */ - template - bool hasStorageType(const char* typeName = nullptr) const noexcept { - try { -#define CASE(x, y) \ - case x: \ - return std::is_same() -#define CASE2(x, y1, y2) \ - case x: \ - return std::is_same() || std::is_same() - - switch (precisionInfo.value) { - CASE(FP32, float); - CASE(FP64, double); - CASE2(FP16, int16_t, uint16_t); - CASE2(BF16, int16_t, uint16_t); - CASE(NF4, int8_t); - CASE2(I4, int8_t, uint8_t); - CASE(I8, int8_t); - CASE(I16, int16_t); - CASE(I32, int32_t); - CASE(I64, int64_t); - CASE(U4, uint8_t); - CASE(U8, uint8_t); - CASE(U16, uint16_t); - CASE(U32, uint32_t); - CASE(U64, uint64_t); - CASE(BOOL, uint8_t); - CASE2(Q78, int16_t, uint16_t); - CASE2(BIN, int8_t, uint8_t); - CASE(STRING, std::string); - default: - return areSameStrings(name(), typeName == nullptr ? typeid(T).name() : typeName); -#undef CASE -#undef CASE2 - } - } catch (...) { - return false; - } - } - - /** - * @brief Equality operator with Precision object - * @param p A value of Precision to compare with - * @return `true` if values represent the same precisions, `false` otherwise - */ - bool operator==(const Precision& p) const noexcept { - return precisionInfo.value == p && precisionInfo.bitsSize == p.precisionInfo.bitsSize && - areSameStrings(precisionInfo.name, p.precisionInfo.name); - } - - /** - * @brief Inequality operator with Precision object - * @param p A value of Precision to compare with - * @return `true` if values represent different precisions, `false` otherwise - */ - bool operator!=(const Precision& p) const noexcept { - return !(*this == p); - } - - /** - * @brief Equality operator with ePrecision enum value - * @param p A value of ePrecision to compare with - * @return `true` if values represent the same precisions, `false` otherwise - */ - bool operator==(const ePrecision p) const noexcept { - return precisionInfo.value == p; - } - - /** - * @brief Inequality operator with ePrecision enum value - * @param p A value of ePrecision to compare with - * @return `true` if values represent different precisions, `false` otherwise - */ - bool operator!=(const ePrecision p) const noexcept { - return precisionInfo.value != p; - } - - /** - * @brief Assignment operator with ePrecision enum value - * @param p A value of ePrecision enumeration - * @return A Precision instance - */ - Precision& operator=(const ePrecision p) noexcept { - precisionInfo = getPrecisionInfo(p); - return *this; - } - - /** - * @brief Cast operator to a bool - * @return `true` if precision is specified, `false` otherwise - */ - explicit operator bool() const noexcept { - return precisionInfo.value != UNSPECIFIED; - } - - /** - * @brief Logical negation operator - * @return `true` if precision is NOT specified, `false` otherwise - */ - bool operator!() const noexcept { - return precisionInfo.value == UNSPECIFIED; - } - - /** - * @brief Cast operator to a ePrecision - * @return A casted value of Precision::ePrecision enumeration - */ - operator Precision::ePrecision() const noexcept { - return precisionInfo.value; - } - - /** - * @brief Gets the precision value of type ePrecision. - * @return The preccision value. 
- */ - constexpr uint8_t getPrecVal() const noexcept { - return precisionInfo.value; - } - - /** - * @brief Getter of precision name - * @return A string representing precision name - */ - const char* name() const noexcept { - return precisionInfo.name; - } - - /** - * @brief Creates Precision from string with precision name - * @param str A string representing precision - * @return Precision created from string representation - */ - static Precision FromStr(const std::string& str) { - static const std::unordered_map names = { -#define PRECISION_NAME(s) {#s, s} - PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16), PRECISION_NAME(I4), - PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64), - PRECISION_NAME(U4), PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32), - PRECISION_NAME(U64), PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16), - PRECISION_NAME(MIXED), PRECISION_NAME(NF4), PRECISION_NAME(STRING), PRECISION_NAME(BIN), -#undef PRECISION_NAME - }; - auto i = names.find(str); - return i == names.end() ? Precision() : Precision(i->second); - } - - /** - * @brief Returns size of single element of that precision in bytes - * @returns Number of bytes per element - */ - size_t size() const { - return (bitsSize() + 7) >> 3; - } - - /** - * @brief Returns size of single element of that precision in bits - * @returns Number of bits per element - */ - size_t bitsSize() const { - if (precisionInfo.bitsSize == 0) { - IE_THROW() << " cannot estimate element if precision is " << precisionInfo.name; - } - return precisionInfo.bitsSize; - } - - /** - * @brief Checks if it is a floating point value - * @return True if precision is float point, `false` otherwise - */ - bool is_float() const noexcept { - return precisionInfo.isFloat; - } - - /** - * @brief Checks if it is a signed value - * @return True if precision is signed, `false` otherwise - */ - bool isSigned() const noexcept { - return (precisionInfo.value == Precision::UNSPECIFIED) || (precisionInfo.value == Precision::MIXED) || - (precisionInfo.value == Precision::FP32) || (precisionInfo.value == Precision::FP64) || - (precisionInfo.value == Precision::FP16) || (precisionInfo.value == Precision::Q78) || - (precisionInfo.value == Precision::I16) || (precisionInfo.value == Precision::I8) || - (precisionInfo.value == Precision::I32) || (precisionInfo.value == Precision::I64) || - (precisionInfo.value == Precision::BIN) || (precisionInfo.value == Precision::BF16) || - (precisionInfo.value == Precision::CUSTOM) || (precisionInfo.value == Precision::I4) || - (precisionInfo.value == Precision::NF4); - } - -protected: - /** - * @brief Creates PrecisionInfo by @p precision with a specified name - * @tparam precision A precision to create PrecisionInfo for - * @param name Name of precision - * @return A PrecisionInfo object - */ - template - static PrecisionInfo makePrecisionInfo(const char* name); - - /** - * @brief Compare two c-strings - * - * @param l Const pointer to first string - * @param r Const pointer to another string - * @returns True if strings are the same - */ - static bool areSameStrings(const char* l, const char* r) noexcept { - if (l == r) - return true; - - if (l == nullptr || r == nullptr) - return false; - - for (; *l && *r; l++, r++) { - if (*l != *r) - return false; - } - return *l == *r; - } - - /** - * @brief Creates PrecisionInfo based on ePrecision - * @param v A value of ePrecision emuneration - * @return Precision info object - */ - static PrecisionInfo 
getPrecisionInfo(ePrecision v) { -#define CASE(x) \ - case x: \ - return makePrecisionInfo(#x); - switch (v) { - CASE(FP32); - CASE(FP64); - CASE(FP16); - CASE(BF16); - CASE(NF4); - CASE(I4); - CASE(I8); - CASE(I16); - CASE(I32); - CASE(I64); - CASE(U4); - CASE(U8); - CASE(U16); - CASE(U32); - CASE(U64); - CASE(Q78); - CASE(MIXED); - CASE(BIN); - CASE(BOOL); - CASE(STRING); - default: - return makePrecisionInfo("UNSPECIFIED"); -#undef CASE - } - } -}; - -/** - * @brief Particular precision traits - */ -template -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait {}; - -/** @cond INTERNAL */ -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = float; - enum { is_float = true }; -}; - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = double; - enum { is_float = true }; -}; - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int16_t; - enum { is_float = true }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int16_t; - enum { is_float = true }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint16_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int16_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint16_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint8_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint8_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int8_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int8_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint8_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int32_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint32_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int64_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = uint64_t; - enum { is_float = false }; -}; -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int8_t; - enum { is_float = false }; -}; - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = int8_t; - enum { is_float = false }; -}; - -template <> -struct PrecisionTrait { - using value_type = std::string; - enum { is_float = false }; -}; - -template -INFERENCE_ENGINE_1_0_DEPRECATED inline uint8_t type_size_or_zero() { - return sizeof(T); -} - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { - using value_type = void; - enum { is_float = false }; -}; - -template <> -struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait : PrecisionTrait {}; - -template <> -INFERENCE_ENGINE_1_0_DEPRECATED inline uint8_t type_size_or_zero() { - return 0; -} - -template 
-INFERENCE_ENGINE_1_0_DEPRECATED inline Precision::PrecisionInfo Precision::makePrecisionInfo(const char* name) { - Precision::PrecisionInfo info; - info.name = name; - - size_t nBits = precision == BIN ? 1 : (precision == U4 || precision == I4 || precision == NF4) ? 4 : 8; - info.bitsSize = nBits * type_size_or_zero::value_type>(); - info.isFloat = PrecisionTrait::is_float; - info.value = precision; - return info; -} - -inline std::ostream& operator<<(std::ostream& out, const InferenceEngine::Precision& p) { - return out << p.name(); -} - -inline std::ostream& operator<<(std::ostream& out, const InferenceEngine::Precision::ePrecision& p) { - return out << Precision(p).name(); -} - -inline std::ostream& operator<<(std::ostream& os, const std::vector& values) { - os << "{ "; - for (size_t i = 0; i < values.size(); ++i) { - os << values[i]; - if (i != (values.size() - 1ul)) { - os << ", "; - } - } - os << " }"; - return os; -} - -INFERENCE_ENGINE_1_0_DEPRECATED inline constexpr uint32_t getPrecisionMask( - InferenceEngine::Precision::ePrecision precision1, - InferenceEngine::Precision::ePrecision precision2, - InferenceEngine::Precision::ePrecision precision3 = InferenceEngine::Precision::MIXED, - InferenceEngine::Precision::ePrecision precision4 = InferenceEngine::Precision::MIXED) { - return (precision1) | (precision2 << 8) | (precision3 << 16) | (precision4 << 24); -} - -/** @endcond */ - -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/inference_engine.hpp b/src/inference/include/ie/inference_engine.hpp deleted file mode 100644 index 5638013e294af2..00000000000000 --- a/src/inference/include/ie/inference_engine.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file that provides a set minimal required Inference Engine API. - * @file inference_engine.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ie_core.hpp" - -// remove in 2022.1 major release -#include diff --git a/src/inference/include/openvino/runtime/core.hpp b/src/inference/include/openvino/runtime/core.hpp index d0653cdaffcbad..128a90372b7d82 100644 --- a/src/inference/include/openvino/runtime/core.hpp +++ b/src/inference/include/openvino/runtime/core.hpp @@ -25,10 +25,6 @@ #include "openvino/runtime/remote_context.hpp" #include "openvino/runtime/tensor.hpp" -namespace InferenceEngine { -class IExtension; -} // namespace InferenceEngine - namespace ov { /** diff --git a/src/inference/include/openvino/runtime/variable_state.hpp b/src/inference/include/openvino/runtime/variable_state.hpp index 711544b6efa0da..9ea114d7b92fc9 100644 --- a/src/inference/include/openvino/runtime/variable_state.hpp +++ b/src/inference/include/openvino/runtime/variable_state.hpp @@ -15,10 +15,6 @@ #include "openvino/runtime/common.hpp" #include "openvino/runtime/tensor.hpp" -namespace InferenceEngine { -class IAsyncInferRequestWrapper; -} // namespace InferenceEngine - namespace ov { class InferRequest; @@ -42,8 +38,6 @@ class OPENVINO_RUNTIME_API VariableState { VariableState(const std::shared_ptr& impl, const std::shared_ptr& so); friend class ov::InferRequest; - friend class ov::IInferRequestInternalWrapper; - friend class InferenceEngine::IAsyncInferRequestWrapper; public: /** diff --git a/src/inference/src/any_copy.cpp b/src/inference/src/any_copy.cpp deleted file mode 100644 index d4cbb2b2450592..00000000000000 --- a/src/inference/src/any_copy.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "any_copy.hpp" - -#include - -#include "openvino/runtime/properties.hpp" - -namespace ov { -std::map any_copy(const ov::AnyMap& params) { - std::map result; - for (auto&& value : params) { - result.emplace(value.first, value.second.as()); - } - return result; -} - -ov::AnyMap any_copy(const std::map& params) { - ov::AnyMap result; - for (auto&& value : params) { - result.emplace(value.first, value.second); - } - return result; -} -} // namespace ov diff --git a/src/inference/src/any_copy.hpp b/src/inference/src/any_copy.hpp deleted file mode 100644 index f9121035edd028..00000000000000 --- a/src/inference/src/any_copy.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -namespace ov { -std::map any_copy(const ov::AnyMap& config_map); -ov::AnyMap any_copy(const std::map& config_map); -} // namespace ov diff --git a/src/inference/src/blob_factory.cpp b/src/inference/src/blob_factory.cpp deleted file mode 100644 index dcf7181944502e..00000000000000 --- a/src/inference/src/blob_factory.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "blob_factory.hpp" - -#include - -IE_SUPPRESS_DEPRECATED_START - -InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc) { - return make_blob_with_precision(desc.getPrecision(), desc); -} - -InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc, void* ptr) { - return make_blob_with_precision(desc.getPrecision(), desc, ptr); -} - -InferenceEngine::Blob::Ptr 
make_blob_with_precision(const InferenceEngine::TensorDesc& desc, - const std::shared_ptr& alloc) { - return make_blob_with_precision(desc.getPrecision(), desc, alloc); -} diff --git a/src/inference/src/cache_guard.cpp b/src/inference/src/cache_guard.cpp index 184455fa463d4b..6460cdbb861b07 100644 --- a/src/inference/src/cache_guard.cpp +++ b/src/inference/src/cache_guard.cpp @@ -4,8 +4,6 @@ #include "cache_guard.hpp" -#include "ie_common.h" - namespace ov { CacheGuardEntry::CacheGuardEntry(CacheGuard& cacheGuard, diff --git a/src/inference/src/ie_cache_manager.hpp b/src/inference/src/cache_manager.hpp similarity index 80% rename from src/inference/src/ie_cache_manager.hpp rename to src/inference/src/cache_manager.hpp index 01f6cc943d68ab..41e74fa1c20f0d 100644 --- a/src/inference/src/ie_cache_manager.hpp +++ b/src/inference/src/cache_manager.hpp @@ -3,9 +3,9 @@ // /** - * @brief This is a header file for the Inference Engine Cache Manager class C++ API + * @brief This is a header file for the OpenVINO Cache Manager class C++ API * - * @file ie_cache_manager.hpp + * @file cache_manager.hpp */ #pragma once @@ -14,7 +14,6 @@ #include #include -#include "ie_api.h" #include "openvino/util/file_util.hpp" namespace ov { @@ -36,38 +35,38 @@ class ICacheManager { */ using StreamWriter = std::function; /** - * @brief Callback when Inference Engine intends to write network to cache + * @brief Callback when OpenVINO intends to write model to cache * * Client needs to call create std::ostream object and call writer(ostream) - * Otherwise, network will not be cached + * Otherwise, model will not be cached * - * @param id Id of cache (hash of the network) + * @param id Id of cache (hash of the model) * @param writer Lambda function to be called when stream is created */ virtual void write_cache_entry(const std::string& id, StreamWriter writer) = 0; /** * @brief Function passing created input stream - * */ using StreamReader = std::function; + /** - * @brief Callback when Inference Engine intends to read network from cache + * @brief Callback when OpenVINO intends to read model from cache * * Client needs to call create std::istream object and call reader(istream) - * Otherwise, network will not be read from cache and will be loaded as usual + * Otherwise, model will not be read from cache and will be loaded as usual * - * @param id Id of cache (hash of the network) + * @param id Id of cache (hash of the model) * @param reader Lambda function to be called when input stream is created */ virtual void read_cache_entry(const std::string& id, StreamReader reader) = 0; /** - * @brief Callback when Inference Engine intends to remove cache entry + * @brief Callback when OpenVINO intends to remove cache entry * * Client needs to perform appropriate cleanup (e.g. 
delete a cache file) * - * @param id Id of cache (hash of the network) + * @param id Id of cache (hash of the model) */ virtual void remove_cache_entry(const std::string& id) = 0; }; diff --git a/src/inference/src/check_network_batchable.hpp b/src/inference/src/check_network_batchable.hpp index cf21cfa1636d38..3452b524e4c0e3 100644 --- a/src/inference/src/check_network_batchable.hpp +++ b/src/inference/src/check_network_batchable.hpp @@ -4,8 +4,9 @@ #pragma once #include +#include -#include "cnn_network_ngraph_impl.hpp" +#include "openvino/core/model.hpp" namespace ov { namespace details { diff --git a/src/inference/src/cnn_network_ngraph_impl.cpp b/src/inference/src/cnn_network_ngraph_impl.cpp deleted file mode 100644 index 433461f22d3dcc..00000000000000 --- a/src/inference/src/cnn_network_ngraph_impl.cpp +++ /dev/null @@ -1,631 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cnn_network_ngraph_impl.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "blob_factory.hpp" -#include "cpp/ie_cnn_network.h" -#include "ie_common.h" -#include "ie_ngraph_utils.hpp" -#include "itt.hpp" -#include "openvino/cc/pass/itt.hpp" -#include "openvino/core/except.hpp" -#include "openvino/op/util/op_types.hpp" -#include "openvino/pass/manager.hpp" -#include "openvino/pass/serialize.hpp" -#include "transformations/common_optimizations/fold_subgraph_empty_inputs.hpp" -#include "transformations/common_optimizations/nop_elimination.hpp" -#include "transformations/common_optimizations/remove_concat_zero_dim_input.hpp" -#include "transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp" -#include "transformations/smart_reshape/set_batch_size.hpp" -#include "transformations/smart_reshape/smart_reshape.hpp" -#include "transformations/utils/utils.hpp" - -using namespace std; -using namespace InferenceEngine; -using details::CNNNetworkNGraphImpl; -using InferenceEngine::details::CNNNetworkNGraphImpl; - -void CNNNetworkNGraphImpl::createDataForResult(const ::ov::Output<::ov::Node>& output, - const std::string& outName, - DataPtr& ptr) { - const auto isCompatible = [](int64_t size, const Layout& l) -> bool { - switch (size) { - case -1: - return l == Layout::BLOCKED; - case 0: - return l == Layout::SCALAR; - case 1: - return l == Layout::C; - case 2: - return l == Layout::CN || l == Layout::HW || l == Layout::NC; - case 3: - return l == Layout::CHW || l == Layout::HWC; - case 4: - return l == Layout::NCHW || l == Layout::NHWC; - case 5: - return l == Layout::NCDHW || l == Layout::NDHWC; - default: - return false; - } - }; - auto shape = output.get_partial_shape(); - SizeVector dims(1, 0); - if (shape.rank().is_static()) { - dims.resize(shape.size(), 0); - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i].get_max_length() != -1) // dimension has an estimation - dims[i] = shape[i].get_max_length(); - } - } - // query shape from ngraph::Parameter output shape and check there are no zeros in it - for (const auto& dim : shape) { - if (dim.is_static() && dim.get_length() == 0) - IE_THROW() << outName << " has zero dimension which is not allowed"; - } - - auto rank = shape.rank().is_static() ? shape.rank().get_length() : -1; - const Layout rankLayout = rank < 0 ? Layout::BLOCKED : TensorDesc::getLayoutByRank(rank); - if (ptr) { - const auto origLayout = ptr->getTensorDesc().getLayout(); - const auto layout = isCompatible(rank, origLayout) ? 
origLayout : rankLayout; - ptr->reshape(dims, layout); - } else { - const auto precision = details::convertPrecision(output.get_element_type()); - ptr.reset(new Data(outName, {precision, dims, rankLayout})); - } -} - -void CNNNetworkNGraphImpl::validateFunctionNames() const { - // nGraph function parameters and pre-Results operations should have unique names - std::unordered_map> unique_names; - for (const auto& param : _ngraph_function->get_parameters()) { - if (unique_names.count(param->get_friendly_name())) { - IE_THROW() << "Function contains several inputs with one friendly name!"; - } - unique_names.insert({param->get_friendly_name(), param}); - } - for (const auto& result : _ngraph_function->get_results()) { - const auto& parent = result->get_input_node_shared_ptr(0); - auto name = parent->get_friendly_name(); - if (parent->get_output_size() > 1) { - name += "." + std::to_string(result->get_input_source_output(0).get_index()); - } - if (unique_names.count(name) && !ov::op::util::is_parameter(parent) && parent != unique_names.at(name)) { - IE_THROW() << "Function contains several inputs and outputs with one friendly name: " << name; - } - unique_names.insert({name, parent}); - } -} - -ov::element::Type details::toLegacyType(const ov::element::Type& ngraph_type, bool input) { - if (input) { - return ngraph_type == ov::element::f16 ? ov::element::f32 : ngraph_type; - } else { - if (ngraph_type == ov::element::i64 || ngraph_type == ov::element::u64 || ngraph_type == ov::element::i32 || - ngraph_type == ov::element::u32) { - return ov::element::i32; - } else if (ngraph_type != ov::element::f32) { - return ov::element::f32; - } - } - - return ngraph_type; -} - -CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr& nGraph, bool newAPI) - : _ngraph_function(nGraph), - _new_api(newAPI) { - { - ov::pass::Manager m; - using namespace ov::pass; - REGISTER_PASS(m, EliminateScatterUpdate) - REGISTER_PASS(m, RemoveConcatZeroDimInput) - REGISTER_PASS(m, RemoveMultiSubGraphOpDanglingParamsResults) - REGISTER_PASS(m, FoldSubgraphEmptyInputs) - m.run_passes(_ngraph_function); - } - // Restore usual attributes for CNNNetwork - auto keep_input_info = [=](CNNNetworkNGraphImpl& network, const DataPtr& inData) { - InputInfo::Ptr info(new InputInfo()); - info->setInputData(inData); - - if (!_new_api) { - Precision prc = info->getPrecision(); - - // Convert precision into native format (keep element size) - prc = prc == Precision::Q78 - ? Precision::I16 - : prc == Precision::FP16 ? Precision::FP32 : static_cast(prc); - - info->setPrecision(details::convertPrecision(toLegacyType(details::convertPrecision(prc), true))); - } - - network.setInputInfo(info); - }; - - validateFunctionNames(); - - reshape(); - for (const auto& layer : _ngraph_function->get_parameters()) { - std::string outName = layer->get_friendly_name(); - IE_ASSERT(layer->get_output_size() == 1); // Parameter as only singly output port - - // map original names to OpenVINO name - for (const auto& name : layer->get_output_tensor(0).get_names()) { - _tensorNames[name] = outName; - } - - DataPtr& ptr = _data[outName]; - IE_ASSERT(ptr); // Data must be allocated after the reshape method - - keep_input_info(*this, ptr); - } - - if (!_new_api) { - for (auto& output : _outputData) { - // Convert precision into native format. Be consistent with possible conversion to CNNNetwork later. 
- output.second->setPrecision(details::convertPrecision( - toLegacyType(details::convertPrecision(output.second->getPrecision()), false))); - } - } -} - -CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) { - IE_SUPPRESS_DEPRECATED_START - const ICNNNetwork& iNetwork = network; - IE_SUPPRESS_DEPRECATED_END - const auto net = dynamic_cast(&iNetwork); - if (network.getFunction() == nullptr || !net) { - IE_THROW() << "Cannot create CNNNetwork with nGraph from legacy network format!"; - } - - _ngraph_function = network.getFunction()->clone(); - validateFunctionNames(); - InputsDataMap inputs = network.getInputsInfo(); - OutputsDataMap outputs = network.getOutputsInfo(); - - _tensorNames = net->_tensorNames; - - for (const auto& outputInfo : outputs) { - const auto& name = outputInfo.second->getName(); - DataPtr output = std::make_shared(name, outputInfo.second->getTensorDesc()); - _outputData[name] = output; - _data[name] = output; - } - for (const auto& inputInfo : inputs) { - InputInfo::Ptr info = std::make_shared(); - const auto& name = inputInfo.second->getInputData()->getName(); - const auto& inData = inputInfo.second->getInputData(); - DataPtr input = std::make_shared(name, inData->getTensorDesc()); - _data[name] = input; - info->setInputData(input); - info->setPrecision(inputInfo.second->getPrecision()); - info->setLayout(inputInfo.second->getLayout()); - _inputData[name] = info; - } -} - -void CNNNetworkNGraphImpl::setInputInfo(InputInfo::Ptr data) { - _inputData[data->name()] = data; -} - -const std::string& CNNNetworkNGraphImpl::getName() const noexcept { - return _ngraph_function->get_friendly_name(); -} - -InputInfo::Ptr CNNNetworkNGraphImpl::getInput(const std::string& inputName) const noexcept { - auto it = _inputData.find(inputName); - if (it == _inputData.end()) { - return nullptr; - } - return it->second; -} - -void CNNNetworkNGraphImpl::getOutputsInfo(OutputsDataMap& out) const noexcept { - out = _outputData; -} - -void CNNNetworkNGraphImpl::getInputsInfo(InputsDataMap& inputs) const noexcept { - inputs = _inputData; -} - -size_t CNNNetworkNGraphImpl::layerCount() const { - return _ngraph_function->get_ops().size(); -} - -void CNNNetworkNGraphImpl::validate(int version) { - _ngraph_function->validate_nodes_and_infer_types(); -} - -StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, - size_t outputIndex, - ResponseDesc* resp) noexcept { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CNNNetworkNGraphImpl::addOutput"); - - try { - for (const auto& layer : _ngraph_function->get_ops()) { - // Result can have the same name as previous operation - if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { - // Check that output port exists - if (layer->outputs().size() <= outputIndex) { - return DescriptionBuffer(OUT_OF_BOUNDS, resp) - << "port index " << outputIndex << " exceeds the number of layer outputs " - << layer->outputs().size(); - } - std::string outputName = layerName; - if (layer->outputs().size() != 1) { - outputName += "." 
+ std::to_string(outputIndex); - } - - // Check that we don't have a result for the output port - for (const auto& port : layer->output(outputIndex).get_target_inputs()) { - if (dynamic_cast(port.get_node())) - return OK; - } - auto result = make_shared<::ov::op::v0::Result>(layer->output(outputIndex)); - result->set_friendly_name(outputName); - _ngraph_function->add_results({result}); - // Check that we cannot add Result to layer with non unique friendly name - try { - validateFunctionNames(); - } catch (...) { - _ngraph_function->remove_result(result); - throw; - } - - if (_outputData.count(outputName) == 0) { - reshape(); - } - return OK; - } - } - } catch (...) { - return GENERAL_ERROR; - } - return DescriptionBuffer(NOT_FOUND, resp) << "Cannot add output! Layer " << layerName << " wasn't found!"; -} - -void CNNNetworkNGraphImpl::addOutput(const ::ov::Output<::ov::Node>& output) { - auto dataName = ov::op::util::create_ie_output_name(output); - DataPtr data; - if (_data.count(dataName)) - data = _data[dataName]; - createDataForResult(output, dataName, data); - _data[dataName] = data; - _outputData[dataName] = data; - - // Save original framework names - for (const auto& name : output.get_tensor().get_names()) { - _tensorNames[name] = dataName; - } -} - -size_t CNNNetworkNGraphImpl::getBatchSize() const { - // TODO Provide adequate implementation. - // The original code from CNNNetworkImpl just gets the first input and returns the first dimension. - // This is not correct in general. We can follow the same semantics, but order of inputs should be - // guaranteed to be the same. - auto params = _ngraph_function->get_parameters(); - sort(params.begin(), params.end(), [](std::shared_ptr lhs, std::shared_ptr rhs) { - return lhs->get_friendly_name() < rhs->get_friendly_name(); - }); - - for (const auto& param : params) { - if (param->get_output_partial_shape(0).rank().is_dynamic()) - continue; - auto pshape = param->get_output_partial_shape(0); - auto rank = pshape.rank().get_length(); - // WA: for speech recognition and scalar layouts (copy-past from CNNNetwork) - if ((rank == 2 || rank > 3) && pshape[0].is_static()) { - return pshape[0].get_length(); - } - } - return 1; -} - -void CNNNetworkNGraphImpl::reshape() { - reshape({}); -} - -StatusCode CNNNetworkNGraphImpl::reshape(const std::map& inputShapes, - ResponseDesc* responseDesc) noexcept { - try { - if (inputShapes.empty()) - return OK; - - const auto& params = _ngraph_function->get_parameters(); - - // Check that we need to do reshape only if input shapes will be changed - bool needReshape = false; - for (const auto& param : params) { - const auto it = inputShapes.find(param->get_friendly_name()); - if (it == inputShapes.end()) { - continue; - } - if (param->get_output_partial_shape(0).is_dynamic() || param->get_output_partial_shape(0) != it->second) { - needReshape = true; - break; - } - } - - if (!needReshape) - return OK; - - // save original parameters shape - std::map originalInputShapes; - for (const auto& param : params) { - originalInputShapes[param->get_friendly_name()] = param->get_output_partial_shape(0); - } - - try { - ov::pass::Manager ssr_manager; - using namespace ov::pass; - REGISTER_PASS(ssr_manager, SmartReshape) - ssr_manager.run_passes(_ngraph_function); - - reshape(inputShapes); - } catch (std::exception& ex) { - reshape(originalInputShapes); - return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); - } - - return OK; - } catch (const InferenceEngine::GeneralError& ex) { - return 
DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); - } catch (const ov::Exception& ex) { - return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); - } catch (const std::runtime_error& ex) { - return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); - } catch (const std::out_of_range& ex) { - return DescriptionBuffer(OUT_OF_BOUNDS, responseDesc) << ex.what(); - } catch (...) { - return GENERAL_ERROR; - } -} - -StatusCode CNNNetworkNGraphImpl::reshape(const std::map& inputShapes, - ResponseDesc* responseDesc) noexcept { - std::map shapes; - for (const auto& shape : inputShapes) - shapes[shape.first] = ov::PartialShape(shape.second); - return reshape(shapes, responseDesc); -} - -#if 0 -namespace { -void collect_dynamism_signature(const std::shared_ptr& ov_model, - std::map>& signatures, - bool obfuscate) { - for (const auto& op : ov_model->get_ordered_ops()) { - const auto& type_name = string(op->get_type_info().name) + "_" + op->get_type_info().version_id; - - std::stringstream shape_representation; - for (const auto& input : op->input_values()) { - bool first = true; - shape_representation << "{"; - for (const auto& dimension : input.get_partial_shape()) { - if (!first) - shape_representation << ","; - first = false; - - if (obfuscate) - shape_representation << (dimension.is_dynamic() ? "D" : "S"); - else - shape_representation << dimension; - } - shape_representation << "} "; - } - shape_representation << "-> "; - for (const auto& output : op->outputs()) { - bool first = true; - shape_representation << "{"; - for (const auto& dimension : output.get_partial_shape()) { - if (!first) - shape_representation << ","; - first = false; - - if (obfuscate) - shape_representation << (dimension.is_dynamic() ? "D" : "S"); - else - shape_representation << dimension; - } - shape_representation << "} "; - } - signatures[type_name][shape_representation.str()]++; - - // collect dynamism signature for sub-graphs of multi-subgraph operation - if (const auto multi_sub_graph_op = ov::as_type_ptr(op)) { - int num_subgraphs = static_cast(multi_sub_graph_op->get_internal_subgraphs_size()); - for (int i = 0; i < num_subgraphs; i++) - collect_dynamism_signature(multi_sub_graph_op->get_function(i), signatures, obfuscate); - } - } -} -} // namespace -#endif - -void CNNNetworkNGraphImpl::reshape(const std::map& inputShapes) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CNNNetworkNGraphImpl::reshape"); - - auto params = _ngraph_function->get_parameters(); - - bool parameter_replaced = false; - for (auto& param : params) { - if (inputShapes.find(param->get_friendly_name()) == inputShapes.end()) - continue; - param->set_partial_shape(inputShapes.at(param->get_friendly_name())); - parameter_replaced = true; - } - if (parameter_replaced) - _ngraph_function->validate_nodes_and_infer_types(); - -#if 0 - bool obfuscate = true; // set to false to get exact dimensions - std::map> signatures; - - collect_dynamism_signature(_ngraph_function, signatures, obfuscate); - - for (const auto& item : signatures) - for (const auto& shape_to_count : item.second) - std::cout << item.first << " " << shape_to_count.second << "x " << shape_to_count.first << std::endl; -#endif - - std::unordered_set opName; - for (const auto& result : _ngraph_function->get_results()) { - addOutput(result->input_value(0)); - } - - for (const auto& parameter : _ngraph_function->get_parameters()) { - const auto& outName = parameter->get_friendly_name(); - if (opName.find(outName) != opName.end()) { - IE_THROW() << "All operations in 
nGraph function should have unique friendly names!"; - } - opName.insert(outName); - createDataForResult(parameter, outName, _data[outName]); - } -} - -StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, - const std::string& binPath, - ResponseDesc* resp) const noexcept { - try { - ov::pass::Manager manager; - using namespace ov::pass; - REGISTER_PASS(manager, Serialize, xmlPath, binPath, ov::pass::Serialize::Version::IR_V10) - manager.run_passes(_ngraph_function); - } catch (const Exception& e) { - return DescriptionBuffer(GENERAL_ERROR, resp) << e.what(); - } catch (const std::exception& e) { - return DescriptionBuffer(UNEXPECTED, resp) << e.what(); - } catch (...) { - return DescriptionBuffer(UNEXPECTED, resp); - } - return OK; -} - -StatusCode CNNNetworkNGraphImpl::serialize(std::ostream& xmlBuf, std::ostream& binBuf, ResponseDesc* resp) const - noexcept { - try { - ov::pass::Manager manager; - using namespace ov::pass; - REGISTER_PASS(manager, Serialize, xmlBuf, binBuf, ov::pass::Serialize::Version::IR_V10) - manager.run_passes(_ngraph_function); - } catch (const Exception& e) { - return DescriptionBuffer(GENERAL_ERROR, resp) << e.what(); - } catch (const std::exception& e) { - return DescriptionBuffer(UNEXPECTED, resp) << e.what(); - } catch (...) { - return DescriptionBuffer(UNEXPECTED, resp); - } - return OK; -} - -StatusCode CNNNetworkNGraphImpl::serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob, ResponseDesc* resp) const - noexcept { - try { - std::stringstream binBuf; - ov::pass::Manager manager; - using namespace ov::pass; - REGISTER_PASS(manager, Serialize, xmlBuf, binBuf, ov::pass::Serialize::Version::IR_V10) - manager.run_passes(_ngraph_function); - - std::streambuf* pbuf = binBuf.rdbuf(); - unsigned long bufSize = static_cast(binBuf.tellp()); - - TensorDesc tensorDesc(Precision::U8, {bufSize}, Layout::C); - binBlob = make_shared_blob(tensorDesc); - binBlob->allocate(); - pbuf->sgetn(binBlob->buffer(), bufSize); - } catch (const Exception& e) { - return DescriptionBuffer(GENERAL_ERROR, resp) << e.what(); - } catch (const std::exception& e) { - return DescriptionBuffer(UNEXPECTED, resp) << e.what(); - } catch (...) { - return DescriptionBuffer(UNEXPECTED, resp); - } - return OK; -} - -StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, - const std::string& orig_name, - ResponseDesc* resp) const noexcept { - if (_tensorNames.find(orig_name) == _tensorNames.end()) - return DescriptionBuffer(NOT_FOUND, resp) - << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!"; - ov_name = _tensorNames.at(orig_name); - return OK; -} - -StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept { - try { - if (getBatchSize() == size) - return OK; - auto original_parameters = _ngraph_function->get_parameters(); - if (original_parameters.empty()) - return DescriptionBuffer(GENERAL_ERROR, responseDesc) - << "Cannot set batch! Function doesn't contain parameters!"; - - stringstream ss; - ss << " Please use reshape method instead. 
Original parameter shapes are: "; - for (size_t i = 0; i < original_parameters.size(); ++i) { - if (i) - ss << ", "; - ss << "\"" << original_parameters[i]->get_friendly_name() - << "\": " << original_parameters[i]->get_output_partial_shape(0); - } - - // ill-formed logic from the past setBatchSize (we keep it for backward-compatibility) - const auto first_parameter = - *std::min_element(original_parameters.begin(), - original_parameters.end(), - [](std::shared_ptr lhs, std::shared_ptr rhs) { - return lhs->get_friendly_name() < rhs->get_friendly_name(); - }); - const auto first_parameter_pshape = first_parameter->get_output_partial_shape(0); - if (first_parameter_pshape.is_dynamic()) - return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) - << "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str(); - const auto first_parameter_rank = first_parameter_pshape.rank().get_length(); - if (first_parameter_rank == 0 || first_parameter_rank == 1 || first_parameter_rank == 3) - return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) - << "Cannot set batch! Function contains 0D/1D/3D parameter with unknown batch dimension placement." - << ss.str(); - - std::map> inShapes; - for (const auto& parameter : original_parameters) { - const auto& pshape = parameter->get_output_partial_shape(0); - if (pshape.is_dynamic()) - return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) - << "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str(); - const auto& rank = pshape.rank().get_length(); - if (rank == 0) - return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) - << "Cannot set batch! Function contains 0D/1D/3D parameter with unknown batch dimension " - "placement." - << ss.str(); - auto shape = parameter->get_shape(); - shape[0] = {static_cast( - std::ceil(size * static_cast(shape[0]) / static_cast(getBatchSize())))}; - inShapes[parameter->get_friendly_name()] = shape; - } - ov::pass::Manager ssr_manager; - using namespace ov::pass; - REGISTER_PASS(ssr_manager, SetBatchSize) - ssr_manager.run_passes(_ngraph_function); - - return reshape(inShapes, responseDesc); - } catch (std::exception& ex) { - return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); - } -} diff --git a/src/inference/src/cnn_network_ngraph_impl.hpp b/src/inference/src/cnn_network_ngraph_impl.hpp deleted file mode 100644 index 482e3a3e5baa6f..00000000000000 --- a/src/inference/src/cnn_network_ngraph_impl.hpp +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A file containing ngraph implementation of public CNNNetwork wrapper - * @file cnn_network_ngraph_impl.hpp - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "description_buffer.hpp" -#include "ie_api.h" -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_data.h" -#include "ie_input_info.hpp" - -namespace InferenceEngine { -namespace details { - -ov::element::Type toLegacyType(const ov::element::Type& ngraph_type, bool input); - -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief Ngraph-based implementation of the CNNNetwork. 
- */ -class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl) final : public ICNNNetwork { -public: - CNNNetworkNGraphImpl(const std::shared_ptr<::ov::Model>& nGraph, bool newAPI = false); - - CNNNetworkNGraphImpl(const CNNNetwork& nGraph); - - void getOutputsInfo(std::map& out) const noexcept override; - - void getInputsInfo(InputsDataMap& inputs) const noexcept override; - - InputInfo::Ptr getInput(const std::string& inputName) const noexcept override; - const std::string& getName() const noexcept override; - - size_t layerCount() const override; - - void setInputInfo(InputInfo::Ptr data); - - // public version - StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept override; - - size_t getBatchSize() const override; - - StatusCode addOutput(const std::string& layerName, size_t outputIndex, ResponseDesc* resp) noexcept override; - - void addOutput(const ::ov::Output<::ov::Node>& dataName); - - std::shared_ptr getFunction() const noexcept override { - return _ngraph_function; - } - std::shared_ptr<::ov::Model> getFunction() noexcept override { - return _ngraph_function; - } - - virtual void validate(int = 10); - - StatusCode reshape(const std::map& inputShapes, ResponseDesc* resp) noexcept override; - StatusCode reshape(const std::map& inputShapes, - ResponseDesc* resp) noexcept override; - - StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const - noexcept override; - - StatusCode serialize(std::ostream& xmlBuf, std::ostream& binBuf, ResponseDesc* resp) const noexcept override; - - StatusCode serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob, ResponseDesc* resp) const noexcept override; - - StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const - noexcept override; - - // used by convertFunctionToICNNNetwork from legacy library - std::map _data; - -protected: - std::shared_ptr<::ov::Model> _ngraph_function; - -private: - InferenceEngine::InputsDataMap _inputData; - std::map _outputData; - std::unordered_map _tensorNames; - bool _new_api = false; - - /** - * @brief Create DataPtr for nGraph operation - * - * @param output output port from nGraph op - * @param outName name for DataPtr - * @param ptr reference to new DataPtr - */ - void createDataForResult(const ::ov::Output<::ov::Node>& output, const std::string& outName, DataPtr& ptr); - - /** - * @brief Reshape on the same shape - */ - void reshape(); - void reshape(const std::map& inputShapes); - void validateFunctionNames() const; -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/src/compilation_context.cpp b/src/inference/src/compilation_context.cpp index 8eaca40d5f09c8..c0fd99c6022a00 100644 --- a/src/inference/src/compilation_context.cpp +++ b/src/inference/src/compilation_context.cpp @@ -11,7 +11,6 @@ # include #endif -#include "cpp/ie_cnn_network.h" #include "itt.hpp" #include "openvino/pass/manager.hpp" #include "openvino/util/file_util.hpp" @@ -39,18 +38,6 @@ static int32_t as_int32_t(T v) { } // namespace ov -namespace { - -uint64_t calculate_td(const InferenceEngine::TensorDesc& td, uint64_t _seed) { - uint64_t seed = _seed; - - seed = ov::hash_combine(seed, ov::as_int32_t(td.getPrecision())); - seed = ov::hash_combine(seed, ov::as_int32_t(td.getLayout())); - return seed; -} - -} // namespace - namespace ov { std::string ModelCache::calculate_file_info(const std::string& filePath) { @@ -102,23 +89,6 @@ std::string 
ModelCache::compute_hash(const std::shared_ptr& mod } } - // 4. Legacy part if CNNNetwork is used with new Plugin API - for (auto&& input : model->inputs()) { - auto& rt_info = input.get_rt_info(); - - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - seed = calculate_td(it->second.as(), seed); - } - } - for (auto&& output : model->outputs()) { - auto& rt_info = output.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - seed = calculate_td(it->second.as(), seed); - } - } - return std::to_string(seed); } @@ -184,10 +154,7 @@ std::istream& operator>>(std::istream& stream, CompiledBlobHeader& header) { pugi::xml_document document; pugi::xml_parse_result res = document.load_string(xmlStr.c_str()); - - if (res.status != pugi::status_ok) { - IE_THROW(NetworkNotRead) << "Error reading compiled blob header"; - } + OPENVINO_ASSERT(res.status == pugi::status_ok, "Error reading compiled blob header"); pugi::xml_node compiledBlobNode = document.document_element(); header.m_ieVersion = ov::util::pugixml::get_str_attr(compiledBlobNode, "ie_version"); diff --git a/src/inference/src/compiled_model.cpp b/src/inference/src/cpp/compiled_model.cpp similarity index 100% rename from src/inference/src/compiled_model.cpp rename to src/inference/src/cpp/compiled_model.cpp diff --git a/src/inference/src/core.cpp b/src/inference/src/cpp/core.cpp similarity index 95% rename from src/inference/src/core.cpp rename to src/inference/src/cpp/core.cpp index 75a68a2c75723d..1c61c33c4d35be 100644 --- a/src/inference/src/core.cpp +++ b/src/inference/src/cpp/core.cpp @@ -4,17 +4,18 @@ #include "openvino/runtime/core.hpp" -#include "any_copy.hpp" -#include "dev/converter_utils.hpp" #include "dev/core_impl.hpp" #include "itt.hpp" #include "openvino/core/so_extension.hpp" +#include "openvino/frontend/manager.hpp" #include "openvino/runtime/device_id_parser.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/util/file_util.hpp" namespace ov { +namespace { + std::string find_plugins_xml(const std::string& xml_file) { if (xml_file.empty()) { const auto ov_library_path = ov::util::get_ov_lib_path(); @@ -39,6 +40,8 @@ std::string find_plugins_xml(const std::string& xml_file) { return xml_file; } +} // namespace + #define OV_CORE_CALL_STATEMENT(...) 
\ try { \ __VA_ARGS__; \ @@ -50,13 +53,13 @@ std::string find_plugins_xml(const std::string& xml_file) { class Core::Impl : public CoreImpl { public: - Impl() : ov::CoreImpl(true) {} + Impl() : ov::CoreImpl() {} }; Core::Core(const std::string& xml_config_file) { _impl = std::make_shared(); - std::string xmlConfigFile = ov::find_plugins_xml(xml_config_file); + std::string xmlConfigFile = find_plugins_xml(xml_config_file); if (!xmlConfigFile.empty()) OV_CORE_CALL_STATEMENT( // If XML is default, load default plugins by absolute paths @@ -66,13 +69,7 @@ Core::Core(const std::string& xml_config_file) { } std::map Core::get_versions(const std::string& device_name) const { - OV_CORE_CALL_STATEMENT({ - std::map versions; - for (auto&& kvp : _impl->GetVersions(device_name)) { - versions[kvp.first] = Version{kvp.second.buildNumber, kvp.second.description}; - } - return versions; - }) + OV_CORE_CALL_STATEMENT({ return _impl->get_versions(device_name); }) } #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT std::shared_ptr Core::read_model(const std::wstring& model_path, const std::wstring& bin_path) const { @@ -214,7 +211,7 @@ Any Core::get_property(const std::string& device_name, const std::string& name, } std::vector Core::get_available_devices() const { - OV_CORE_CALL_STATEMENT(return _impl->GetAvailableDevices();); + OV_CORE_CALL_STATEMENT(return _impl->get_available_devices();); } void Core::register_plugin(const std::string& plugin, const std::string& device_name, const ov::AnyMap& properties) { @@ -260,4 +257,8 @@ RemoteContext Core::get_default_context(const std::string& device_name) { }); } +void shutdown() { + frontend::FrontEndManager::shutdown(); +} + } // namespace ov diff --git a/src/inference/src/cpp/exception2status.hpp b/src/inference/src/cpp/exception2status.hpp deleted file mode 100644 index 4350a2b6c1f4dc..00000000000000 --- a/src/inference/src/cpp/exception2status.hpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Wrappers from c++ function to c-style one - * @file exception2status.hpp - */ -#pragma once - -#include - -#include "description_buffer.hpp" -#include "openvino/core/except.hpp" - -namespace InferenceEngine { -#define CATCH_IE_EXCEPTION_TO_STATUS(StatusCode, ExceptionType) \ - catch (const InferenceEngine::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode, resp) << ex.what(); \ - } - -#define CATCH_OV_EXCEPTION_TO_STATUS(StatusCode, ExceptionType) \ - catch (const ov::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode, resp) << ex.what(); \ - } - -#define CATCH_IE_EXCEPTIONS_TO_STATUS \ - CATCH_OV_EXCEPTION_TO_STATUS(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_OV_EXCEPTION_TO_STATUS(GENERAL_ERROR, Exception) \ - CATCH_IE_EXCEPTION_TO_STATUS(GENERAL_ERROR, GeneralError) \ - CATCH_IE_EXCEPTION_TO_STATUS(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_IE_EXCEPTION_TO_STATUS(NETWORK_NOT_LOADED, NetworkNotLoaded) \ - CATCH_IE_EXCEPTION_TO_STATUS(PARAMETER_MISMATCH, ParameterMismatch) \ - CATCH_IE_EXCEPTION_TO_STATUS(NOT_FOUND, NotFound) \ - CATCH_IE_EXCEPTION_TO_STATUS(OUT_OF_BOUNDS, OutOfBounds) \ - CATCH_IE_EXCEPTION_TO_STATUS(UNEXPECTED, Unexpected) \ - CATCH_IE_EXCEPTION_TO_STATUS(REQUEST_BUSY, RequestBusy) \ - CATCH_IE_EXCEPTION_TO_STATUS(RESULT_NOT_READY, ResultNotReady) \ - CATCH_IE_EXCEPTION_TO_STATUS(NOT_ALLOCATED, NotAllocated) \ - CATCH_IE_EXCEPTION_TO_STATUS(INFER_NOT_STARTED, InferNotStarted) \ - 
CATCH_IE_EXCEPTION_TO_STATUS(NETWORK_NOT_READ, NetworkNotRead) \ - CATCH_IE_EXCEPTION_TO_STATUS(INFER_CANCELLED, InferCancelled) - -/** - * @def TO_STATUS(x) - * @brief Converts C++ exceptioned function call into a c-style one - * @ingroup ie_dev_api_error_debug - */ -#define TO_STATUS(x) \ - try { \ - x; \ - return OK; \ - } \ - CATCH_IE_EXCEPTIONS_TO_STATUS catch (const std::exception& ex) { \ - return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what(); \ - } \ - catch (...) { \ - return InferenceEngine::DescriptionBuffer(UNEXPECTED); \ - } - -#define CALL_STATUS_FNC(function, ...) \ - if (!actual) { \ - IE_THROW() << "Wrapper used was not initialized."; \ - } \ - ResponseDesc resp; \ - auto res = actual->function(__VA_ARGS__, &resp); \ - if (res != OK) \ - IE_EXCEPTION_SWITCH( \ - res, \ - ExceptionType, \ - (InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM}) <<= std::stringstream{} << resp.msg) - -#define CALL_STATUS_FNC_NO_ARGS(function) \ - if (!actual) \ - IE_THROW() << "Wrapper used in the CALL_STATUS_FNC_NO_ARGS was not initialized."; \ - ResponseDesc resp; \ - auto res = actual->function(&resp); \ - if (res != OK) \ - IE_EXCEPTION_SWITCH( \ - res, \ - ExceptionType, \ - (InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM}) <<= std::stringstream{}) - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp/ie_cnn_network.cpp b/src/inference/src/cpp/ie_cnn_network.cpp deleted file mode 100644 index dc5977499ddf04..00000000000000 --- a/src/inference/src/cpp/ie_cnn_network.cpp +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpp/ie_cnn_network.h" - -#include "cnn_network_ngraph_impl.hpp" -#include "exception2status.hpp" -#include "itt.hpp" - -namespace InferenceEngine { - -CNNNetwork::CNNNetwork() : network(), actual() {} - -IE_SUPPRESS_DEPRECATED_START - -CNNNetwork::CNNNetwork(std::shared_ptr network) : network(network) { - actual = network.get(); - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; -} - -CNNNetwork::CNNNetwork(const std::shared_ptr& graph) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CNNNetwork::CNNNetwork"); - - if (graph == nullptr) { - IE_THROW() << "CNNNetwork was not initialized: 'graph' object is empty"; - } - - // Create CNNNetworkNGraphImpl - network = std::make_shared(graph); - actual = network.get(); - if (actual == nullptr) { - IE_THROW() << "CNNNetwork was not initialized."; - } -} - -OutputsDataMap CNNNetwork::getOutputsInfo() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - OutputsDataMap outputs; - actual->getOutputsInfo(outputs); - return outputs; -} - -InputsDataMap CNNNetwork::getInputsInfo() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - InputsDataMap inputs; - actual->getInputsInfo(inputs); - return inputs; -} - -size_t CNNNetwork::layerCount() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return actual->layerCount(); -} - -const std::string& CNNNetwork::getName() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return actual->getName(); -} - -void CNNNetwork::setBatchSize(const size_t size) { - CALL_STATUS_FNC(setBatchSize, size); -} - -size_t CNNNetwork::getBatchSize() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return actual->getBatchSize(); -} - -CNNNetwork::operator ICNNNetwork::Ptr() { - return network; -} 
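[Editor's note, not part of the patch] The hunks above delete the legacy InferenceEngine::CNNNetwork wrapper (its reshape, serialize, and getFunction entry points). For readers tracking the migration, the sketch below shows roughly equivalent calls on the OpenVINO 2.0 API. It is a minimal illustration, not code from this patch: the file names "model.xml", "out.xml", "out.bin" and the input name "input" are hypothetical placeholders, and the reshape-by-input-name overload of ov::Model is assumed from the public API rather than taken from these hunks. Serialization reuses ov::pass::Serialize, which the patch itself continues to use.

#include <map>
#include "openvino/pass/manager.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    // Replaces wrapping an ov::Model into a CNNNetwork: read the model directly.
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");

    // Replaces CNNNetwork::reshape(InputShapes): reshape by input name
    // (assumed overload taking std::map<std::string, ov::PartialShape>).
    std::map<std::string, ov::PartialShape> shapes{{"input", ov::PartialShape{1, 3, 224, 224}}};
    model->reshape(shapes);

    // Replaces CNNNetwork::serialize(xmlPath, binPath): run the Serialize pass,
    // the same pass the remaining serialize() implementations register.
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>("out.xml", "out.bin");
    manager.run_passes(model);
    return 0;
}

[End of editor's note; the deleted ie_cnn_network.cpp listing continues below.]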
- -CNNNetwork::operator ICNNNetwork&() { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return *actual; -} - -CNNNetwork::operator const ICNNNetwork&() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return *actual; -} - -std::shared_ptr CNNNetwork::getFunction() { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return actual->getFunction(); -} - -std::shared_ptr CNNNetwork::getFunction() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - return actual->getFunction(); -} - -void CNNNetwork::addOutput(const std::string& layerName, size_t outputIndex) { - CALL_STATUS_FNC(addOutput, layerName, outputIndex); -} - -ICNNNetwork::InputShapes CNNNetwork::getInputShapes() const { - if (actual == nullptr) - IE_THROW() << "CNNNetwork was not initialized."; - ICNNNetwork::InputShapes shapes; - InputsDataMap inputs; - actual->getInputsInfo(inputs); - for (const auto& pair : inputs) { - auto info = pair.second; - if (info) { - auto data = info->getInputData(); - if (data) { - shapes[data->getName()] = data->getTensorDesc().getDims(); - } - } - } - return shapes; -} - -void CNNNetwork::reshape(const ICNNNetwork::InputShapes& inputShapes) { - CALL_STATUS_FNC(reshape, inputShapes); -} - -void CNNNetwork::serialize(const std::string& xmlPath, const std::string& binPath) const { - CALL_STATUS_FNC(serialize, xmlPath, binPath); -} - -void CNNNetwork::serialize(std::ostream& xmlBuf, std::ostream& binBuf) const { - CALL_STATUS_FNC(serialize, xmlBuf, binBuf); -} - -void CNNNetwork::serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob) const { - CALL_STATUS_FNC(serialize, xmlBuf, binBlob); -} - -std::string CNNNetwork::getOVNameForTensor(const std::string& orig_name) const { - std::string ov_name; - CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name); - return ov_name; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp/ie_executable_network.cpp b/src/inference/src/cpp/ie_executable_network.cpp deleted file mode 100644 index 1b5ecdf5d7a2a1..00000000000000 --- a/src/inference/src/cpp/ie_executable_network.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpp/ie_executable_network.hpp" - -#include "any_copy.hpp" -#include "cpp/exception2status.hpp" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "ie_common.h" -#include "ie_executable_network_base.hpp" -#include "openvino/core/except.hpp" -#include "openvino/runtime/compiled_model.hpp" - -namespace InferenceEngine { - -#define EXEC_NET_CALL_STATEMENT(...) \ - if (_impl == nullptr) \ - IE_THROW(NotAllocated) << "ExecutableNetwork was not initialized."; \ - try { \ - __VA_ARGS__; \ - } catch (...) 
{ \ - InferenceEngine::details::Rethrow(); \ - } - -ExecutableNetwork::~ExecutableNetwork() { - _impl = {}; -} - -ExecutableNetwork::ExecutableNetwork(const IExecutableNetworkInternal::Ptr& impl, const std::shared_ptr& so) - : _impl(impl), - _so(so) { - IE_ASSERT(_impl != nullptr); -} - -IE_SUPPRESS_DEPRECATED_START - -ConstOutputsDataMap ExecutableNetwork::GetOutputsInfo() const { - EXEC_NET_CALL_STATEMENT(return _impl->GetOutputsInfo()); -} - -ConstInputsDataMap ExecutableNetwork::GetInputsInfo() const { - EXEC_NET_CALL_STATEMENT(return _impl->GetInputsInfo()); -} - -void ExecutableNetwork::reset(IExecutableNetwork::Ptr newActual) { - if (_impl == nullptr) - IE_THROW() << "ExecutableNetwork was not initialized."; - if (newActual == nullptr) - IE_THROW() << "ExecutableNetwork wrapper used for reset was not initialized."; - auto newBase = std::dynamic_pointer_cast(newActual); - IE_ASSERT(newBase != nullptr); - auto newImpl = newBase->GetImpl(); - IE_ASSERT(newImpl != nullptr); - _impl = newImpl; -} - -ExecutableNetwork::operator IExecutableNetwork::Ptr() { - return std::make_shared(_impl); -} - -InferRequest ExecutableNetwork::CreateInferRequest() { - EXEC_NET_CALL_STATEMENT(return {_impl->CreateInferRequest(), _so}); -} - -InferRequest::Ptr ExecutableNetwork::CreateInferRequestPtr() { - return std::make_shared(CreateInferRequest()); -} - -void ExecutableNetwork::Export(const std::string& modelFileName) { - EXEC_NET_CALL_STATEMENT(_impl->Export(modelFileName)); -} - -void ExecutableNetwork::Export(std::ostream& networkModel) { - EXEC_NET_CALL_STATEMENT(_impl->Export(networkModel)); -} - -CNNNetwork ExecutableNetwork::GetExecGraphInfo() { - EXEC_NET_CALL_STATEMENT(return CNNNetwork{_impl->GetExecGraphInfo()}); -} - -void ExecutableNetwork::SetConfig(const ov::AnyMap& config) { - EXEC_NET_CALL_STATEMENT(_impl->SetConfig(config)); -} - -ov::Any ExecutableNetwork::GetConfig(const std::string& name) const { - EXEC_NET_CALL_STATEMENT(return {_impl->GetConfig(name), {_so}}); -} - -ov::Any ExecutableNetwork::GetMetric(const std::string& name) const { - EXEC_NET_CALL_STATEMENT(return {_impl->GetMetric(name), {_so}}); -} - -bool ExecutableNetwork::operator!() const noexcept { - return !_impl; -} - -ExecutableNetwork::operator bool() const noexcept { - return !!_impl; -} -} // namespace InferenceEngine diff --git a/src/inference/src/cpp/ie_executable_network_base.hpp b/src/inference/src/cpp/ie_executable_network_base.hpp deleted file mode 100644 index d62e8fc3ee2eb9..00000000000000 --- a/src/inference/src/cpp/ie_executable_network_base.hpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * \brief inference engine executanle network API wrapper, to be used by particular implementors - * \file ie_executable_network_base.hpp - */ - -#pragma once - -#include -#include -#include -#include - -#include "cpp/exception2status.hpp" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" -#include "ie_iexecutable_network.hpp" -#include "ie_infer_async_request_base.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START -/** - * @brief Executable network `noexcept` wrapper which accepts IExecutableNetworkInternal derived instance which can - * throw exceptions - * @ingroup ie_dev_api_exec_network_api - */ -class ExecutableNetworkBase : public IExecutableNetwork { -protected: - std::shared_ptr _impl; - -public: - /** - * @brief 
Constructor with actual underlying implementation. - * @param impl Underlying implementation of type IExecutableNetworkInternal - */ - explicit ExecutableNetworkBase(std::shared_ptr impl) { - if (impl.get() == nullptr) { - IE_THROW() << "implementation not defined"; - } - _impl = impl; - } - - StatusCode GetOutputsInfo(ConstOutputsDataMap& outs, ResponseDesc* resp) const noexcept override { - TO_STATUS(outs = _impl->GetOutputsInfo()); - } - - StatusCode GetInputsInfo(ConstInputsDataMap& inputs, ResponseDesc* resp) const noexcept override { - TO_STATUS(inputs = _impl->GetInputsInfo()); - } - - StatusCode CreateInferRequest(IInferRequest::Ptr& req, ResponseDesc* resp) noexcept override { - TO_STATUS(req = std::make_shared(_impl->CreateInferRequest())); - } - - StatusCode Export(const std::string& modelFileName, ResponseDesc* resp) noexcept override { - TO_STATUS(_impl->Export(modelFileName)); - } - - StatusCode Export(std::ostream& networkModel, ResponseDesc* resp) noexcept override { - TO_STATUS(_impl->Export(networkModel)); - } - - StatusCode GetExecGraphInfo(ICNNNetwork::Ptr& graphPtr, ResponseDesc* resp) noexcept override { - TO_STATUS(graphPtr = CNNNetwork{_impl->GetExecGraphInfo()}); - } - - StatusCode SetConfig(const ov::AnyMap& config, ResponseDesc* resp) noexcept override { - TO_STATUS(_impl->SetConfig(config)); - } - - StatusCode GetConfig(const std::string& name, ov::Any& result, ResponseDesc* resp) const noexcept override { - TO_STATUS(result = _impl->GetConfig(name)); - } - - StatusCode GetMetric(const std::string& name, ov::Any& result, ResponseDesc* resp) const noexcept override { - TO_STATUS(result = _impl->GetMetric(name)); - } - - std::shared_ptr GetImpl() const { - return _impl; - } -}; -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp/ie_infer_async_request_base.hpp b/src/inference/src/cpp/ie_infer_async_request_base.hpp deleted file mode 100644 index 79415ba6cb34b5..00000000000000 --- a/src/inference/src/cpp/ie_infer_async_request_base.hpp +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "cpp/exception2status.hpp" -#include "cpp_interfaces/plugin_itt.hpp" -#include -#include "ie_iinfer_request.hpp" - -namespace InferenceEngine { - -#define CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(StatusCode, ExceptionType) \ - catch (const InferenceEngine::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode) << ex.what(); \ - } - -#define CATCH_OV_EXCEPTION_TO_STATUS_NO_RESP(StatusCode, ExceptionType) \ - catch (const ov::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode) << ex.what(); \ - } - -#define CATCH_IE_EXCEPTIONS_TO_STATUS_NO_RESP \ - CATCH_OV_EXCEPTION_TO_STATUS_NO_RESP(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_OV_EXCEPTION_TO_STATUS_NO_RESP(GENERAL_ERROR, Exception) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(GENERAL_ERROR, GeneralError) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(NETWORK_NOT_LOADED, NetworkNotLoaded) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(PARAMETER_MISMATCH, ParameterMismatch) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(NOT_FOUND, NotFound) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(OUT_OF_BOUNDS, OutOfBounds) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(UNEXPECTED, Unexpected) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(REQUEST_BUSY, RequestBusy) \ - 
CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(RESULT_NOT_READY, ResultNotReady) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(NOT_ALLOCATED, NotAllocated) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(INFER_NOT_STARTED, InferNotStarted) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(NETWORK_NOT_READ, NetworkNotRead) \ - CATCH_IE_EXCEPTION_TO_STATUS_NO_RESP(INFER_CANCELLED, InferCancelled) - -/** - * @def TO_STATUS_NO_RESP(x) - * @brief Converts C++ exceptioned function call into a status code. Does not work with a ResponseDesc object - * @ingroup ie_dev_api_error_debug - */ -#define TO_STATUS_NO_RESP(x) \ - try { \ - x; \ - return OK; \ - } CATCH_IE_EXCEPTIONS_TO_STATUS_NO_RESP catch (const std::exception& ex) { \ - return InferenceEngine::DescriptionBuffer(GENERAL_ERROR) << ex.what(); \ - } catch (...) { \ - return InferenceEngine::DescriptionBuffer(UNEXPECTED); \ - } - -#define CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(StatusCode, ExceptionType) \ -catch (const InferenceEngine::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode, resp) << ex.what(); \ -} - -#define CATCH_OV_EXCEPTION_CALL_RETURN_STATUS(StatusCode, ExceptionType) \ -catch (const ov::ExceptionType& ex) { \ - return InferenceEngine::DescriptionBuffer(StatusCode, resp) << ex.what(); \ -} - -#define CATCH_IE_EXCEPTIONS_CALL_RETURN_STATUS \ - CATCH_OV_EXCEPTION_CALL_RETURN_STATUS(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_OV_EXCEPTION_CALL_RETURN_STATUS(GENERAL_ERROR, Exception) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(GENERAL_ERROR, GeneralError) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(NETWORK_NOT_LOADED, NetworkNotLoaded) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(PARAMETER_MISMATCH, ParameterMismatch) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(NOT_FOUND, NotFound) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(OUT_OF_BOUNDS, OutOfBounds) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(UNEXPECTED, Unexpected) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(REQUEST_BUSY, RequestBusy) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(RESULT_NOT_READY, ResultNotReady) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(NOT_ALLOCATED, NotAllocated) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(INFER_NOT_STARTED, InferNotStarted) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(NETWORK_NOT_READ, NetworkNotRead) \ - CATCH_IE_EXCEPTION_CALL_RETURN_STATUS(INFER_CANCELLED, InferCancelled) - -/** - * @def NO_EXCEPT_CALL_RETURN_STATUS(x) - * @brief Returns a status code of a called function, handles exeptions and converts to a status code. - * @ingroup ie_dev_api_error_debug - */ -#define NO_EXCEPT_CALL_RETURN_STATUS(x) \ - try { \ - return x; \ - } CATCH_IE_EXCEPTIONS_CALL_RETURN_STATUS catch (const std::exception& ex) { \ - return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what(); \ - } catch (...) { \ - return InferenceEngine::DescriptionBuffer(UNEXPECTED); \ - } - -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief Inference request `noexcept` wrapper which accepts IInferRequestInternal derived instance which can throw exceptions - * @ingroup ie_dev_api_infer_request_api - */ -class InferRequestBase : public IInferRequest { - std::shared_ptr _impl; - -public: - /** - * @brief Constructor with actual underlying implementation. 
- * @param impl Underlying implementation of type IInferRequestInternal - */ - explicit InferRequestBase(std::shared_ptr impl): _impl(impl) {} - - StatusCode Infer(ResponseDesc* resp) noexcept override { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "Infer"); - TO_STATUS(_impl->Infer()); - } - - StatusCode Cancel(ResponseDesc* resp) noexcept override { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "Cancel"); - TO_STATUS(_impl->Cancel()); - } - - StatusCode GetPerformanceCounts(std::map& perfMap, - ResponseDesc* resp) const noexcept override { - TO_STATUS(perfMap = _impl->GetPerformanceCounts()); - } - - StatusCode SetBlob(const char* name, const Blob::Ptr& data, ResponseDesc* resp) noexcept override { - TO_STATUS(_impl->SetBlob(name, data)); - } - - StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept override { - TO_STATUS(data = _impl->GetBlob(name)); - } - - StatusCode StartAsync(ResponseDesc* resp) noexcept override { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "StartAsync"); - TO_STATUS(_impl->StartAsync()); - } - - StatusCode Wait(int64_t millis_timeout, ResponseDesc* resp) noexcept override { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "Wait"); - NO_EXCEPT_CALL_RETURN_STATUS(_impl->Wait(millis_timeout)); - } - - StatusCode SetCompletionCallback(CompletionCallback callback) noexcept override { - auto weakImpl = std::shared_ptr(_impl.get(), [](IInferRequestInternal*){}); - TO_STATUS_NO_RESP(_impl->SetCallback([callback, weakImpl] (std::exception_ptr exceptionPtr) { - StatusCode statusCode = [&] ()-> StatusCode { - if (exceptionPtr) { - TO_STATUS_NO_RESP(std::rethrow_exception(exceptionPtr)); - } else { - return OK; - } - } (); - callback(std::make_shared(weakImpl), statusCode); - })); - } - - StatusCode GetUserData(void** data, ResponseDesc* resp) noexcept override { - if (data != nullptr) { - TO_STATUS(*data = _impl->GetUserData()); - } else { - return GENERAL_ERROR; - } - } - - StatusCode SetUserData(void* data, ResponseDesc* resp) noexcept override { - TO_STATUS(_impl->SetUserData(data)); - } -}; - -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp/ie_infer_request.cpp b/src/inference/src/cpp/ie_infer_request.cpp deleted file mode 100644 index 4b384b3df69e20..00000000000000 --- a/src/inference/src/cpp/ie_infer_request.cpp +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpp/ie_infer_request.hpp" - -#include -#include -#include - -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" -#include "dev/converter_utils.hpp" -#include "ie_infer_async_request_base.hpp" -#include "ie_ngraph_utils.hpp" -#include "openvino/runtime/compiled_model.hpp" -#include "openvino/runtime/exception.hpp" -#include "openvino/runtime/infer_request.hpp" -#include "transformations/utils/utils.hpp" - -namespace InferenceEngine { - -#define INFER_REQ_CALL_STATEMENT(...) \ - if (_impl == nullptr) \ - IE_THROW(NotAllocated) << "Inference Request is not initialized"; \ - try { \ - __VA_ARGS__ \ - } catch (...) 
{ \ - ::InferenceEngine::details::Rethrow(); \ - } - -InferRequest::~InferRequest() { - _impl = {}; -} - -InferRequest::InferRequest(const IInferRequestInternal::Ptr& impl, const std::shared_ptr& so) - : _impl(impl), - _so(so) { - IE_ASSERT(_impl != nullptr); -} - -IE_SUPPRESS_DEPRECATED_START - -void InferRequest::SetBlob(const std::string& name, const Blob::Ptr& data) { - INFER_REQ_CALL_STATEMENT(_impl->SetBlob(name, data);) -} - -Blob::Ptr InferRequest::GetBlob(const std::string& name) { - Blob::Ptr blobPtr; - INFER_REQ_CALL_STATEMENT(blobPtr = _impl->GetBlob(name);) - std::string error = "Internal error: blob with name `" + name + "` is not allocated!"; - if (blobPtr == nullptr) - IE_THROW() << error; - if (blobPtr->buffer() == nullptr) - IE_THROW() << error; - return blobPtr; -} - -void InferRequest::Infer() { - INFER_REQ_CALL_STATEMENT(_impl->Infer();) -} - -void InferRequest::Cancel() { - INFER_REQ_CALL_STATEMENT(_impl->Cancel();) -} - -std::map InferRequest::GetPerformanceCounts() const { - INFER_REQ_CALL_STATEMENT(return _impl->GetPerformanceCounts();) -} - -void InferRequest::SetInput(const BlobMap& inputs) { - INFER_REQ_CALL_STATEMENT(for (auto&& input : inputs) { _impl->SetBlob(input.first, input.second); }) -} - -void InferRequest::SetOutput(const BlobMap& results) { - INFER_REQ_CALL_STATEMENT(for (auto&& result : results) { _impl->SetBlob(result.first, result.second); }) -} - -void InferRequest::StartAsync() { - INFER_REQ_CALL_STATEMENT(_impl->StartAsync();) -} - -StatusCode InferRequest::Wait(int64_t millis_timeout) { - INFER_REQ_CALL_STATEMENT(return _impl->Wait(millis_timeout);) -} - -void InferRequest::SetCompletionCallbackImpl(std::function callbackToSet) { - INFER_REQ_CALL_STATEMENT(_impl->SetCallback([callbackToSet](std::exception_ptr) { - callbackToSet(); - });) -} - -#define CATCH_IE_EXCEPTION_RETURN(StatusCode, ExceptionType) \ - catch (const ::InferenceEngine::ExceptionType&) { \ - return StatusCode; \ - } - -#define CATCH_OV_EXCEPTION_RETURN(StatusCode, ExceptionType) \ - catch (const ::ov::ExceptionType&) { \ - return StatusCode; \ - } - -#define CATCH_IE_EXCEPTIONS_RETURN \ - CATCH_OV_EXCEPTION_RETURN(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_OV_EXCEPTION_RETURN(GENERAL_ERROR, Exception) \ - CATCH_IE_EXCEPTION_RETURN(GENERAL_ERROR, GeneralError) \ - CATCH_IE_EXCEPTION_RETURN(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_IE_EXCEPTION_RETURN(NETWORK_NOT_LOADED, NetworkNotLoaded) \ - CATCH_IE_EXCEPTION_RETURN(PARAMETER_MISMATCH, ParameterMismatch) \ - CATCH_IE_EXCEPTION_RETURN(NOT_FOUND, NotFound) \ - CATCH_IE_EXCEPTION_RETURN(OUT_OF_BOUNDS, OutOfBounds) \ - CATCH_IE_EXCEPTION_RETURN(UNEXPECTED, Unexpected) \ - CATCH_IE_EXCEPTION_RETURN(REQUEST_BUSY, RequestBusy) \ - CATCH_IE_EXCEPTION_RETURN(RESULT_NOT_READY, ResultNotReady) \ - CATCH_IE_EXCEPTION_RETURN(NOT_ALLOCATED, NotAllocated) \ - CATCH_IE_EXCEPTION_RETURN(INFER_NOT_STARTED, InferNotStarted) \ - CATCH_IE_EXCEPTION_RETURN(NETWORK_NOT_READ, NetworkNotRead) \ - CATCH_IE_EXCEPTION_RETURN(INFER_CANCELLED, InferCancelled) - -void InferRequest::SetCompletionCallbackImpl(std::function callbackToSet) { - INFER_REQ_CALL_STATEMENT( - auto weakThis = - InferRequest{std::shared_ptr{_impl.get(), [](IInferRequestInternal*) {}}, _so}; - _impl->SetCallback([callbackToSet, weakThis](std::exception_ptr exceptionPtr) { - StatusCode statusCode = StatusCode::OK; - if (exceptionPtr != nullptr) { - statusCode = [&] { - try { - std::rethrow_exception(exceptionPtr); - } - CATCH_IE_EXCEPTIONS_RETURN catch (const std::exception&) { - 
return GENERAL_ERROR; - } - catch (...) { - return UNEXPECTED; - } - }(); - } - callbackToSet(weakThis, statusCode); - });) -} - -void InferRequest::SetCompletionCallbackImpl(IInferRequest::CompletionCallback callbackToSet) { - INFER_REQ_CALL_STATEMENT( - IInferRequest::Ptr weakThis = - InferRequest{std::shared_ptr{_impl.get(), [](IInferRequestInternal*) {}}, _so}; - _impl->SetCallback([callbackToSet, weakThis](std::exception_ptr exceptionPtr) { - StatusCode statusCode = StatusCode::OK; - if (exceptionPtr != nullptr) { - statusCode = [&] { - try { - std::rethrow_exception(exceptionPtr); - } - CATCH_IE_EXCEPTIONS_RETURN catch (const std::exception&) { - return GENERAL_ERROR; - } - catch (...) { - return UNEXPECTED; - } - }(); - } - callbackToSet(weakThis, statusCode); - });) -} - -InferRequest::operator IInferRequest::Ptr() { - INFER_REQ_CALL_STATEMENT(return std::make_shared(_impl);) -} - -std::vector InferRequest::QueryState() { - std::vector controller; - INFER_REQ_CALL_STATEMENT(for (auto&& state - : _impl->QueryState()) { - controller.emplace_back(VariableState{state, _so}); - }) - return controller; -} - -bool InferRequest::operator!() const noexcept { - return !_impl; -} - -InferRequest::operator bool() const noexcept { - return (!!_impl); -} - -bool InferRequest::operator!=(const InferRequest& r) const noexcept { - return !(r == *this); -} - -bool InferRequest::operator==(const InferRequest& r) const noexcept { - return r._impl == _impl; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/cpp/infer_request.cpp similarity index 92% rename from src/inference/src/infer_request.cpp rename to src/inference/src/cpp/infer_request.cpp index 19954729400f68..44fe17b2bd7fec 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/cpp/infer_request.cpp @@ -8,7 +8,6 @@ #include #include -#include "ie_common.h" #include "openvino/core/except.hpp" #include "openvino/core/node.hpp" #include "openvino/runtime/compiled_model.hpp" @@ -18,18 +17,10 @@ #include "openvino/runtime/so_ptr.hpp" #include "transformations/utils/utils.hpp" -#ifdef __GNUC__ -// on RHEL 8.2 deprecation inside the macro does not work -OPENVINO_SUPPRESS_DEPRECATED_START -#endif - #define OV_INFER_REQ_CALL_STATEMENT(...) \ OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); \ - OPENVINO_SUPPRESS_DEPRECATED_START \ try { \ __VA_ARGS__; \ - } catch (const ::InferenceEngine::RequestBusy& ex) { \ - ov::Busy::create(ex.what()); \ } catch (const ov::Busy&) { \ throw; \ } catch (const ov::Cancelled&) { \ @@ -38,8 +29,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_THROW(ex.what()); \ } catch (...) { \ OPENVINO_THROW("Unexpected exception"); \ - } \ - OPENVINO_SUPPRESS_DEPRECATED_END + } namespace { @@ -247,34 +237,26 @@ void InferRequest::start_async() { void InferRequest::wait() { OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); - OPENVINO_SUPPRESS_DEPRECATED_START try { _impl->wait(); } catch (const ov::Cancelled&) { throw; - } catch (const InferenceEngine::InferCancelled& e) { - Cancelled::create(e.what()); } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); } catch (...) 
{ OPENVINO_THROW("Unexpected exception"); } - OPENVINO_SUPPRESS_DEPRECATED_END } bool InferRequest::wait_for(const std::chrono::milliseconds timeout) { OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); - OPENVINO_SUPPRESS_DEPRECATED_START try { return _impl->wait_for(timeout); - } catch (const InferenceEngine::InferCancelled& e) { - Cancelled::create(e.what()); } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); } catch (...) { OPENVINO_THROW("Unexpected exception"); } - OPENVINO_SUPPRESS_DEPRECATED_END } void InferRequest::set_callback(std::function callback) { diff --git a/src/inference/src/remote_context.cpp b/src/inference/src/cpp/remote_context.cpp similarity index 99% rename from src/inference/src/remote_context.cpp rename to src/inference/src/cpp/remote_context.cpp index e2a2bc61a0c731..10b3736a7e5cb9 100644 --- a/src/inference/src/remote_context.cpp +++ b/src/inference/src/cpp/remote_context.cpp @@ -6,7 +6,6 @@ #include -#include "any_copy.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/itensor.hpp" diff --git a/src/inference/src/remote_tensor.cpp b/src/inference/src/cpp/remote_tensor.cpp similarity index 100% rename from src/inference/src/remote_tensor.cpp rename to src/inference/src/cpp/remote_tensor.cpp diff --git a/src/inference/src/cpp/ie_variable_state.cpp b/src/inference/src/cpp/variable_state.cpp similarity index 54% rename from src/inference/src/cpp/ie_variable_state.cpp rename to src/inference/src/cpp/variable_state.cpp index e89b7e3413a8df..e38c527367417e 100644 --- a/src/inference/src/cpp/ie_variable_state.cpp +++ b/src/inference/src/cpp/variable_state.cpp @@ -2,22 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "cpp/ie_memory_state.hpp" -#include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" +#include "openvino/runtime/variable_state.hpp" + #include "openvino/core/except.hpp" #include "openvino/runtime/ivariable_state.hpp" #include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/variable_state.hpp" - -IE_SUPPRESS_DEPRECATED_START -#define VARIABLE_CALL_STATEMENT(...) \ - if (_impl == nullptr) \ - IE_THROW(NotAllocated) << "VariableState was not initialized."; \ - try { \ - __VA_ARGS__; \ - } catch (...) { \ - ::InferenceEngine::details::Rethrow(); \ - } #define OV_VARIABLE_CALL_STATEMENT(...) 
\ OPENVINO_ASSERT(_impl != nullptr, "VariableState was not initialized."); \ @@ -29,38 +18,6 @@ IE_SUPPRESS_DEPRECATED_START OPENVINO_THROW("Unexpected exception"); \ } -namespace InferenceEngine { - -VariableState::~VariableState() { - _impl = {}; -} - -VariableState::VariableState(const IVariableStateInternal::Ptr& impl, const std::shared_ptr& so) - : _impl(impl), - _so(so) { - if (_impl == nullptr) - IE_THROW() << "VariableState was not initialized."; -} - -void VariableState::Reset() { - VARIABLE_CALL_STATEMENT(_impl->Reset()); -} - -std::string VariableState::GetName() const { - VARIABLE_CALL_STATEMENT(return _impl->GetName()); -} - -Blob::CPtr VariableState::GetState() const { - VARIABLE_CALL_STATEMENT(return _impl->GetState()); -} - -void VariableState::SetState(Blob::Ptr state) { - VARIABLE_CALL_STATEMENT(_impl->SetState(state)); -} - -} // namespace InferenceEngine - -IE_SUPPRESS_DEPRECATED_END namespace ov { VariableState::~VariableState() { diff --git a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp deleted file mode 100644 index 33b85be225558a..00000000000000 --- a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" - -#include -#include -#include -#include -#include - -#include "cpp/ie_cnn_network.h" -#include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "ie_icore.hpp" -#include "ie_ngraph_utils.hpp" -#include "openvino/core/node.hpp" - -namespace InferenceEngine { - -void IExecutableNetworkInternal::setNetworkInputs(const InputsDataMap& networkInputs) { - _networkInputs = networkInputs; -} - -void IExecutableNetworkInternal::setNetworkOutputs(const OutputsDataMap& networkOutputs) { - _networkOutputs = networkOutputs; -} - -void IExecutableNetworkInternal::setInputs(const std::vector>& params) { - _parameters = params; -} -const std::vector>& IExecutableNetworkInternal::getInputs() const { - return _parameters; -} -void IExecutableNetworkInternal::setOutputs(const std::vector>& results) { - _results = results; -} -const std::vector>& IExecutableNetworkInternal::getOutputs() const { - return _results; -} - -ConstOutputsDataMap IExecutableNetworkInternal::GetOutputsInfo() const { - ConstOutputsDataMap outputMap; - for (const auto& output : _networkOutputs) { - outputMap.emplace(output.first, output.second); - } - return outputMap; -} - -ConstInputsDataMap IExecutableNetworkInternal::GetInputsInfo() const { - ConstInputsDataMap inputMap; - for (const auto& input : _networkInputs) { - inputMap.emplace(input.first, input.second); - } - return inputMap; -} - -std::shared_ptr IExecutableNetworkInternal::CreateInferRequest() { - std::shared_ptr asyncRequestImpl; - try { - asyncRequestImpl = CreateInferRequestImpl(_parameters, _results); - } catch (const InferenceEngine::NotImplemented&) { - } catch (const ov::NotImplemented&) { - } - if (!asyncRequestImpl) - asyncRequestImpl = CreateInferRequestImpl(_networkInputs, _networkOutputs); - asyncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this()); - return asyncRequestImpl; -} - -void IExecutableNetworkInternal::Export(const std::string& modelFileName) { - std::ofstream modelFile(modelFileName, std::ios::out 
| std::ios::binary); - - if (modelFile.is_open()) { - Export(modelFile); - } else { - IE_THROW() << "The " << modelFileName << " file can not be opened for Export"; - } -} - -void IExecutableNetworkInternal::Export(std::ostream& networkModel) { - IE_THROW(NotImplemented); -} - -std::shared_ptr IExecutableNetworkInternal::GetExecGraphInfo() { - IE_THROW(NotImplemented); -} - -void IExecutableNetworkInternal::SetPointerToPlugin(const std::shared_ptr& plugin) { - _plugin = plugin; -} - -std::shared_ptr IExecutableNetworkInternal::GetPointerToSo() { - return _so; -} - -void IExecutableNetworkInternal::SetConfig(const ov::AnyMap&) { - IE_THROW(NotImplemented); -} - -ov::Any IExecutableNetworkInternal::GetConfig(const std::string&) const { - IE_THROW(NotImplemented); -} - -ov::Any IExecutableNetworkInternal::GetMetric(const std::string&) const { - IE_THROW(NotImplemented); -} - -std::shared_ptr IExecutableNetworkInternal::CreateInferRequestImpl( - InputsDataMap networkInputs, - OutputsDataMap networkOutputs) { - IE_THROW(NotImplemented); -} - -void IExecutableNetworkInternal::loadedFromCache() { - _loadedFromCache = true; -} - -bool IExecutableNetworkInternal::isLoadedFromCache() const { - return _loadedFromCache; -} - -std::shared_ptr IExecutableNetworkInternal::CreateInferRequestImpl( - const std::vector>& inputs, - const std::vector>& outputs) { - IE_THROW(NotImplemented); -} - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp deleted file mode 100644 index e388fc868fbcfe..00000000000000 --- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" - -#include -#include -#include - -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "cpp_interfaces/plugin_itt.hpp" -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_ngraph_utils.hpp" -#include "openvino/core/partial_shape.hpp" -#include "openvino/util/common_util.hpp" -#include "transformations/utils/utils.hpp" - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START - -IInferRequestInternal::~IInferRequestInternal() {} - -IInferRequestInternal::IInferRequestInternal(const InputsDataMap& networkInputs, const OutputsDataMap& networkOutputs) - : // We should copy maps since they can be overriden in SetBlob with preprocess - _networkInputs{copyInfo(networkInputs)}, - _networkOutputs{copyInfo(networkOutputs)} {} - -IInferRequestInternal::IInferRequestInternal(const std::vector>& inputs, - const std::vector>& outputs) - : _parameters(inputs), - _results(outputs) { - const auto& create_old_data = [](const ov::Output& output) -> InferenceEngine::DataPtr { - auto name = ov::op::util::get_ie_output_name(output); - auto shape = output.get_partial_shape(); - auto rank = shape.rank().is_static() ? shape.rank().get_length() : -1; - SizeVector dims(1, 0); - if (shape.is_static()) { - dims = output.get_shape(); - } else if (rank >= 0) { - dims = SizeVector(rank, 0); - } - for (const auto& dim : shape) { - if (dim.is_static() && dim.get_length() == 0) - IE_THROW() << name << " has zero dimension which is not allowed"; - } - const Layout rankLayout = rank < 0 ? 
Layout::BLOCKED : TensorDesc::getLayoutByRank(rank); - const auto precision = InferenceEngine::details::convertPrecision(output.get_element_type()); - return std::make_shared(name, TensorDesc{precision, dims, rankLayout}); - }; - const auto& create_old_input_data = - [create_old_data](const ov::Output& output) -> InferenceEngine::InputInfo::Ptr { - auto info = std::make_shared(); - info->setInputData(create_old_data(output)); - return info; - }; - - for (const auto& param : _parameters) { - const auto& input = create_old_input_data(param->output(0)); - input->setName(param->get_friendly_name()); - _networkInputs[input->name()] = input; - } - - for (const auto& result : _results) { - auto input = result->input_value(0); - const auto& output = create_old_data(ov::Output(input.get_node(), input.get_index())); - _networkOutputs[output->getName()] = output; - } -} - -void IInferRequestInternal::Infer() { - checkBlobs(); - InferImpl(); -} - -void IInferRequestInternal::InferImpl() { - IE_THROW(NotImplemented); -} - -void IInferRequestInternal::Cancel() { - IE_THROW(NotImplemented); -} - -std::map IInferRequestInternal::GetPerformanceCounts() const { - IE_THROW(NotImplemented); -} - -std::shared_ptr IInferRequestInternal::findInputByNodeName(const std::string& name) const { - for (const auto& input : GetInputs()) { - if (input->get_friendly_name() == name) - return input; - } - return nullptr; -} - -std::shared_ptr IInferRequestInternal::findOutputByNodeName(const std::string& name) const { - for (const auto& output : GetOutputs()) { - if (output->input_value(0).get_node()->get_friendly_name() == name) - return output; - } - return nullptr; -} - -void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& userBlob) { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "SetBlob"); - if (name.empty()) { - IE_THROW(NotFound) << "Failed to set blob with empty name"; - } - if (!userBlob) - IE_THROW(NotAllocated) << "Failed to set empty blob with name: \'" << name << "\'"; - InputInfo::Ptr foundInput; - DataPtr foundOutput; - const bool isInput = findInputAndOutputBlobByName(name, foundInput, foundOutput); - const auto input = findInputByNodeName(name); - const auto output = findOutputByNodeName(name); - - if (userBlob->buffer() == nullptr) - IE_THROW(NotAllocated) << "Input data was not allocated. Input name: \'" << name << "\'"; - if (userBlob->size() == 0 && !((input && input->get_output_partial_shape(0).is_dynamic()) || - (output && output->get_output_partial_shape(0).is_dynamic()))) { - IE_THROW() << "Input data is empty. Input name: \'" << name << "\'"; - } - const bool isInputDynamic = input && input->get_output_partial_shape(0).is_dynamic(); - const bool isOutputDynamic = output && output->get_input_partial_shape(0).is_dynamic(); - - size_t dataSize = userBlob->size(); - if (isInput) { - // ilavreno: the condition below is obsolete, but we need an exact list of precisions - // which are supports by G-API preprocessing - if (foundInput->getPrecision() != userBlob->getTensorDesc().getPrecision()) { - IE_THROW(ParameterMismatch) - << "Failed to set Blob with precision not corresponding to user input precision"; - } - - auto& devBlob = _deviceInputs[name]; - - size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? 
ov::util::product(foundInput->getTensorDesc().getDims()) - : 1; - if (!isInputDynamic && dataSize != inputSize) { - IE_THROW() << "Input tensor size is not equal network input size (" << dataSize << "!=" << inputSize - << ")."; - } - _inputs[name] = userBlob; - devBlob = userBlob; - } else { - size_t outputSize = foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? ov::util::product(foundOutput->getTensorDesc().getDims()) - : 1; - if (!isOutputDynamic && dataSize != outputSize) { - IE_THROW() << "Output blob size is not equal network output size (" << dataSize << "!=" << outputSize - << ")."; - } - if (foundOutput->getPrecision() != userBlob->getTensorDesc().getPrecision()) { - IE_THROW(ParameterMismatch) - << "Failed to set Blob with precision not corresponding to user output precision"; - } - // ilavreno: this condition is valid for most plugins except MYRIAD - // it is able to perform layout conversion for output blob dynamically - // if (foundOutput->getLayout() != userBlob->getTensorDesc().getLayout()) { - // IE_THROW(ParameterMismatch) << "Failed to set Blob with layout not corresponding to user output layout"; - // } - _outputs[name] = userBlob; - } -} - -Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { - OV_ITT_SCOPED_TASK(itt::domains::Plugin, "GetBlob"); - Blob::Ptr data; - InputInfo::Ptr foundInput; - DataPtr foundOutput; - const SizeVector oneVector = {1}; - if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { - const auto input = findInputByNodeName(name); - const bool isInputDynamic = input && input->get_output_partial_shape(0).is_dynamic(); - data = _inputs[name]; - const auto& dims = foundInput->getTensorDesc().getDims(); - if (isInputDynamic) - checkBlob(data, name, true); - else - checkBlob(data, name, true, foundInput->getTensorDesc().getLayout() != SCALAR ? dims : oneVector); - } else { - const auto output = findOutputByNodeName(name); - const bool isOutputDynamic = output && output->get_output_partial_shape(0).is_dynamic(); - data = _outputs[name]; - const auto& dims = foundOutput->getTensorDesc().getDims(); - if (isOutputDynamic) - checkBlob(data, name, false); - else - checkBlob(data, name, false, foundOutput->getTensorDesc().getLayout() != SCALAR ? 
dims : oneVector); - } - return data; -} - -std::vector> IInferRequestInternal::QueryState() { - IE_THROW(NotImplemented); -} - -void IInferRequestInternal::StartAsync() { - checkBlobs(); - StartAsyncImpl(); -} - -void IInferRequestInternal::StartAsyncImpl() { - IE_THROW(NotImplemented); -} - -StatusCode IInferRequestInternal::Wait(int64_t millis_timeout) { - IE_THROW(NotImplemented); -} - -void IInferRequestInternal::SetCallback(Callback callback) { - _callback = std::move(callback); -} - -void IInferRequestInternal::execDataPreprocessing(InferenceEngine::BlobMap& preprocessedBlobs, bool serial) {} - -bool IInferRequestInternal::findInputAndOutputBlobByName(const std::string& name, - InputInfo::Ptr& foundInput, - DataPtr& foundOutput) const { - foundInput = nullptr; - foundOutput = nullptr; - if (_networkOutputs.empty()) { - IE_THROW() << "Internal error: network outputs is not set"; - } - auto foundInputPair = std::find_if(std::begin(_networkInputs), - std::end(_networkInputs), - [&](const std::pair& pair) { - return pair.first == name; - }); - auto foundOutputPair = std::find_if(std::begin(_networkOutputs), - std::end(_networkOutputs), - [&](const std::pair& pair) { - return pair.first == name; - }); - bool retVal; - - if (foundInputPair != std::end(_networkInputs)) { - foundInput = foundInputPair->second; - retVal = true; - } else if (foundOutputPair != std::end(_networkOutputs)) { - foundOutput = foundOutputPair->second; - retVal = false; - } else { - IE_THROW(NotFound) << "Failed to find input or output with name: \'" << name << "\'"; - } - return retVal; -} - -void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, - const std::string& name, - bool isInput, - const SizeVector& refDims) const { - std::string bType = isInput ? "Input" : "Output"; - std::string sType = isInput ? "input" : "output"; - std::string strNotAllocated(bType + " data was not allocated."); - std::string strNotMatched("The " + sType + " blob size is not equal to the network " + sType + " size"); - - if (!blob) { - IE_THROW(NotAllocated) << strNotAllocated; - } - size_t refSize; - bool isDynamic = false; - if (refDims.empty()) { - SizeVector dims; - if (isInput) { - auto foundInputPair = std::find_if(std::begin(_networkInputs), - std::end(_networkInputs), - [&](const std::pair& pair) { - return pair.first == name; - }); - if (foundInputPair == std::end(_networkInputs)) { - IE_THROW(NotFound) << "Failed to find input with name: \'" << name << "\'"; - } - const auto input = findInputByNodeName(name); - isDynamic = input && input->get_output_partial_shape(0).is_dynamic(); - dims = foundInputPair->second->getTensorDesc().getDims(); - refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? 
ov::util::product(dims) : 1; - } else { - auto foundOutputPair = std::find_if(std::begin(_networkOutputs), - std::end(_networkOutputs), - [&](const std::pair& pair) { - return pair.first == name; - }); - if (foundOutputPair == std::end(_networkOutputs)) { - IE_THROW(NotFound) << "Failed to find output with name: \'" << name << "\'"; - } - const auto output = findOutputByNodeName(name); - isDynamic = output && output->get_output_partial_shape(0).is_dynamic(); - ov::PartialShape blobPartialShape(blob->getTensorDesc().getDims()); - if (output && output->get_output_partial_shape(0).compatible(blobPartialShape)) { - dims = blob->getTensorDesc().getDims(); - } else { - // TODO: it is strange to request tensor desc from data when the shapes are not compatible, probably we - // need to immediately throw here - dims = foundOutputPair->second->getTensorDesc().getDims(); - } - refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? ov::util::product(dims) : 1; - } - } else { - refSize = ov::util::product(refDims); - } - - if (!isDynamic && refSize != blob->size()) { - IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize; - } - if (blob->buffer() == nullptr) - IE_THROW() << strNotAllocated; -} - -void IInferRequestInternal::checkBlobs() { - for (auto const& input : _inputs) { - checkBlob(input.second, input.first, true); - } - for (auto const& output : _outputs) { - checkBlob(output.second, output.first, false); - } -} - -void IInferRequestInternal::setPointerToExecutableNetworkInternal( - const std::shared_ptr& exeNetwork) { - _exeNetwork = exeNetwork; -} - -std::shared_ptr IInferRequestInternal::getPointerToExecutableNetworkInternal() const { - return _exeNetwork; -} - -void IInferRequestInternal::setPointerToSo(const std::shared_ptr& so) { - _so = so; -} - -std::shared_ptr IInferRequestInternal::getPointerToSo() const { - return _so; -} - -void* IInferRequestInternal::GetUserData() noexcept { - return _userData; -} - -void IInferRequestInternal::SetUserData(void* userData) noexcept { - _userData = userData; -} - -void IInferRequestInternal::setModelInputsOutputs(const std::vector>& inputs, - const std::vector>& outputs) { - _parameters = inputs; - _results = outputs; -} - -const std::vector>& IInferRequestInternal::GetInputs() const { - return _parameters; -} - -const std::vector>& IInferRequestInternal::GetOutputs() const { - return _results; -} -} // namespace InferenceEngine diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp deleted file mode 100644 index 8612b7b64f0a49..00000000000000 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Inference Engine plugin API wrapper, to be used by particular implementors - * @file ie_iplugin_internal.hpp - */ - -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "any_copy.hpp" -#include "blob_factory.hpp" -#include "cnn_network_ngraph_impl.hpp" -#include "cpp/ie_cnn_network.h" -#include "dev/converter_utils.hpp" -#include "ie_api.h" -#include "ie_icore.hpp" -#include "ie_input_info.hpp" -#include "ie_ngraph_utils.hpp" -#include "openvino/core/deprecated.hpp" -#include "openvino/core/except.hpp" -#include "openvino/core/model.hpp" -#include 
"openvino/core/runtime_attribute.hpp" -#include "openvino/op/util/op_types.hpp" -#include "openvino/pass/manager.hpp" -#include "openvino/runtime/exec_model_info.hpp" -#include "openvino/runtime/threading/executor_manager.hpp" -#include "transformations/utils/utils.hpp" - -namespace InferenceEngine { - -InputsDataMap copyInfo(const InputsDataMap& networkInputs) { - InputsDataMap _networkInputs; - for (const auto& it : networkInputs) { - InputInfo::Ptr newPtr; - if (it.second) { - newPtr = std::make_shared(); - newPtr->setInputData(std::make_shared(*it.second->getInputData())); - } - _networkInputs.emplace(it.first, newPtr); - } - return _networkInputs; -} - -OutputsDataMap copyInfo(const OutputsDataMap& networkOutputs) { - OutputsDataMap _networkOutputs; - for (const auto& it : networkOutputs) { - DataPtr newData; - if (it.second) { - newData = std::make_shared(*it.second); - } - _networkOutputs.emplace(it.first, newData); - } - return _networkOutputs; -} - -IInferencePlugin::IInferencePlugin() : _executorManager(ov::threading::executor_manager()), _isNewAPI(true) {} - -void IInferencePlugin::VersionStore::copyFrom(const Version& v) { - description = v.description; - buildNumber = v.buildNumber; -} - -IInferencePlugin::VersionStore::VersionStore(const Version& v) { - copyFrom(v); -} - -IInferencePlugin::VersionStore& IInferencePlugin::VersionStore::operator=(const VersionStore& v) { - if (&v != this) { - copyFrom(v); - } - return *this; -} - -void IInferencePlugin::SetVersion(const ov::Version& version) { - _version = VersionStore(version); -} - -const ov::Version& IInferencePlugin::GetVersion() const { - return _version; -} - -std::string IInferencePlugin::GetName() const noexcept { - return _pluginName; -} - -void IInferencePlugin::SetName(const std::string& pluginName) noexcept { - _pluginName = pluginName; -} - -template -std::map> const_map_cast(const std::map>& map) { - std::map> res; - for (auto&& v : map) - res.emplace(v.first, std::const_pointer_cast(v.second)); - return res; -} - -std::shared_ptr IInferencePlugin::LoadNetwork( - const CNNNetwork& orig_network, - const std::map& config) { - std::shared_ptr impl; - - // if IR `version` is not set, suppose it's IR v10 for old API - // it allows to use operation names in set_ / get_tensor instead of tensor_names - auto orig_function = orig_network.getFunction(); - std::shared_ptr function; - InferenceEngine::CNNNetwork network = orig_network; - if (orig_function) { - function = std::make_shared(orig_function->get_results(), - orig_function->get_sinks(), - orig_function->get_parameters(), - orig_function->get_variables(), - orig_function->get_friendly_name()); - function->get_rt_info() = orig_function->get_rt_info(); - } - if (function && !IsNewAPI()) { - if (!function->has_rt_info("version")) { - function->set_rt_info(int64_t(10), "version"); - - // re-create `network` with new patched `function` - using namespace InferenceEngine; - OPENVINO_SUPPRESS_DEPRECATED_START - const auto& orig_icnn = static_cast(orig_network); - auto orig_impl = - std::dynamic_pointer_cast(orig_icnn.shared_from_this()); - OPENVINO_ASSERT(orig_impl != nullptr, - "Internal: orig_impl must be castable to details::CNNNetworkNGraphImpl"); - auto new_impl = std::make_shared(function, IsNewAPI()); - network = CNNNetwork(new_impl); - for (const auto& inputInfo : orig_network.getInputsInfo()) { - auto toInfo = network.getInputsInfo().at(inputInfo.first); - toInfo->setPrecision(inputInfo.second->getPrecision()); - toInfo->setLayout(inputInfo.second->getLayout()); - } 
- for (const auto& outputInfo : orig_network.getOutputsInfo()) { - auto toInfo = network.getOutputsInfo().at(outputInfo.first); - toInfo->setPrecision(outputInfo.second->getPrecision()); - toInfo->setLayout(outputInfo.second->getLayout()); - } - OPENVINO_SUPPRESS_DEPRECATED_END - } - } - - impl = LoadExeNetworkImpl(network, config); - - SetExeNetworkInfo(impl, const_map_cast(network.getInputsInfo()), const_map_cast(network.getOutputsInfo())); - if (function) { - SetExeNetworkInfo(impl, function); - } - - return impl; -} - -ov::SoPtr IInferencePlugin::LoadNetwork(const std::string& modelPath, - const std::map& config) { - auto cnnNet = GetCore()->ReadNetwork(modelPath, std::string()); - return GetCore()->LoadNetwork(cnnNet, GetName(), config); -} - -void IInferencePlugin::AddExtension(const std::shared_ptr&) { - IE_THROW(NotImplemented); -} - -void IInferencePlugin::SetConfig(const std::map&) { - IE_THROW(NotImplemented); -} - -void IInferencePlugin::SetProperties(const ov::AnyMap& config) { - SetConfig(any_copy(config)); -} - -ov::Any IInferencePlugin::GetConfig(const std::string&, const ov::AnyMap&) const { - IE_THROW(NotImplemented); -} - -ov::Any IInferencePlugin::GetMetric(const std::string&, const ov::AnyMap&) const { - IE_THROW(NotImplemented); -} - -std::shared_ptr IInferencePlugin::ImportNetwork( - const std::string& modelFileName, - const std::map& config) { - std::ifstream blobFile(modelFileName, std::ios::binary); - - if (!blobFile.is_open()) { - IE_THROW(NetworkNotRead); - } - - return ImportNetwork(blobFile, config); -} - -std::shared_ptr IInferencePlugin::ImportNetwork( - std::istream& networkModel, - const std::map& config) { - IE_THROW(NotImplemented); -} - -void IInferencePlugin::SetCore(std::weak_ptr core) { - IE_ASSERT(!core.expired()); - _core = core; - auto locked_core = _core.lock(); - if (locked_core) - _isNewAPI = locked_core->isNewAPI(); -} - -std::shared_ptr IInferencePlugin::GetCore() const noexcept { - return _core.lock(); -} - -bool IInferencePlugin::IsNewAPI() const noexcept { - return _isNewAPI; -} - -const std::shared_ptr& IInferencePlugin::executorManager() const { - return _executorManager; -} - -QueryNetworkResult IInferencePlugin::QueryNetwork(const CNNNetwork& network, - const std::map& config) const { - IE_THROW(NotImplemented); -} - -std::shared_ptr IInferencePlugin::LoadExeNetworkImpl( - const CNNNetwork&, - const std::map&) { - IE_THROW(NotImplemented); -} - -void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const ConstInputsDataMap& inputs, - const ConstOutputsDataMap& outputs) { - IE_ASSERT(exeNetwork != nullptr); - - // Set inputs/outputs and pointer to plugin manually here - exeNetwork->setNetworkInputs(copyInfo(constMapCast(inputs))); - exeNetwork->setNetworkOutputs(copyInfo(constMapCast(outputs))); - - exeNetwork->SetPointerToPlugin(shared_from_this()); -} - -void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const std::shared_ptr& function) { - bool newAPI = IsNewAPI(); - InferenceEngine::SetExeNetworkInfo(exeNetwork, function, newAPI); - exeNetwork->SetPointerToPlugin(shared_from_this()); -} - -std::unordered_set GetRemovedNodes(const std::shared_ptr& originalFunction, - const std::shared_ptr& transformedFunction) { - std::unordered_set result = {}; - std::unordered_set transformedNodeNames = {}; - - for (auto&& node : transformedFunction->get_ops()) { - transformedNodeNames.emplace(node->get_friendly_name()); - for (auto&& fusedLayerName : ov::getFusedNamesVector(node)) - 
transformedNodeNames.emplace(fusedLayerName); - } - - for (auto&& originalNode : originalFunction->get_ops()) { - if (transformedNodeNames.find(originalNode->get_friendly_name()) == transformedNodeNames.end()) - result.emplace(originalNode->get_friendly_name()); - } - - return result; -} - -std::unordered_set GetSupportedNodes( - const std::shared_ptr& model, - std::function&)> transform, - std::function)> is_node_supported) { - return ov::get_supported_nodes(model, transform, is_node_supported); -} - -void SetExeNetworkInfo(const std::shared_ptr& exeNetwork, - const std::shared_ptr& function, - bool new_api) { - OPENVINO_ASSERT(exeNetwork != nullptr); - OPENVINO_ASSERT(function != nullptr); - - std::vector> const_params; - std::vector> const_results; - - std::unordered_set leaf_names; - bool add_operation_names = false; - if (function->has_rt_info("version")) { - const int64_t ir_version = function->get_rt_info("version"); - // here we decide whether we need to add operation_names as tensor names for - // getInputs / getOutputs. Since these functions are designed to be used in new API only - // always need to add operation names for IR v10 - add_operation_names = ir_version == 10; - - for (const auto& vals : {function->inputs(), function->outputs()}) { - for (const auto& val : vals) { - for (const auto& name : val.get_names()) { - leaf_names.insert(name); - } - } - } - } - - const auto& inputsInfo = exeNetwork->GetInputsInfo(); - const auto& outputsInfo = exeNetwork->GetOutputsInfo(); - OPENVINO_ASSERT(inputsInfo.size() == function->get_parameters().size()); - - if (outputsInfo.size() != function->get_output_size()) { - const auto& outputs = function->outputs(); - std::unordered_set> output_tensors; - std::transform(outputs.cbegin(), - outputs.cend(), - std::inserter(output_tensors, output_tensors.begin()), - [](const ov::Output& out) { - return out.get_tensor_ptr(); - }); - - OPENVINO_ASSERT(outputsInfo.size() == output_tensors.size(), - "outputsInfo.size() is: ", - outputsInfo.size(), - ", and function->get_output_size() is: ", - function->get_output_size(), - ". 
Number of duplicated outputs: ", - outputs.size() - output_tensors.size()); - } - - for (const auto& param : function->get_parameters()) { - const auto& param_name = param->get_friendly_name(); - auto new_param = ov::as_type_ptr(param->copy_with_new_inputs({})); - new_param->set_friendly_name(param_name); - if (add_operation_names) { - OPENVINO_ASSERT(!new_api || leaf_names.find(param_name) == leaf_names.end() || - param->output(0).get_names().find(param_name) != param->output(0).get_names().end(), - "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); - leaf_names.insert(param_name); - new_param->output(0).get_tensor().add_names({param_name}); - } - // WA: use CNNNetwork's precisions since plugins sometimes override their precisions - // after transformation pipeline is run - new_param->set_element_type( - InferenceEngine::details::convertPrecision(inputsInfo.at(param_name)->getPrecision())); - new_param->set_layout(param->get_layout()); - new_param->output(0).get_rt_info() = param->output(0).get_rt_info(); - new_param->validate_and_infer_types(); - const_params.emplace_back(new_param); - } - for (const auto& result : function->get_results()) { - auto fake_param = std::make_shared(result->get_output_element_type(0), - result->get_output_partial_shape(0)); - const std::string res_name = ov::op::util::create_ie_output_name(result->input_value(0)); - fake_param->set_friendly_name(res_name); - fake_param->set_element_type( - InferenceEngine::details::convertPrecision(outputsInfo.at(res_name)->getPrecision())); - fake_param->validate_and_infer_types(); - auto new_result = result->copy_with_new_inputs({fake_param}); - new_result->set_friendly_name(result->get_friendly_name()); - if (add_operation_names) { - OPENVINO_ASSERT(!new_api || leaf_names.find(res_name) == leaf_names.end() || - result->output(0).get_names().find(res_name) != result->output(0).get_names().end(), - "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); - leaf_names.insert(res_name); - new_result->output(0).get_tensor().add_names({res_name}); - } - auto r = std::dynamic_pointer_cast(new_result); - OPENVINO_ASSERT(r, "Internal error. 
SetNetworkInfo failure casting output copy to Result"); - r->set_layout(result->get_layout()); - const_results.emplace_back(new_result); - } - - exeNetwork->setInputs(const_params); - exeNetwork->setOutputs(const_results); -} - -std::shared_ptr<::ov::IPlugin> convert_plugin(const std::shared_ptr& from) { - return ov::legacy_convert::convert_plugin(from); -} - -} // namespace InferenceEngine diff --git a/src/inference/src/cpp_interfaces/interface/ie_ivariable_state_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_ivariable_state_internal.cpp deleted file mode 100644 index 79bb02d8fe62d3..00000000000000 --- a/src/inference/src/cpp_interfaces/interface/ie_ivariable_state_internal.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -IVariableStateInternal::IVariableStateInternal(const std::string& name_) : name{name_} {} - -std::string IVariableStateInternal::GetName() const { - return name; -} - -void IVariableStateInternal::Reset() { - IE_THROW(NotImplemented); -} - -void IVariableStateInternal::SetState(const Blob::Ptr& newState) { - state = newState; -} - -Blob::CPtr IVariableStateInternal::GetState() const { - return state; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp deleted file mode 100644 index 6566534df7e83b..00000000000000 --- a/src/inference/src/dev/converter_utils.cpp +++ /dev/null @@ -1,707 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "converter_utils.hpp" - -#include -#include -#include - -#include "any_copy.hpp" -#include "cnn_network_ngraph_impl.hpp" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" -#include "icompiled_model_wrapper.hpp" -#include "ie_blob.h" -#include "ie_common.h" -#include "ie_icore.hpp" -#include "ie_input_info.hpp" -#include "ie_layouts.h" -#include "ie_ngraph_utils.hpp" -#include "iplugin_wrapper.hpp" -#include "openvino/core/except.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/runtime/exception.hpp" -#include "openvino/runtime/icompiled_model.hpp" -#include "openvino/runtime/iinfer_request.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/itensor.hpp" -#include "openvino/runtime/ivariable_state.hpp" -#include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/profiling_info.hpp" -#include "openvino/runtime/so_ptr.hpp" -#include "openvino/runtime/tensor.hpp" -#include "openvino/runtime/threading/executor_manager.hpp" -#include "openvino/runtime/variable_state.hpp" -#include "transformations/utils/utils.hpp" - -#ifdef PROXY_PLUGIN_ENABLED -# include "openvino/proxy/infer_request.hpp" -#endif - -namespace { - -std::string get_legacy_name_from_port(const ov::Output& port) { - ov::Output p(std::const_pointer_cast(port.get_node_shared_ptr()), port.get_index()); - if (auto node = std::dynamic_pointer_cast(p.get_node_shared_ptr())) { - p = node->input_value(0); - } - return ov::op::util::create_ie_output_name(p); -} - -void fill_input_info(ov::Output& input, InferenceEngine::InputInfo::Ptr& input_info) { - const ov::Output const_input(input.get_node(), input.get_index()); - ov::legacy_convert::fill_input_info(const_input, input_info); - auto& rt_info 
= input.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - rt_info.erase(it); - } -} - -void fill_output_info(ov::Output& input, InferenceEngine::DataPtr& output_info) { - const ov::Output const_input(input.get_node(), input.get_index()); - ov::legacy_convert::fill_output_info(const_input, output_info); - auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - rt_info.erase(it); - } -} - -InferenceEngine::SizeVector get_dims(const ov::Output& port) { - InferenceEngine::SizeVector dims = {}; - const auto& p_shape = port.get_partial_shape(); - if (p_shape.is_static()) - dims = p_shape.get_shape(); - return dims; -} - -} // namespace - -void ov::legacy_convert::fill_input_info(const ov::Output& input, - InferenceEngine::InputInfo::Ptr& input_info) { - if (!input_info) { - // Create input info - auto param_name = input.get_node()->get_friendly_name(); - auto dims = get_dims(input); - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(input.get_element_type()), - dims, - InferenceEngine::TensorDesc::getLayoutByDims(dims)); - auto data = std::make_shared(param_name, desc); - input_info = std::make_shared(); - input_info->setInputData(data); - } - auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - auto td = it->second.as(); - input_info->getInputData()->reshape(td.getDims(), td.getLayout()); - input_info->setPrecision(td.getPrecision()); - } -} -void ov::legacy_convert::fill_output_info(const ov::Output& output, - InferenceEngine::DataPtr& output_info) { - if (!output_info) { - // Create input info - const auto& res_name = ov::op::util::create_ie_output_name(output); - auto dims = get_dims(output); - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(output.get_element_type()), - dims, - InferenceEngine::TensorDesc::getLayoutByDims(dims)); - output_info = std::make_shared(res_name, desc); - } - auto& rt_info = output.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - if (it != rt_info.end()) { - auto td = it->second.as(); - output_info->reshape(td.getDims(), td.getLayout()); - output_info->setPrecision(td.getPrecision()); - } -} - -InferenceEngine::CNNNetwork ov::legacy_convert::convert_model(const std::shared_ptr& model, - bool is_new_api) { - auto network = InferenceEngine::CNNNetwork(std::shared_ptr( - new InferenceEngine::details::CNNNetworkNGraphImpl(model->clone(), is_new_api))); - std::shared_ptr cloned_model = network.getFunction(); - for (auto&& input : cloned_model->inputs()) { - auto param_name = input.get_node()->get_friendly_name(); - - OPENVINO_ASSERT(network.getInputsInfo().count(param_name)); - - auto input_info = network.getInputsInfo()[param_name]; - ::fill_input_info(input, input_info); - } - for (auto&& result : cloned_model->get_results()) { - auto output = result->input_value(0); - const auto& res_name = ov::op::util::create_ie_output_name(output); - - OPENVINO_ASSERT(network.getOutputsInfo().count(res_name)); - auto output_info = network.getOutputsInfo()[res_name]; - - ::fill_output_info(output, output_info); - } - return network; -} -std::shared_ptr ov::legacy_convert::convert_model(const InferenceEngine::CNNNetwork& network, - bool is_new_api) { - OPENVINO_ASSERT(network.getFunction(), - "CNNNetwork can be converted to OpenVINO Model only in case if it contains ov::Model"); - if (is_new_api) - return network.getFunction(); - - auto cloned_model = 
network.getFunction()->clone(); - for (auto&& input : cloned_model->inputs()) { - auto param_name = input.get_node()->get_friendly_name(); - - OPENVINO_ASSERT(network.getInputsInfo().count(param_name)); - - auto input_info = network.getInputsInfo().at(param_name); - auto& rt_info = input.get_rt_info(); - rt_info["ie_legacy_td"] = input_info->getTensorDesc(); - } - for (auto&& result : cloned_model->get_results()) { - auto output = result->input_value(0); - const auto& res_name = ov::op::util::create_ie_output_name(output); - - OPENVINO_ASSERT(network.getOutputsInfo().count(res_name)); - auto output_info = network.getOutputsInfo().at(res_name); - - auto& rt_info = output.get_rt_info(); - rt_info["ie_legacy_td"] = output_info->getTensorDesc(); - } - if (!cloned_model->has_rt_info("version")) { - cloned_model->set_rt_info(int64_t(10), "version"); - } - return cloned_model; -} - -namespace ov { - -class IVariableStateInternalWrapper : public InferenceEngine::IVariableStateInternal { - ov::SoPtr m_state; - -public: - IVariableStateInternalWrapper(const ov::SoPtr& state) - : InferenceEngine::IVariableStateInternal(state->get_name()), - m_state(state) {} - - std::string GetName() const override { - return m_state->get_name(); - } - - void Reset() override { - m_state->reset(); - } - - void SetState(const InferenceEngine::Blob::Ptr& newState) override { - m_state->set_state(ov::make_tensor(newState, true)); - } - - InferenceEngine::Blob::CPtr GetState() const override { - return tensor_to_blob(m_state->get_state()); - } -}; - -class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { -public: - IInferencePluginWrapper(const ov::SoPtr& plugin) : m_plugin(plugin) { - auto& ver = plugin->get_version(); - ov::Version version; - version.buildNumber = ver.buildNumber; - version.description = ver.description; - SetVersion(version); - _isNewAPI = plugin->is_new_api(); - _executorManager = plugin->get_executor_manager(); - } - - virtual ~IInferencePluginWrapper() = default; - - std::string GetName() const noexcept override { - return m_plugin->get_device_name(); - } - - void SetName(const std::string& name) noexcept override { - m_plugin->set_device_name(name); - } - - std::shared_ptr LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::map& config) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()), - ov::any_copy(config)), - m_plugin._so}); - } - - ov::SoPtr LoadNetwork( - const std::string& modelPath, - const std::map& config) override { - return ov::SoPtr( - ov::legacy_convert::convert_compiled_model( - {m_plugin->compile_model(modelPath, ov::any_copy(config)), m_plugin._so}), - m_plugin._so); - } - - void SetConfig(const std::map& config) override { - m_plugin->set_property(ov::any_copy(config)); - } - - void SetProperties(const ov::AnyMap& config) override { - m_plugin->set_property(config); - } - - ov::Any GetConfig(const std::string& name, const ov::AnyMap& options) const override { - return m_plugin->get_property(name, options); - } - - ov::Any GetMetric(const std::string& name, const ov::AnyMap& options) const override { - return m_plugin->get_property(name, options); - } - - std::shared_ptr ImportNetwork( - const std::string& modelFileName, - const std::map& config) override { - std::ifstream model(modelFileName, std::ios::binary); - return ov::legacy_convert::convert_compiled_model( - {m_plugin->import_model(model, ov::any_copy(config)), m_plugin._so}); 
- } - - std::shared_ptr ImportNetwork( - std::istream& networkModel, - const std::map& config) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->import_model(networkModel, ov::any_copy(config)), m_plugin._so}); - } - - void SetCore(std::weak_ptr core) override { - return m_plugin->set_core(std::dynamic_pointer_cast(core.lock())); - } - - std::shared_ptr GetCore() const noexcept override { - auto core = m_plugin->get_core(); - return std::dynamic_pointer_cast(core); - } - - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, - const std::map& config) const override { - auto res = m_plugin->query_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()), - ov::any_copy(config)); - InferenceEngine::QueryNetworkResult ret; - if (!network.getFunction() || res.empty()) { - ret.rc = InferenceEngine::GENERAL_ERROR; - return ret; - } - ret.supportedLayersMap = res; - - return ret; - } - - ov::SoPtr get_plugin() { - return m_plugin; - } - -private: - ov::SoPtr m_plugin; -}; - -} // namespace ov - -std::shared_ptr<::InferenceEngine::IInferencePlugin> ov::legacy_convert::convert_plugin( - const ov::SoPtr<::ov::IPlugin>& plugin) { - if (auto wrapper = std::dynamic_pointer_cast(plugin._ptr)) - return wrapper->get_plugin(); - return std::make_shared(plugin); -} - -std::shared_ptr<::ov::IPlugin> ov::legacy_convert::convert_plugin( - const std::shared_ptr<::InferenceEngine::IInferencePlugin>& plugin) { - std::shared_ptr<::ov::IPlugin> ov_plugin(new ::InferenceEngine::IPluginWrapper(plugin)); - return ov_plugin; -} - -namespace ov { - -class IExecutableNetworkWrapper : public InferenceEngine::IExecutableNetworkInternal { -public: - explicit IExecutableNetworkWrapper(const ov::SoPtr& model) : m_model(model) { - for (const auto& input : m_model->inputs()) { - InferenceEngine::InputInfo::Ptr input_info; - ov::legacy_convert::fill_input_info(input, input_info); - _networkInputs[input_info->name()] = input_info; - _parameters.emplace_back(input.get_node_shared_ptr()); - } - for (const auto& output : m_model->outputs()) { - auto out = output.get_node()->input_value(0); - InferenceEngine::DataPtr output_info; - ov::legacy_convert::fill_output_info(ov::Output(out.get_node(), out.get_index()), - output_info); - _networkOutputs[output_info->getName()] = output_info; - _results.emplace_back(output.get_node_shared_ptr()); - } - _plugin = - ov::legacy_convert::convert_plugin({std::const_pointer_cast(m_model->m_plugin), m_model._so}); - _so = model._so; - } - - std::shared_ptr CreateInferRequest() override { - auto infer_request = legacy_convert::convert_infer_request({m_model->create_infer_request(), m_model._so}); - infer_request->setPointerToExecutableNetworkInternal(shared_from_this()); - return infer_request; - } - - void Export(std::ostream& model) override { - m_model->export_model(model); - } - - void Export(const std::string& modelFileName) override { - std::ofstream ostream(modelFileName, std::ios::out | std::ios::binary); - Export(ostream); - } - - std::shared_ptr GetExecGraphInfo() override { - return m_model->get_runtime_model()->clone(); - } - - void SetConfig(const ov::AnyMap& config) override { - m_model->set_property(config); - } - - ov::Any GetConfig(const std::string& name) const override { - return m_model->get_property(name); - } - - ov::Any GetMetric(const std::string& name) const override { - return m_model->get_property(name); - } - - ov::SoPtr get_compiled_model() { - return m_model; - } - -private: - ov::SoPtr 
m_model; -}; -} // namespace ov - -std::shared_ptr ov::legacy_convert::convert_compiled_model( - const ov::SoPtr& model) { - if (auto comp_model = std::dynamic_pointer_cast(model._ptr)) { - return comp_model->get_executable_network(); - } - return std::make_shared(model); -} - -ov::SoPtr ov::legacy_convert::convert_compiled_model( - const std::shared_ptr& model) { - if (auto comp_model = std::dynamic_pointer_cast(model)) { - return comp_model->get_compiled_model(); - } - return {std::make_shared(model), model->GetPointerToSo()}; -} - -namespace ov { - -class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestInternal { - ov::Output find_port(const std::string& legacy_name) const { - for (const auto& port : m_request->get_inputs()) { - if (get_legacy_name_from_port(port) == legacy_name) - return port; - } - for (const auto& port : m_request->get_outputs()) { - if (get_legacy_name_from_port(port) == legacy_name) - return port; - } - OPENVINO_THROW("Failed to find input or output with name: \'", legacy_name, "\'"); - } - -public: - explicit IInferRequestInternalWrapper(const ov::SoPtr& request) : m_request(request) { - _so = request._so; - } - - void Infer() override { - m_request->infer(); - } - - void Cancel() override { - m_request->cancel(); - } - - std::map GetPerformanceCounts() const override { - auto res = m_request->get_profiling_info(); - std::map ret; - for (size_t i = 0; i < res.size(); i++) { - const auto& info = res[i]; - InferenceEngine::InferenceEngineProfileInfo old_info; - old_info.cpu_uSec = info.cpu_time.count(); - old_info.execution_index = static_cast(i); - old_info.realTime_uSec = info.real_time.count(); - strncpy(old_info.exec_type, info.exec_type.c_str(), sizeof(old_info.exec_type)); - old_info.exec_type[sizeof(old_info.exec_type) - 1] = 0; - strncpy(old_info.layer_type, info.node_type.c_str(), sizeof(old_info.layer_type)); - old_info.layer_type[sizeof(old_info.layer_type) - 1] = 0; - switch (info.status) { - case ov::ProfilingInfo::Status::EXECUTED: - old_info.status = InferenceEngine::InferenceEngineProfileInfo::EXECUTED; - break; - case ov::ProfilingInfo::Status::NOT_RUN: - old_info.status = InferenceEngine::InferenceEngineProfileInfo::NOT_RUN; - break; - case ov::ProfilingInfo::Status::OPTIMIZED_OUT: - old_info.status = InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT; - break; - } - ret[info.node_name] = old_info; - } - return ret; - } - - void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& data) override { - try { - m_request->set_tensor(find_port(name), ov::make_tensor(data, true)); - } catch (const ov::Exception& ex) { - const std::string what = ex.what(); - if (what.find("Failed to set tensor") != std::string::npos) { - IE_THROW(ParameterMismatch) << what; - } - IE_THROW(GeneralError) << what; - } - } - - InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override { - auto port = find_port(name); - auto& rt_info = port.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - InferenceEngine::TensorDesc desc; - if (it != rt_info.end()) { - desc = it->second.as(); - } - return tensor_to_blob(m_request->get_tensor(port), true, desc); - } - - std::vector> QueryState() override { - auto res = m_request->query_state(); - std::vector> ret; - for (const auto& state : res) { - ret.emplace_back(std::make_shared(state)); - } - return ret; - } - - void StartAsync() override { - m_request->start_async(); - } - - InferenceEngine::StatusCode Wait(int64_t millis_timeout) override { - if (millis_timeout == 
InferenceEngine::IInferRequest::RESULT_READY) { - m_request->wait(); - } else { - std::chrono::milliseconds timeout(millis_timeout); - bool res = m_request->wait_for(timeout); - if (!res) - return InferenceEngine::StatusCode::RESULT_NOT_READY; - } - return InferenceEngine::StatusCode::OK; - } - - void SetCallback(std::function callback) override { - m_request->set_callback(std::move(callback)); - } - - ov::SoPtr get_infer_request() { - return m_request; - } - -private: - ov::SoPtr m_request; -}; - -} // namespace ov - -namespace InferenceEngine { - -class IVariableStateWrapper : public ov::IVariableState { -private: - std::shared_ptr m_state; - -public: - explicit IVariableStateWrapper(const std::shared_ptr& state) - : ov::IVariableState(state->GetName()), - m_state(state) {} - - void reset() override { - m_state->Reset(); - } - - void set_state(const ov::SoPtr& state) override { - m_state->SetState(ov::tensor_to_blob(state)); - } - - ov::SoPtr get_state() const override { - return ov::make_tensor(std::const_pointer_cast(m_state->GetState())); - } -}; - -class IAsyncInferRequestWrapper : public ov::IAsyncInferRequest { -public: - IAsyncInferRequestWrapper(const std::shared_ptr& request, - const std::string& plugin_name) - : ov::IAsyncInferRequest(nullptr, nullptr, nullptr), - m_request(request), - m_unwrap_tensor(plugin_name != "AUTO" && plugin_name != "MULTI" && plugin_name != "BATCH" && - plugin_name != "HETERO") { - if (m_request->getPointerToExecutableNetworkInternal()) - m_compiled_model = - ov::legacy_convert::convert_compiled_model(m_request->getPointerToExecutableNetworkInternal()); - } - std::shared_ptr get_infer_request() { - return m_request; - } - - void infer() override { - m_request->Infer(); - } - void start_async() override { - m_request->StartAsync(); - } - - void wait() override { - try { - m_request->Wait(InferenceEngine::InferRequest::RESULT_READY); - } catch (const ov::Cancelled&) { - throw; - } catch (const InferenceEngine::InferCancelled& e) { - ov::Cancelled::create(e.what()); - } catch (const std::exception& ex) { - OPENVINO_THROW(ex.what()); - } catch (...) { - OPENVINO_THROW("Unexpected exception"); - } - } - bool wait_for(const std::chrono::milliseconds& timeout) override { - try { - return m_request->Wait(timeout.count()) == InferenceEngine::OK; - } catch (const InferenceEngine::InferCancelled& e) { - ov::Cancelled::create(e.what()); - } catch (const std::exception& ex) { - OPENVINO_THROW(ex.what()); - } catch (...) 
{ - OPENVINO_THROW("Unexpected exception"); - } - } - - void cancel() override { - m_request->Cancel(); - } - - std::vector get_profiling_info() const override { - auto ieInfos = m_request->GetPerformanceCounts(); - std::vector infos; - infos.reserve(ieInfos.size()); - while (!ieInfos.empty()) { - auto itIeInfo = std::min_element( - std::begin(ieInfos), - std::end(ieInfos), - [](const decltype(ieInfos)::value_type& lhs, const decltype(ieInfos)::value_type& rhs) { - return lhs.second.execution_index < rhs.second.execution_index; - }); - IE_ASSERT(itIeInfo != ieInfos.end()); - auto& ieInfo = itIeInfo->second; - infos.push_back(ov::ProfilingInfo{}); - auto& info = infos.back(); - switch (ieInfo.status) { - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - info.status = ov::ProfilingInfo::Status::NOT_RUN; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - info.status = ov::ProfilingInfo::Status::OPTIMIZED_OUT; - break; - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - info.status = ov::ProfilingInfo::Status::EXECUTED; - break; - } - info.real_time = std::chrono::microseconds{ieInfo.realTime_uSec}; - info.cpu_time = std::chrono::microseconds{ieInfo.cpu_uSec}; - info.node_name = itIeInfo->first; - info.exec_type = std::string{ieInfo.exec_type}; - info.node_type = std::string{ieInfo.layer_type}; - ieInfos.erase(itIeInfo); - } - return infos; - } - - ov::SoPtr get_tensor(const ov::Output& port) const override { - const auto& name = get_legacy_name_from_port(port); - auto blob = m_request->GetBlob(name); - ov::SoPtr tensor = ov::make_tensor(blob); - if (!tensor._so) - tensor._so = m_request->getPointerToSo(); - return tensor; - } - - void set_tensor(const ov::Output& port, const ov::SoPtr& tensor) override { - m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor, m_unwrap_tensor)); - } - - std::vector> query_state() const override { - std::vector> variable_states; - for (auto&& state : m_request->QueryState()) { - variable_states.push_back( - {std::make_shared(state), m_request->getPointerToSo()}); - } - return variable_states; - } - - void set_callback(std::function callback) override { - m_request->SetCallback(std::move(callback)); - } - - const std::shared_ptr& get_compiled_model() const override { - if (!m_compiled_model) { - std::lock_guard lock(m_mutex); - if (!m_compiled_model) { - if (m_request->getPointerToExecutableNetworkInternal()) - m_compiled_model = - ov::legacy_convert::convert_compiled_model(m_request->getPointerToExecutableNetworkInternal()); - } - } - OPENVINO_ASSERT(m_compiled_model); - return m_compiled_model._ptr; - } - - const std::vector>& get_inputs() const override { - return get_compiled_model()->inputs(); - } - const std::vector>& get_outputs() const override { - return get_compiled_model()->outputs(); - } - -private: - std::shared_ptr m_request; - mutable ov::SoPtr m_compiled_model; - mutable std::mutex m_mutex; - const bool m_unwrap_tensor; -}; - -} // namespace InferenceEngine - -std::shared_ptr<::InferenceEngine::IInferRequestInternal> ov::legacy_convert::convert_infer_request( - const ov::SoPtr<::ov::IAsyncInferRequest>& request) { - if (auto comp_model = std::dynamic_pointer_cast(request._ptr)) { - return comp_model->get_infer_request(); - } - return std::make_shared(request); -} -ov::SoPtr<::ov::IAsyncInferRequest> ov::legacy_convert::convert_infer_request( - const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request, - const std::string& plugin_name) { - if (auto comp_model 
= std::dynamic_pointer_cast(request)) { - return comp_model->get_infer_request(); - } - return {std::make_shared(request, plugin_name), - request->getPointerToSo()}; -} diff --git a/src/inference/src/dev/converter_utils.hpp b/src/inference/src/dev/converter_utils.hpp deleted file mode 100644 index 6264a59432fd50..00000000000000 --- a/src/inference/src/dev/converter_utils.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "cpp/ie_cnn_network.h" -#include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "openvino/core/extension.hpp" -#include "openvino/core/model.hpp" -#include "openvino/runtime/iasync_infer_request.hpp" -#include "openvino/runtime/icompiled_model.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/iremote_context.hpp" - -namespace ov { -namespace legacy_convert { - -void fill_input_info(const ov::Output& input, InferenceEngine::InputInfo::Ptr& inputInfo); -void fill_output_info(const ov::Output& output, InferenceEngine::DataPtr& outputInfo); - -InferenceEngine::CNNNetwork convert_model(const std::shared_ptr& model, bool is_new_api); -std::shared_ptr convert_model(const InferenceEngine::CNNNetwork& model, bool is_new_api); - -std::shared_ptr<::InferenceEngine::IInferencePlugin> convert_plugin(const ov::SoPtr<::ov::IPlugin>& plugin); -std::shared_ptr<::ov::IPlugin> convert_plugin(const std::shared_ptr<::InferenceEngine::IInferencePlugin>& plugin); - -std::shared_ptr<::InferenceEngine::IExecutableNetworkInternal> convert_compiled_model( - const ov::SoPtr<::ov::ICompiledModel>& model); -ov::SoPtr<::ov::ICompiledModel> convert_compiled_model( - const std::shared_ptr<::InferenceEngine::IExecutableNetworkInternal>& model); - -std::shared_ptr<::InferenceEngine::IInferRequestInternal> convert_infer_request( - const ov::SoPtr<::ov::IAsyncInferRequest>& request); -ov::SoPtr<::ov::IAsyncInferRequest> convert_infer_request( - const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request, - const std::string& plugin_name = ""); -} // namespace legacy_convert -} // namespace ov diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 89b805254ef9dd..179684e397a7c4 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -8,9 +8,6 @@ #include "check_network_batchable.hpp" #include "compilation_context.hpp" -#include "dev/converter_utils.hpp" -#include "dev/icompiled_model_wrapper.hpp" -#include "dev/iplugin_wrapper.hpp" #include "itt.hpp" #include "model_reader.hpp" #include "openvino/core/any.hpp" @@ -56,7 +53,6 @@ template void allowNotImplemented(F&& f) { try { f(); - } catch (const InferenceEngine::NotImplemented&) { } catch (const ov::NotImplemented&) { } } @@ -312,7 +308,7 @@ ov::Parsed ov::parseDeviceNameIntoConfig(const std::string& deviceName, return {updated_device_name, updated_config}; } -ov::CoreImpl::CoreImpl(bool _newAPI) : m_new_api(_newAPI) { +ov::CoreImpl::CoreImpl() { add_mutex(""); // Register global mutex m_executor_manager = ov::threading::executor_manager(); for (const auto& it : ov::get_available_opsets()) { @@ -455,6 +451,14 @@ void ov::CoreImpl::register_plugin_in_registry_unsafe(const std::string& device_ void ov::CoreImpl::register_compile_time_plugins() { std::lock_guard lock(get_mutex()); + auto any_copy = [](const std::map& params) -> ov::AnyMap { + ov::AnyMap result; + for 
(auto&& value : params) { + result.emplace(value.first, value.second); + } + return result; + }; + const decltype(::getCompiledPluginsRegistry())& plugins = getCompiledPluginsRegistry(); for (const auto& plugin : plugins) { const auto& deviceName = plugin.first; @@ -586,8 +590,6 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const { so = ov::util::load_shared_object(desc.libraryLocation.c_str()); std::shared_ptr plugin_impl; reinterpret_cast(ov::util::get_symbol(so, ov::create_plugin_function))(plugin_impl); - if (auto wrapper = std::dynamic_pointer_cast(plugin_impl)) - wrapper->set_shared_object(so); plugin = Plugin{plugin_impl, so}; } @@ -692,7 +694,7 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const { std::vector ext; desc.extensionCreateFunc(ext); add_extensions_unsafe(ext); - } catch (const InferenceEngine::GeneralError&) { + } catch (const ov::Exception&) { // the same extension can be registered multiple times - ignore it! } } else { @@ -700,15 +702,6 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const { } return plugins.emplace(deviceName, plugin).first->second; - } catch (const InferenceEngine::Exception& ex) { - OPENVINO_THROW("Failed to create plugin ", - ov::util::from_file_path(desc.libraryLocation), - " for device ", - deviceName, - "\n", - "Please, check your environment\n", - ex.what(), - "\n"); } catch (const ov::Exception& ex) { OPENVINO_THROW("Failed to create plugin ", ov::util::from_file_path(desc.libraryLocation), @@ -850,12 +843,7 @@ ov::SoPtr ov::CoreImpl::import_model(std::istream& model, const ov::AnyMap& config) const { OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::import_model"); auto parsed = parseDeviceNameIntoConfig(device_name, config); - auto compiled_model = get_plugin(parsed._deviceName).import_model(model, parsed._config); - if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { - wrapper->get_executable_network()->loadedFromCache(); - } - - return compiled_model; + return get_plugin(parsed._deviceName).import_model(model, parsed._config); } ov::SoPtr ov::CoreImpl::import_model(std::istream& modelStream, @@ -863,12 +851,7 @@ ov::SoPtr ov::CoreImpl::import_model(std::istream& modelStre const ov::AnyMap& config) const { OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::import_model"); auto parsed = parseDeviceNameIntoConfig(context->get_device_name(), config); - auto compiled_model = get_plugin(parsed._deviceName).import_model(modelStream, context, parsed._config); - if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { - wrapper->get_executable_network()->loadedFromCache(); - } - - return compiled_model; + return get_plugin(parsed._deviceName).import_model(modelStream, context, parsed._config); } ov::SupportedOpsMap ov::CoreImpl::query_model(const std::shared_ptr& model, @@ -907,10 +890,7 @@ std::vector ov::CoreImpl::get_available_devices() const { if (is_hidden_device(deviceName)) continue; try { - const ov::Any p = GetMetric(deviceName, propertyName); - devicesIDs = p.as>(); - } catch (const InferenceEngine::Exception&) { - // plugin is not created by e.g. invalid env + devicesIDs = get_property(deviceName, ov::available_devices.name(), {}).as>(); } catch (const ov::Exception&) { // plugin is not created by e.g. 
invalid env } catch (const std::runtime_error&) { @@ -1016,10 +996,6 @@ ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_n return supported_config; } -bool ov::CoreImpl::is_new_api() const { - return m_new_api; -} - ov::SoPtr ov::CoreImpl::get_default_context(const std::string& device_name) const { auto parsed = ov::parseDeviceNameIntoConfig(device_name); return get_plugin(parsed._deviceName).get_default_context(parsed._config); @@ -1376,8 +1352,6 @@ bool ov::CoreImpl::device_supports_model_caching(const ov::Plugin& plugin) const bool ov::CoreImpl::device_supports_cache_dir(const ov::Plugin& plugin) const { try { return util::contains(plugin.get_property(ov::supported_properties), ov::cache_dir); - } catch (const InferenceEngine::NotImplemented&) { - return false; } catch (const ov::NotImplemented&) { return false; } @@ -1459,9 +1433,6 @@ ov::SoPtr ov::CoreImpl::load_model_from_cache( update_config[ov::loaded_from_cache.name()] = true; compiled_model = context ? plugin.import_model(networkStream, context, update_config) : plugin.import_model(networkStream, update_config); - if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { - wrapper->get_executable_network()->loadedFromCache(); - } }); } catch (const HeaderException&) { // For these exceptions just remove old cache and set that import didn't work @@ -1618,3 +1589,56 @@ std::shared_ptr ov::CoreImpl::read_model(const std::string& model, OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::ReadTime, "CoreImpl::read_model from memory"); return ov::util::read_model(model, weights, extensions, frontendMode); } + +std::map ov::CoreImpl::get_versions(const std::string& deviceName) const { + std::map versions; + std::vector deviceNames; + + { + // for compatibility with samples / demo + if (deviceName.find("HETERO") == 0) { + auto pos = deviceName.find_first_of(":"); + if (pos != std::string::npos) { + deviceNames = ov::DeviceIDParser::get_hetero_devices(deviceName.substr(pos + 1)); + } + deviceNames.push_back("HETERO"); + } else if (deviceName.find("MULTI") == 0) { + auto pos = deviceName.find_first_of(":"); + if (pos != std::string::npos) { + deviceNames = ov::DeviceIDParser::get_multi_devices(deviceName.substr(pos + 1)); + } + deviceNames.push_back("MULTI"); + } else if (deviceName.find("AUTO") == 0) { + auto pos = deviceName.find_first_of(":"); + if (pos != std::string::npos) { + deviceNames = ov::DeviceIDParser::get_multi_devices(deviceName.substr(pos + 1)); + } + deviceNames.emplace_back("AUTO"); + } else if (deviceName.find("BATCH") == 0) { + auto pos = deviceName.find_first_of(":"); + if (pos != std::string::npos) { + deviceNames = {ov::DeviceIDParser::get_batch_device(deviceName.substr(pos + 1))}; + } + deviceNames.push_back("BATCH"); + } else { + deviceNames.push_back(deviceName); + } + } + + for (auto&& deviceName_ : deviceNames) { + ov::DeviceIDParser parser(deviceName_); + std::string deviceNameLocal = parser.get_device_name(); + + try { + ov::Plugin plugin = get_plugin(deviceNameLocal); + versions[deviceNameLocal] = plugin.get_version(); + } catch (const ov::Exception& ex) { + std::string exception(ex.what()); + if (exception.find("not registered in the OpenVINO Runtime") == std::string::npos) { + throw; + } + } + } + + return versions; +} diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 4543f5ce342349..f453fb4554800f 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ -4,14 +4,9 @@ #pragma once -#include - 
-#include "any_copy.hpp" #include "cache_guard.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "dev/plugin.hpp" -#include "ie_cache_manager.hpp" -#include "ie_icore.hpp" +#include "cache_manager.hpp" #include "openvino/core/any.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/so_extension.hpp" @@ -51,9 +46,7 @@ Parsed parseDeviceNameIntoConfig(const std::string& deviceName, */ bool is_config_applicable(const std::string& device_name, const std::string& device_name_to_parse); -std::string find_plugins_xml(const std::string& xmlFile); - -class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_this { +class CoreImpl : public ov::ICore, public std::enable_shared_from_this { private: mutable std::map plugins; // Mutex is needed to prevent changes of dev mutexes map from different threads @@ -150,8 +143,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t std::map pluginRegistry; - const bool m_new_api; - ov::SoPtr compile_model_and_cache(ov::Plugin& plugin, const std::shared_ptr& model, const ov::AnyMap& parsedConfig, @@ -189,14 +180,8 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t } void add_extensions_unsafe(const std::vector& extensions) const; - // Legacy API - ov::SoPtr LoadNetworkImpl( - const InferenceEngine::CNNNetwork& model, - ov::Plugin& plugin, - const std::map& parsedConfig); - public: - CoreImpl(bool _newAPI); + CoreImpl(); ~CoreImpl() override = default; @@ -217,63 +202,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t */ void register_compile_time_plugins(); - // - // ICore public API - // - - InferenceEngine::CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const override; - - InferenceEngine::CNNNetwork ReadNetwork(const std::string& model, - const InferenceEngine::Blob::CPtr& weights, - bool frontendMode = false) const override; - - bool isNewAPI() const override; - - InferenceEngine::SoExecutableNetworkInternal LoadNetwork(const InferenceEngine::CNNNetwork& network, - const std::string& deviceNameOrig, - const std::map& config) override; - - InferenceEngine::SoExecutableNetworkInternal LoadNetwork( - const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - const std::function& val = nullptr) override; - - InferenceEngine::SoExecutableNetworkInternal LoadNetwork( - const std::string& modelStr, - const InferenceEngine::Blob::CPtr& weights, - const std::string& deviceName, - const std::map& config, - const std::function& val = nullptr) override; - - InferenceEngine::SoExecutableNetworkInternal ImportNetwork( - std::istream& networkModel, - const std::string& deviceName, - const std::map& config) override; - - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, - const std::string& deviceName, - const std::map& config) const override; - - Any GetMetric(const std::string& deviceName, const std::string& name, const AnyMap& options = {}) const override; - - Any GetConfig(const std::string& deviceName, const std::string& name) const override; - - /** - * @brief Returns devices available for neural networks inference - * - * @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, NPU } - * If there more than one device of specific type, they are enumerated with .# suffix. 
- */ - std::vector GetAvailableDevices() const override; - - std::map GetSupportedConfig(const std::string& deviceName, - const std::map& configs) override; - - bool DeviceSupportsModelCaching(const std::string& deviceName) const override; - - std::map GetVersions(const std::string& deviceName) const; - // Common API /** @@ -359,10 +287,10 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t ov::AnyMap get_supported_property(const std::string& device_name, const ov::AnyMap& config, const bool keep_core_property = true) const override; - bool is_new_api() const override; - ov::SoPtr get_default_context(const std::string& device_name) const override; + std::map get_versions(const std::string& deviceName) const; + /** * @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp. * diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp deleted file mode 100644 index 5be5624aba8151..00000000000000 --- a/src/inference/src/dev/core_impl_ie.cpp +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "any_copy.hpp" -#include "blob_factory.hpp" -#include "compilation_context.hpp" -#include "core_impl.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "dev/converter_utils.hpp" -#include "dev/icompiled_model_wrapper.hpp" -#include "ie_network_reader.hpp" -#include "iplugin_wrapper.hpp" -#include "itt.hpp" -#include "openvino/itt.hpp" -#include "openvino/pass/constant_folding.hpp" -#include "openvino/runtime/device_id_parser.hpp" -#include "openvino/runtime/icompiled_model.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/itensor.hpp" -#include "openvino/runtime/make_tensor.hpp" -#include "openvino/util/common_util.hpp" - -bool ov::CoreImpl::isNewAPI() const { - return is_new_api(); -} - -ov::SoPtr ov::CoreImpl::LoadNetworkImpl( - const InferenceEngine::CNNNetwork& network, - ov::Plugin& plugin, - const std::map& parsedConfig) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CoreImpl::LoadNetworkImpl"); - ov::SoPtr execNetwork; - auto wrapper = std::dynamic_pointer_cast(plugin.m_ptr); - OPENVINO_ASSERT(wrapper); - auto old_plugin = wrapper->get_plugin(); - return {old_plugin->LoadNetwork(network, parsedConfig), plugin.m_so}; -} - -InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& modelPath, const std::string& binPath) const { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::ReadTime, "CoreImpl::ReadNetwork from file"); - return InferenceEngine::details::ReadNetwork(modelPath, - binPath, - extensions, - isNewAPI(), - coreConfig.get_enable_mmap()); -} - -InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& model, - const InferenceEngine::Blob::CPtr& weights, - bool frontendMode) const { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::ReadTime, "CoreImpl::ReadNetwork from memory"); - return InferenceEngine::details::ReadNetwork(model, weights, extensions, isNewAPI(), frontendMode); -} - -InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::string& deviceName, - const std::map& config) { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::LoadNetwork::CNN"); - if (network.getFunction()) { - auto compiled_model = - compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), deviceName, any_copy(config)); - return 
{ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; - } - auto parsed = parseDeviceNameIntoConfig(deviceName, any_copy(config)); - auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config)); - return {res._ptr, res._so}; -} - -InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( - const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - const std::function& val) { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::LoadNetwork::Path"); - - auto compiled_model = compile_model(modelPath, deviceName, any_copy(config)); - return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; -} - -InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( - const std::string& modelStr, - const InferenceEngine::Blob::CPtr& weights, - const std::string& deviceName, - const std::map& config, - const std::function& val) { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::LoadNetwork::Memory"); - - auto compiled_model = - compile_model(modelStr, - ov::make_tensor(ov::make_tensor(std::const_pointer_cast(weights))), - deviceName, - ov::any_copy(config)); - return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; -} - -InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::ImportNetwork( - std::istream& networkModel, - const std::string& deviceName, - const std::map& config) { - auto compiled_model = import_model(networkModel, deviceName, any_copy(config)); - if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { - wrapper->get_executable_network()->loadedFromCache(); - } - return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; -} - -InferenceEngine::QueryNetworkResult ov::CoreImpl::QueryNetwork(const InferenceEngine::CNNNetwork& network, - const std::string& deviceName, - const std::map& config) const { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::QueryNetwork"); - InferenceEngine::QueryNetworkResult ret; - if (!network.getFunction()) { - ret.rc = InferenceEngine::GENERAL_ERROR; - return ret; - } - auto res = query_model(network.getFunction(), deviceName, any_copy(config)); - ret.supportedLayersMap = res; - - return ret; -} - -ov::Any ov::CoreImpl::GetMetric(const std::string& deviceName, - const std::string& name, - const ov::AnyMap& options) const { - // HETERO case - { - if (deviceName.find("HETERO:") == 0) { - IE_THROW() - << "You can get specific metrics with the GetMetric only for the HETERO itself (without devices). " - "To get individual devices's metrics call GetMetric for each device separately"; - } - } - - // MULTI case - { - if (deviceName.find("MULTI:") == 0) { - IE_THROW() - << "You can get specific metrics with the GetMetric only for the MULTI itself (without devices). " - "To get individual devices's metrics call GetMetric for each device separately"; - } - } - - // AUTO case - { - if (deviceName.find("AUTO:") == 0) { - IE_THROW() << "You can get specific metrics with the GetMetric only for the AUTO itself (without devices). " - "To get individual devices's metrics call GetMetric for each device separately"; - } - } - - // BATCH case - { - if (deviceName.find("BATCH:") == 0) { - IE_THROW() - << "You can get specific metrics with the GetMetric only for the BATCH itself (without devices). 
" - "To get individual devices's metrics call GetMetric for each device separately"; - } - } - - auto parsed = parseDeviceNameIntoConfig(deviceName, options); - return get_plugin(parsed._deviceName).get_property(name, parsed._config); -} - -ov::Any ov::CoreImpl::GetConfig(const std::string& deviceName, const std::string& name) const { - auto parsed = parseDeviceNameIntoConfig(deviceName); - return get_plugin(parsed._deviceName).get_property(name, parsed._config); -} - -std::vector ov::CoreImpl::GetAvailableDevices() const { - return get_available_devices(); -} - -bool ov::CoreImpl::DeviceSupportsModelCaching(const std::string& deviceName) const { - return device_supports_model_caching(deviceName); -} - -std::map ov::CoreImpl::GetSupportedConfig(const std::string& deviceName, - const std::map& configs) { - return ov::any_copy(get_supported_property(deviceName, any_copy(configs))); -} - -std::map ov::CoreImpl::GetVersions(const std::string& deviceName) const { - std::map versions; - std::vector deviceNames; - - { - // for compatibility with samples / demo - if (deviceName.find("HETERO") == 0) { - auto pos = deviceName.find_first_of(":"); - if (pos != std::string::npos) { - deviceNames = ov::DeviceIDParser::get_hetero_devices(deviceName.substr(pos + 1)); - } - deviceNames.push_back("HETERO"); - } else if (deviceName.find("MULTI") == 0) { - auto pos = deviceName.find_first_of(":"); - if (pos != std::string::npos) { - deviceNames = ov::DeviceIDParser::get_multi_devices(deviceName.substr(pos + 1)); - } - deviceNames.push_back("MULTI"); - } else if (deviceName.find("AUTO") == 0) { - auto pos = deviceName.find_first_of(":"); - if (pos != std::string::npos) { - deviceNames = ov::DeviceIDParser::get_multi_devices(deviceName.substr(pos + 1)); - } - deviceNames.emplace_back("AUTO"); - } else if (deviceName.find("BATCH") == 0) { - auto pos = deviceName.find_first_of(":"); - if (pos != std::string::npos) { - deviceNames = {ov::DeviceIDParser::get_batch_device(deviceName.substr(pos + 1))}; - } - deviceNames.push_back("BATCH"); - } else { - deviceNames.push_back(deviceName); - } - } - - for (auto&& deviceName_ : deviceNames) { - ov::DeviceIDParser parser(deviceName_); - std::string deviceNameLocal = parser.get_device_name(); - - try { - ov::Plugin cppPlugin = get_plugin(deviceNameLocal); - auto convertedPlugin = - ov::legacy_convert::convert_plugin(ov::SoPtr{cppPlugin.m_ptr, cppPlugin.m_so}); - versions[deviceNameLocal] = convertedPlugin->GetVersion(); - } catch (const ov::Exception& ex) { - std::string exception(ex.what()); - if (exception.find("not registered in the OpenVINO Runtime") == std::string::npos) { - throw; - } - } - } - - return versions; -} diff --git a/src/inference/src/dev/icompiled_model.cpp b/src/inference/src/dev/icompiled_model.cpp index 49d60e5268cb49..80d2491697af43 100644 --- a/src/inference/src/dev/icompiled_model.cpp +++ b/src/inference/src/dev/icompiled_model.cpp @@ -4,9 +4,9 @@ #include "openvino/runtime/icompiled_model.hpp" -#include "dev/converter_utils.hpp" -#include "icompiled_model_wrapper.hpp" #include "openvino/core/model.hpp" +#include "openvino/runtime/iasync_infer_request.hpp" +#include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/properties.hpp" #include "transformations/utils/utils.hpp" @@ -54,7 +54,7 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr& model auto new_param = ov::as_type_ptr(param->copy_with_new_inputs({})); new_param->set_friendly_name(param_name); if (add_operation_names) { - OPENVINO_ASSERT(!m_plugin->is_new_api() || 
leaf_names.find(param_name) == leaf_names.end() || + OPENVINO_ASSERT(leaf_names.find(param_name) == leaf_names.end() || param->output(0).get_names().find(param_name) != param->output(0).get_names().end(), "Model operation names have collisions with tensor names.", " Please use MO to generate new IR version, it should allow to avoid the issue"); @@ -84,7 +84,7 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr& model auto new_result = result->copy_with_new_inputs({fake_param}); new_result->set_friendly_name(result->get_friendly_name()); if (add_operation_names) { - OPENVINO_ASSERT(!m_plugin->is_new_api() || leaf_names.find(res_name) == leaf_names.end() || + OPENVINO_ASSERT(leaf_names.find(res_name) == leaf_names.end() || result->output(0).get_names().find(res_name) != result->output(0).get_names().end(), "Model operation names have collisions with tensor names.", " Please use MO to generate new IR version, it should allow to avoid the issue"); diff --git a/src/inference/src/dev/icompiled_model_wrapper.cpp b/src/inference/src/dev/icompiled_model_wrapper.cpp deleted file mode 100644 index 563c428cdf6c9e..00000000000000 --- a/src/inference/src/dev/icompiled_model_wrapper.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "icompiled_model_wrapper.hpp" - -#include "dev/converter_utils.hpp" -#include "openvino/core/except.hpp" - -InferenceEngine::ICompiledModelWrapper::ICompiledModelWrapper( - const std::shared_ptr& model) - : ov::ICompiledModel(nullptr, ov::legacy_convert::convert_plugin(model->_plugin), nullptr, nullptr), - m_model(model) { - std::vector> inputs, outputs; - for (const auto& input : m_model->getInputs()) { - inputs.emplace_back(input->output(0)); - } - for (const auto& output : m_model->getOutputs()) { - outputs.emplace_back(output->output(0)); - } - m_inputs = inputs; - m_outputs = outputs; -} - -std::shared_ptr InferenceEngine::ICompiledModelWrapper::create_infer_request() const { - auto infer_request = m_model->CreateInferRequest(); - infer_request->setPointerToSo(m_model->GetPointerToSo()); - return ov::legacy_convert::convert_infer_request(infer_request, m_model->_plugin->GetName())._ptr; -} - -void InferenceEngine::ICompiledModelWrapper::export_model(std::ostream& model) const { - try { - m_model->Export(model); - } catch (const InferenceEngine::NotImplemented& ex) { - OPENVINO_THROW_NOT_IMPLEMENTED(ex.what()); - } -} - -std::shared_ptr InferenceEngine::ICompiledModelWrapper::get_runtime_model() const { - return m_model->GetExecGraphInfo(); -} - -void InferenceEngine::ICompiledModelWrapper::set_property(const ov::AnyMap& properties) { - m_model->SetConfig(properties); -} - -ov::Any InferenceEngine::ICompiledModelWrapper::get_property(const std::string& name) const { - if (ov::loaded_from_cache == name) { - return m_model->isLoadedFromCache(); - } - return m_model->GetConfig(name); -} - -std::shared_ptr -InferenceEngine::ICompiledModelWrapper::get_executable_network() { - return m_model; -} - -std::shared_ptr -InferenceEngine::ICompiledModelWrapper::get_executable_network() const { - return m_model; -} diff --git a/src/inference/src/dev/icompiled_model_wrapper.hpp b/src/inference/src/dev/icompiled_model_wrapper.hpp deleted file mode 100644 index 09c4fbb8dafd92..00000000000000 --- a/src/inference/src/dev/icompiled_model_wrapper.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
"cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "dev/converter_utils.hpp" -#include "openvino/runtime/icompiled_model.hpp" - -namespace InferenceEngine { - -class ICompiledModelWrapper : public ov::ICompiledModel { -public: - ICompiledModelWrapper(const std::shared_ptr& model); - virtual ~ICompiledModelWrapper() = default; - std::shared_ptr create_infer_request() const override; - - void export_model(std::ostream& model) const override; - - std::shared_ptr get_runtime_model() const override; - - void set_property(const ov::AnyMap& properties) override; - - ov::Any get_property(const std::string& name) const override; - - std::shared_ptr get_executable_network(); - std::shared_ptr get_executable_network() const; - -private: - std::shared_ptr m_model; - - std::shared_ptr create_sync_infer_request() const override { - OPENVINO_NOT_IMPLEMENTED; - } -}; -} // namespace InferenceEngine diff --git a/src/inference/src/dev/iplugin.cpp b/src/inference/src/dev/iplugin.cpp index 3e93a4556c107c..0b099d2326e518 100644 --- a/src/inference/src/dev/iplugin.cpp +++ b/src/inference/src/dev/iplugin.cpp @@ -32,7 +32,7 @@ std::unordered_set get_removed_nodes(const std::shared_ptr& core) { OPENVINO_ASSERT(!core.expired()); m_core = core; auto locked_core = m_core.lock(); - if (locked_core) - m_is_new_api = locked_core->is_new_api(); } std::shared_ptr ov::IPlugin::get_core() const { return m_core.lock(); } -bool ov::IPlugin::is_new_api() const { - return m_is_new_api; -} - const std::shared_ptr& ov::IPlugin::get_executor_manager() const { return m_executor_manager; } diff --git a/src/inference/src/dev/iplugin_wrapper.cpp b/src/inference/src/dev/iplugin_wrapper.cpp deleted file mode 100644 index 1dc493293029a2..00000000000000 --- a/src/inference/src/dev/iplugin_wrapper.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "iplugin_wrapper.hpp" - -#include - -#include "any_copy.hpp" -#include "dev/converter_utils.hpp" -#include "ie_icore.hpp" -#include "openvino/runtime/iremote_context.hpp" -#include "openvino/runtime/threading/executor_manager.hpp" - -namespace InferenceEngine { - -IPluginWrapper::IPluginWrapper(const std::shared_ptr& ptr) : m_old_plugin(ptr) { - OPENVINO_ASSERT(m_old_plugin); - auto& ver = m_old_plugin->GetVersion(); - m_version.buildNumber = ver.buildNumber; - m_version.description = ver.description; - m_plugin_name = m_old_plugin->GetName(); - m_is_new_api = m_old_plugin->IsNewAPI(); - m_core = m_old_plugin->GetCore(); - m_executor_manager = m_old_plugin->executorManager(); -} - -const std::shared_ptr& IPluginWrapper::update_exec_network( - const std::shared_ptr& network) const { - network->SetPointerToPlugin(m_old_plugin); - if (!network->GetPointerToSo()) - network->_so = m_so; - - return network; -} - -std::shared_ptr IPluginWrapper::compile_model(const std::shared_ptr& model, - const ov::AnyMap& properties) const { - auto exec_network = - m_old_plugin->LoadNetwork(ov::legacy_convert::convert_model(model, is_new_api()), ov::any_copy(properties)); - return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network))._ptr; -} - -std::shared_ptr IPluginWrapper::compile_model(const std::string& model_path, - const ov::AnyMap& properties) const { - auto exec_network = m_old_plugin->LoadNetwork(model_path, any_copy(properties)); - return ov::legacy_convert::convert_compiled_model(update_exec_network(exec_network._ptr))._ptr; -} - -std::shared_ptr 
IPluginWrapper::compile_model(const std::shared_ptr& model, - const ov::AnyMap& properties, - const ov::SoPtr& context) const { - OPENVINO_NOT_IMPLEMENTED; -} - -void IPluginWrapper::set_property(const ov::AnyMap& properties) { - m_old_plugin->SetProperties(properties); -} - -ov::Any IPluginWrapper::get_property(const std::string& name, const ov::AnyMap& arguments) const { - try { - return m_old_plugin->GetConfig(name, arguments); - } catch (...) { - return m_old_plugin->GetMetric(name, arguments); - } -} - -ov::SoPtr IPluginWrapper::create_context(const ov::AnyMap& remote_properties) const { - OPENVINO_NOT_IMPLEMENTED; -} - -ov::SoPtr IPluginWrapper::get_default_context(const ov::AnyMap& remote_properties) const { - OPENVINO_NOT_IMPLEMENTED; -} - -std::shared_ptr IPluginWrapper::import_model(std::istream& model, - const ov::AnyMap& properties) const { - return ov::legacy_convert::convert_compiled_model( - update_exec_network(m_old_plugin->ImportNetwork(model, any_copy(properties)))) - ._ptr; -} - -std::shared_ptr IPluginWrapper::import_model(std::istream& model, - const ov::SoPtr& context, - const ov::AnyMap& properties) const { - OPENVINO_NOT_IMPLEMENTED; -} - -ov::SupportedOpsMap IPluginWrapper::query_model(const std::shared_ptr& model, - const ov::AnyMap& properties) const { - auto res = m_old_plugin->QueryNetwork(ov::legacy_convert::convert_model(model, is_new_api()), any_copy(properties)); - OPENVINO_ASSERT(res.rc == InferenceEngine::OK, res.resp.msg); - return res.supportedLayersMap; -} - -const std::shared_ptr& IPluginWrapper::get_plugin() const { - return m_old_plugin; -} - -void IPluginWrapper::set_core(const std::weak_ptr& core) { - auto locked_core = core.lock(); - auto old_core = std::dynamic_pointer_cast(locked_core); - if (old_core) - m_old_plugin->SetCore(old_core); - m_core = core; -} - -void IPluginWrapper::set_device_name(const std::string& device_name) { - m_plugin_name = device_name; - m_old_plugin->SetName(device_name); -} - -void IPluginWrapper::set_shared_object(const std::shared_ptr& so) { - m_so = so; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/dev/iplugin_wrapper.hpp b/src/inference/src/dev/iplugin_wrapper.hpp deleted file mode 100644 index 5aeada9621ab47..00000000000000 --- a/src/inference/src/dev/iplugin_wrapper.hpp +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "openvino/runtime/iplugin.hpp" - -namespace InferenceEngine { - -/** - * @brief Class wraps InferenceEngine::IInferencePlugin into ov::IPlugin - */ -class IPluginWrapper : public ov::IPlugin { -public: - /** - * @brief Constructs Plugin wrapper - * - * @param ptr shared pointer to InferenceEngine::IInferencePlugin - */ - IPluginWrapper(const std::shared_ptr& ptr); - - /** - * @brief Destructor - */ - virtual ~IPluginWrapper() = default; - - /** - * @brief Create compiled model based on model and properties - * - * @param model OpenVINO Model representation - * @param properties configurations for compiled model - * - * @return shared pointer to compiled model interface - */ - std::shared_ptr compile_model(const std::shared_ptr& model, - const ov::AnyMap& properties) const override; - - /** - * @brief Create compiled model based on model and properties - * - * @param model_path Path to the model - * @param properties configurations for compiled model - * - * @return shared pointer to compiled model interface - */ - 
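
With the wrapper removed, device plugins derive from ov::IPlugin directly and implement the 2.0 entry points themselves instead of routing through InferenceEngine::IInferencePlugin. A declaration-only sketch of that shape, mirroring the overrides visible in the deleted wrapper above; "MyPlugin" is hypothetical and the remote-context overloads (also pure virtual on ov::IPlugin) are omitted, so the class stays abstract:

    #include "openvino/runtime/iplugin.hpp"

    // Hypothetical device plugin built directly on the 2.0 plugin interface.
    class MyPlugin : public ov::IPlugin {
    public:
        std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties) const override;
        void set_property(const ov::AnyMap& properties) override;
        ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override;
        ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                        const ov::AnyMap& properties) const override;
        std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                         const ov::AnyMap& properties) const override;
    };
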
std::shared_ptr compile_model(const std::string& model_path, - const ov::AnyMap& properties) const override; - - /** - * @brief Create compiled model based on model and properties - * - * @param model OpenVINO Model representation - * @param properties configurations for compiled model - * @param context remote context - * - * @return shared pointer to compiled model interface - */ - std::shared_ptr compile_model(const std::shared_ptr& model, - const ov::AnyMap& properties, - const ov::SoPtr& context) const override; - - /** - * @brief Specifies some plugin properties - * - * @param properties map with configuration properties - */ - void set_property(const ov::AnyMap& properties) override; - - /** - * @brief Returns the property - * - * @param name property name - * @param arguments configuration parameters - * - * @return ov::Any object which contains property value - */ - ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override; - - /** - * @brief Create remote context - * - * @param remote_properties configuration parameters - * - * @return Remote context - */ - ov::SoPtr create_context(const ov::AnyMap& remote_properties) const override; - - /** - * @brief Create default remote context - * - * @param remote_properties configuration parameters - * - * @return Remote context - */ - ov::SoPtr get_default_context(const ov::AnyMap& remote_properties) const override; - - /** - * @brief Import model to the plugin - * - * @param model strim with the model - * @param properties configuration properties - * - * @return shared pointer to compiled model interface - */ - std::shared_ptr import_model(std::istream& model, const ov::AnyMap& properties) const override; - - /** - * @brief Import model to the plugin - * - * @param model strim with the model - * @param context remote context - * @param properties configuration properties - * - * @return shared pointer to compiled model interface - */ - std::shared_ptr import_model(std::istream& model, - const ov::SoPtr& context, - const ov::AnyMap& properties) const override; - - /** - * @brief query model - * - * @param model OpenVINO Model - * @param properties configuration properties - * - * @return Map of supported operations - */ - ov::SupportedOpsMap query_model(const std::shared_ptr& model, - const ov::AnyMap& properties) const override; - - /** - * @brief Returns the instance of the legacy plugin - * - * @return Legacy InferenceEngine::IInferencePlugin object - */ - const std::shared_ptr& get_plugin() const; - - /** - * @brief Set core interface to the plugin - * This method works under the non-virtual method of IPlugin class - * - * @param core OpenVINO Core interface - */ - void set_core(const std::weak_ptr& core); - - /** - * @brief Set plugin name for the wrapper and legacy plugin - * This method works under the non-virtual method of IPlugin class - * - * @param device_name The name of plugin - */ - void set_device_name(const std::string& device_name); - - void set_shared_object(const std::shared_ptr& so); - -private: - std::shared_ptr m_old_plugin; - std::shared_ptr m_so; - - const std::shared_ptr& update_exec_network( - const std::shared_ptr& network) const; -}; - -} // namespace InferenceEngine diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 9f7ea21f1dc79e..e34497749ad98e 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -6,8 +6,6 @@ #include -#include "blob_factory.hpp" -#include "ie_ngraph_utils.hpp" #include 
"openvino/runtime/iremote_tensor.hpp" #include "openvino/runtime/properties.hpp" #ifdef PROXY_PLUGIN_ENABLED @@ -352,232 +350,6 @@ std::shared_ptr make_tensor(const std::shared_ptr& other, return std::make_shared(other, begin, end); } -/** - * @brief Tensor what contains InferenceEngine::Blob inside - * Blob owns the memory - */ -class BlobTensor : public ITensor { - mutable element::Type m_type; - mutable Shape m_shape; - mutable Strides m_strides; - - void update_strides() { - if (get_element_type().bitwidth() >= 8) { - const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides(); - const size_t elem_size = get_element_type().size(); - m_strides.clear(); - m_strides.resize(element_strides.size()); - std::transform(element_strides.begin(), - element_strides.end(), - m_strides.begin(), - [&elem_size](size_t stride) { - return stride * elem_size; - }); - } - } - -public: - std::shared_ptr blob; - - BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} { - OPENVINO_ASSERT(blob); - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - update_strides(); - } - - const element::Type& get_element_type() const override { - m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision()); - return m_type; - } - - void set_shape(ov::Shape shape) override { - blob->setShape({shape.begin(), shape.end()}); - update_strides(); - } - - const Shape& get_shape() const override { - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - return m_shape; - } - - const Strides& get_strides() const override { - OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, - "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ", - get_element_type()); - return m_strides; - } - - size_t get_size() const override { - return blob->size(); - } - - size_t get_byte_size() const override { - return blob->byteSize(); - } - - void* data(const element::Type& element_type) const override { - OPENVINO_ASSERT(blob != nullptr, "Tensor was not initialized."); -#define TYPE_CHECK(TYPE) (dynamic_cast*>(blob.get()) != nullptr) - auto host_accesable_implementation = TYPE_CHECK(bool) || TYPE_CHECK(int8_t) || TYPE_CHECK(uint8_t) || - TYPE_CHECK(int16_t) || TYPE_CHECK(uint16_t) || TYPE_CHECK(int32_t) || - TYPE_CHECK(uint32_t) || TYPE_CHECK(int64_t) || TYPE_CHECK(uint64_t) || - TYPE_CHECK(float) || TYPE_CHECK(double); -#undef TYPE_CHECK - OPENVINO_ASSERT(host_accesable_implementation, - "Tensor implementation type dose not contains host accessable data"); - if (element_type != element::undefined && element_type.is_static()) { - OPENVINO_ASSERT(element_type == get_element_type(), - "Tensor data with element type ", - get_element_type(), - ", is not representable as pointer to ", - element_type); - } - // since we don't use byte offsets, we need to explicitly multiply by element_size - auto byte_offset = blob->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size(); - OPENVINO_ASSERT((get_element_type().bitwidth() >= 8) || (byte_offset == 0), - "ROI access for types with bitwidths less then 8 bit is not implemented. 
Tensor type: ", - get_element_type()); - return byte_offset + InferenceEngine::as(blob)->rmap().as(); - } -}; - -/** - * @brief Create InferenceEngine::TBlob from the tensor - * - * @tparam T Blob data type - */ -template -class TensorMemoryBlob : public InferenceEngine::TBlob { -public: - ~TensorMemoryBlob() override = default; - explicit TensorMemoryBlob(const ov::SoPtr& tensor_, InferenceEngine::TensorDesc desc) try : InferenceEngine - ::TBlob{desc, static_cast(tensor_->data()), tensor_->get_byte_size()}, tensor{tensor_} { - OPENVINO_ASSERT(!std::dynamic_pointer_cast(tensor._ptr)); - } - catch (const std::exception& ex) { - OPENVINO_THROW(ex.what()); - } - - void setShape(const InferenceEngine::SizeVector& dims) override { - tensor->set_shape(dims); - InferenceEngine::TBlob::getTensorDesc().setDims(dims); - allocate(); - } - - void allocate() noexcept override { - if ((void*)InferenceEngine::TBlob::buffer() != tensor->data()) { - InferenceEngine::TBlob::_allocator = - InferenceEngine::details::make_pre_allocator(static_cast(tensor->data()), tensor->get_byte_size()); - InferenceEngine::TBlob::allocate(); - } - } - - ov::SoPtr tensor; -}; - -ov::SoPtr make_tensor(const std::shared_ptr& blob, bool unwrap) { -#define ELSE_IF(type) \ - else if (auto tblob = dynamic_cast*>(blob.get())) { \ - return tblob->tensor; \ - } - if (blob == nullptr) { - return {}; - } - ELSE_IF(float) - ELSE_IF(double) - ELSE_IF(int8_t) - ELSE_IF(int8_t) - ELSE_IF(int16_t) - ELSE_IF(int32_t) - ELSE_IF(int64_t) - ELSE_IF(uint8_t) - ELSE_IF(uint8_t) - ELSE_IF(uint16_t) - ELSE_IF(uint32_t) - ELSE_IF(uint64_t) - ELSE_IF(int8_t) - ELSE_IF(bool) else { - return {std::make_shared(blob), nullptr}; - } -#undef IF -} - -InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, - bool unwrap, - InferenceEngine::TensorDesc desc) { - auto create_desc = [](const ov::SoPtr& tensor, - const InferenceEngine::TensorDesc& desc) -> InferenceEngine::TensorDesc { - if (desc.getLayout() != InferenceEngine::ANY || - desc.getPrecision() != InferenceEngine::Precision::UNSPECIFIED) { - return desc; - } - auto element_type = tensor->get_element_type(); - auto shape = tensor->get_shape(); - InferenceEngine::SizeVector blk_order(shape.size()); - std::iota(blk_order.begin(), blk_order.end(), 0); - InferenceEngine::SizeVector dim_offset(shape.size(), 0); - InferenceEngine::SizeVector blk_strides; - auto byte_strides = element_type.bitwidth() >= 8 ? tensor->get_strides() : Strides{}; - if (byte_strides.empty()) { - blk_strides = ov::row_major_strides(shape); - } else { - blk_strides.resize(byte_strides.size()); - std::transform(byte_strides.begin(), - byte_strides.end(), - blk_strides.begin(), - [&element_type](size_t byte_stride) { - OPENVINO_ASSERT(byte_stride % element_type.size() == 0, - "Limitation: Stride in bytes ", - byte_stride, - " should be divisible by size of element ", - element_type.size()); - return byte_stride / element_type.size(); - }); - } - return InferenceEngine::TensorDesc{InferenceEngine::details::convertPrecision(element_type), - shape, - InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; - }; -#ifdef PROXY_PLUGIN_ENABLED - const auto& tensor = unwrap ? 
ov::proxy::get_hardware_tensor(orig_tensor) : orig_tensor; -#else - const auto& tensor = orig_tensor; -#endif - if (tensor == nullptr) { - return {}; - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob; - } else { -#define CASE(precision, T) \ - case element::precision: \ - return std::make_shared>(tensor, create_desc(tensor, desc)); - switch (tensor->get_element_type()) { - CASE(f32, float); - CASE(f64, double); - CASE(i4, int8_t); - CASE(i8, int8_t); - CASE(i16, int16_t); - CASE(i32, int32_t); - CASE(i64, int64_t); - CASE(u4, uint8_t); - CASE(u8, uint8_t); - CASE(u16, uint16_t); - CASE(u32, uint32_t); - CASE(u64, uint64_t); - CASE(u1, int8_t); - CASE(boolean, bool); - case element::f16: - return std::make_shared>(tensor, create_desc(tensor, desc)); - case element::bf16: - return std::make_shared>(tensor, create_desc(tensor, desc)); - default: - OPENVINO_THROW("Unsupported element type"); - } -#undef CASE - } - OPENVINO_THROW("Cannot convert tensor to blob!"); -} // namespace ov - namespace util { ov::Tensor make_tensor(const std::shared_ptr& tensor, const std::shared_ptr& so) { diff --git a/src/inference/src/dev/plugin.cpp b/src/inference/src/dev/plugin.cpp index f8bfb69250f6a2..66c75ddd7e7e02 100644 --- a/src/inference/src/dev/plugin.cpp +++ b/src/inference/src/dev/plugin.cpp @@ -6,8 +6,6 @@ #include -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "iplugin_wrapper.hpp" #include "openvino/runtime/internal_properties.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/util/common_util.hpp" @@ -32,11 +30,7 @@ ov::Plugin::Plugin(const std::shared_ptr& ptr, const std::shared_pt } void ov::Plugin::set_name(const std::string& deviceName) { - OV_PLUGIN_CALL_STATEMENT({ - m_ptr->set_device_name(deviceName); - if (auto wrapper = std::dynamic_pointer_cast(m_ptr)) - wrapper->set_device_name(deviceName); - }); + OV_PLUGIN_CALL_STATEMENT({ m_ptr->set_device_name(deviceName); }); } const std::string& ov::Plugin::get_name() const { @@ -44,11 +38,7 @@ const std::string& ov::Plugin::get_name() const { } void ov::Plugin::set_core(std::weak_ptr core) { - OV_PLUGIN_CALL_STATEMENT({ - m_ptr->set_core(core); - if (auto wrapper = std::dynamic_pointer_cast(m_ptr)) - wrapper->set_core(core); - }); + OV_PLUGIN_CALL_STATEMENT({ m_ptr->set_core(core); }); } const ov::Version ov::Plugin::get_version() const { diff --git a/src/inference/src/ie_blob_common.cpp b/src/inference/src/ie_blob_common.cpp deleted file mode 100644 index eb26b055a597bd..00000000000000 --- a/src/inference/src/ie_blob_common.cpp +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "ie_blob.h" -#include "openvino/runtime/make_tensor.hpp" -#include "system_allocator.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -void Blob::setShape(const SizeVector& dims) { - // we don't want to allow setShape for: - // 1. ROI cases - { - size_t denseStride = 1; - const auto& blockedDims = getTensorDesc().getBlockingDesc().getBlockDims(); - const auto& strides = getTensorDesc().getBlockingDesc().getStrides(); - - for (size_t i = 1; i <= strides.size(); i++) { - if (denseStride != strides[strides.size() - i]) { - IE_THROW() << "Blob::setShape requires dense blob"; - } - denseStride *= blockedDims[blockedDims.size() - i]; - } - } - - if (properProduct(dims) > properProduct(getTensorDesc().getDims())) { - // 2. 
Blobs created on top of preallocated memory - if (std::dynamic_pointer_cast(getAllocator())) { - IE_THROW() - << "Cannot call setShape for Blobs created on top of preallocated memory if shape was increased."; - } - // New blob shape requires more memory than old one -- reallocate - if (!deallocate()) { - IE_THROW() << "Cannot deallocate blob while an attempt to enlarge blob area in setShape."; - } - - // Old and new ranks should match as well as layouts - getTensorDesc().setDims(dims); - - allocate(); - // no way to detect if allocation is successful other than map/unmap - // that we wouldn't like to do here; but for cases when we use SystemMemoryAllocator - // we can do it - if (std::dynamic_pointer_cast(getAllocator())) { - if (buffer() == nullptr) { - IE_THROW() << "Failed to allocate memory in Blob::setShape"; - } - } - } else { - // Don't shrink area when new size fit the existing area - getTensorDesc().setDims(dims); - } -} - -Blob::Ptr Blob::createROI(const ROI& roi) const { - if (getTensorDesc().getLayout() == Layout::NCHW || getTensorDesc().getLayout() == Layout::NHWC) { - return createROI({roi.id, 0, roi.posY, roi.posX}, - {roi.id + 1, getTensorDesc().getDims()[1], roi.posY + roi.sizeY, roi.posX + roi.sizeX}); - } - IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob"; -} - -Blob::Ptr Blob::createROI(const std::vector& begin, const std::vector& end) const { - IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob or roi"; -} - -Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi) { - return inputBlob->createROI(roi); -} - -Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, - const std::vector& begin, - const std::vector& end) { - return inputBlob->createROI(begin, end); -} - -// -// RTTI -// - -Blob::~Blob() {} -MemoryBlob::~MemoryBlob() {} - -#ifndef _WIN32 -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -template class TBlob; -#endif - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_common.cpp b/src/inference/src/ie_common.cpp deleted file mode 100644 index b15e49a9d8b371..00000000000000 --- a/src/inference/src/ie_common.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_common.h" - -#include -#include -#include -#include -#include -#include - -#include "ie_blob.h" -#include "openvino/core/except.hpp" -#include "openvino/runtime/exception.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -namespace details { - -void Rethrow() { - try { - throw; - } catch (const ov::NotImplemented& e) { - IE_THROW(NotImplemented) << e.what(); - } catch (const InferenceEngine::GeneralError& e) { - throw e; - } catch (const InferenceEngine::NotImplemented& e) { - throw e; - } catch (const InferenceEngine::NetworkNotLoaded& e) { - throw e; - } catch (const InferenceEngine::ParameterMismatch& e) { - throw e; - } catch (const InferenceEngine::NotFound& e) { - throw e; - } catch (const InferenceEngine::OutOfBounds& e) { - throw e; - } catch (const InferenceEngine::Unexpected& e) { - throw e; - } catch (const InferenceEngine::RequestBusy& e) { - throw e; - } catch (const InferenceEngine::ResultNotReady& e) { - throw e; - } catch 
(const InferenceEngine::NotAllocated& e) { - throw e; - } catch (const InferenceEngine::InferNotStarted& e) { - throw e; - } catch (const InferenceEngine::NetworkNotRead& e) { - throw e; - } catch (const InferenceEngine::InferCancelled& e) { - throw e; - } catch (const ov::Cancelled& e) { - IE_THROW(InferCancelled) << e.what(); - } catch (const std::exception& e) { - IE_THROW() << e.what(); - } catch (...) { - IE_THROW(Unexpected); - } -} - -IE_SUPPRESS_DEPRECATED_START - -StatusCode InferenceEngineException::getStatus() const { - if (dynamic_cast(this) != nullptr || dynamic_cast(this) != nullptr) { - return GENERAL_ERROR; - } else if (dynamic_cast(this) != nullptr || - dynamic_cast(this) != nullptr) { - return NOT_IMPLEMENTED; - } else if (dynamic_cast(this) != nullptr) { - return NETWORK_NOT_LOADED; - } else if (dynamic_cast(this) != nullptr) { - return PARAMETER_MISMATCH; - } else if (dynamic_cast(this) != nullptr) { - return NOT_FOUND; - } else if (dynamic_cast(this) != nullptr) { - return OUT_OF_BOUNDS; - } else if (dynamic_cast(this) != nullptr) { - return UNEXPECTED; - } else if (dynamic_cast(this) != nullptr) { - return REQUEST_BUSY; - } else if (dynamic_cast(this) != nullptr) { - return RESULT_NOT_READY; - } else if (dynamic_cast(this) != nullptr) { - return NOT_ALLOCATED; - } else if (dynamic_cast(this) != nullptr) { - return INFER_NOT_STARTED; - } else if (dynamic_cast(this) != nullptr) { - return NETWORK_NOT_READ; - } else if (dynamic_cast(this) != nullptr) { - return INFER_CANCELLED; - } else { - assert(!"Unreachable"); - return OK; - } -} -} // namespace details -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp deleted file mode 100644 index 3073ae4f65be06..00000000000000 --- a/src/inference/src/ie_core.cpp +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_core.hpp" - -#include - -#include -#include -#include -#include -#include - -#include "any_copy.hpp" -#include "cache_guard.hpp" -#include "check_network_batchable.hpp" -#include "cnn_network_ngraph_impl.hpp" -#include "compilation_context.hpp" -#include "cpp/ie_cnn_network.h" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "dev/converter_utils.hpp" -#include "dev/core_impl.hpp" -#include "ie_cache_manager.hpp" -#include "ie_icore.hpp" -#include "ie_network_reader.hpp" -#include "ie_ngraph_utils.hpp" -#include "itt.hpp" -#include "openvino/core/except.hpp" -#include "openvino/core/so_extension.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/result.hpp" -#include "openvino/runtime/compiled_model.hpp" -#include "openvino/runtime/core.hpp" -#include "openvino/runtime/device_id_parser.hpp" -#include "openvino/runtime/threading/executor_manager.hpp" -#include "openvino/util/common_util.hpp" -#include "openvino/util/file_util.hpp" -#include "openvino/util/shared_object.hpp" -#include "openvino/util/xml_parse_utils.hpp" - -using namespace InferenceEngine; -using namespace std::placeholders; - -namespace { - -std::tuple CheckStatic(const InferenceEngine::CNNNetwork& network) { - bool res = true; - std::stringstream errMsg; - auto model = network.getFunction(); - if (model) { - for (const auto& input : model->inputs()) { - if (input.get_partial_shape().is_dynamic()) { - errMsg << "{ input:'"; - for (const auto& name : input.get_names()) { - errMsg << name << ","; - } - if (auto node = 
input.get_node_shared_ptr()) { - errMsg << node->get_friendly_name(); - } - errMsg << "', shape=" << input.get_partial_shape() << "} "; - res = false; - } - } - } - return {res, errMsg.str()}; -} -} // namespace - -namespace InferenceEngine { - -class Core::Impl : public ov::CoreImpl { -public: - Impl() : ov::CoreImpl(false) {} -}; - -Core::Core(const std::string& xmlConfigFile) { - _impl = std::make_shared(); - - try { - std::string xmlConfigFile_ = ov::find_plugins_xml(xmlConfigFile); - if (!xmlConfigFile_.empty()) - // If XML is default, load default plugins by absolute paths - _impl->register_plugins_in_registry(xmlConfigFile_, xmlConfigFile.empty()); - // Load plugins from pre-compiled list - _impl->register_compile_time_plugins(); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -std::map Core::GetVersions(const std::string& deviceName) const { - return _impl->GetVersions(deviceName); -} - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - -CNNNetwork Core::ReadNetwork(const std::wstring& modelPath, const std::wstring& binPath) const { - try { - return ReadNetwork(ov::util::wstring_to_string(modelPath), ov::util::wstring_to_string(binPath)); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -#endif - -CNNNetwork Core::ReadNetwork(const std::string& modelPath, const std::string& binPath) const { - try { - return _impl->ReadNetwork(modelPath, binPath); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -CNNNetwork Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights) const { - try { - return _impl->ReadNetwork(model, weights); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, const std::map& config) { - try { - return LoadNetwork(network, ov::DEFAULT_DEVICE_NAME, config); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config) { - auto valid = ::CheckStatic(network); - try { - OPENVINO_ASSERT(std::get<0>(valid), - "InferenceEngine::Core::LoadNetwork doesn't support inputs having dynamic shapes. ", - "Use ov::Core::compile_model API instead. Dynamic inputs are :", - std::get<1>(valid)); - auto exec = _impl->LoadNetwork(network, deviceName, config); - return {exec._ptr, exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, - const std::string& deviceName, - const std::map& config) { - try { - auto exec = _impl->LoadNetwork(modelPath, deviceName, config, [](const CNNNetwork& network) { - auto valid = ::CheckStatic(network); - OPENVINO_ASSERT(std::get<0>(valid), - "InferenceEngine::Core::LoadNetwork doesn't support inputs having dynamic shapes. ", - "Use ov::Core::compile_model API instead. 
Dynamic inputs are :", - std::get<1>(valid)); - }); - return {exec._ptr, exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, const std::map& config) { - try { - return LoadNetwork(modelPath, ov::DEFAULT_DEVICE_NAME, config); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::ImportNetwork(const std::string& modelFileName, - const std::string& deviceName, - const std::map& config) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::ImportNetwork"); - auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config)); - std::ifstream modelStream(modelFileName, std::ios::binary); - if (!modelStream.is_open()) - IE_THROW(NetworkNotRead) << "Model file " << modelFileName << " cannot be opened!"; - try { - auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, parsed._config); - return {ov::legacy_convert::convert_compiled_model(exec), exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::ImportNetwork(std::istream& networkModel, - const std::string& deviceName, - const std::map& config) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::ImportNetwork"); - try { - auto exec = _impl->ImportNetwork(networkModel, deviceName, config); - return {exec._ptr, exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ExecutableNetwork Core::ImportNetwork(std::istream& networkModel) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::ImportNetwork"); - - using ExportMagic = std::array; - constexpr static const ExportMagic exportMagic = {{0x1, 0xE, 0xE, 0x1}}; - - std::string deviceName; - ExportMagic magic = {}; - auto currentPos = networkModel.tellg(); - networkModel.read(magic.data(), magic.size()); - if (exportMagic == magic) { - std::getline(networkModel, deviceName); - } else { - IE_THROW() << "Passed compiled stream does not contain device name. " - "Please, provide device name manually"; - } - networkModel.seekg(currentPos, networkModel.beg); - - try { - auto exec = _impl->get_plugin(deviceName).import_model(networkModel, {}); - return {ov::legacy_convert::convert_compiled_model(exec), exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -QueryNetworkResult Core::QueryNetwork(const CNNNetwork& network, - const std::string& deviceName, - const std::map& config) const { - try { - auto valid = ::CheckStatic(network); - OPENVINO_ASSERT(std::get<0>(valid), - "InferenceEngine::Core::QueryNetwork doesn't support inputs having dynamic shapes. ", - "Use ov::Core::compile_model API instead. Dynamic inputs are :", - std::get<1>(valid)); - - return _impl->QueryNetwork(network, deviceName, config); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -void Core::SetConfig(const std::map& config, const std::string& deviceName) { - // HETERO case - if (deviceName.find("HETERO:") == 0) { - IE_THROW() << "SetConfig is supported only for HETERO itself (without devices). " - "You can configure the devices with SetConfig before creating the HETERO on top."; - } - - // MULTI case - if (deviceName.find("MULTI:") == 0) { - IE_THROW() << "SetConfig is supported only for MULTI itself (without devices). 
" - "You can configure the devices with SetConfig before creating the MULTI on top."; - } - - // AUTO case - if (deviceName.find("AUTO:") == 0) { - IE_THROW() << "SetConfig is supported only for AUTO itself (without devices). " - "You can configure the devices with SetConfig before creating the AUTO on top."; - } - - ov::AnyMap conf = ov::any_copy(config); - try { - if (deviceName.empty()) { - _impl->set_property_for_device(conf, std::string()); - } else { - _impl->set_property_for_device(conf, deviceName); - } - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ov::Any Core::GetConfig(const std::string& deviceName, const std::string& name) const { - // HETERO case - { - if (deviceName.find("HETERO:") == 0) { - IE_THROW() << "You can only GetConfig of the HETERO itself (without devices). " - "GetConfig is also possible for the individual devices before creating the HETERO on top."; - } - } - // MULTI case - { - if (deviceName.find("MULTI:") == 0) { - IE_THROW() << "You can only GetConfig of the MULTI itself (without devices). " - "GetConfig is also possible for the individual devices before creating the MULTI on top."; - } - } - // AUTO case - { - if (deviceName.find("AUTO:") == 0) { - IE_THROW() << "You can only GetConfig of the AUTO itself (without devices). " - "GetConfig is also possible for the individual devices before creating the AUTO on top."; - } - } - - if (name == ov::force_tbb_terminate.name()) { - return ov::threading::executor_manager()->get_property(ov::force_tbb_terminate.name()).as(); - } - - try { - auto parsed = ov::parseDeviceNameIntoConfig(deviceName); - return _impl->get_plugin(parsed._deviceName).get_property(name, parsed._config); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -ov::Any Core::GetMetric(const std::string& deviceName, const std::string& name, const ov::AnyMap& options) const { - try { - return _impl->GetMetric(deviceName, name, options); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -std::vector Core::GetAvailableDevices() const { - try { - return _impl->GetAvailableDevices(); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -void Core::RegisterPlugin(const std::string& pluginName, const std::string& deviceName) { - try { - _impl->register_plugin(pluginName, deviceName, {}); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -void Core::RegisterPlugins(const std::string& xmlConfigFile) { - try { - _impl->register_plugins_in_registry(xmlConfigFile); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -void Core::UnregisterPlugin(const std::string& deviceName_) { - try { - ov::DeviceIDParser parser(deviceName_); - std::string deviceName = parser.get_device_name(); - - _impl->unload_plugin(deviceName); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what() << std::endl; - } -} - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_data.cpp b/src/inference/src/ie_data.cpp deleted file mode 100644 index 3ce53ce286dbf4..00000000000000 --- a/src/inference/src/ie_data.cpp +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "blob_factory.hpp" -#include "cnn_network_ngraph_impl.hpp" -#include "ie_api.h" -#include "ie_common.h" - -using namespace InferenceEngine; - -Blob::Ptr Blob::CreateFromData(const DataPtr& data) { - // TODO Here some decision should be made about the layout. 
- // For now we just pass the layout and use conversion to NCHW for ANY. - InferenceEngine::Layout targetLayout = data->getLayout(); - if (data->getLayout() == InferenceEngine::Layout::ANY) { - targetLayout = InferenceEngine::Layout::NCHW; - } - - InferenceEngine::TensorDesc desc(data->getPrecision(), data->getTensorDesc().getDims(), targetLayout); - - switch (data->getPrecision()) { - case InferenceEngine::Precision::FP32: - return std::make_shared>(desc); - case InferenceEngine::Precision::Q78: - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::FP16: - return std::make_shared>(desc); - case InferenceEngine::Precision::U8: - return std::make_shared>(desc); - case InferenceEngine::Precision::I8: - return std::make_shared>(desc); - case InferenceEngine::Precision::I32: - return std::make_shared>(desc); - case InferenceEngine::Precision::BF16: - return std::make_shared>(desc); - default: - IE_THROW() << "precision is no set"; - } -} - -namespace InferenceEngine { - -class CNNLayer; - -/** - * @brief A smart pointer to the CNNLayer - */ -using CNNLayerPtr = std::shared_ptr; -/** - * @brief A smart weak pointer to the CNNLayer - */ -using CNNLayerWeakPtr = std::weak_ptr; - -} // namespace InferenceEngine - -class Data::Impl { -public: - /** - * @brief A pointer to the layer that creates this data element, null for input data elements - */ - CNNLayerWeakPtr creatorLayer; - - /** - * @brief A map of layers that use this node as input. - * It is useful for recursive NN graph traversal. - */ - std::map inputTo; - - ov::PartialShape pShape; -}; - -Data::Data(const std::string& name, Precision _precision, Layout layout) - : name(name), - userObject({0}), - tensorDesc(_precision, layout) { - _impl = std::make_shared(); -} - -Data::Data(const std::string& name, const TensorDesc& desc) : name(name), userObject({0}), tensorDesc(desc) { - _impl = std::make_shared(); - _impl->pShape = ov::PartialShape(desc.getDims()); -} - -const Precision& Data::getPrecision() const { - return tensorDesc.getPrecision(); -} - -const TensorDesc& Data::getTensorDesc() const { - return tensorDesc; -} - -bool Data::isInitialized() const { - return !tensorDesc.getDims().empty() || tensorDesc.getLayout() == SCALAR; -} - -void Data::setDims(const SizeVector& a_dims) { - tensorDesc.setDims(a_dims); - _impl->pShape = ov::PartialShape(a_dims); -} - -void Data::setLayout(Layout layout) { - tensorDesc.setLayout(layout); -} - -void Data::reshape(const SizeVector& a_dims, Layout a_layout) { - tensorDesc.reshape(a_dims, a_layout); - _impl->pShape = ov::PartialShape(a_dims); -} - -Data::Data(const Data& data) : name(data.name), userObject(data.userObject), tensorDesc(data.tensorDesc) { - _impl = std::make_shared(); - _impl->creatorLayer = data._impl->creatorLayer; - _impl->inputTo = data._impl->inputTo; - _impl->pShape = data._impl->pShape; -} - -Data& Data::operator=(const Data& data) { - if (this != &data) { - name = data.name; - userObject = data.userObject; - tensorDesc = data.tensorDesc; - - _impl->creatorLayer = data._impl->creatorLayer; - _impl->inputTo = data._impl->inputTo; - _impl->pShape = data._impl->pShape; - } - - return *this; -} - -const std::string& Data::getName() const { - return name; -} - -void Data::setName(const std::string& newName) { - name = newName; -} - -const UserValue& Data::getUserObject() const { - return userObject; -} - -Layout Data::getLayout() const { - return tensorDesc.getLayout(); -} - -void Data::setPrecision(const Precision& precision) { - 
tensorDesc.setPrecision(precision); -} - -const SizeVector& Data::getDims() const { - if (_impl->pShape.is_dynamic()) - IE_THROW() << "Cannot return dims for Data with dynamic shapes!"; - if (tensorDesc.getDims().empty() && tensorDesc.getLayout() != SCALAR) { - tensorDesc.setDims(_impl->pShape.to_shape()); - } - return tensorDesc.getDims(); -} - -// compatibility - -namespace InferenceEngine { - -INFERENCE_ENGINE_API_CPP(CNNLayerWeakPtr&) getCreatorLayer(const DataPtr& data); -INFERENCE_ENGINE_API_CPP(std::map&) getInputTo(const DataPtr& data); -INFERENCE_ENGINE_API_CPP(std::map&) getInputTo(Data* data); - -CNNLayerWeakPtr& getCreatorLayer(const DataPtr& data) { - return data->_impl->creatorLayer; -} - -std::map& getInputTo(const DataPtr& data) { - return data->_impl->inputTo; -} - -std::map& getInputTo(Data* data) { - return data->_impl->inputTo; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_layouts.cpp b/src/inference/src/ie_layouts.cpp deleted file mode 100644 index 6c2543b36056be..00000000000000 --- a/src/inference/src/ie_layouts.cpp +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_layouts.h" - -#include -#include - -using namespace InferenceEngine; -IE_SUPPRESS_DEPRECATED_START - -TensorDesc::TensorDesc(const Precision& precision, const SizeVector& dims, Layout layout) - : precision(precision), - blockingDesc(dims, layout) { - this->dims = dims; - this->layout = layout; -} - -TensorDesc::TensorDesc(const Precision& precision, Layout layout) : precision(precision), blockingDesc() { - this->layout = layout; -} - -TensorDesc::TensorDesc(const Precision& precision, const SizeVector& dims, const BlockingDesc& blockDesc) - : dims(dims), - precision(precision), - blockingDesc(blockDesc) { - if (dims.size() == 0 || blockingDesc.getBlockDims().size() == 0) { - layout = Layout::SCALAR; - return; - } - if (dims.size() != *std::max_element(blockDesc.getOrder().begin(), blockDesc.getOrder().end()) + 1) - IE_THROW() << "Cannot create TensorDesc! 
Blocked dims are inconsistent with original dims."; - - layout = Layout::BLOCKED; - if (dims.size() == blockingDesc.getBlockDims().size()) { - switch (dims.size()) { - case 1: - layout = Layout::C; - break; - case 2: - if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 1) - layout = Layout::NC; - else - layout = Layout::CN; - break; - case 3: - if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 1 && blockingDesc.getOrder()[2] == 2) { - layout = Layout::CHW; - } else if (blockingDesc.getOrder()[0] == 1 && blockingDesc.getOrder()[1] == 2 && - blockingDesc.getOrder()[2] == 0) { - layout = Layout::HWC; - } - break; - case 4: - if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 1 && blockingDesc.getOrder()[2] == 2 && - blockingDesc.getOrder()[3] == 3) { - layout = Layout::NCHW; - } else if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 2 && - blockingDesc.getOrder()[2] == 3 && blockingDesc.getOrder()[3] == 1) { - layout = Layout::NHWC; - } - break; - case 5: - if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 1 && blockingDesc.getOrder()[2] == 2 && - blockingDesc.getOrder()[3] == 3 && blockingDesc.getOrder()[4] == 4) { - layout = Layout::NCDHW; - } else if (blockingDesc.getOrder()[0] == 0 && blockingDesc.getOrder()[1] == 2 && - blockingDesc.getOrder()[2] == 3 && blockingDesc.getOrder()[3] == 4 && - blockingDesc.getOrder()[4] == 1) { - layout = Layout::NDHWC; - } - break; - default: - break; - } - } -} - -TensorDesc::TensorDesc() { - this->layout = Layout::ANY; - precision = Precision::UNSPECIFIED; -} - -void TensorDesc::setDims(const SizeVector& dims) { - if (layout == Layout::BLOCKED) { - auto newOrder = blockingDesc.getOrder(); - auto oldDims = blockingDesc.getBlockDims(); - - // {0} shape is fully dynamic shape with default (BLOCKED) layout, order is dummy - if (oldDims.size() == 1 && oldDims[0] == 0) - newOrder.resize(0); - - if (newOrder.empty()) { - for (size_t i = 0; i < dims.size(); i++) { - newOrder.push_back(i); - } - } - blockingDesc = BlockingDesc(dims, newOrder); - } else { - if (layout == Layout::SCALAR && (dims.size() > 1 || (dims.size() == 1 && dims[0] != 1))) - IE_THROW() << "Cannot set dimensions for SCALAR layout!"; - blockingDesc = BlockingDesc(dims, layout); - } - if (layout != Layout::SCALAR) - this->dims = dims; -} - -void TensorDesc::setLayout(Layout l) { - bool inconsistentLayout = true; - - switch (l) { - case Layout::SCALAR: - inconsistentLayout = !dims.empty(); - break; - case Layout::C: - inconsistentLayout = dims.size() != 1; - break; - case Layout::BLOCKED: - case Layout::ANY: - inconsistentLayout = false; - break; - case Layout::GOIDHW: - inconsistentLayout = dims.size() != 6; - break; - case Layout::NCDHW: - case Layout::NDHWC: - case Layout::OIDHW: - case Layout::GOIHW: - inconsistentLayout = dims.size() != 5; - break; - case Layout::OIHW: - case Layout::NCHW: - case Layout::NHWC: - inconsistentLayout = dims.size() != 4; - break; - case Layout::CHW: - case Layout::HWC: - inconsistentLayout = dims.size() != 3; - break; - case Layout::CN: - case Layout::NC: - case Layout::HW: - inconsistentLayout = dims.size() != 2; - break; - default: - break; - } - - if (inconsistentLayout) { - IE_THROW() << "Size of dims(" << std::to_string(dims.size()) << ") and format(" << l << ") are inconsistent."; - } - - // HACK: we need to update BlockingDesc after layout change, but if it was set manually not sure how to di this - // properly - const bool hasDefaultBlockingDesc = blockingDesc == 
BlockingDesc(dims, layout); - - layout = l; - - if (hasDefaultBlockingDesc) { - blockingDesc = BlockingDesc(dims, layout); - } -} - -bool TensorDesc::operator==(const TensorDesc& rhs) const { - return blockingDesc == rhs.blockingDesc && precision == rhs.precision && layout == rhs.layout && dims == rhs.dims; -} - -bool TensorDesc::operator!=(const TensorDesc& rhs) const { - return !(*this == rhs); -} - -Layout TensorDesc::getLayoutByRank(size_t rank) { - switch (rank) { - case 0: - return Layout::SCALAR; - case 1: - return Layout::C; - case 2: - return Layout::NC; - case 3: - return Layout::CHW; - case 4: - return Layout::NCHW; - case 5: - return Layout::NCDHW; - default: - return Layout::BLOCKED; - } -} - -Layout TensorDesc::getLayoutByDims(const SizeVector& dims) { - // {0} shape is fully dynamic shape with default (BLOCKED) layout - if (dims.size() == 1 && dims[0] == 0) - return Layout::BLOCKED; - - return getLayoutByRank(dims.size()); -} - -size_t TensorDesc::offset(const SizeVector& v) const { - if (layout == Layout::ANY) - IE_THROW() << "Cannot calculate offset for any format!"; - - if (layout == Layout::SCALAR) - return blockingDesc.getOffsetPadding(); - - SizeVector off_v = v; - const SizeVector& blockedDims = blockingDesc.getBlockDims(); - const SizeVector& strides = blockingDesc.getStrides(); - const SizeVector& order = blockingDesc.getOrder(); - - size_t n_blocked_dims = order.size(); - if (blockedDims.size() != n_blocked_dims || strides.size() != n_blocked_dims) { - IE_THROW() << "Cannot calculate offset. Incorrect primitive descriptor!"; - } - SizeVector blockedShift(n_blocked_dims); - for (size_t i = 1; i <= n_blocked_dims; i++) { - blockedShift[n_blocked_dims - i] = off_v[order[n_blocked_dims - i]] % blockedDims[n_blocked_dims - i]; - off_v[order[n_blocked_dims - i]] /= blockedDims[n_blocked_dims - i]; - } - size_t offset = blockingDesc.getOffsetPadding(); - for (size_t d = 0; d < n_blocked_dims; ++d) { - const size_t p = blockedShift[d] + blockingDesc.getOffsetPaddingToData()[d]; - offset += p * strides[d]; - } - return offset; -} - -size_t TensorDesc::offset(size_t l) const { - size_t n_dims = dims.size(); - SizeVector pos(n_dims); - for (size_t rd = 1; rd <= n_dims; ++rd) { - const size_t d = n_dims - rd; - const size_t cur_dim = dims[d]; - pos[d] = l % cur_dim; - l /= cur_dim; - } - return offset(pos); -} - -void TensorDesc::reshape(const SizeVector& dims, Layout layout) { - for (auto& padd : blockingDesc.getOffsetPaddingToData()) { - if (padd) - IE_THROW() << "Cannot reshape a non-packaged blob!"; - } - if (layout != Layout::ANY) { - blockingDesc = BlockingDesc(dims, layout); - this->layout = layout; - } else { - blockingDesc = BlockingDesc(dims, this->layout); - } - this->dims = dims; -} - -void TensorDesc::reshape(const SizeVector& dims, const BlockingDesc& blockDesc) { - blockingDesc = blockDesc; - this->dims = dims; - this->layout = Layout::BLOCKED; -} - -BlockingDesc::BlockingDesc(const SizeVector& block_dims, const SizeVector& order) : offsetPadding(0) { - this->order = order; - if (block_dims.empty() || order.empty()) - return; - fillDesc(block_dims, order); -} - -BlockingDesc::BlockingDesc() : BlockingDesc({}, Layout::ANY) {} - -BlockingDesc::BlockingDesc(const SizeVector& blocked_dims, const SizeVector& order, size_t offset) - : BlockingDesc(blocked_dims, order) { - this->offsetPadding = offset; -} - -BlockingDesc::BlockingDesc(const SizeVector& blocked_dims, - const SizeVector& order, - size_t offset, - const SizeVector& dimOffsets) - : 
BlockingDesc(blocked_dims, order) { - this->offsetPadding = offset; - if (blocked_dims.size() != dimOffsets.size()) - IE_THROW() << "Offsets are not initialized for all dimensions."; - this->offsetPaddingToData = dimOffsets; -} - -BlockingDesc::BlockingDesc(const SizeVector& blocked_dims, - const SizeVector& order, - size_t offset, - const SizeVector& dimOffsets, - const SizeVector& strides) - : BlockingDesc(blocked_dims, order) { - this->offsetPadding = offset; - if (blocked_dims.size() != strides.size()) - IE_THROW() << "Strides are not initialized for all dimensions."; - this->strides = strides; - if (blocked_dims.size() != dimOffsets.size()) - IE_THROW() << "Offsets are not initialized for all dimensions."; - this->offsetPaddingToData = dimOffsets; - - // check that strides are valid - if (!std::any_of(blocked_dims.begin(), blocked_dims.end(), [](const size_t dim) { - return dim == 0ul; - })) { - size_t denseStride = 1; - - for (size_t i = 1; i <= strides.size(); i++) { - if (denseStride > strides[strides.size() - i]) { - IE_THROW() << "Stride in " << (strides.size() - i) - << "-th dimension " - "is not valid; actual " - << strides[strides.size() - i] << ", should be >= " << denseStride << std::endl; - } - denseStride = std::max(strides[strides.size() - i], denseStride) * blocked_dims[blocked_dims.size() - i]; - } - } -} - -BlockingDesc::BlockingDesc(const SizeVector& dims, Layout layout) : offsetPadding(0) { - if (dims.empty()) - return; - - offsetPadding = 0; - auto checkDims = [](size_t r_size, size_t e_size) { - if (r_size != e_size) - IE_THROW() << "Dims and format are inconsistent."; - }; - SizeVector l_order; - SizeVector l_dims; - switch (layout) { - case Layout::SCALAR: - case Layout::ANY: - return; - case Layout::C: - checkDims(dims.size(), 1); - l_order = {0}; - l_dims = dims; - break; - case Layout::OIHW: - case Layout::NCHW: - checkDims(dims.size(), 4); - l_order = {0, 1, 2, 3}; - l_dims = dims; - break; - case Layout::OIDHW: - case Layout::GOIHW: - case Layout::NCDHW: - checkDims(dims.size(), 5); - l_order = {0, 1, 2, 3, 4}; - l_dims = dims; - break; - case Layout::GOIDHW: - checkDims(dims.size(), 6); - l_order = {0, 1, 2, 3, 4, 5}; - l_dims = dims; - break; - case Layout::NHWC: - checkDims(dims.size(), 4); - l_order = {0, 2, 3, 1}; - l_dims = {dims[0], dims[2], dims[3], dims[1]}; - break; - case Layout::NDHWC: - checkDims(dims.size(), 5); - l_order = {0, 2, 3, 4, 1}; - l_dims = {dims[0], dims[2], dims[3], dims[4], dims[1]}; - break; - case Layout::CHW: - checkDims(dims.size(), 3); - l_order = {0, 1, 2}; - l_dims = dims; - break; - case Layout::HWC: - checkDims(dims.size(), 3); - l_order = {1, 2, 0}; - l_dims = {dims[1], dims[2], dims[0]}; - break; - case Layout::CN: - checkDims(dims.size(), 2); - l_order = {1, 0}; - l_dims = {dims[1], dims[0]}; - break; - case Layout::NC: - case Layout::HW: - checkDims(dims.size(), 2); - l_order = {0, 1}; - l_dims = dims; - break; - case Layout::BLOCKED: - l_order.clear(); - for (size_t i = 0; i < dims.size(); i++) - l_order.push_back(i); - l_dims = dims; - break; - } - - fillDesc(l_dims, l_order); -} - -void BlockingDesc::fillDesc(const SizeVector& blocked_dims, const SizeVector& order) { - if (order.size() != blocked_dims.size()) - IE_THROW() << "Cannot fill descriptor. Size of dimensions (" << blocked_dims.size() << ") and order (" - << order.size() << ") vector don't match."; - if (blocked_dims.empty() || order.empty()) - IE_THROW() << "Cannot fill descriptor. 
Dimensions and order vector are empty."; - this->order = order; - this->blockedDims = blocked_dims; - offsetPadding = 0; - offsetPaddingToData.resize(order.size()); - strides.resize(order.size()); - strides[strides.size() - 1] = 1; - offsetPaddingToData[offsetPaddingToData.size() - 1] = 0; - for (size_t i = 2; i <= order.size(); i++) { - offsetPaddingToData[offsetPaddingToData.size() - i] = 0; - strides[strides.size() - i] = strides[strides.size() - (i - 1)] * blocked_dims[blocked_dims.size() - (i - 1)]; - } - - offsetPadding = 0; -} - -bool BlockingDesc::operator==(const BlockingDesc& rhs) const { - return blockedDims == rhs.blockedDims && strides == rhs.strides && offsetPaddingToData == rhs.offsetPaddingToData && - order == rhs.order && offsetPadding == rhs.offsetPadding; -} - -bool BlockingDesc::operator!=(const BlockingDesc& rhs) const { - return !(*this == rhs); -} - -namespace { - -struct DimSlice { - size_t startInd = 0; - size_t size = 0; - - DimSlice() = default; - - DimSlice(size_t startInd, size_t size) : startInd(startInd), size(size) {} -}; - -using TensorSlice = std::vector; - -void checkROI(const TensorDesc& origDesc, const TensorSlice& roi) { - const auto numDims = origDesc.getDims().size(); - - if (roi.size() != numDims) { - IE_THROW() << "ROI num dims " << roi.size() << " differs from original num dims " << numDims; - } - - // TensorDesc stores dimensions in standard layout, as well as roi vector - for (size_t dimInd = 0; dimInd < numDims; ++dimInd) { - const auto fullSize = origDesc.getDims()[dimInd]; - - const auto& roiSlice = roi[dimInd]; - const auto endInd = roiSlice.startInd + roiSlice.size; - - if (endInd > fullSize) { - IE_THROW() << "ROI [" << roiSlice.startInd << ", " << endInd << ")" - << " is out of range " << fullSize << " for dimension " << dimInd; - } - } -} - -TensorDesc make_roi_desc(const TensorDesc& origDesc, const TensorSlice& roi, bool useOrigMemDesc) { - const auto numDims = origDesc.getDims().size(); - - checkROI(origDesc, roi); - - const auto origPrecision = origDesc.getPrecision(); - - const auto& origBlkDesc = origDesc.getBlockingDesc(); - const auto& origBlkStrides = origBlkDesc.getStrides(); - const auto& origBlkOrder = origBlkDesc.getOrder(); - - SizeVector roiDims(numDims); - SizeVector roiBlkDims(numDims); - SizeVector roiBlkDimOffsets = origBlkDesc.getOffsetPaddingToData(); - size_t roiBlkOffset = origBlkDesc.getOffsetPadding(); - - IE_ASSERT(origBlkStrides.size() == numDims); - IE_ASSERT(origBlkOrder.size() == numDims); - IE_ASSERT(roiBlkDimOffsets.size() == numDims); - - // BlockingDesc stores dimensions in memory order, so we need to use origOrder array. - // Offsets in `roi` relates to `origDesc` dimensions, while offsets in `BlockingDesc` relates to top parent tensor - // dimensions. - for (size_t memInd = 0; memInd < numDims; ++memInd) { - const auto dimInd = origBlkOrder[memInd]; - const auto& roiSlice = roi[dimInd]; - - roiDims[dimInd] = roiSlice.size; - roiBlkDims[memInd] = roiSlice.size; - roiBlkDimOffsets[memInd] += roiSlice.startInd; - roiBlkOffset += roiSlice.startInd * origBlkStrides[memInd]; - } - - const auto roiBlkDesc = useOrigMemDesc - ? 
BlockingDesc(roiBlkDims, origBlkOrder, roiBlkOffset, roiBlkDimOffsets, origBlkStrides) - : BlockingDesc(roiBlkDims, origBlkOrder); - - const auto roiDesc = TensorDesc(origPrecision, roiDims, roiBlkDesc); - - return roiDesc; -} - -TensorSlice make_roi_slice(const TensorDesc& origDesc, const ROI& roi) { - const auto layout = origDesc.getLayout(); - if (layout != Layout::NCHW && layout != Layout::NHWC) { - IE_THROW() << "Unsupported layout " << layout; - } - - TensorSlice roiSlice(4); - roiSlice[0] = DimSlice{roi.id, 1}; // N - roiSlice[1] = DimSlice{0, origDesc.getDims()[1]}; // C - roiSlice[2] = DimSlice{roi.posY, roi.sizeY}; // H - roiSlice[3] = DimSlice{roi.posX, roi.sizeX}; // W - - return roiSlice; -} - -} // namespace - -TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc) { - return make_roi_desc(origDesc, make_roi_slice(origDesc, roi), useOrigMemDesc); -} - -TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc, - const std::vector& begin, - const std::vector& end, - bool useOrigMemDesc) { - if (begin.size() != end.size()) { - IE_THROW() << "`begin` vector size must match `end` vector size"; - } - TensorSlice slice; - for (size_t i = 0; i < begin.size(); ++i) { - IE_ASSERT(end[i] >= begin[i]); - slice.emplace_back(begin[i], end[i] - begin[i]); - } - return make_roi_desc(origDesc, slice, useOrigMemDesc); -} -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp deleted file mode 100644 index 9858dc01677740..00000000000000 --- a/src/inference/src/ie_network_reader.cpp +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_network_reader.hpp" - -#include -#include -#include -#include -#include -#include - -#include "cnn_network_ngraph_impl.hpp" -#include "cpp/ie_cnn_network.h" -#include "dev/converter_utils.hpp" -#include "ie_api.h" -#include "ie_common.h" -#include "ie_icnn_network.hpp" -#include "ie_input_info.hpp" -#include "itt.hpp" -#include "openvino/core/deprecated.hpp" -#include "openvino/core/except.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "openvino/core/type/element_type.hpp" -#include "openvino/frontend/manager.hpp" -#include "openvino/runtime/shared_buffer.hpp" -#include "openvino/runtime/so_ptr.hpp" -#include "openvino/util/file_util.hpp" -#include "openvino/util/shared_object.hpp" -#include "transformations/rt_info/old_api_map_order_attribute.hpp" -#include "transformations/utils/utils.hpp" - -namespace InferenceEngine { - -namespace { -CNNNetwork convert_to_cnnnetwork(std::shared_ptr& function, bool is_new_api, bool frontendMode = false) { - // only for IR cases we need preprocessing or postprocessing steps - if (function->has_rt_info("version") && function->get_rt_info("version") == 11 && !is_new_api) { - IR_READER_SCOPE(ir11_old_api); - ov::preprocess::PrePostProcessor prepost(function); - - const std::string& old_api_map_key_order = ov::OldApiMapOrder::get_type_info_static(); - const std::string& old_api_map_key_type = ov::OldApiMapElementType::get_type_info_static(); - - bool need_validate_nodes_and_infer_types = false; - auto& parameters = function->get_parameters(); - for (size_t i = 0; i < parameters.size(); ++i) { - const auto& parameter = parameters[i]; - ov::RTMap& rtInfo = parameter->get_rt_info(); - const auto it_type = rtInfo.find(old_api_map_key_type); - auto& pre_input = prepost.input(i); - if (it_type != 
rtInfo.end()) { - const auto old_api_map_type = it_type->second.as().value; - const auto param_type = parameter->get_element_type(); - - // In the following code we add Convert node from old_api_map_type to Parameter type - // using PrePostProcessor. As some plugins do not support uint8 type, Convert to uint8 leads - // to error, so for such case type is set directly to Parameter node instead of inserting Convert. - if ((param_type == ov::element::u8 && old_api_map_type.is_real())) { - parameter->set_element_type(old_api_map_type); - need_validate_nodes_and_infer_types = true; - } else { - pre_input.tensor().set_element_type(old_api_map_type); - } - - OPENVINO_ASSERT(!old_api_map_type.is_dynamic(), "Old API map does not support dynamic type"); - rtInfo.erase(it_type); - } - const auto it_order = rtInfo.find(old_api_map_key_order); - if (it_order != rtInfo.end()) { - const auto order = it_order->second.as().value; - pre_input.preprocess().convert_layout(order); - rtInfo.erase(it_order); - } - } - - auto& results = function->get_results(); - for (size_t i = 0; i < results.size(); ++i) { - const auto& result = results[i]; - ov::RTMap& rtInfo = result->get_rt_info(); - const auto it = rtInfo.find(old_api_map_key_order); - if (it == rtInfo.end()) - continue; - - const auto order = it->second.as().value; - auto& post_output = prepost.output(i); - post_output.postprocess().convert_layout(order); - - // remove old api once we applied it - rtInfo.erase(it); - } - - if (need_validate_nodes_and_infer_types) - function->validate_nodes_and_infer_types(); - - // Set version to 10 - function->set_rt_info(10, "version"); - - function = prepost.build(); - } - - OPENVINO_SUPPRESS_DEPRECATED_START - return CNNNetwork(std::make_shared(function, is_new_api)); - OPENVINO_SUPPRESS_DEPRECATED_END -} - -} // namespace - -CNNNetwork details::ReadNetwork(const std::string& modelPath, - const std::string& binPath, - const std::vector& ov_exts, - bool is_new_api, - bool enable_mmap) { - // Fix unicode name -#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - std::wstring model_path = ov::util::string_to_wstring(modelPath.c_str()); -#else - std::string model_path = modelPath; -#endif - - // Try to load with FrontEndManager - ov::frontend::FrontEndManager manager; - ov::frontend::FrontEnd::Ptr FE; - ov::frontend::InputModel::Ptr inputModel; - - ov::AnyVector params{model_path}; - - if (!binPath.empty()) { -#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - const std::wstring& weights_path = ov::util::string_to_wstring(binPath.c_str()); -#else - const std::string& weights_path = binPath; -#endif - params.emplace_back(weights_path); - } - params.emplace_back(enable_mmap); - - FE = manager.load_by_model(params); - if (FE) { - FE->add_extension(ov_exts); - inputModel = FE->load(params); - } - - if (inputModel) { - auto ngFunc = FE->convert(inputModel); - return convert_to_cnnnetwork(ngFunc, is_new_api); - } - - const auto fileExt = modelPath.substr(modelPath.find_last_of(".") + 1); - std::string FEs; - for (const auto& fe_name : manager.get_available_front_ends()) - FEs += fe_name + " "; - IE_THROW(NetworkNotRead) << "Unable to read the model: " << modelPath - << " Please check that model format: " << fileExt - << " is supported and the model is correct." 
- << " Available frontends: " << FEs; -} - -CNNNetwork details::ReadNetwork(const std::string& model, - const Blob::CPtr& weights, - const std::vector& ov_exts, - bool is_new_api, - bool frontendMode) { - std::istringstream modelStringStream(model); - std::istream& modelStream = modelStringStream; - - // Try to load with FrontEndManager - ov::frontend::FrontEndManager manager; - ov::frontend::FrontEnd::Ptr FE; - ov::frontend::InputModel::Ptr inputModel; - - ov::AnyVector params{&modelStream}; - if (weights) { - char* data = weights->cbuffer().as(); - std::shared_ptr weights_buffer = - std::make_shared>(data, weights->byteSize(), weights); - params.emplace_back(weights_buffer); - } - - FE = manager.load_by_model(params); - if (FE) { - FE->add_extension(ov_exts); - inputModel = FE->load(params); - } - if (inputModel) { - auto ngFunc = FE->convert(inputModel); - return convert_to_cnnnetwork(ngFunc, is_new_api, frontendMode); - } - - IE_THROW(NetworkNotRead) - << "Unable to read the model. Please check if the model format is supported and model is correct."; -} - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_network_reader.hpp b/src/inference/src/ie_network_reader.hpp deleted file mode 100644 index 062ea26ac6f235..00000000000000 --- a/src/inference/src/ie_network_reader.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "cpp/ie_cnn_network.h" -#include "ie_blob.h" -#include "openvino/core/extension.hpp" - -namespace InferenceEngine { -namespace details { - -/** - * @brief Reads IR xml and bin files - * @param modelPath path to IR file - * @param binPath path to bin file, if path is empty, will try to read bin file with the same name as xml and - * if bin file with the same name was not found, will load IR without weights. 
- * @param ov_exts vector with OpenVINO extensions - * @param enable_mmap boolean to enable/disable `mmap` use in Frontend - * @return CNNNetwork - */ -CNNNetwork ReadNetwork(const std::string& modelPath, - const std::string& binPath, - const std::vector& ov_exts, - bool is_new_api, - bool enable_mmap); -/** - * @brief Reads IR xml and bin (with the same name) files - * @param model string with IR - * @param weights shared pointer to constant blob with weights - * @param ov_exts vector with OpenVINO extensions - * @param frontendMode read network without post-processing or other transformations - * @return CNNNetwork - */ -CNNNetwork ReadNetwork(const std::string& model, - const Blob::CPtr& weights, - const std::vector& ov_exts, - bool is_new_api, - bool frontendMode = false); - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/src/ie_ngraph_utils.cpp b/src/inference/src/ie_ngraph_utils.cpp deleted file mode 100644 index 1b30258fb6e06c..00000000000000 --- a/src/inference/src/ie_ngraph_utils.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_ngraph_utils.hpp" - -#include "cnn_network_ngraph_impl.hpp" -#include "itt.hpp" - -namespace InferenceEngine { -namespace details { - -CNNNetwork cloneNetwork(const CNNNetwork& network) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "cloneNetwork"); - - if (network.getFunction()) { - IE_SUPPRESS_DEPRECATED_START - return CNNNetwork(std::make_shared(network)); - IE_SUPPRESS_DEPRECATED_END - } - - IE_THROW() << "InferenceEngine::details::cloneNetwork requires ngraph-based `network` object to clone"; -} - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/src/model_reader.cpp b/src/inference/src/model_reader.cpp index bc67f6d21b225a..e12ccdc1d66d16 100644 --- a/src/inference/src/model_reader.cpp +++ b/src/inference/src/model_reader.cpp @@ -4,7 +4,6 @@ #include "model_reader.hpp" -#include "cnn_network_ngraph_impl.hpp" #include "itt.hpp" #include "openvino/core/model.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" @@ -16,6 +15,21 @@ namespace { +ov::element::Type toLegacyType(const ov::element::Type& ngraph_type, bool input) { + if (input) { + return ngraph_type == ov::element::f16 ? 
ov::element::f32 : ngraph_type; + } else { + if (ngraph_type == ov::element::i64 || ngraph_type == ov::element::u64 || ngraph_type == ov::element::i32 || + ngraph_type == ov::element::u32) { + return ov::element::i32; + } else if (ngraph_type != ov::element::f32) { + return ov::element::f32; + } + } + + return ngraph_type; +} + void update_v10_model(std::shared_ptr& model, bool frontendMode = false) { // only for IR cases we need preprocessing or postprocessing steps if (model->has_rt_info("version") && model->get_rt_info("version") == 10) { @@ -27,7 +41,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal for (size_t i = 0; i < inputs.size(); ++i) { if (!frontendMode) { const auto ov_type = inputs[i].get_element_type(); - const auto legacy_type = InferenceEngine::details::toLegacyType(ov_type, true); + const auto legacy_type = toLegacyType(ov_type, true); prepost.input(i).tensor().set_element_type(legacy_type); } for (const auto& name : inputs[i].get_names()) { @@ -42,7 +56,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal for (size_t i = 0; i < outputs.size(); ++i) { if (!frontendMode) { const auto ov_type = outputs[i].get_element_type(); - const auto legacy_type = InferenceEngine::details::toLegacyType(ov_type, false); + const auto legacy_type = toLegacyType(ov_type, false); prepost.output(i).tensor().set_element_type(legacy_type); } for (const auto& name : outputs[i].get_names()) { diff --git a/src/inference/src/openvino_shutdown.cpp b/src/inference/src/openvino_shutdown.cpp deleted file mode 100644 index 8096351bd0f2b7..00000000000000 --- a/src/inference/src/openvino_shutdown.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_core.hpp" -#include "openvino/frontend/manager.hpp" -#include "openvino/runtime/core.hpp" - -void ov::shutdown() { - frontend::FrontEndManager::shutdown(); -} - -void InferenceEngine::shutdown() { - ov::shutdown(); -} diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 988fdab3baeba5..e322eb6d3b34c9 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -12,7 +12,6 @@ #include #include "dev/threading/parallel_custom_arena.hpp" -#include "ie_common.h" #include "openvino/core/except.hpp" #include "openvino/runtime/system_conf.hpp" #include "openvino/util/log.hpp" diff --git a/src/inference/src/system_allocator.cpp b/src/inference/src/system_allocator.cpp deleted file mode 100644 index a820893e4f049b..00000000000000 --- a/src/inference/src/system_allocator.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "system_allocator.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -INFERENCE_ENGINE_API_CPP(std::shared_ptr) CreateDefaultAllocator() noexcept { - try { - return std::make_shared(); - } catch (...) 
{ - return nullptr; - } -} - -} // namespace InferenceEngine diff --git a/src/inference/src/system_allocator.hpp b/src/inference/src/system_allocator.hpp deleted file mode 100644 index 606cccf68e8cfb..00000000000000 --- a/src/inference/src/system_allocator.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ie_allocator.hpp" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -class SystemMemoryAllocator : public InferenceEngine::IAllocator { -public: - void* lock(void* handle, InferenceEngine::LockOp = InferenceEngine::LOCK_FOR_WRITE) noexcept override { - return handle; - } - - void unlock(void* a) noexcept override {} - - void* alloc(size_t size) noexcept override { - try { - auto handle = reinterpret_cast(new char[size]); - return handle; - } catch (...) { - return nullptr; - } - } - - bool free(void* handle) noexcept override { - try { - delete[] reinterpret_cast(handle); - } catch (...) { - } - return true; - } -}; -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/tests/functional/matmul_sr_tests.cpp b/src/inference/tests/functional/matmul_sr_tests.cpp index 3d17cfd915fa58..21e9a02fad5528 100644 --- a/src/inference/tests/functional/matmul_sr_tests.cpp +++ b/src/inference/tests/functional/matmul_sr_tests.cpp @@ -8,11 +8,9 @@ #include #include -#include "cnn_network_ngraph_impl.hpp" #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "ie_common.h" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/matmul.hpp" diff --git a/src/inference/tests/functional/ov_infer_request_test.cpp b/src/inference/tests/functional/ov_infer_request_test.cpp index 935b056b2fef7e..8a63a30912ef02 100644 --- a/src/inference/tests/functional/ov_infer_request_test.cpp +++ b/src/inference/tests/functional/ov_infer_request_test.cpp @@ -4,7 +4,6 @@ #include -#include #include #include #include diff --git a/src/inference/tests/functional/ov_remote_tensor.cpp b/src/inference/tests/functional/ov_remote_tensor.cpp index 512109589abbed..5bcaf57915a83a 100644 --- a/src/inference/tests/functional/ov_remote_tensor.cpp +++ b/src/inference/tests/functional/ov_remote_tensor.cpp @@ -4,7 +4,6 @@ #include -#include #include using namespace ::testing; diff --git a/src/inference/tests/functional/ov_shared_object_test.cpp b/src/inference/tests/functional/ov_shared_object_test.cpp index 424adb88cddcbc..96700e584338bb 100644 --- a/src/inference/tests/functional/ov_shared_object_test.cpp +++ b/src/inference/tests/functional/ov_shared_object_test.cpp @@ -4,10 +4,9 @@ #include -#include - #include "common_test_utils/file_utils.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" +#include "openvino/runtime/iplugin.hpp" +#include "openvino/util/file_util.hpp" #include "openvino/util/shared_object.hpp" using namespace ::testing; @@ -25,7 +24,7 @@ class SharedObjectOVTests : public ::testing::Test { } std::shared_ptr shared_object; - using CreateF = void(std::shared_ptr&); + using CreateF = void(std::shared_ptr&); std::function make_std_function(const std::string& functionName) { std::function ptr( @@ -58,7 +57,7 @@ TEST_F(SharedObjectOVTests, throwIfMethodNofFoundInLibrary) { TEST_F(SharedObjectOVTests, canCallExistedMethod) { loadDll(get_mock_engine_name()); - auto factory = make_std_function("CreatePluginEngine"); - std::shared_ptr ptr; + 
auto factory = make_std_function(ov::create_plugin_function); + std::shared_ptr ptr; EXPECT_NO_THROW(factory(ptr)); } diff --git a/src/inference/tests/unit/compilation_context_test.cpp b/src/inference/tests/unit/compilation_context_test.cpp index d8f8bbf953d975..dbb241fbb917c0 100644 --- a/src/inference/tests/unit/compilation_context_test.cpp +++ b/src/inference/tests/unit/compilation_context_test.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/test_constants.hpp" -#include "cpp/ie_cnn_network.h" #include "openvino/core/preprocess/pre_post_process.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" diff --git a/src/inference/tests/unit/core.cpp b/src/inference/tests/unit/core.cpp index c654787bd9d056..f5fcb2efb1eb88 100644 --- a/src/inference/tests/unit/core.cpp +++ b/src/inference/tests/unit/core.cpp @@ -405,7 +405,7 @@ class ApplyAutoBatchThreading : public testing::Test { // Tested function: apply_auto_batch TEST_F(ApplyAutoBatchThreading, ApplyAutoBatch) { - ov::CoreImpl core(true); + ov::CoreImpl core; auto input = std::make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 4}); ov::Output intermediate = input->output(0); for (size_t i = 0; i < 100; ++i) diff --git a/src/inference/tests/unit/query_model_test.cpp b/src/inference/tests/unit/query_model_test.cpp index a2508185e9ab91..9d76b8ea02f668 100644 --- a/src/inference/tests/unit/query_model_test.cpp +++ b/src/inference/tests/unit/query_model_test.cpp @@ -4,9 +4,8 @@ #include #include -#include -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" +#include "openvino/core/rt_info.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" diff --git a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp index 41c81fe34edfd1..50d4e3bf28e3a8 100644 --- a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp @@ -404,9 +404,6 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t config.insert(ov::hint::performance_mode(performanceHint.as())); config.insert({ov::hint::model_priority.name(), modelPriority.as()}); - if (isNewAPI) { - ON_CALL(*core.get(), is_new_api()).WillByDefault(Return(true)); - } metaDevices.push_back({ov::test::utils::DEVICE_CPU, {ov::hint::performance_mode(performanceHint.as())}, 3, diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index 6e755ce01824a0..9f98ee888a66b5 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -50,7 +50,6 @@ CompiledModel::CompiledModel(const std::shared_ptr& model, const auto& core = m_plugin->get_core(); if (!core) OPENVINO_THROW("Unable to get API version. 
Core is unavailable"); - m_cfg.isLegacyApi = !core->is_new_api(); if (cfg.exclusiveAsyncRequests) { // special case when all InferRequests are muxed into a single queue diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index 8c4910eb02aa32..9ef50bd4aa8b70 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -101,8 +101,6 @@ struct Config { std::map _config; - bool isLegacyApi = false; - int modelPreferThreads = -1; ModelType modelType = ModelType::Unknown; diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index a4c7477a5a04bb..2b2c38ca772db5 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -129,13 +129,12 @@ void Graph::Replicate(const std::shared_ptr &model) { return -1; }; - const bool is_legacy_api = getConfig().isLegacyApi; for (const auto& op : model->get_ordered_ops()) { const NodePtr node {Node::factory().create(op, context)}; AddNode(node); if (op->get_type_info() == op::v0::Parameter::get_type_info_static()) { - const std::string name = get_port_name(ov::Output(op, 0), is_legacy_api); + const std::string name = get_port_name(ov::Output(op, 0)); inputNodesMap[name] = node; if (node->isDynamicNode()) { graphHasDynamicInput = true; @@ -143,7 +142,7 @@ void Graph::Replicate(const std::shared_ptr &model) { } if (op->get_type_info() == op::v0::Result::get_type_info_static()) { - const std::string inputID = get_port_name(op->output(0), is_legacy_api); + const std::string inputID = get_port_name(op->output(0)); outputNodesMap[inputID] = node; } @@ -1003,8 +1002,8 @@ void Graph::PullOutputData(std::unordered_map>& auto srcPrec = actualDesc->getPrecision(); auto dstPrec = expected_desc_ptr->getPrecision(); - if (!getConfig().isLegacyApi && srcPrec == dstPrec && ext_blob->get_byte_size() != intr_blob.getSize()) - OPENVINO_THROW("Output blob byte size is not equal network output byte size (", + if (srcPrec == dstPrec && ext_blob->get_byte_size() != intr_blob.getSize()) + OPENVINO_THROW("Output tensor byte size is not equal model output byte size (", ext_blob->get_byte_size(), "!=", intr_blob.getSize(), diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp index 53b2bec418c0fc..4c165d76d3af7c 100644 --- a/src/plugins/intel_cpu/src/infer_request.cpp +++ b/src/plugins/intel_cpu/src/infer_request.cpp @@ -26,14 +26,12 @@ namespace intel_cpu { SyncInferRequest::SyncInferRequest(std::shared_ptr compiled_model) : ov::ISyncInferRequest(compiled_model), m_compiled_model(compiled_model) { - m_is_legacy_api = m_compiled_model->get_graph()._graph.getConfig().isLegacyApi; - for (const auto& in : get_inputs()) { - auto port_name = get_port_name(in, m_is_legacy_api); + auto port_name = get_port_name(in); m_input_ports_map[port_name] = in; } for (const auto& out : get_outputs()) { - auto port_name = get_port_name(out, m_is_legacy_api); + auto port_name = get_port_name(out); m_output_ports_map[port_name] = out; } create_infer_request(); @@ -84,7 +82,7 @@ void SyncInferRequest::commit_states() { void SyncInferRequest::redefine_memory_for_input_nodes() { const auto cpuInputNodes = m_graph->GetInputNodesMap(); for (const auto& port : get_inputs()) { - std::string name = get_port_name(port, m_is_legacy_api); + std::string name = get_port_name(port); if (name.empty()) { OPENVINO_THROW("compiled model doesn't contain this input port."); } @@ -101,7 +99,7 @@ void SyncInferRequest::redefine_memory_for_input_nodes() { 
void SyncInferRequest::update_external_tensor_ptrs() { // Update it due to batched_tensors case will update input tensor for (auto input : get_inputs()) { - std::string input_name = get_port_name(input, m_is_legacy_api); + std::string input_name = get_port_name(input); if (input_name.empty()) { OPENVINO_THROW("Input tensor map contains not registered during IPlugin::compile_model tensor with name ", input_name); @@ -345,7 +343,7 @@ std::vector> SyncInferRequest::get_tensors(const ov::Outp } const ov::Output& SyncInferRequest::get_internal_port(const ov::Output& port) const { - auto name = get_port_name(port, m_is_legacy_api); + auto name = get_port_name(port); bool is_input = ov::op::util::is_parameter(port.get_node()); if (is_input) { return m_input_ports_map.at(name); @@ -368,7 +366,7 @@ void SyncInferRequest::set_tensor(const ov::Output& in_port, con in_tensor->get_size() == ov::shape_size(in_port.get_shape()) && in_port.get_shape().size() > 0) { tensor = ov::make_tensor(in_tensor->get_element_type(), in_port.get_shape(), in_tensor->data()); } - auto name = get_port_name(in_port, m_is_legacy_api); + auto name = get_port_name(in_port); auto mem_desc_ptr = MemoryDescUtils::generateCpuBlockedMemoryDesc(tensor); bool is_input = ov::op::util::is_parameter(port.get_node()); if (is_input) { @@ -616,7 +614,7 @@ void SyncInferRequest::init_tensor(const std::string& name) { } // update tensors in case of multiple output ports with the same name for (const auto& out : get_outputs()) { - auto port_name = get_port_name(out, m_is_legacy_api); + auto port_name = get_port_name(out); if ((name == port_name) && tensor && port != out) { ov::ISyncInferRequest::set_tensor(out, tensor); } @@ -631,7 +629,7 @@ void SyncInferRequest::init_tensor(const std::string& name) { void SyncInferRequest::push_input_data() { for (auto input : get_inputs()) { - std::string input_name = get_port_name(input, m_is_legacy_api); + std::string input_name = get_port_name(input); if (input_name.empty()) { OPENVINO_THROW("Input tensor map contains not registered during IPlugin::compile_model tensor with name ", input_name); diff --git a/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp b/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp index 7c17a1381b1a91..1ee27d32309a93 100644 --- a/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "blocked_memory_desc.h" #include "utils/general_utils.h" diff --git a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp index b8078eeaed965c..4552c03372e65b 100644 --- a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "memory_desc/cpu_blocked_memory_desc.h" #include "memory_desc/dnnl_blocked_memory_desc.h" #include "cpu_memory_desc.h" #include "memory_desc/cpu_memory_desc_utils.h" #include #include -#include #include #include #include diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index fbefc74a076d65..ba00cde21577fc 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -28,7 +28,6 @@ #include #include -#include #include "utils/general_utils.h" #include "utils/cpu_utils.hpp" #include 
"nodes/common/cpu_convert.h" diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index e80800086dec2b..b27ab305bc6329 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -4,7 +4,6 @@ #pragma once -#include #include #include #include "cpu_memory.h" diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index bbfaa09413d734..0f656c70495766 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -25,8 +25,6 @@ #include "ov_optional.hpp" #include "utils/cpp/maybe_unused.hpp" -using namespace InferenceEngine; - namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/nodes/executors/precision_translation.cpp b/src/plugins/intel_cpu/src/nodes/executors/precision_translation.cpp index 7ab47477f72735..cda7b4e47a0c0b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/precision_translation.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/precision_translation.cpp @@ -11,8 +11,6 @@ #include "openvino/core/type/element_type.hpp" #include "precision_matcher.hpp" -using namespace InferenceEngine; - namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index 867267de254f04..f32f67279b75d0 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -4,7 +4,6 @@ #include "eye.h" #include "openvino/op/eye.hpp" -#include #include #include "openvino/core/parallel.hpp" #include "shape_inference/shape_inference_ngraph.hpp" diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 4b18b1b391ae13..0c45b65cf111e6 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -315,7 +315,7 @@ Engine::compile_model(const std::shared_ptr& model, const ov::A // TODO: Clarify the behavior of SetConfig method. Skip eng_config or not? 
Config conf = engConfig; - Transformations transformations(cloned_model, enableLPT, inferencePrecision, is_legacy_api(), snippetsMode, conf); + Transformations transformations(cloned_model, enableLPT, inferencePrecision, snippetsMode, conf); transformations.UpToLpt(); @@ -325,16 +325,6 @@ Engine::compile_model(const std::shared_ptr& model, const ov::A transformations.PostLpt(); transformations.Snippets(); - // need to check that all outputs have static shapes - // checking that all inputs have static shapes is performed in the common part - if (is_legacy_api()) { - for (const auto& res : cloned_model->get_results()) { - if (res->get_input_partial_shape(0).is_dynamic()) { - OPENVINO_THROW("CPU plug-in can't load a model with dynamic output shapes via legacy API."); - } - } - } - transformations.CpuSpecificOpSet(); DEBUG_LOG(PrintableModel(*cloned_model, "cpu_")); @@ -378,10 +368,6 @@ void Engine::set_property(const ov::AnyMap &config) { engConfig.readProperties(config); } -bool Engine::is_legacy_api() const { - return !get_core()->is_new_api(); -} - ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) const { if (name == ov::optimal_number_of_infer_requests) { const auto streams = engConfig.streamExecutorConfig.get_streams(); @@ -587,7 +573,6 @@ ov::SupportedOpsMap Engine::query_model(const std::shared_ptr& Transformations transformation(model, enableLPT, conf.inferencePrecision, - is_legacy_api(), snippetsMode, engConfig); transformation.UpToLpt(); diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 5f33c05a9ba8f9..59e2bd6a197020 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -44,8 +44,6 @@ class Engine : public ov::IPlugin { }; private: - bool is_legacy_api() const; - ov::Any get_ro_property(const std::string& name, const ov::AnyMap& options) const; void get_performance_streams(Config& config, const std::shared_ptr& model) const; diff --git a/src/plugins/intel_cpu/src/serialize.h b/src/plugins/intel_cpu/src/serialize.h index b0c57a7ea9d91a..72f58db0bee14e 100644 --- a/src/plugins/intel_cpu/src/serialize.h +++ b/src/plugins/intel_cpu/src/serialize.h @@ -2,10 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once + #include -#include +#include +#include +#include -#include "cpp/ie_cnn_network.h" +#include "openvino/core/model.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 17c2574affb081..54517e73b46f07 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -258,7 +258,7 @@ void Transformations::UpToLpt() { const auto defaultPrecisions = useLpt ? 
precision_set::get_int8_support() : std::vector{}; - PreLpt(defaultPrecisions, isLegacyApi); + PreLpt(defaultPrecisions); if (useLpt) Lpt(defaultPrecisions); @@ -270,7 +270,7 @@ void Transformations::CpuSpecificOpSet(void) { ConvertToCPUSpecificOpset(model); } -void Transformations::PreLpt(const std::vector& defaultPrecisions, const bool isLegacyApi) { +void Transformations::PreLpt(const std::vector& defaultPrecisions) { CPU_DEBUG_CAP_TRANSFORMATION_SCOPE(this, PreLpt); // Decompression handling related transformations must be run separately from common preLPT pipeline @@ -457,7 +457,9 @@ void Transformations::PreLpt(const std::vector& defaultPrecis // NMS-alike nodes are always transformed to NMSIEInternal node in case of legacy api, for compatibility. // And on the other hand in case of api 2.0, keep them internal dynamic for better performance and functionality. - auto nmsCallback = [isLegacyApi](const_node_ptr &node) -> bool { + auto nmsCallback = [](const_node_ptr &node) -> bool { + // TODO: remove nmsCallback at all + const bool isLegacyApi = false; return isLegacyApi ? false : true; }; diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h index e79dadf44da680..420cf102f38fc6 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h @@ -23,13 +23,11 @@ class Transformations { Transformations(const std::shared_ptr& initialModel, const bool enableLpt, const ov::element::Type inferencePrecision, - const bool isLegacyApi, const Config::SnippetsMode& snippetsMode, const Config& config) : model(initialModel), enableLpt(enableLpt), inferencePrecision(inferencePrecision), - isLegacyApi(isLegacyApi), snippetsMode(snippetsMode), config(config) { CPU_DEBUG_CAPS_MAYBE_UNUSED(this->config); @@ -44,11 +42,10 @@ class Transformations { std::shared_ptr model; const bool enableLpt; const ov::element::Type inferencePrecision; - const bool isLegacyApi; const Config::SnippetsMode snippetsMode; const Config& config; - void PreLpt(const std::vector& defaultPrecisions, const bool isLegacyApi); + void PreLpt(const std::vector& defaultPrecisions); void Lpt(const std::vector& defaultPrecisions); diff --git a/src/plugins/intel_cpu/src/utils/blob_dump.cpp b/src/plugins/intel_cpu/src/utils/blob_dump.cpp index 26626404aeea98..3181c012a4556e 100644 --- a/src/plugins/intel_cpu/src/utils/blob_dump.cpp +++ b/src/plugins/intel_cpu/src/utils/blob_dump.cpp @@ -3,7 +3,6 @@ // #include "blob_dump.h" -#include "blob_factory.hpp" #include #include "dnnl_extension_utils.h" #include diff --git a/src/plugins/intel_cpu/src/utils/general_utils.h b/src/plugins/intel_cpu/src/utils/general_utils.h index 08fab4686bde85..28ac379094f225 100644 --- a/src/plugins/intel_cpu/src/utils/general_utils.h +++ b/src/plugins/intel_cpu/src/utils/general_utils.h @@ -4,9 +4,10 @@ #pragma once -#include #include "cpu_shape.h" +#include "openvino/core/type/element_type.hpp" + #include #include diff --git a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp index 3bc6b827821aae..6cdd320d7eba63 100644 --- a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp +++ b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp @@ -49,10 +49,11 @@ inline bool isDynamicNgraphNode(const std::shared_ptr& op) { return ret; } -inline std::string get_port_name(const ov::Output& port, const bool is_legacy_api) { +inline std::string 
get_port_name(const ov::Output& port) { std::string name; // Should use tensor name as the port name, but many legacy tests still use legacy name // plus sometimes it will get empty tensor name. + const bool is_legacy_api = false; if (!is_legacy_api) { // TODO: To apply unified tensor name. } diff --git a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp index 3bab4621bf4010..ba576d51a7a8ca 100644 --- a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp @@ -7,6 +7,7 @@ #include #include #include +#include using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp b/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp index f95ac227f96636..7acd73a4999ad3 100644 --- a/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp @@ -4,7 +4,6 @@ #include #include -#include "ie_common.h" #include "nodes/eltwise.h" using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp index 12927158aec087..bc87c69cd84281 100644 --- a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp b/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp index f899ee3191fb0c..df38e268068deb 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp @@ -14,7 +14,6 @@ #include #include #include -#include #include "common_test_utils/ov_test_utils.hpp" #include "utils/gen_pattern.hpp" diff --git a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp index 13b8bba7f848f8..9aab52c866c1f1 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp @@ -17,7 +17,6 @@ #include #include #include "ov_ops/type_relaxed.hpp" -#include #include "common_test_utils/ov_test_utils.hpp" diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp index b87b1b18e607df..0f256fdf2def08 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp @@ -55,9 +55,6 @@ class CompiledModel : public ov::ICompiledModel { return m_inputs; } - bool is_new_api() const { - return std::static_pointer_cast(get_plugin())->is_new_api(); - } RemoteContextImpl::Ptr get_context_impl() const { return m_context; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/custom_layer.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/custom_layer.hpp index 284297da3a217f..041a4ce6cf881a 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/custom_layer.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/custom_layer.hpp @@ -8,7 +8,6 @@ #include #include #include -#include #include "pugixml.hpp" #include "intel_gpu/runtime/tensor.hpp" diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp index 1e164c42fda131..213464a594c795 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp @@ -14,7 +14,6 @@ #include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -using namespace InferenceEngine; using namespace ov::test; namespace GPULayerTestsDefinitions { diff --git a/src/plugins/proxy/src/plugin.cpp b/src/plugins/proxy/src/plugin.cpp index 1cb9e7f3619c4d..9b57ae712a6956 100644 --- a/src/plugins/proxy/src/plugin.cpp +++ b/src/plugins/proxy/src/plugin.cpp @@ -369,15 +369,13 @@ ov::SoPtr ov::proxy::Plugin::create_proxy_context( auto dev_name = get_device_name(); auto dev_idx = get_device_from_config(properties); auto has_dev_idx = is_device_in_config(properties); - auto is_new_api = get_core()->is_new_api(); ov::SoPtr device_context; ov::SoPtr remote_context; try { device_context = compiled_model->get_context(); if (!device_context._so) device_context._so = compiled_model._so; - remote_context = - std::make_shared(device_context, dev_name, dev_idx, has_dev_idx, is_new_api); + remote_context = std::make_shared(device_context, dev_name, dev_idx, has_dev_idx); } catch (const ov::NotImplemented&) { } return remote_context; @@ -426,7 +424,6 @@ ov::SoPtr ov::proxy::Plugin::create_context(const ov::AnyMap auto dev_name = get_device_name(); auto dev_idx = get_device_from_config(remote_properties); auto has_dev_idx = is_device_in_config(remote_properties); - auto is_new_api = get_core()->is_new_api(); auto device_config = remote_properties; remove_proxy_properties(device_config); @@ -436,8 +433,7 @@ ov::SoPtr ov::proxy::Plugin::create_context(const ov::AnyMap get_core()->create_context(get_fallback_device(get_device_from_config(remote_properties)), device_config), dev_name, dev_idx, - has_dev_idx, - is_new_api); + has_dev_idx); return remote_context; } // Properties doesn't have device id, so try to create context for all devices @@ -449,8 +445,7 @@ ov::SoPtr ov::proxy::Plugin::create_context(const ov::AnyMap device_config), dev_name, i, - has_dev_idx, - is_new_api); + has_dev_idx); return remote_context; } catch (const ov::Exception&) { } @@ -463,7 +458,6 @@ ov::SoPtr ov::proxy::Plugin::get_default_context(const ov::A auto dev_name = get_device_name(); auto dev_idx = get_device_from_config(remote_properties); auto has_dev_idx = is_device_in_config(remote_properties); - auto is_new_api = get_core()->is_new_api(); auto device_config = remote_properties; remove_proxy_properties(device_config); @@ -472,8 +466,7 @@ ov::SoPtr ov::proxy::Plugin::get_default_context(const ov::A get_core()->get_default_context(get_fallback_device(get_device_from_config(remote_properties))), dev_name, dev_idx, - has_dev_idx, - is_new_api); + has_dev_idx); return remote_context; } diff --git a/src/plugins/proxy/src/remote_context.cpp b/src/plugins/proxy/src/remote_context.cpp index 5ce8133f334a2b..a7f8f9935ac42e 100644 --- a/src/plugins/proxy/src/remote_context.cpp +++ b/src/plugins/proxy/src/remote_context.cpp @@ -11,34 +11,27 @@ #include "openvino/runtime/so_ptr.hpp" #include "remote_tensor.hpp" -void ov::proxy::RemoteContext::init_context(const std::string& dev_name, - size_t dev_index, - bool has_index, - bool is_new_api) { 
+void ov::proxy::RemoteContext::init_context(const std::string& dev_name, size_t dev_index, bool has_index) { OPENVINO_ASSERT(m_context); - m_tensor_name = dev_name + "." + std::to_string(dev_index); // New API always has full name, in legacy API we can have device name without index - if (is_new_api || has_index) - m_name = m_tensor_name; - else - m_name = dev_name; + // TODO: can we remove `has_index` then? We are currently in new API only + m_name = dev_name + "." + std::to_string(dev_index); } + ov::proxy::RemoteContext::RemoteContext(ov::SoPtr&& ctx, const std::string& dev_name, size_t dev_index, - bool has_index, - bool is_new_api) + bool has_index) : m_context(std::move(ctx)) { - init_context(dev_name, dev_index, has_index, is_new_api); + init_context(dev_name, dev_index, has_index); } ov::proxy::RemoteContext::RemoteContext(const ov::SoPtr& ctx, const std::string& dev_name, size_t dev_index, - bool has_index, - bool is_new_api) + bool has_index) : m_context(ctx) { - init_context(dev_name, dev_index, has_index, is_new_api); + init_context(dev_name, dev_index, has_index); } const std::string& ov::proxy::RemoteContext::get_device_name() const { @@ -50,7 +43,7 @@ const ov::AnyMap& ov::proxy::RemoteContext::get_property() const { } ov::SoPtr ov::proxy::RemoteContext::wrap_tensor(const ov::SoPtr& tensor) { - auto proxy_tensor = std::make_shared(tensor, m_tensor_name); + auto proxy_tensor = std::make_shared(tensor, m_name); return ov::SoPtr(std::dynamic_pointer_cast(proxy_tensor), nullptr); } @@ -58,7 +51,7 @@ ov::SoPtr ov::proxy::RemoteContext::create_tensor(const ov::e const ov::Shape& shape, const ov::AnyMap& params) { auto proxy_tensor = - std::make_shared(m_context->create_tensor(type, shape, params), m_tensor_name); + std::make_shared(m_context->create_tensor(type, shape, params), m_name); return ov::SoPtr(std::dynamic_pointer_cast(proxy_tensor), nullptr); } diff --git a/src/plugins/proxy/src/remote_context.hpp b/src/plugins/proxy/src/remote_context.hpp index c2490c5fcc5c12..8d4718a678daeb 100644 --- a/src/plugins/proxy/src/remote_context.hpp +++ b/src/plugins/proxy/src/remote_context.hpp @@ -26,23 +26,17 @@ class RemoteContext : public ov::IRemoteContext { * @param dev_name device name without index * @param dev_index device index if exists else 0 * @param has_index flag is true if device has an index and false in another case - * @param is_new_api flag reports which API is used * * These arguments are needed to support the difference between legacy and 2.0 APIs. * In legacy API remote context doesn't contain the index in the name but Blob contains. 
* In 2.0 API Tensor and Context always contain device index */ - RemoteContext(ov::SoPtr&& ctx, - const std::string& dev_name, - size_t dev_index, - bool has_index, - bool is_new_api); + RemoteContext(ov::SoPtr&& ctx, const std::string& dev_name, size_t dev_index, bool has_index); RemoteContext(const ov::SoPtr& ctx, const std::string& dev_name, size_t dev_index, - bool has_index, - bool is_new_api); + bool has_index); const std::string& get_device_name() const override; const ov::AnyMap& get_property() const override; @@ -60,9 +54,8 @@ class RemoteContext : public ov::IRemoteContext { private: ov::SoPtr m_context; std::string m_name; - std::string m_tensor_name; - void init_context(const std::string& dev_name, size_t dev_index, bool has_index, bool is_new_api); + void init_context(const std::string& dev_name, size_t dev_index, bool has_index); }; } // namespace proxy diff --git a/src/plugins/template/tests/functional/op_reference/if.cpp b/src/plugins/template/tests/functional/op_reference/if.cpp index 5f51a03f02d427..fe05f18c27f50d 100644 --- a/src/plugins/template/tests/functional/op_reference/if.cpp +++ b/src/plugins/template/tests/functional/op_reference/if.cpp @@ -8,7 +8,6 @@ #include #include "base_reference_test.hpp" -#include "ie_core.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using namespace reference_tests; diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index b7f27d1819022e..8414d922519b17 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -261,14 +261,6 @@ class OVClassSeveralDevicesTests : public OVPluginTestBase, } }; -#define SKIP_IF_NOT_IMPLEMENTED(...) \ -{ \ - try { \ - __VA_ARGS__; \ - } catch (const InferenceEngine::NotImplemented&) { \ - GTEST_SKIP(); \ - } \ -} } // namespace behavior } // namespace test } // namespace ov diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp index ee82e48642f0f9..e69960fd493a2c 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp @@ -13,7 +13,7 @@ namespace ov { namespace test { namespace behavior { -namespace IE = InferenceEngine; + // for deviceConfigs, the deviceConfigs[0] is target device which need to be tested. 
// deviceConfigs[1], deviceConfigs[2],deviceConfigs[n] are the devices which will // be compared with target device, the result of target should be in one of the compared diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp index 5b5feb5c470689..45ba0e98259c75 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "openvino/runtime/exec_model_info.hpp" #include "openvino/core/model.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index cf2813e77d2063..68ce95d492a2fa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include @@ -52,7 +51,7 @@ void DepthToSpaceTransformation::SetUp() { init_input_shapes(inputShape); if (inputShape.rank().is_dynamic() || inputShape.rank().get_length() != 4) { - IE_THROW() << "not supported input shape size " << inputShape.rank(); + OPENVINO_THROW("not supported input shape size ", inputShape.rank()); } function = ov::builder::subgraph::DepthToSpaceFunction::getOriginal(precision, inputShape, mode, blockSize); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index b4262c9b14001f..8151bc84410211 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 52fe187d7e5f86..7c1dd15d3657d1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index 7eaf42e6421504..1b5200db978d5a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index dd7575a4ff22a2..20a17137abad17 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp index 15d89175ea5382..98229bbfd2ecd2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp @@ -157,7 +157,7 @@ class BenchmarkLayerTest : public BaseLayerTest { return profile.node_type == node_type_name; }); if (found_profile == profiling_info.end()) { - IE_THROW() << "Cannot find operator by node type: " << node_type_name; + OPENVINO_THROW("Cannot find operator by node type: ", node_type_name); } time += found_profile->real_time.count(); } diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp index 0db366b4e7b034..985ba1e8bbc660 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp @@ -4,7 +4,6 @@ #pragma once -#include "ie_core.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp index 0a4d8ea5e8166a..61de1970774a86 100644 --- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp +++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp @@ -7,8 +7,6 @@ #include #include -#include - #include "ov_models/pass/convert_prc.hpp" namespace LayerTestsUtils { diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp index c826e5ffcfc9b8..33b00c0a69e3af 100644 --- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp +++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp @@ -96,10 +96,10 @@ void SubgraphBaseTest::run() { GTEST_FATAL_FAILURE_(errorMessage.c_str()); } } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) { - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) { summary.updateOPsStats(function, ov::test::utils::PassRate::Statuses::HANGED, rel_influence_coef); - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } } @@ -170,7 +170,7 @@ void SubgraphBaseTest::query_model() { actual.insert(res.first); } if (expected != actual) { - IE_THROW() << "Expected and actual are different"; + OPENVINO_THROW("Expected and actual are different"); } status = 
ov::test::utils::PassRate::Statuses::PASSED; } catch (const std::exception& ex) { @@ -185,10 +185,10 @@ void SubgraphBaseTest::query_model() { GTEST_FATAL_FAILURE_(errorMessage.c_str()); } } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) { - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) { summary.updateOPsStats(function, ov::test::utils::PassRate::Statuses::HANGED, rel_influence_coef); - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } } @@ -247,10 +247,10 @@ void SubgraphBaseTest::import_export() { GTEST_FATAL_FAILURE_(errorMessage.c_str()); } } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) { - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) { summary.updateOPsStats(function, ov::test::utils::PassRate::Statuses::HANGED, rel_influence_coef); - IE_THROW() << "Crash happens"; + OPENVINO_THROW("Crash happens"); } } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp index 30cb2331f71f8f..6e541000b422a3 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp @@ -83,12 +83,5 @@ void OutputBeforeActivation::SetUp() { function = std::make_shared(outputs, input_parameter, "output_before_activation"); } -// void OutputBeforeActivation::generate_inputs(const std::vector& targetInputStaticShapes) { -// ov::test::SubgraphBaseTest::generate_inputs(targetInputStaticShapes); -// } -// InferenceEngine::Blob::Ptr OutputBeforeActivation::GenerateInput(const InferenceEngine::InputInfo& info) const { -// return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, -1, 100); -// } - } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index 0d3593478e38c0..c300689016458d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/node_builders/convolution_backprop_data.hpp" #include "common_test_utils/node_builders/constant.hpp" #include "ov_models/utils/ov_helpers.hpp" -#include "ie_common.h" #include "common_test_utils/node_builders/fake_quantize.hpp" namespace ov { @@ -48,7 +47,7 @@ void QuantConvBackpropDataLayerTest::SetUp() { ov::element::Type element_type = ov::element::undefined; std::tie(groupConvBackpropDataParams, element_type, inputShape, targetDevice) = this->GetParam(); ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; size_t quantLevels; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp index 35c59647712f5e..3fecea21d1208a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp @@ -5,7 +5,6 @@ #include 
"shared_test_classes/subgraph/split_conv_concat.hpp" #include "common_test_utils/data_utils.hpp" -#include "ie_common.h" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/node_builders/convolution.hpp" diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp index ae619f66f2a2a4..49fa9bdd138ed4 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp @@ -12,7 +12,6 @@ #include #include -#include "description_buffer.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/common.hpp" #include "openvino/runtime/icore.hpp" diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp index 1a90ddd73fba8a..5d0c8336349559 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp @@ -7,6 +7,7 @@ #include #include "openvino/runtime/icompiled_model.hpp" +#include "openvino/runtime/icore.hpp" namespace ov { @@ -16,7 +17,6 @@ class MockICore : public ov::ICore { MOCK_METHOD(ov::Any, get_property, (const std::string&, const std::string&), (const)); MOCK_METHOD(ov::AnyMap, get_supported_property, (const std::string&, const ov::AnyMap&, const bool), (const)); - MOCK_METHOD(bool, is_new_api, (), (const)); MOCK_METHOD(ov::SoPtr, create_context, (const std::string& deviceName, const ov::AnyMap& params), From ec93193d31d43cace8af2d680bd60a0c51469410 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 07:01:10 +0800 Subject: [PATCH 09/13] Fixed Android build --- .../offline_transformations/pruning_test.cpp | 2 +- src/inference/src/cpp/infer_request.cpp | 2 ++ .../custom/behavior/export_import.cpp | 2 +- .../custom/single_layer_tests/broadcast.cpp | 20 ++++++++--------- .../custom/single_layer_tests/tile.cpp | 10 ++++----- .../custom/single_layer_tests/topk.cpp | 16 +++++++------- .../single_layer_tests/broadcast.cpp | 18 +++++++-------- .../convolution_backprop_data.cpp | 2 +- .../single_layer_tests/gather.cpp | 8 +++---- .../single_layer_tests/broadcast.cpp | 22 +++++++++---------- .../single_layer_tests/gather.cpp | 6 ++--- .../single_layer_tests/dynamic/broadcast.cpp | 22 +++++++++---------- .../single_layer_tests/dynamic/tile.cpp | 4 ++-- .../single_layer_tests/dynamic/top_k.cpp | 4 ++-- 14 files changed, 70 insertions(+), 68 deletions(-) diff --git a/src/common/transformations/tests/offline_transformations/pruning_test.cpp b/src/common/transformations/tests/offline_transformations/pruning_test.cpp index cad91bb59bd41d..52978905a41494 100644 --- a/src/common/transformations/tests/offline_transformations/pruning_test.cpp +++ b/src/common/transformations/tests/offline_transformations/pruning_test.cpp @@ -5180,7 +5180,7 @@ TEST(TransformationTests, CheckReshapeWithNoConstInShape) { m.run_passes(model); } -INSTANTIATE_TEST_CASE_P(TransformationTestsBoolParam, TransformationTestsBoolParamF, ::testing::Values(false, true)); +INSTANTIATE_TEST_SUITE_P(TransformationTestsBoolParam, TransformationTestsBoolParamF, ::testing::Values(false, true)); TEST_F(TransformationTestsF, PruningWithVariadicSplitOnSecondAxis) { { diff --git a/src/inference/src/cpp/infer_request.cpp 
b/src/inference/src/cpp/infer_request.cpp index 44fe17b2bd7fec..12f5523d168f7b 100644 --- a/src/inference/src/cpp/infer_request.cpp +++ b/src/inference/src/cpp/infer_request.cpp @@ -252,6 +252,8 @@ bool InferRequest::wait_for(const std::chrono::milliseconds timeout) { OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); try { return _impl->wait_for(timeout); + } catch (const Cancelled& e) { + throw e; } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); } catch (...) { diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp index c9c548a852a43f..93739427d89d0d 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp @@ -120,7 +120,7 @@ const std::vector testing_property_for_enable_hyper_threading = {{ov const std::vector testing_property_for_enable_cpu_pinning = {{ov::hint::enable_cpu_pinning(true)}, {ov::hint::enable_cpu_pinning(false)}}; -INSTANTIATE_TEST_CASE_P(smoke_ExportImportTest, +INSTANTIATE_TEST_SUITE_P(smoke_ExportImportTest, ExportOptimalNumStreams, ::testing::Combine(::testing::Values(std::string("CPU")), ::testing::Values(testing_property_for_streams, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/broadcast.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/broadcast.cpp index 6159d854d29f7b..825d7898e47600 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/broadcast.cpp @@ -223,7 +223,7 @@ const std::vector> staticInputShapes4D = {{{{} {// Static shapes {50, 50}}}}}; -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape4D, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(staticInputShapes4D[0]), ::testing::ValuesIn(std::vector>{ @@ -237,7 +237,7 @@ INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, ::testing::ValuesIn(CPUParams4D)), BroadcastLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DE, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape4DE, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(staticInputShapes4D[1]), ::testing::Values(std::vector{1, 50, 50, 16}), @@ -253,7 +253,7 @@ const std::vector> staticInputShapesScalar = { {// Static shapes {1}}}}}; -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DScalar, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape4DScalar, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapesScalar), ::testing::Values(std::vector{1, 16, 3, 3}), @@ -280,7 +280,7 @@ const std::vector> dynamicInputShapes4D = { {{1, 16, 1, 1}}, {{8, 1, 1, 1}}}}}}; -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( smoke_DynamicShape4D, BroadcastLayerCPUTest, ::testing::Combine( @@ -300,7 +300,7 @@ const std::vector> dynamicInputShapesScalar = {1}, {7}}}}}; -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4DScalar, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape4DScalar, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapesScalar), ::testing::Values(std::vector{8, 16, 1, 7}), @@ -338,7 +338,7 @@ const std::vector CPUParams5D = { cpuParams_ndhwc, }; -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape5D, BroadcastLayerCPUTest, 
::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes5D), ::testing::ValuesIn(std::vector>{ @@ -352,7 +352,7 @@ INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, ::testing::ValuesIn(CPUParams5D)), BroadcastLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5DScalar, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape5DScalar, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapesScalar), ::testing::Values(std::vector{1, 16, 3, 1, 3}), @@ -364,7 +364,7 @@ INSTANTIATE_TEST_CASE_P(smoke_StaticShape5DScalar, ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), BroadcastLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape5D, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D), ::testing::ValuesIn(targetShapes5D), @@ -378,7 +378,7 @@ INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), BroadcastLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5DScalar, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape5DScalar, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapesScalar), ::testing::Values(std::vector{8, 16, 1, 1, 7}), @@ -399,7 +399,7 @@ const std::vector> dynamicShapes1D = {{{// Ori {1}, {1}}}}}; -INSTANTIATE_TEST_CASE_P(smoke_DynamicShapes1D, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapes1D, BroadcastLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicShapes1D), ::testing::Values(std::vector{0}), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/tile.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/tile.cpp index a562df499c7065..a769719c302fa7 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/tile.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/tile.cpp @@ -222,7 +222,7 @@ const std::vector CPUParams5D = { /* ============= */ /* INSTANCES */ -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape4D, TileLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes4D), ::testing::ValuesIn(repeats4D), @@ -232,7 +232,7 @@ INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, ::testing::ValuesIn(CPUParams4D)), TileLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4D, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape4D, TileLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D), ::testing::ValuesIn(repeats4D), @@ -249,7 +249,7 @@ const std::vector> dynBatchInputShapes4D = {{/ {1, 16, 3, 4}, {3, 16, 3, 4}}}}}; -INSTANTIATE_TEST_CASE_P(smoke_DynBatch4D, +INSTANTIATE_TEST_SUITE_P(smoke_DynBatch4D, TileLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynBatchInputShapes4D), ::testing::Values(std::vector{1, 2, 1, 3}), @@ -259,7 +259,7 @@ INSTANTIATE_TEST_CASE_P(smoke_DynBatch4D, ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), TileLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, +INSTANTIATE_TEST_SUITE_P(smoke_StaticShape5D, TileLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes5D), ::testing::ValuesIn(repeats5D), @@ -269,7 +269,7 @@ INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, ::testing::ValuesIn(CPUParams5D)), TileLayerCPUTest::getTestCaseName); 
-INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape5D, TileLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D), ::testing::ValuesIn(repeats5D), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp index 5e3d38ce758d7e..d178dedba78d16 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp @@ -268,7 +268,7 @@ std::vector cpuParams = {CPUSpecificParams({nChw16c, x}, {nCh CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {}), CPUSpecificParams({nhwc, x}, {nhwc, nhwc}, {}, {})}; -INSTANTIATE_TEST_CASE_P(smoke_TopK, +INSTANTIATE_TEST_SUITE_P(smoke_TopK, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(k), ::testing::ValuesIn(axes), @@ -282,7 +282,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK, ::testing::ValuesIn(additionalConfig)), TopKLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_TopK_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_TopK_dynamic, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), ::testing::ValuesIn(axes), @@ -305,7 +305,7 @@ std::vector inputShapes_int32 = { std::vector inputShapesDynamic_int32 = { {{9, {5, 10}, 9, {5, 10}}, {{9, 9, 9, 9}, {9, 10, 9, 10}}}}; -INSTANTIATE_TEST_CASE_P(smoke_TopK_int32, +INSTANTIATE_TEST_SUITE_P(smoke_TopK_int32, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::ValuesIn(k_int32), ::testing::ValuesIn(axes), @@ -319,7 +319,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_int32, ::testing::Values(additionalConfig[0])), TopKLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_TopK_int32_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_TopK_int32_dynamic, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), ::testing::ValuesIn(axes), @@ -340,7 +340,7 @@ std::vector inputShapes_bubble_BLK_on_channel_horiz = { std::vector inputShapesDynamic_bubble_BLK_on_channel_horiz = { {{2, {2, 3}, 2, 2}, {{2, 2, 2, 2}, {2, 3, 2, 2}}}}; -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( smoke_TopK_bubble_BLK_on_channel_horiz, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), @@ -355,7 +355,7 @@ INSTANTIATE_TEST_CASE_P( ::testing::ValuesIn(additionalConfig)), TopKLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( smoke_TopK_bubble_BLK_on_channel_horiz_dynamic, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), @@ -376,7 +376,7 @@ std::vector inputShapes_top1 = { std::vector inputShapesDynamic_top1 = {{{1, 1, 2, {1, 2}}, {{1, 1, 2, 1}, {1, 1, 2, 2}}}}; -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( smoke_Top1, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), @@ -391,7 +391,7 @@ INSTANTIATE_TEST_CASE_P( ::testing::ValuesIn(additionalConfig)), TopKLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( smoke_Top1_dynamic, TopKLayerCPUTest, ::testing::Combine(::testing::Combine(::testing::Values(1), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp index d1129082dd6535..d4ad504aedfec0 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp @@ -48,9 +48,9 @@ const auto numpyBroadcast1DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, numpyBroadcast1DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, numpyBroadcast1DInputParams, BroadcastLayerTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_PrecTransformation, BroadcastLayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_PrecTransformation, BroadcastLayerTest, ::testing::Combine( ::testing::Values(targetShapesNumpy1D[0]), ::testing::Values(ov::AxisSet{}), //not used in numpy mode @@ -81,7 +81,7 @@ const auto numpyBroadcast2DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest, numpyBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest, numpyBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName); // 3D std::vector> targetShapesNumpy3D = { @@ -104,7 +104,7 @@ const auto numpyBroadcast3DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast3D, BroadcastLayerTest, numpyBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast3D, BroadcastLayerTest, numpyBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName); const std::vector> evaluate_shapes_static = { {{ 1, 2, 1, 4, 1, 6, 1, 8, 1, 10 }} @@ -120,7 +120,7 @@ const auto numpyBroadcastEvaluateParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcastEvaluate, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcastEvaluate, BroadcastLayerTest, numpyBroadcastEvaluateParams, BroadcastLayerTest::getTestCaseName); @@ -148,7 +148,7 @@ const auto bidirectionalBroadcastParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, bidirectionalBroadcastParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, bidirectionalBroadcastParams, BroadcastLayerTest::getTestCaseName); // EXPLICIT MODE /////////////////////////////////////// // 1D @@ -168,7 +168,7 @@ const auto explicitBroadcast1DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, explicitBroadcast1DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, explicitBroadcast1DInputParams, BroadcastLayerTest::getTestCaseName); const auto bidirectionalBroadcastParams3 = ::testing::Combine( ::testing::Values(targetShapesBidi[2]), @@ -215,7 +215,7 @@ const auto explicitBroadcast2DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast2D, BroadcastLayerTest, explicitBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast2D, BroadcastLayerTest, 
explicitBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName); // 3D std::vector> input_shapes_explicit_3d_static = { @@ -233,7 +233,7 @@ const auto explicitBroadcast3DInputParams = ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast3D, BroadcastLayerTest, explicitBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast3D, BroadcastLayerTest, explicitBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName); // END EXPLICIT MODE /////////////////////////////////// } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp index f2a3adfd3436fa..34e96e9aeb7a2e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp @@ -122,7 +122,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddi ::testing::Values(ov::test::utils::DEVICE_CPU)), ConvolutionBackpropDataLayerTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_ConvolutionBackpropData2D_RoundingOfPadding, ConvolutionBackpropDataLayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData2D_RoundingOfPadding, ConvolutionBackpropDataLayerTest, ::testing::Combine( ::testing::Combine( ::testing::Values(std::vector({31, 1})), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index 68a16954a19a85..f82319774f847d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -156,7 +156,7 @@ const std::vector> axes_batches_4d_gather8 = { {0, 0} }; -INSTANTIATE_TEST_CASE_P(smoke_static_4D, Gather8LayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_static_4D, Gather8LayerTest, testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(data_shapes_4d_gather8)), testing::ValuesIn(idx_shapes_4d_gather8), @@ -186,7 +186,7 @@ const auto gatherParamsVec2 = testing::Combine( testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_Vec2, Gather8LayerTest, gatherParamsVec2, Gather8LayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Vec2, Gather8LayerTest, gatherParamsVec2, Gather8LayerTest::getTestCaseName); const std::vector data_shapes_vec3_gather8 = {{4, 4}}; @@ -202,7 +202,7 @@ const auto gatherParamsVec3 = testing::Combine( testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_Vec3, Gather8LayerTest, gatherParamsVec3, Gather8LayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Vec3, Gather8LayerTest, gatherParamsVec3, Gather8LayerTest::getTestCaseName); const ov::test::gather7ParamsTuple dummyParams = { @@ -227,6 +227,6 @@ const auto gatherWithIndicesParams = testing::Combine( testing::ValuesIn(indicesData) ); -INSTANTIATE_TEST_CASE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, Gather8withIndicesDataLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, 
Gather8withIndicesDataLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp index cbe0417bfa0419..cf416dd1905bf8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp @@ -34,7 +34,7 @@ std::vector> input_shapes_0d_static = { {{}} }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast0D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast0D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy0D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -60,7 +60,7 @@ std::vector> input_shapes_1d_static = { {{1}} }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy1D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -70,7 +70,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_PrecTransformation, BroadcastLayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_PrecTransformation, BroadcastLayerTest, ::testing::Combine( ::testing::Values(targetShapesNumpy1D[0]), ::testing::Values(ov::AxisSet{}), //not used in numpy mode @@ -92,7 +92,7 @@ std::vector> input_shapes_2d_static = { {{3, 1}} }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast2D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy2D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -115,7 +115,7 @@ std::vector> input_shapes_3d_static = { }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast3D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast3D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy3D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -133,7 +133,7 @@ std::vector> input_shapes_6d_static = { {{1, 2, 1, 4, 1, 6}} }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast6D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast6D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy6D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -151,7 +151,7 @@ std::vector> input_shapes_5d_static = { {{1, 2, 1, 4, 1}} }; -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast5D, +INSTANTIATE_TEST_SUITE_P(smoke_TestNumpyBroadcast5D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy5D), ::testing::Values(ov::AxisSet{}), // not used in numpy mode @@ -175,7 +175,7 @@ std::vector> targetShapesBidi = { {1, 1, 4, 4} }; -INSTANTIATE_TEST_CASE_P(smoke_TestBidirectionalBroadcast, +INSTANTIATE_TEST_SUITE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesBidi), ::testing::Values(ov::AxisSet{}), // not used in bidirectional mode @@ -191,7 +191,7 @@ std::vector> inShapesExplicit1D = { {{4}} }; std::vector> targetShapesExplicit1D = { {4, 2, 4}, {4, 2, 4, 1} }; std::vector axes1D = { {0}, {2} }; -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, 
::testing::Combine(::testing::ValuesIn(targetShapesExplicit1D), ::testing::ValuesIn(axes1D), @@ -228,7 +228,7 @@ std::vector> inShapesExplicit2D = { {{2, 4}} }; std::vector> targetShapesExplicit2D = { {2, 2, 4}, {2, 2, 4, 1}}; std::vector axes2D = { {1, 2}, {0, 2} }; -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast2D, +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast2D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesExplicit2D), ::testing::ValuesIn(axes2D), @@ -243,7 +243,7 @@ std::vector> inShapesExplicit3D = { {{2, 2, 2}} }; std::vector> targetShapesExplicit3D = { {2, 2, 2, 2} }; std::vector axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} }; -INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast3D, +INSTANTIATE_TEST_SUITE_P(smoke_TestExplicitBroadcast3D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesExplicit3D), ::testing::ValuesIn(axes3D), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index 544c39f912e3c9..be6912e269b10f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -528,7 +528,7 @@ const auto gatherWithIndicesParams = testing::Combine( testing::ValuesIn(indicesData) ); -INSTANTIATE_TEST_CASE_P(smoke, +INSTANTIATE_TEST_SUITE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, Gather8withIndicesDataLayerTest::getTestCaseName @@ -553,7 +553,7 @@ const auto gatherWithNagativeIndicesParams1 = testing::Combine( testing::ValuesIn(nagativeSingleindicesData) ); -INSTANTIATE_TEST_CASE_P(smoke_Gather8NagativeIndice1, +INSTANTIATE_TEST_SUITE_P(smoke_Gather8NagativeIndice1, Gather8withIndicesDataLayerTest, gatherWithNagativeIndicesParams1, Gather8withIndicesDataLayerTest::getTestCaseName @@ -572,7 +572,7 @@ const auto gatherWithNagativeIndicesParams2 = testing::Combine( testing::ValuesIn(nagativeSingleindicesData) ); -INSTANTIATE_TEST_CASE_P(smoke_Gather8NagativeIndice2, +INSTANTIATE_TEST_SUITE_P(smoke_Gather8NagativeIndice2, Gather8withIndicesDataLayerTest, gatherWithNagativeIndicesParams2, Gather8withIndicesDataLayerTest::getTestCaseName diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index 6249b6f6159073..a3d9a1a9d3465d 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -212,7 +212,7 @@ const std::vector> dynamicInputShapes1D_explicit = { { {-1}, {{7}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_1d_explicit_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_1d_explicit_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes1D_explicit), @@ -229,7 +229,7 @@ const std::vector> dynamicInputShapes1D = { { {-1}, {{1}, {7}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_1d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_1d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes1D), @@ -248,7 +248,7 @@ const std::vector> dynamicInputShapes2D_explicit = { { {-1, -1}, {{3, 5}} } } }; 
-INSTANTIATE_TEST_CASE_P(smoke_broadcast_2d_explicit_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_2d_explicit_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes2D_explicit), @@ -265,7 +265,7 @@ const std::vector> dynamicInputShapes2D = { { {-1, -1}, {{3, 1}, {3, 5}} } } }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_2d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_2d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes2D), @@ -284,7 +284,7 @@ const std::vector> dynamicInputShapes3D_explicit = { { {-1, -1, -1}, {{4, 5, 6}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_3d_explicit_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_3d_explicit_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes3D_explicit), @@ -301,7 +301,7 @@ const std::vector> dynamicInputShapes3D = { { {-1, -1, -1}, {{4, 5, 1}, {1, 5, 1}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_3d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_3d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes3D), @@ -320,7 +320,7 @@ const std::vector> dynamicInputShapes4D_explicit = { { {-1, -1, -1, -1}, {{1, 16, 1, 7}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_4d_explicit_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_4d_explicit_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes4D_explicit), @@ -337,7 +337,7 @@ const std::vector> dynamicInputShapes4D = { { {-1, -1, -1, -1}, {{2, 1, 1, 3}, {1, 4, 1, 3}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_4d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_4d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes4D), @@ -356,7 +356,7 @@ const std::vector> dynamicInputShapes5D_explicit = { { {-1, -1, -1, -1, -1}, {{2, 3, 4, 5, 6}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_5d_explicit_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_5d_explicit_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes5D_explicit), @@ -373,7 +373,7 @@ const std::vector> dynamicInputShapes5D = { { {-1, -1, -1, -1, -1}, {{8, 1, 1, 7, 1}, {8, 4, 1, 7, 3}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_5d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_5d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes5D), @@ -391,7 +391,7 @@ const std::vector> dynamicInputShapes6D = { { {-1, -1, -1, -1, -1, -1}, {{8, 1, 1, 7, 1, 3}, {8, 4, 1, 7, 16, 3}} } }, }; -INSTANTIATE_TEST_CASE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic, +INSTANTIATE_TEST_SUITE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes6D), diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp index 139ba609c1b6fd..107d4a707ee751 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp @@ -204,7 +204,7 @@ const 
std::vector> repeats5D = { {2, 3, 1, 1, 1} }; -INSTANTIATE_TEST_CASE_P(DynamicShape4D, TileLayerGPUTest, +INSTANTIATE_TEST_SUITE_P(DynamicShape4D, TileLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamic_input_shapes4D), ::testing::ValuesIn(repeats4D), @@ -213,7 +213,7 @@ INSTANTIATE_TEST_CASE_P(DynamicShape4D, TileLayerGPUTest, ::testing::Values(ov::test::utils::DEVICE_GPU)), TileLayerGPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(DynamicShape5D, TileLayerGPUTest, +INSTANTIATE_TEST_SUITE_P(DynamicShape5D, TileLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamic_input_shapes5D), ::testing::ValuesIn(repeats5D), diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp index d260b66331fa51..4532baf455a4ee 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp @@ -178,7 +178,7 @@ std::vector input_shapesDynamic = { } }; -INSTANTIATE_TEST_CASE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest, +INSTANTIATE_TEST_SUITE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(k), ::testing::ValuesIn(axes), @@ -192,7 +192,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest, ::testing::Values(ov::test::utils::InputLayerType::CONSTANT)), TopKLayerGPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest, +INSTANTIATE_TEST_SUITE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest, ::testing::Combine( ::testing::Values(1), ::testing::ValuesIn(axes), From b202100ccedf525285231be80a19c9f3725247db Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 07:07:39 +0800 Subject: [PATCH 10/13] Fixed WebAsm compilation --- src/plugins/intel_cpu/src/infer_request.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/infer_request.h b/src/plugins/intel_cpu/src/infer_request.h index c2cf0d37379330..d5ca5f220f5921 100644 --- a/src/plugins/intel_cpu/src/infer_request.h +++ b/src/plugins/intel_cpu/src/infer_request.h @@ -111,8 +111,6 @@ class SyncInferRequest : public ov::ISyncInferRequest { Graph* m_graph = nullptr; std::unordered_map> m_external_ptr; - bool m_is_legacy_api = false; - std::shared_ptr m_compiled_model; openvino::itt::handle_t m_profiling_task; std::vector m_memory_states; From df6855d58f1d6e4570eb6972640e31bfa8e332ef Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 07:59:28 +0800 Subject: [PATCH 11/13] Fixed build --- cmake/templates/openvino.pc.in | 5 +- src/bindings/c/src/CMakeLists.txt | 3 +- src/cmake/openvino.cmake | 2 +- .../openvino/runtime/icompiled_model.hpp | 4 - .../runtime/performance_heuristics.hpp | 121 +--------------- .../src/dev/performance_heuristics.cpp | 129 ++++++++++++++++++ .../intel_gpu/plugin/program_builder.hpp | 5 + 7 files changed, 142 insertions(+), 127 deletions(-) create mode 100644 src/inference/src/dev/performance_heuristics.cpp diff --git a/cmake/templates/openvino.pc.in b/cmake/templates/openvino.pc.in index 958a8bbd61d9ac..1b95283c922058 100644 --- a/cmake/templates/openvino.pc.in +++ b/cmake/templates/openvino.pc.in @@ -9,14 +9,13 @@ exec_prefix=${prefix}/@OV_CPACK_RUNTIMEDIR@ libdir=${exec_prefix} include_prefix=${prefix}/@OV_CPACK_INCLUDEDIR@ -includedir_old=${include_prefix}/ie -includedir_new=${include_prefix} +includedir=${include_prefix} Name: OpenVINO Description: 
OpenVINO™ Toolkit URL: https://docs.openvino.ai/latest/index.html Version: @OpenVINO_VERSION@ Conflicts: openvino < @OpenVINO_VERSION@ -Cflags: -I${includedir_old} -I${includedir_new} @PKGCONFIG_OpenVINO_DEFINITIONS@ +Cflags: -I${includedir} @PKGCONFIG_OpenVINO_DEFINITIONS@ Libs: -L${libdir} @PKGCONFIG_OpenVINO_FRONTENDS@ -lopenvino_c -lopenvino @PKGCONFIG_OpenVINO_PRIVATE_DEPS@ Libs.private: -ldl -lm -lpthread -lrt diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt index bae88412fd0600..bade88e437e48b 100644 --- a/src/bindings/c/src/CMakeLists.txt +++ b/src/bindings/c/src/CMakeLists.txt @@ -55,8 +55,7 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE_C} ${OV_CPACK_COMP_CORE_C_EXCLUDE_ALL} LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT ${OV_CPACK_COMP_CORE_C} ${OV_CPACK_COMP_CORE_C_EXCLUDE_ALL} NAMELINK_COMPONENT ${OV_CPACK_COMP_CORE_C_DEV} - INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR} - ${OV_CPACK_INCLUDEDIR}/ie) + INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR}) install(DIRECTORY ${OpenVINO_C_API_SOURCE_DIR}/include/openvino/ DESTINATION ${OV_CPACK_INCLUDEDIR}/openvino diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index ed772ad0810734..92f494d274ac0f 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -143,7 +143,7 @@ if(ENABLE_PLUGINS_XML) ${OV_CPACK_COMP_CORE_EXCLUDE_ALL}) if(ENABLE_TESTS) - # for InferenceEngineUnitTest + # for ov_inference_unit_tests install(FILES $/plugins.xml DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) endif() diff --git a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp index c214de6c36f6b0..dbbd6bce182984 100644 --- a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp +++ b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp @@ -146,10 +146,6 @@ class OPENVINO_RUNTIME_API ICompiledModel : public std::enable_shared_from_this< friend ov::CoreImpl; - // FIXME: Remove after removing IE API - std::vector> _parameters; - std::vector> _results; - protected: /** * @brief Method creates infer request implementation diff --git a/src/inference/dev_api/openvino/runtime/performance_heuristics.hpp b/src/inference/dev_api/openvino/runtime/performance_heuristics.hpp index 7d6bcbd88b58be..7e8eb7bb388c68 100644 --- a/src/inference/dev_api/openvino/runtime/performance_heuristics.hpp +++ b/src/inference/dev_api/openvino/runtime/performance_heuristics.hpp @@ -1,11 +1,13 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // + #pragma once #include #include +#include "openvino/runtime/common.hpp" #include "openvino/core/model.hpp" #include "transformations/utils/utils.hpp" @@ -24,124 +26,9 @@ struct MemBandwidthPressure { static constexpr float LIMITED = 0.5f; // conservatively assume 1/2 utilization of the cache }; -static MemBandwidthPressure mem_bandwidth_pressure_tolerance( +OPENVINO_RUNTIME_API MemBandwidthPressure mem_bandwidth_pressure_tolerance( const std::shared_ptr model, const float cache_size, - const float memThresholdAssumeLimited = MemBandwidthPressure::LIMITED) { - int total_convs = 0, mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0, - total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0; - auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float { - return (cache_size / (size_data_moved * datatype_size)); - }; - auto 
isLowPrecision = [&](ov::element::Type type) -> bool { - return (type == ov::element::i8) || (type == ov::element::u8); - }; - auto isHalfPrecision = [&](ov::element::Type type) -> bool { - return (type == ov::element::bf16) || (type == ov::element::f16); - }; - - float worst_case = MemBandwidthPressure::UNKNOWN; - // Traverse OpenVINO Model in topological order - for (auto& node : model->get_ordered_ops()) { - const auto node_name = node->get_type_info().name; - if (std::strcmp("MatMul", node_name) && std::strcmp("Convolution", node_name) && - std::strcmp("ConvolutionBackpropData", node_name)) { - if (!std::strcmp("GRUSequence", node_name) || !std::strcmp("TensorIterator", node_name)) { - MemBandwidthPressure res; - res.max_mem_tolerance = MemBandwidthPressure::UNKNOWN; - return res; - } - continue; - } - auto type1 = node->input_value(1).get_element_type(); // weights - const bool isINT8 = isLowPrecision(type1); - const bool isBF16orFP16 = isHalfPrecision(type1); - const int data_type_size = isINT8 ? 1 : isBF16orFP16 ? 2 : 4; - - size_t dataSizeInput = 0, dataSizeOutput = 0; - if (!std::strcmp("MatMul", node_name)) { - const auto input0 = node->input(0); - const auto input1 = node->input(1); - const auto output = node->output(0); - // Check that input and output shape a fully defined (not dynamic) - if (input0.get_partial_shape().is_static() && input1.get_partial_shape().is_static() && - output.get_partial_shape().is_static()) { - const auto& shapeInput0 = input0.get_shape(); - const auto& shapeInput1 = input1.get_shape(); - const auto non_const = !ov::op::util::is_on_constant_path(node->input_value(1)); - const auto& shapeOutput = output.get_shape(); - const auto dataSizeInput0 = - std::accumulate(shapeInput0.begin(), shapeInput0.end(), size_t(1), std::multiplies()); - const auto dataSizeInput1 = - std::accumulate(shapeInput1.begin(), shapeInput1.end(), size_t(1), std::multiplies()); - dataSizeOutput = - std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); - const auto total_data = dataSizeInput0 + non_const * dataSizeInput1 + dataSizeOutput; - total_gemms++; - const auto factor = memLimitedFactor(total_data, data_type_size); - mem_limited_gemms += factor < memThresholdAssumeLimited; - worst_case = std::min(factor, worst_case); - } - } else if (!std::strcmp("Convolution", node_name)) { - // Check that input and output shape a fully defined (not dynamic) - const auto input = node->input(0); - const auto output = node->output(0); - const auto kernels = node->input(1); - - total_convs++; - if (kernels.get_partial_shape().is_static()) { - const auto& shape = kernels.get_shape(); - if (shape.size() >= 4 /* conventional 2D/3D conv */ && shape[2] >= 3 && shape[3] >= 3) { - compute_convs++; - continue; - } - } - if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static()) { - const auto& shapeInput = input.get_shape(); - const auto& shapeOutput = output.get_shape(); - if (shapeInput.size() > 4 /*5D*/ && isINT8) { - compute_convs++; - continue; - } - dataSizeInput = - std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); - dataSizeOutput = - std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); - const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); - mem_limited_convs += factor < memThresholdAssumeLimited; - worst_case = std::min(factor, worst_case); - } - } else if (!std::strcmp("ConvolutionBackpropData", node_name)) { - const 
auto input = node->input(0); - const auto output = node->output(0); - total_deconvs++; - - // Check that input and output shape a fully defined (not dynamic) - if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static()) { - const auto shapeInput = input.get_shape(); - const auto shapeOutput = output.get_shape(); - if (shapeInput.size() > 4 /*5D*/ && isINT8) { - compute_deconvs++; - continue; - } - dataSizeInput = - std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); - dataSizeOutput = - std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); - const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); - mem_limited_deconvs += factor < memThresholdAssumeLimited; - worst_case = std::min(factor, worst_case); - } - } - } - MemBandwidthPressure res; - res.max_mem_tolerance = worst_case; - res.ratio_mem_limited_convs = total_convs ? static_cast(mem_limited_convs) / total_convs : 0; - res.ratio_mem_limited_deconvs = total_deconvs ? static_cast(mem_limited_deconvs) / total_deconvs : 0; - res.ratio_mem_limited_gemms = total_gemms ? static_cast(mem_limited_gemms) / total_gemms : 0; - res.ratio_compute_convs = total_convs ? static_cast(compute_convs) / total_convs : 0; - res.ratio_compute_deconvs = total_deconvs ? static_cast(compute_deconvs) / total_deconvs : 0; - return res; -} + const float memThresholdAssumeLimited = MemBandwidthPressure::LIMITED); } // namespace ov diff --git a/src/inference/src/dev/performance_heuristics.cpp b/src/inference/src/dev/performance_heuristics.cpp new file mode 100644 index 00000000000000..c89256f7bf1e9b --- /dev/null +++ b/src/inference/src/dev/performance_heuristics.cpp @@ -0,0 +1,129 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/performance_heuristics.hpp" + +namespace ov { + +MemBandwidthPressure mem_bandwidth_pressure_tolerance( + const std::shared_ptr model, + const float cache_size, + const float memThresholdAssumeLimited) { + int total_convs = 0, mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0, + total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0; + auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float { + return (cache_size / (size_data_moved * datatype_size)); + }; + auto isLowPrecision = [&](ov::element::Type type) -> bool { + return (type == ov::element::i8) || (type == ov::element::u8); + }; + auto isHalfPrecision = [&](ov::element::Type type) -> bool { + return (type == ov::element::bf16) || (type == ov::element::f16); + }; + + float worst_case = MemBandwidthPressure::UNKNOWN; + // Traverse OpenVINO Model in topological order + for (auto& node : model->get_ordered_ops()) { + const auto node_name = node->get_type_info().name; + if (std::strcmp("MatMul", node_name) && std::strcmp("Convolution", node_name) && + std::strcmp("ConvolutionBackpropData", node_name)) { + if (!std::strcmp("GRUSequence", node_name) || !std::strcmp("TensorIterator", node_name)) { + MemBandwidthPressure res; + res.max_mem_tolerance = MemBandwidthPressure::UNKNOWN; + return res; + } + continue; + } + auto type1 = node->input_value(1).get_element_type(); // weights + const bool isINT8 = isLowPrecision(type1); + const bool isBF16orFP16 = isHalfPrecision(type1); + const int data_type_size = isINT8 ? 1 : isBF16orFP16 ? 
2 : 4; + + size_t dataSizeInput = 0, dataSizeOutput = 0; + if (!std::strcmp("MatMul", node_name)) { + const auto input0 = node->input(0); + const auto input1 = node->input(1); + const auto output = node->output(0); + // Check that input and output shape a fully defined (not dynamic) + if (input0.get_partial_shape().is_static() && input1.get_partial_shape().is_static() && + output.get_partial_shape().is_static()) { + const auto& shapeInput0 = input0.get_shape(); + const auto& shapeInput1 = input1.get_shape(); + const auto non_const = !ov::op::util::is_on_constant_path(node->input_value(1)); + const auto& shapeOutput = output.get_shape(); + const auto dataSizeInput0 = + std::accumulate(shapeInput0.begin(), shapeInput0.end(), size_t(1), std::multiplies()); + const auto dataSizeInput1 = + std::accumulate(shapeInput1.begin(), shapeInput1.end(), size_t(1), std::multiplies()); + dataSizeOutput = + std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); + const auto total_data = dataSizeInput0 + non_const * dataSizeInput1 + dataSizeOutput; + total_gemms++; + const auto factor = memLimitedFactor(total_data, data_type_size); + mem_limited_gemms += factor < memThresholdAssumeLimited; + worst_case = std::min(factor, worst_case); + } + } else if (!std::strcmp("Convolution", node_name)) { + // Check that input and output shape a fully defined (not dynamic) + const auto input = node->input(0); + const auto output = node->output(0); + const auto kernels = node->input(1); + + total_convs++; + if (kernels.get_partial_shape().is_static()) { + const auto& shape = kernels.get_shape(); + if (shape.size() >= 4 /* conventional 2D/3D conv */ && shape[2] >= 3 && shape[3] >= 3) { + compute_convs++; + continue; + } + } + if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static()) { + const auto& shapeInput = input.get_shape(); + const auto& shapeOutput = output.get_shape(); + if (shapeInput.size() > 4 /*5D*/ && isINT8) { + compute_convs++; + continue; + } + dataSizeInput = + std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); + dataSizeOutput = + std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); + const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); + mem_limited_convs += factor < memThresholdAssumeLimited; + worst_case = std::min(factor, worst_case); + } + } else if (!std::strcmp("ConvolutionBackpropData", node_name)) { + const auto input = node->input(0); + const auto output = node->output(0); + total_deconvs++; + + // Check that input and output shape a fully defined (not dynamic) + if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static()) { + const auto shapeInput = input.get_shape(); + const auto shapeOutput = output.get_shape(); + if (shapeInput.size() > 4 /*5D*/ && isINT8) { + compute_deconvs++; + continue; + } + dataSizeInput = + std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); + dataSizeOutput = + std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); + const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); + mem_limited_deconvs += factor < memThresholdAssumeLimited; + worst_case = std::min(factor, worst_case); + } + } + } + MemBandwidthPressure res; + res.max_mem_tolerance = worst_case; + res.ratio_mem_limited_convs = total_convs ? 
static_cast(mem_limited_convs) / total_convs : 0; + res.ratio_mem_limited_deconvs = total_deconvs ? static_cast(mem_limited_deconvs) / total_deconvs : 0; + res.ratio_mem_limited_gemms = total_gemms ? static_cast(mem_limited_gemms) / total_gemms : 0; + res.ratio_compute_convs = total_convs ? static_cast(compute_convs) / total_convs : 0; + res.ratio_compute_deconvs = total_deconvs ? static_cast(compute_deconvs) / total_deconvs : 0; + return res; +} + +} // namespace ov diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp index 7a891709415ad5..e92feaa31f58a6 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp @@ -22,6 +22,11 @@ #include #include +#if defined(_WIN32) && !defined(__GNUC__) +# define __PRETTY_FUNCTION__ __FUNCSIG__ +#else +# define __PRETTY_FUNCTION__ __PRETTY_FUNCTION__ +#endif // Forward declarations for cldnn part namespace cldnn { From d3657f600b2a97e43bcc36a8abac02c2a33fb7d2 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 08:07:12 +0800 Subject: [PATCH 12/13] Clang format fix --- src/inference/src/dev/performance_heuristics.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/inference/src/dev/performance_heuristics.cpp b/src/inference/src/dev/performance_heuristics.cpp index c89256f7bf1e9b..3ca8cf38673bed 100644 --- a/src/inference/src/dev/performance_heuristics.cpp +++ b/src/inference/src/dev/performance_heuristics.cpp @@ -6,10 +6,9 @@ namespace ov { -MemBandwidthPressure mem_bandwidth_pressure_tolerance( - const std::shared_ptr model, - const float cache_size, - const float memThresholdAssumeLimited) { +MemBandwidthPressure mem_bandwidth_pressure_tolerance(const std::shared_ptr model, + const float cache_size, + const float memThresholdAssumeLimited) { int total_convs = 0, mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0, total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0; auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float { From 67743f4c2ea224e6a01c98e3613dffc03154c3e3 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Feb 2024 15:18:47 +0800 Subject: [PATCH 13/13] Disabled test --- .../include/behavior/ov_infer_request/properties_tests.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp index b79b7e5c498e38..aa5d03b6e93d8f 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp @@ -103,7 +103,7 @@ TEST_P(InferRequestPropertiesTest, withoutExclusiveAsyncRequests) { } } -TEST_P(InferRequestPropertiesTest, ReusableCPUStreamsExecutor) { +TEST_P(InferRequestPropertiesTest, DISABLED_ReusableCPUStreamsExecutor) { ASSERT_EQ(0u, ov::threading::executor_manager()->get_executors_number()); ASSERT_EQ(0u, ov::threading::executor_manager()->get_idle_cpu_streams_executors_number());