From 44d7fda4177f72a1b9b03cb629c9e4890b8cd125 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 9 Jun 2023 16:25:43 +0400 Subject: [PATCH 1/4] Deprecate plugins config keys --- src/inference/include/ie/cpu/cpu_config.hpp | 14 +++- src/inference/include/ie/gna/gna_config.hpp | 79 +++++++++---------- .../ie/gpu/details/gpu_context_helpers.hpp | 12 ++- src/inference/include/ie/gpu/gpu_config.hpp | 46 ++++++----- .../include/ie/gpu/gpu_context_api_dx.hpp | 36 ++++++--- .../include/ie/gpu/gpu_context_api_ocl.hpp | 46 +++++++---- .../include/ie/gpu/gpu_context_api_va.hpp | 30 ++++--- .../include/ie/gpu/gpu_ocl_wrapper.hpp | 10 +++ src/inference/include/ie/gpu/gpu_params.hpp | 52 +++++++----- .../ie/hetero/hetero_plugin_config.hpp | 12 ++- .../ie/multi-device/multi_device_config.hpp | 12 ++- src/inference/include/ie/vpu/hddl_config.hpp | 50 +++++++----- .../include/ie/vpu/myriad_config.hpp | 34 +++++--- src/inference/include/ie/vpu/vpu_config.hpp | 16 +++- 14 files changed, 294 insertions(+), 155 deletions(-) diff --git a/src/inference/include/ie/cpu/cpu_config.hpp b/src/inference/include/ie/cpu/cpu_config.hpp index e26f25b540ecd2..e1619832223a36 100644 --- a/src/inference/include/ie/cpu/cpu_config.hpp +++ b/src/inference/include/ie/cpu/cpu_config.hpp @@ -10,6 +10,16 @@ */ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "ie_plugin_config.hpp" namespace InferenceEngine { @@ -38,9 +48,9 @@ namespace CPUConfigParams { * PluginConfigParams::YES or PluginConfigParams::NO * If not set explicitly, the behavior is kept in runtime enviroment where compile_model is called. */ -DECLARE_CPU_CONFIG_KEY(DENORMALS_OPTIMIZATION); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CPU_CONFIG_KEY(DENORMALS_OPTIMIZATION); -DECLARE_CPU_CONFIG_KEY(SPARSE_WEIGHTS_DECOMPRESSION_RATE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CPU_CONFIG_KEY(SPARSE_WEIGHTS_DECOMPRESSION_RATE); } // namespace CPUConfigParams } // namespace InferenceEngine diff --git a/src/inference/include/ie/gna/gna_config.hpp b/src/inference/include/ie/gna/gna_config.hpp index 5efd720d75b56b..b4a5f81551b634 100644 --- a/src/inference/include/ie/gna/gna_config.hpp +++ b/src/inference/include/ie/gna/gna_config.hpp @@ -11,6 +11,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "ie_plugin_config.hpp" namespace InferenceEngine { @@ -40,128 +50,119 @@ namespace GNAConfigParams { * @details For multiple input case, individual scale factors can be passed, using * KEY_GNA_SCALE_FACTOR[_input_layer_name] where input_layer can be obtained from CNNNetwork::GetInputsInfo */ -DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR); /** * @brief By default gna api works with Int16 weights precision, however this can be adjusted if necessary, * currently supported values are I16, I8 */ -DECLARE_GNA_CONFIG_KEY(PRECISION); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PRECISION); /** * @brief if turned on, dump GNA firmware model into specified file */ -DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE); /** * @brief GNA proc_type setting that should be one of GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_EXACT */ -DECLARE_GNA_CONFIG_KEY(DEVICE_MODE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(DEVICE_MODE); /** * @brief Specific software acceleration mode. * Uses Intel GNA if available, otherwise uses software execution mode on CPU. */ -DECLARE_GNA_CONFIG_VALUE(AUTO); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AUTO); /** * @brief Specific software acceleration mode. * Uses Intel GNA if available, otherwise raises an error. */ -DECLARE_GNA_CONFIG_VALUE(HW); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(HW); /** * @brief Specific software acceleration mode. * Uses Intel GNA if available, otherwise raises an error. * If the hardware queue is not empty, automatically falls back to CPU in the bit-exact mode. */ -DECLARE_GNA_CONFIG_VALUE(HW_WITH_SW_FBACK); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(HW_WITH_SW_FBACK); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(SW); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW); /** * @brief Specific software acceleration mode. * Executes the GNA-compiled graph on CPU performing calculations * in the same precision as the Intel GNA in the bit-exact mode. */ -DECLARE_GNA_CONFIG_VALUE(SW_EXACT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW_EXACT); /** * @brief Specific software acceleration mode. * Executes the GNA-compiled graph on CPU but substitutes parameters and calculations * from low precision to floating point */ -DECLARE_GNA_CONFIG_VALUE(SW_FP32); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW_FP32); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(GEN); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(GEN); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. 
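 * A migration sketch (an illustrative, non-authoritative example assuming the
 * legacy Core::SetConfig API and the GNA_CONFIG_KEY/GNA_CONFIG_VALUE helper macros
 * from this header):
 *   InferenceEngine::Core ie;
 *   ie.SetConfig({{GNA_CONFIG_KEY(DEVICE_MODE), GNA_CONFIG_VALUE(SW_EXACT)}}, "GNA");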
*/ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(GEN_EXACT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(GEN_EXACT); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(SSE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SSE); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(SSE_EXACT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SSE_EXACT); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(AVX1); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX1); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(AVX1_EXACT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX1_EXACT); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(AVX2); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX2); /** * @brief Specific software acceleration mode. * @deprecated Mode is deprecated and will be removed in a future release. * Use InferenceEngine::GNAConfigParams::SW_EXACT instead. */ -INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead") -DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT); /** * @brief The option to override the GNA HW execution target. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. @@ -171,21 +172,21 @@ DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT); * A fully supported GNA HW generation means it must be supported by both the OV GNA Plugin and the core GNA Library. * For the OV GNA Plugin 2022.1, the latest supported GNA HW generation corresponds to GNA_TARGET_3_0. */ -DECLARE_GNA_CONFIG_KEY(EXEC_TARGET); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(EXEC_TARGET); -DECLARE_GNA_CONFIG_VALUE(TARGET_2_0); -DECLARE_GNA_CONFIG_VALUE(TARGET_3_0); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(TARGET_2_0); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(TARGET_3_0); /** * @brief The option to override the GNA HW compile target. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. * By default the same as GNA_EXEC_TARGET. 
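 * A usage sketch (assuming the legacy Core::SetConfig API; both keys accept the
 * GNA_TARGET_* values declared above):
 *   ie.SetConfig({{GNA_CONFIG_KEY(EXEC_TARGET), GNA_CONFIG_VALUE(TARGET_3_0)},
 *                 {GNA_CONFIG_KEY(COMPILE_TARGET), GNA_CONFIG_VALUE(TARGET_3_0)}},
 *                "GNA");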
*/ -DECLARE_GNA_CONFIG_KEY(COMPILE_TARGET); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(COMPILE_TARGET); /** * @brief if enabled produced minimum memory footprint for loaded network in GNA memory, default value is YES */ -DECLARE_GNA_CONFIG_KEY(COMPACT_MODE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(COMPACT_MODE); /** * @brief The option to enable/disable uniformly distributed PWL algorithm. @@ -195,8 +196,7 @@ DECLARE_GNA_CONFIG_KEY(COMPACT_MODE); * Uniform distribution usually gives poor approximation with same number of segments * @deprecated The config key is deprecated and will be removed in a future release. */ -INFERENCE_ENGINE_DEPRECATED("The config key is deprected and will be removed") -DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN); /** * @brief The option to allow to specify the maximum error percent that the optimized algorithm finding @@ -204,8 +204,7 @@ DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN); * By default (in case of NO value set), 1.0 value is used. * @deprecated The config key is deprecated and will be removed in a future release. */ -INFERENCE_ENGINE_DEPRECATED("The config key is deprected and will be removed") -DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT); /** * @brief By default, the GNA plugin uses one worker thread for inference computations. @@ -215,8 +214,7 @@ DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT); * of issuing. Additionally, in this case, software modes do not implement any serializations. * @deprecated The config key is deprecated and will be removed in a future release */ -INFERENCE_ENGINE_DEPRECATED("The config key will be removed") -DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS); } // namespace GNAConfigParams namespace Metrics { @@ -224,7 +222,7 @@ namespace Metrics { * @brief Metric to get a std::string of GNA Library version, usually in the form * ... */ -DECLARE_METRIC_KEY(GNA_LIBRARY_FULL_VERSION, std::string); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(GNA_LIBRARY_FULL_VERSION, std::string); } // namespace Metrics namespace PluginConfigParams { @@ -236,8 +234,7 @@ namespace PluginConfigParams { * PluginConfigParams::YES or PluginConfigParams::NO * @deprecated The config key is deprecated and will be removed in a future release */ -INFERENCE_ENGINE_DEPRECATED("The config key will be removed") -DECLARE_CONFIG_KEY(SINGLE_THREAD); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CONFIG_KEY(SINGLE_THREAD); } // namespace PluginConfigParams diff --git a/src/inference/include/ie/gpu/details/gpu_context_helpers.hpp b/src/inference/include/ie/gpu/details/gpu_context_helpers.hpp index f9a523df7e094b..99776b12cf6fcb 100644 --- a/src/inference/include/ie/gpu/details/gpu_context_helpers.hpp +++ b/src/inference/include/ie/gpu/details/gpu_context_helpers.hpp @@ -9,6 +9,16 @@ */ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include #include "ie_parameter.hpp" @@ -22,7 +32,7 @@ namespace details { * @brief This wrapper class is used to obtain low-level handles * from remote blob or context object parameters. */ -class param_map_obj_getter { +class INFERENCE_ENGINE_1_0_DEPRECATED param_map_obj_getter { protected: /** * @brief Template function that returns specified diff --git a/src/inference/include/ie/gpu/gpu_config.hpp b/src/inference/include/ie/gpu/gpu_config.hpp index 0f2a2c6e0049f4..a4d7ff267f08da 100644 --- a/src/inference/include/ie/gpu/gpu_config.hpp +++ b/src/inference/include/ie/gpu/gpu_config.hpp @@ -10,6 +10,16 @@ */ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "ie_plugin_config.hpp" namespace InferenceEngine { @@ -33,29 +43,29 @@ namespace Metrics { * @brief Metric which defines size of memory in bytes available for the device. For iGPU it returns host memory size, * for dGPU - dedicated gpu memory size */ -DECLARE_GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE, uint64_t); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE, uint64_t); /** * @brief Metric to get microarchitecture identifier in major.minor.revision format */ -DECLARE_GPU_METRIC_KEY(UARCH_VERSION, std::string); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_METRIC_KEY(UARCH_VERSION, std::string); /** * @brief Metric to get count of execution units for current GPU */ -DECLARE_GPU_METRIC_KEY(EXECUTION_UNITS_COUNT, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_METRIC_KEY(EXECUTION_UNITS_COUNT, int); /** * @brief Metric to get statistics of GPU memory allocated by engine for each allocation type * It contains information about current memory usage */ -DECLARE_GPU_METRIC_KEY(MEMORY_STATISTICS, std::map); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_METRIC_KEY(MEMORY_STATISTICS, std::map); /** * @brief Possible return value for OPTIMIZATION_CAPABILITIES metric * - "HW_MATMUL" - Defines if device has hardware block for matrix multiplication */ -DECLARE_GPU_METRIC_VALUE(HW_MATMUL); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_METRIC_VALUE(HW_MATMUL); } // namespace Metrics @@ -77,7 +87,7 @@ namespace GPUConfigParams { * this option should be used with an unsigned integer value (1 is lowest priority) * 0 means no priority hint is set and default queue is created. */ -DECLARE_GPU_CONFIG_KEY(PLUGIN_PRIORITY); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(PLUGIN_PRIORITY); /** * @brief This key instructs the GPU plugin to use throttle hints the OpenCL queue throttle hint @@ -85,7 +95,7 @@ DECLARE_GPU_CONFIG_KEY(PLUGIN_PRIORITY); * chapter 9.19. This option should be used with an unsigned integer value (1 is lowest energy consumption) * 0 means no throttle hint is set and default queue created. 
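 * A usage sketch (assuming the legacy Core::SetConfig API; the literal "1" is an
 * illustrative throttle level, not a recommended value):
 *   ie.SetConfig({{GPU_CONFIG_KEY(PLUGIN_THROTTLE), "1"}}, "GPU");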
*/ -DECLARE_GPU_CONFIG_KEY(PLUGIN_THROTTLE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(PLUGIN_THROTTLE); /** * @brief This key instructs the GPU plugin which cpu core type of TBB affinity used in load network. @@ -94,22 +104,22 @@ DECLARE_GPU_CONFIG_KEY(PLUGIN_THROTTLE); * - MEDIUM (DEFAULT) - instructs the GPU Plugin to use any available cores (BIG or LITTLE cores) * - HIGH - instructs the GPU Plugin to use BIG cores if they are available */ -DECLARE_GPU_CONFIG_KEY(HOST_TASK_PRIORITY); -DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_HIGH); -DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_MEDIUM); -DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_LOW); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(HOST_TASK_PRIORITY); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_HIGH); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_MEDIUM); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_VALUE(HOST_TASK_PRIORITY_LOW); /** * @brief This key should be set to correctly handle NV12 input without pre-processing. * Turned off by default. */ -DECLARE_GPU_CONFIG_KEY(NV12_TWO_INPUTS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(NV12_TWO_INPUTS); /** * @brief This key sets the max number of host threads that can be used by GPU plugin on model loading. * Default value is maximum number of threads available in the environment. */ -DECLARE_GPU_CONFIG_KEY(MAX_NUM_THREADS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(MAX_NUM_THREADS); /** * @brief Turning on this key enables to unroll recurrent layers such as TensorIterator or Loop with fixed iteration @@ -118,13 +128,13 @@ DECLARE_GPU_CONFIG_KEY(MAX_NUM_THREADS); * performance for both graph loading time and inference time with many iteration counts (greater than 16). Note that * turning this key on will increase the graph loading time in proportion to the iteration counts. * Thus, this key should be turned off if graph loading time is considered to be most important target to optimize.*/ -DECLARE_GPU_CONFIG_KEY(ENABLE_LOOP_UNROLLING); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(ENABLE_LOOP_UNROLLING); /** * @brief These keys instruct the GPU plugin to use surface/buffer memory type. */ -DECLARE_GPU_CONFIG_KEY(SURFACE); -DECLARE_GPU_CONFIG_KEY(BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(SURFACE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_CONFIG_KEY(BUFFER); } // namespace GPUConfigParams @@ -139,8 +149,8 @@ namespace PluginConfigParams { * drop * - a positive integer value creates the requested number of streams */ -DECLARE_CONFIG_VALUE(GPU_THROUGHPUT_AUTO); -DECLARE_CONFIG_KEY(GPU_THROUGHPUT_STREAMS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CONFIG_VALUE(GPU_THROUGHPUT_AUTO); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CONFIG_KEY(GPU_THROUGHPUT_STREAMS); } // namespace PluginConfigParams } // namespace InferenceEngine diff --git a/src/inference/include/ie/gpu/gpu_context_api_dx.hpp b/src/inference/include/ie/gpu/gpu_context_api_dx.hpp index 6992e5b24cbaa0..53022836a02889 100644 --- a/src/inference/include/ie/gpu/gpu_context_api_dx.hpp +++ b/src/inference/include/ie/gpu/gpu_context_api_dx.hpp @@ -11,6 +11,16 @@ */ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include #include @@ -28,7 +38,7 @@ namespace gpu { * GetContext() method of Executable network or using CreateContext() Core call. * @note User can also obtain OpenCL context handle from this class. */ -class D3DContext : public ClContext { +class INFERENCE_ENGINE_1_0_DEPRECATED D3DContext : public ClContext { public: /** * @brief A smart pointer to the D3DContext object @@ -53,7 +63,7 @@ class D3DContext : public ClContext { * The plugin object derived from this class can be obtained with CreateBlob() call. * @note User can also obtain OpenCL buffer handle from this class. */ -class D3DBufferBlob : public ClBufferBlob { +class INFERENCE_ENGINE_1_0_DEPRECATED D3DBufferBlob : public ClBufferBlob { public: /** * @brief A smart pointer to the D3DBufferBlob object @@ -84,7 +94,7 @@ class D3DBufferBlob : public ClBufferBlob { * The plugin object derived from this class can be obtained with CreateBlob() call. * @note User can also obtain OpenCL 2D image handle from this class. */ -class D3DSurface2DBlob : public ClImage2DBlob { +class INFERENCE_ENGINE_1_0_DEPRECATED D3DSurface2DBlob : public ClImage2DBlob { public: /** * @brief A smart pointer to the D3DSurface2DBlob object @@ -159,10 +169,10 @@ static inline Blob::Ptr make_shared_blob_nv12(size_t height, * device should be used * @return A shared remote context instance */ -static inline D3DContext::Ptr make_shared_context(Core& core, - std::string deviceName, - ID3D11Device* device, - int target_tile_id = -1) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline D3DContext::Ptr make_shared_context(Core& core, + std::string deviceName, + ID3D11Device* device, + int target_tile_id = -1) { // clang-format off ParamMap contextParams = { {GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)}, @@ -180,7 +190,9 @@ static inline D3DContext::Ptr make_shared_context(Core& core, * @param buffer A pointer to ID3D11Buffer instance to create remote blob based on * @return A remote blob instance */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, ID3D11Buffer* buffer) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, + RemoteContext::Ptr ctx, + ID3D11Buffer* buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { IE_THROW() << "Invalid remote context passed"; @@ -200,10 +212,10 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: * @return Smart pointer to created RemoteBlob object cast to base class * @note The underlying ID3D11Texture2D can also be a plane of output surface of DXGI video decoder */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, - RemoteContext::Ptr ctx, - ID3D11Texture2D* surface, - uint32_t plane = 0) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, + RemoteContext::Ptr ctx, + ID3D11Texture2D* surface, + uint32_t plane = 0) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { IE_THROW() << "Invalid remote context passed"; diff --git a/src/inference/include/ie/gpu/gpu_context_api_ocl.hpp 
b/src/inference/include/ie/gpu/gpu_context_api_ocl.hpp index c034c0bbc3de3b..d7c52c50253806 100644 --- a/src/inference/include/ie/gpu/gpu_context_api_ocl.hpp +++ b/src/inference/include/ie/gpu/gpu_context_api_ocl.hpp @@ -16,6 +16,16 @@ # define WAS_OV_LIBRARY_DEFINED #endif +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include #include #include @@ -35,7 +45,7 @@ namespace gpu { * The plugin object derived from this class can be obtained either with * GetContext() method of Executable network or using CreateContext() Core call. */ -class ClContext : public RemoteContext, public details::param_map_obj_getter { +class INFERENCE_ENGINE_1_0_DEPRECATED ClContext : public RemoteContext, public details::param_map_obj_getter { public: /** * @brief A smart pointer to the ClContext object @@ -75,7 +85,7 @@ class ClContext : public RemoteContext, public details::param_map_obj_getter { * @brief The basic class for all GPU plugin remote blob objects. * The OpenCL memory object handle (cl_mem) can be obtained from this class object. */ -class ClBlob : public RemoteBlob { +class INFERENCE_ENGINE_1_0_DEPRECATED ClBlob : public RemoteBlob { public: /** * @brief A smart pointer to the ClBlob object @@ -95,7 +105,7 @@ class ClBlob : public RemoteBlob { * The plugin object derived from this class can be obtained with CreateBlob() call. * @note User can obtain OpenCL buffer handle from this class. */ -class ClBufferBlob : public ClBlob, public details::param_map_obj_getter { +class INFERENCE_ENGINE_1_0_DEPRECATED ClBufferBlob : public ClBlob, public details::param_map_obj_getter { public: /** * @brief A smart pointer to the ClBufferBlob object @@ -143,7 +153,7 @@ class ClBufferBlob : public ClBlob, public details::param_map_obj_getter { * The plugin object derived from this class can be obtained with CreateBlob() call. * @note User can obtain USM pointer from this class. */ -class USMBlob : public ClBlob, public details::param_map_obj_getter { +class INFERENCE_ENGINE_1_0_DEPRECATED USMBlob : public ClBlob, public details::param_map_obj_getter { public: /** * @brief A smart pointer to the ClBufferBlob object @@ -186,7 +196,7 @@ class USMBlob : public ClBlob, public details::param_map_obj_getter { * The plugin object derived from this class can be obtained with CreateBlob() call. * @note User can obtain OpenCL image handle from this class. 
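 * A retrieval sketch (assuming a blob already created by a GPU remote context and
 * an image() accessor on this class):
 *   auto cl_blob = std::dynamic_pointer_cast<gpu::ClImage2DBlob>(blob);
 *   cl::Image2D& img = cl_blob->image();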
*/ -class ClImage2DBlob : public ClBlob, public details::param_map_obj_getter { +class INFERENCE_ENGINE_1_0_DEPRECATED ClImage2DBlob : public ClBlob, public details::param_map_obj_getter { public: /** * @brief A smart pointer to the ClImage2DBlob object @@ -272,10 +282,10 @@ static inline Blob::Ptr make_shared_blob_nv12(RemoteContext::Ptr ctx, * device should be used * @return A shared remote context instance */ -static inline RemoteContext::Ptr make_shared_context(Core& core, - std::string deviceName, - cl_context ctx, - int target_tile_id = -1) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline RemoteContext::Ptr make_shared_context(Core& core, + std::string deviceName, + cl_context ctx, + int target_tile_id = -1) { ParamMap contextParams = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)}, {GPU_PARAM_KEY(OCL_CONTEXT), static_cast(ctx)}, {GPU_PARAM_KEY(TILE_ID), target_tile_id}}; @@ -290,7 +300,9 @@ static inline RemoteContext::Ptr make_shared_context(Core& core, * @note Only latency mode is supported for such context sharing case. * @return A shared remote context instance */ -static inline RemoteContext::Ptr make_shared_context(Core& core, std::string deviceName, cl_command_queue queue) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline RemoteContext::Ptr make_shared_context(Core& core, + std::string deviceName, + cl_command_queue queue) { cl_context ctx; auto res = clGetCommandQueueInfo(queue, CL_QUEUE_CONTEXT, sizeof(cl_context), &ctx, nullptr); if (res != CL_SUCCESS) @@ -307,7 +319,7 @@ static inline RemoteContext::Ptr make_shared_context(Core& core, std::string dev * @param ctx A remote context used to create remote blob * @return A remote blob instance */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, ClContext::Ptr ctx) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, ClContext::Ptr ctx) { return std::dynamic_pointer_cast(ctx->CreateBlob(desc)); } @@ -318,7 +330,9 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, ClContext::Ptr * @param buffer A cl::Buffer object wrapped by a remote blob * @return A remote blob instance */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Buffer& buffer) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, + RemoteContext::Ptr ctx, + cl::Buffer& buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { IE_THROW() << "Invalid remote context passed"; @@ -336,7 +350,9 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: * @param buffer A cl_mem object wrapped by a remote blob * @return A remote blob instance */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl_mem buffer) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, + RemoteContext::Ptr ctx, + cl_mem buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { IE_THROW() << "Invalid remote context passed"; @@ -354,7 +370,9 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: * @param image A cl::Image2D object wrapped by a remote blob * @return A remote blob instance */ -static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Image2D& image) { +INFERENCE_ENGINE_1_0_DEPRECATED static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, + RemoteContext::Ptr ctx, + cl::Image2D& image) { 
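    // The cast below verifies that the supplied remote context is actually a GPU
    // ClContext; a null result means an incompatible context was passed, and the
    // function throws.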
    auto casted = std::dynamic_pointer_cast<ClContext>(ctx);
    if (nullptr == casted) {
        IE_THROW() << "Invalid remote context passed";
diff --git a/src/inference/include/ie/gpu/gpu_context_api_va.hpp b/src/inference/include/ie/gpu/gpu_context_api_va.hpp
index 8c67588e977e7a..8bb417c8752694 100644
--- a/src/inference/include/ie/gpu/gpu_context_api_va.hpp
+++ b/src/inference/include/ie/gpu/gpu_context_api_va.hpp
@@ -11,6 +11,16 @@
 */
#pragma once
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+# define IE_LEGACY_HEADER_INCLUDED
+# ifdef _MSC_VER
+# pragma message( \
+    "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# else
+# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# endif
+#endif
+
#include <memory>
#include <string>
@@ -30,7 +40,7 @@ namespace gpu {
 * GetContext() method of Executable network or using CreateContext() Core call.
 * @note User can also obtain OpenCL context handle from this class.
 */
-class VAContext : public ClContext {
+class INFERENCE_ENGINE_1_0_DEPRECATED VAContext : public ClContext {
public:
    /**
     * @brief A smart pointer to the VAContext object
     */
@@ -55,7 +65,7 @@
 * The plugin object derived from this class can be obtained with CreateBlob() call.
 * @note User can also obtain OpenCL 2D image handle from this class.
 */
-class VASurfaceBlob : public ClImage2DBlob {
+class INFERENCE_ENGINE_1_0_DEPRECATED VASurfaceBlob : public ClImage2DBlob {
public:
    /**
     * @brief A smart pointer to the VASurfaceBlob object
     */
@@ -128,10 +138,10 @@
 * device should be used
 * @return A remote context wrapping `VADisplay`
 */
-static inline VAContext::Ptr make_shared_context(Core& core,
-                                                 std::string deviceName,
-                                                 VADisplay device,
-                                                 int target_tile_id = -1) {
+INFERENCE_ENGINE_1_0_DEPRECATED static inline VAContext::Ptr make_shared_context(Core& core,
+                                                                                 std::string deviceName,
+                                                                                 VADisplay device,
+                                                                                 int target_tile_id = -1) {
    ParamMap contextParams = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)},
                              {GPU_PARAM_KEY(VA_DEVICE), static_cast<gpu_handle_param>(device)},
                              {GPU_PARAM_KEY(TILE_ID), target_tile_id}};
@@ -146,10 +156,10 @@
 * @param plane An index of a plane inside `VASurfaceID` to create blob from
 * @return A remote blob wrapping `VASurfaceID`
 */
-static inline VASurfaceBlob::Ptr make_shared_blob(const TensorDesc& desc,
-                                                  RemoteContext::Ptr ctx,
-                                                  VASurfaceID surface,
-                                                  uint32_t plane = 0) {
+INFERENCE_ENGINE_1_0_DEPRECATED static inline VASurfaceBlob::Ptr make_shared_blob(const TensorDesc& desc,
+                                                                                  RemoteContext::Ptr ctx,
+                                                                                  VASurfaceID surface,
+                                                                                  uint32_t plane = 0) {
    auto casted = std::dynamic_pointer_cast<VAContext>(ctx);
    if (nullptr == casted) {
        IE_THROW() << "Invalid remote context passed";
diff --git a/src/inference/include/ie/gpu/gpu_ocl_wrapper.hpp b/src/inference/include/ie/gpu/gpu_ocl_wrapper.hpp
index f4e055aab92e8a..702686bb12e755 100644
--- a/src/inference/include/ie/gpu/gpu_ocl_wrapper.hpp
+++ b/src/inference/include/ie/gpu/gpu_ocl_wrapper.hpp
@@ -10,6 +10,16 @@
 */
#pragma once
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+# define IE_LEGACY_HEADER_INCLUDED
+# ifdef _MSC_VER
+# 
pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + /** * @brief Definitions required by Khronos headers */ diff --git a/src/inference/include/ie/gpu/gpu_params.hpp b/src/inference/include/ie/gpu/gpu_params.hpp index e8ec526f92d4fc..8b3d7c0dd611d4 100644 --- a/src/inference/include/ie/gpu/gpu_params.hpp +++ b/src/inference/include/ie/gpu/gpu_params.hpp @@ -11,8 +11,20 @@ */ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include +#include "ie_api.h" + namespace InferenceEngine { /** * @brief Shortcut for defining a handle parameter @@ -46,101 +58,101 @@ namespace GPUContextParams { * @brief Shared device context type: can be either pure OpenCL (OCL) * or shared video decoder (VA_SHARED) context */ -DECLARE_GPU_PARAM_KEY(CONTEXT_TYPE, std::string); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(CONTEXT_TYPE, std::string); /** * @brief Pure OpenCL device context */ -DECLARE_GPU_PARAM_VALUE(OCL); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(OCL); /** * @brief Shared context (video decoder or D3D) */ -DECLARE_GPU_PARAM_VALUE(VA_SHARED); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(VA_SHARED); /** * @brief This key identifies OpenCL context handle * in a shared context or shared memory blob parameter map */ -DECLARE_GPU_PARAM_KEY(OCL_CONTEXT, gpu_handle_param); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(OCL_CONTEXT, gpu_handle_param); /** * @brief This key identifies ID of device in OpenCL context * if multiple devices are present in the context */ -DECLARE_GPU_PARAM_KEY(OCL_CONTEXT_DEVICE_ID, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(OCL_CONTEXT_DEVICE_ID, int); /** * @brief In case of multi-tile system, * this key identifies tile within given context */ -DECLARE_GPU_PARAM_KEY(TILE_ID, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(TILE_ID, int); /** * @brief This key identifies OpenCL queue handle in a shared context */ -DECLARE_GPU_PARAM_KEY(OCL_QUEUE, gpu_handle_param); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(OCL_QUEUE, gpu_handle_param); /** * @brief This key identifies video acceleration device/display handle * in a shared context or shared memory blob parameter map */ -DECLARE_GPU_PARAM_KEY(VA_DEVICE, gpu_handle_param); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(VA_DEVICE, gpu_handle_param); /** * @brief This key identifies type of internal shared memory * in a shared memory blob parameter map. 
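 * A parameter-map sketch (the `surface` handle and plane index are illustrative
 * placeholders; the keys and values are the ones declared in this header):
 *   InferenceEngine::ParamMap params = {
 *       {GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
 *       {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast<gpu_handle_param>(surface)},
 *       {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}};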
*/ -DECLARE_GPU_PARAM_KEY(SHARED_MEM_TYPE, std::string); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(SHARED_MEM_TYPE, std::string); /** * @brief Shared OpenCL buffer blob */ -DECLARE_GPU_PARAM_VALUE(OCL_BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(OCL_BUFFER); /** * @brief Shared OpenCL 2D image blob */ -DECLARE_GPU_PARAM_VALUE(OCL_IMAGE2D); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(OCL_IMAGE2D); /** * @brief Shared USM pointer allocated by user */ -DECLARE_GPU_PARAM_VALUE(USM_USER_BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(USM_USER_BUFFER); /** * @brief Shared USM pointer type with host allocation type allocated by plugin */ -DECLARE_GPU_PARAM_VALUE(USM_HOST_BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(USM_HOST_BUFFER); /** * @brief Shared USM pointer type with device allocation type allocated by plugin */ -DECLARE_GPU_PARAM_VALUE(USM_DEVICE_BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(USM_DEVICE_BUFFER); /** * @brief Shared video decoder surface or D3D 2D texture blob */ -DECLARE_GPU_PARAM_VALUE(VA_SURFACE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(VA_SURFACE); /** * @brief Shared D3D buffer blob */ -DECLARE_GPU_PARAM_VALUE(DX_BUFFER); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_VALUE(DX_BUFFER); /** * @brief This key identifies OpenCL memory handle * in a shared memory blob parameter map */ -DECLARE_GPU_PARAM_KEY(MEM_HANDLE, gpu_handle_param); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(MEM_HANDLE, gpu_handle_param); /** * @brief This key identifies video decoder surface handle * in a shared memory blob parameter map */ #ifdef _WIN32 -DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, gpu_handle_param); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, gpu_handle_param); #else -DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, uint32_t); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, uint32_t); #endif /** * @brief This key identifies video decoder surface plane * in a shared memory blob parameter map */ -DECLARE_GPU_PARAM_KEY(VA_PLANE, uint32_t); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GPU_PARAM_KEY(VA_PLANE, uint32_t); } // namespace GPUContextParams } // namespace InferenceEngine diff --git a/src/inference/include/ie/hetero/hetero_plugin_config.hpp b/src/inference/include/ie/hetero/hetero_plugin_config.hpp index a39d6e19c081c1..ada6b20ca84327 100644 --- a/src/inference/include/ie/hetero/hetero_plugin_config.hpp +++ b/src/inference/include/ie/hetero/hetero_plugin_config.hpp @@ -11,6 +11,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "ie_plugin_config.hpp" namespace InferenceEngine { @@ -32,7 +42,7 @@ namespace HeteroConfigParams { * this network would be executed on different devices to the disk in GraphViz format. 
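 * For example, to enable the dump (a sketch assuming the legacy Core::SetConfig API
 * and the HETERO_CONFIG_KEY helper macro declared in this header):
 *   ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO");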
* This option should be used with values: CONFIG_VALUE(NO) (default) or CONFIG_VALUE(YES) */ -DECLARE_HETERO_CONFIG_KEY(DUMP_GRAPH_DOT); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_HETERO_CONFIG_KEY(DUMP_GRAPH_DOT); } // namespace HeteroConfigParams } // namespace InferenceEngine diff --git a/src/inference/include/ie/multi-device/multi_device_config.hpp b/src/inference/include/ie/multi-device/multi_device_config.hpp index 83f05f12dfa746..b51916f73d15dc 100644 --- a/src/inference/include/ie/multi-device/multi_device_config.hpp +++ b/src/inference/include/ie/multi-device/multi_device_config.hpp @@ -11,6 +11,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "ie_plugin_config.hpp" namespace InferenceEngine { @@ -32,7 +42,7 @@ namespace MultiDeviceConfigParams { /** * @brief Device Priorities config option, with comma-separated devices listed in the desired priority */ -DECLARE_MULTI_CONFIG_KEY(DEVICE_PRIORITIES); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_MULTI_CONFIG_KEY(DEVICE_PRIORITIES); } // namespace MultiDeviceConfigParams } // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/hddl_config.hpp b/src/inference/include/ie/vpu/hddl_config.hpp index 8faf92c32781de..777c1f5e1d63c0 100644 --- a/src/inference/include/ie/vpu/hddl_config.hpp +++ b/src/inference/include/ie/vpu/hddl_config.hpp @@ -11,6 +11,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "vpu_config.hpp" namespace InferenceEngine { @@ -20,67 +30,67 @@ namespace Metrics { /** * @brief Metric to get a int of the device number, String value is METRIC_HDDL_DEVICE_NUM */ -DECLARE_METRIC_KEY(HDDL_DEVICE_NUM, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NUM, int); /** * @brief Metric to get a std::vector of device names, String value is METRIC_HDDL_DEVICE_NAME */ -DECLARE_METRIC_KEY(HDDL_DEVICE_NAME, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NAME, std::vector); /** * @brief Metric to get a std::vector of device thermal, String value is METRIC_HDDL_DEVICE_THERMAL */ -DECLARE_METRIC_KEY(HDDL_DEVICE_THERMAL, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_THERMAL, std::vector); /** * @brief Metric to get a std::vector of device ids, String value is METRIC_HDDL_DEVICE_ID */ -DECLARE_METRIC_KEY(HDDL_DEVICE_ID, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_ID, std::vector); /** * @brief Metric to get a std::vector of device subclasses, String value is METRIC_HDDL_DEVICE_SUBCLASS */ -DECLARE_METRIC_KEY(HDDL_DEVICE_SUBCLASS, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_SUBCLASS, std::vector); /** * @brief Metric to get a std::vector of device total memory, String value is METRIC_HDDL_MEMORY_TOTAL */ -DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_TOTAL, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_TOTAL, std::vector); /** * @brief Metric to get a std::vector of device used memory, String value is METRIC_HDDL_DEVICE_MEMORY_USED */ -DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_USED, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_USED, std::vector); /** * @brief Metric to get a std::vector of device utilization, String value is METRIC_HDDL_DEVICE_UTILIZATION */ -DECLARE_METRIC_KEY(HDDL_DEVICE_UTILIZATION, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_UTILIZATION, std::vector); /** * @brief Metric to get a std::vector of stream ids, String value is METRIC_HDDL_DEVICE_STREAM_ID */ -DECLARE_METRIC_KEY(HDDL_STREAM_ID, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_STREAM_ID, std::vector); /** * @brief Metric to get a std::vector of device tags, String value is METRIC_HDDL_DEVICE_TAG */ -DECLARE_METRIC_KEY(HDDL_DEVICE_TAG, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TAG, std::vector); /** * @brief Metric to get a std::vector of group ids, String value is METRIC_HDDL_GROUP_ID */ -DECLARE_METRIC_KEY(HDDL_GROUP_ID, std::vector); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_GROUP_ID, std::vector); /** * @brief Metric to get a int number of device be using for group, String value is METRIC_HDDL_DEVICE_GROUP_USING_NUM */ -DECLARE_METRIC_KEY(HDDL_DEVICE_GROUP_USING_NUM, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_GROUP_USING_NUM, int); /** * @brief Metric to get a int number of total device, String value is METRIC_HDDL_DEVICE_TOTAL_NUM */ -DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int); } // namespace Metrics @@ -100,7 +110,7 @@ DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int); * } * It means that an executable network marked 
with tagA will be executed on 3 devices */ -DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -116,7 +126,7 @@ DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG); * } * It means that 5 device will be used for stream-affinity */ -DECLARE_VPU_CONFIG(HDDL_STREAM_ID); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_STREAM_ID); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -132,7 +142,7 @@ DECLARE_VPU_CONFIG(HDDL_STREAM_ID); * } * It means that 5 device will be used for Bypass scheduler. */ -DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -145,7 +155,7 @@ DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG); * is allocated on multiple other devices (also set BIND_DEVICE to "False"), then inference through any handle of these * networks may be executed on any of these devices those have the network loaded. */ -DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -155,7 +165,7 @@ DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE); * Scheduler), the device with a larger number has a higher priority, and more inference tasks will be fed to it with * priority. */ -DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -164,7 +174,7 @@ DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY); * (managed by SGAD scheduler) will be loaded with this graph. The number of network that can be loaded to one device * can exceed one. Once application deallocates 1 network from device, all devices will unload the network from them. */ -DECLARE_VPU_CONFIG(HDDL_USE_SGAD); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_USE_SGAD); /** * @brief [Only for OpenVINO Intel HDDL device] @@ -173,6 +183,6 @@ DECLARE_VPU_CONFIG(HDDL_USE_SGAD); * can use this device grouped by calling this group id while other client can't use this device * Each device has their own group id. Device in one group shares same group id. */ -DECLARE_VPU_CONFIG(HDDL_GROUP_DEVICE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GROUP_DEVICE); } // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/myriad_config.hpp b/src/inference/include/ie/vpu/myriad_config.hpp index 092fea346e24d0..7fc7d774941526 100644 --- a/src/inference/include/ie/vpu/myriad_config.hpp +++ b/src/inference/include/ie/vpu/myriad_config.hpp @@ -11,6 +11,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include "vpu_config.hpp" namespace InferenceEngine { @@ -22,32 +32,32 @@ namespace InferenceEngine { * CONFIG_VALUE(YES) * CONFIG_VALUE(NO) (default value) */ -DECLARE_VPU_CONFIG(MYRIAD_ENABLE_FORCE_RESET); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_FORCE_RESET); /** * @brief This option allows to specify device memory type. */ -DECLARE_VPU_CONFIG(MYRIAD_DDR_TYPE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_TYPE); /** * @brief Supported keys definition for InferenceEngine::MYRIAD_DDR_TYPE option. */ -DECLARE_VPU_CONFIG(MYRIAD_DDR_AUTO); -DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_2GB); -DECLARE_VPU_CONFIG(MYRIAD_DDR_SAMSUNG_2GB); -DECLARE_VPU_CONFIG(MYRIAD_DDR_HYNIX_2GB); -DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_1GB); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_AUTO); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_2GB); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_SAMSUNG_2GB); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_HYNIX_2GB); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_1GB); /** * @brief This option allows to specify protocol. */ -DECLARE_VPU_CONFIG(MYRIAD_PROTOCOL); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PROTOCOL); /** * @brief Supported keys definition for InferenceEngine::MYRIAD_PROTOCOL option. */ -DECLARE_VPU_CONFIG(MYRIAD_PCIE); -DECLARE_VPU_CONFIG(MYRIAD_USB); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PCIE); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_USB); /** * @brief Optimize vpu plugin execution to maximize throughput. @@ -57,11 +67,11 @@ DECLARE_VPU_CONFIG(MYRIAD_USB); * 2 * 3 */ -DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS); /** * @brief Default key definition for InferenceEngine::MYRIAD_THROUGHPUT_STREAMS option. */ -DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS_AUTO); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS_AUTO); } // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/vpu_config.hpp b/src/inference/include/ie/vpu/vpu_config.hpp index 765cd4c27ca61b..1755ed4d9fff0c 100644 --- a/src/inference/include/ie/vpu/vpu_config.hpp +++ b/src/inference/include/ie/vpu/vpu_config.hpp @@ -12,6 +12,16 @@ #pragma once +#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) +# define IE_LEGACY_HEADER_INCLUDED +# ifdef _MSC_VER +# pragma message( \ + "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# else +# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") +# endif +#endif + #include #include "ie_api.h" @@ -31,7 +41,7 @@ namespace InferenceEngine { * CONFIG_VALUE(YES) (default value) * CONFIG_VALUE(NO) */ -DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION); /** * @brief The flag for adding to the profiling information the time of obtaining a tensor. 
@@ -39,12 +49,12 @@ DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION); * CONFIG_VALUE(YES) * CONFIG_VALUE(NO) (default value) */ -DECLARE_VPU_CONFIG(MYRIAD_ENABLE_RECEIVING_TENSOR_TIME); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_RECEIVING_TENSOR_TIME); /** * @brief This option allows to pass custom layers binding xml. * If layer is present in such an xml, it would be used during inference even if the layer is natively supported */ -DECLARE_VPU_CONFIG(MYRIAD_CUSTOM_LAYERS); +INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_CUSTOM_LAYERS); } // namespace InferenceEngine From 4a73e02391741d683a8a9870390922de8086c976 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 9 Jun 2023 17:20:20 +0400 Subject: [PATCH 2/4] Remove legacy API from GPU headers --- .../openvino/runtime/intel_gpu/ocl/dx.hpp | 55 ++++++++------- .../openvino/runtime/intel_gpu/ocl/ocl.hpp | 70 ++++++++++--------- .../openvino/runtime/intel_gpu/ocl/va.hpp | 38 +++++----- 3 files changed, 85 insertions(+), 78 deletions(-) diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp index 8621e350fc6493..0648dcab1e3a1b 100644 --- a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp +++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp @@ -25,6 +25,7 @@ #include #include "openvino/runtime/intel_gpu/ocl/ocl.hpp" +#include "openvino/runtime/intel_gpu/remote_properties.hpp" namespace ov { namespace intel_gpu { @@ -44,9 +45,9 @@ class D3DBufferTensor : public ClBufferTensor { * @param tensor a tensor to check */ static void type_check(const Tensor& tensor) { - RemoteTensor::type_check( - tensor, - {{GPU_PARAM_KEY(DEV_OBJECT_HANDLE), {}}, {GPU_PARAM_KEY(SHARED_MEM_TYPE), {GPU_PARAM_VALUE(DX_BUFFER)}}}); + RemoteTensor::type_check(tensor, + {{ov::intel_gpu::dev_object_handle.name(), {}}, + {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::DX_BUFFER}}}); } /** @@ -54,7 +55,8 @@ class D3DBufferTensor : public ClBufferTensor { * @return Pointer to underlying ID3D11Buffer interface */ operator ID3D11Buffer*() { - return static_cast(get_params().at(GPU_PARAM_KEY(DEV_OBJECT_HANDLE)).as()); + return static_cast( + get_params().at(ov::intel_gpu::dev_object_handle.name()).as()); } }; @@ -73,9 +75,9 @@ class D3DSurface2DTensor : public ClImage2DTensor { */ static void type_check(const Tensor& remote_tensor) { RemoteTensor::type_check(remote_tensor, - {{GPU_PARAM_KEY(DEV_OBJECT_HANDLE), {}}, - {GPU_PARAM_KEY(VA_PLANE), {}}, - {GPU_PARAM_KEY(SHARED_MEM_TYPE), {GPU_PARAM_VALUE(VA_SURFACE)}}}); + {{ov::intel_gpu::dev_object_handle.name(), {}}, + {ov::intel_gpu::va_plane.name(), {}}, + {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::VA_SURFACE}}}); } /** @@ -83,7 +85,8 @@ class D3DSurface2DTensor : public ClImage2DTensor { * @return Pointer to underlying ID3D11Texture2D interface */ operator ID3D11Texture2D*() { - return static_cast(get_params().at(GPU_PARAM_KEY(DEV_OBJECT_HANDLE)).as()); + return static_cast( + get_params().at(ov::intel_gpu::dev_object_handle.name()).as()); } /** @@ -91,7 +94,7 @@ class D3DSurface2DTensor : public ClImage2DTensor { * @return Plane ID */ uint32_t plane() { - return get_params().at(GPU_PARAM_KEY(VA_PLANE)).as(); + return get_params().at(ov::intel_gpu::va_plane.name()).as(); } }; @@ -113,9 +116,9 @@ class D3DContext : public ClContext { * @param remote_context A remote context to check */ static void type_check(const RemoteContext& remote_context) 
-        RemoteContext::type_check(
-            remote_context,
-            {{GPU_PARAM_KEY(VA_DEVICE), {}}, {GPU_PARAM_KEY(CONTEXT_TYPE), {GPU_PARAM_VALUE(VA_SHARED)}}});
+        RemoteContext::type_check(remote_context,
+                                  {{ov::intel_gpu::va_device.name(), {}},
+                                   {ov::intel_gpu::context_type.name(), {ov::intel_gpu::ContextType::VA_SHARED}}});
     }
 
     /**
@@ -123,7 +126,7 @@ class D3DContext : public ClContext {
      * @return Pointer to underlying ID3D11Device interface
      */
     operator ID3D11Device*() {
-        return static_cast<ID3D11Device*>(get_params().at(GPU_PARAM_KEY(VA_DEVICE)).as<gpu_handle_param>());
+        return static_cast<ID3D11Device*>(get_params().at(ov::intel_gpu::va_device.name()).as<gpu_handle_param>());
     }
 
     /**
@@ -136,9 +139,9 @@ class D3DContext : public ClContext {
     D3DContext(Core& core, ID3D11Device* device, int target_tile_id = -1) : ClContext(core, (cl_context) nullptr) {
         // clang-format off
         AnyMap context_params = {
-            {GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)},
-            {GPU_PARAM_KEY(VA_DEVICE), static_cast<gpu_handle_param>(device)},
-            {GPU_PARAM_KEY(TILE_ID), target_tile_id}
+            {ov::intel_gpu::context_type.name(), ov::intel_gpu::ContextType::VA_SHARED},
+            {ov::intel_gpu::va_device.name(), static_cast<gpu_handle_param>(device)},
+            {ov::intel_gpu::tile_id.name(), target_tile_id}
         };
         *this = core.create_context(device_name, context_params).as<D3DContext>();
     }
@@ -152,12 +155,12 @@ class D3DContext : public ClContext {
      * @return A pair of remote tensors for each plane
      */
     std::pair<D3DSurface2DTensor, D3DSurface2DTensor> create_tensor_nv12(const size_t height,
                                                                          const size_t width,
                                                                          ID3D11Texture2D* nv12_surf) {
-        AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
-                                {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast<gpu_handle_param>(nv12_surf)},
-                                {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}};
+        AnyMap tensor_params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::VA_SURFACE},
+                                {ov::intel_gpu::dev_object_handle.name(), static_cast<gpu_handle_param>(nv12_surf)},
+                                {ov::intel_gpu::va_plane.name(), uint32_t(0)}};
         auto y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params);
-        tensor_params[GPU_PARAM_KEY(MEM_HANDLE)] = static_cast<gpu_handle_param>(nv12_surf);
-        tensor_params[GPU_PARAM_KEY(VA_PLANE)] = uint32_t(1);
+        tensor_params[ov::intel_gpu::mem_handle.name()] = static_cast<gpu_handle_param>(nv12_surf);
+        tensor_params[ov::intel_gpu::va_plane.name()] = uint32_t(1);
         auto uv_tensor = create_tensor(element::u8, {1, 2, height / 2, width / 2}, tensor_params);
         return std::make_pair(y_tensor.as<D3DSurface2DTensor>(), uv_tensor.as<D3DSurface2DTensor>());
     }
@@ -170,8 +173,8 @@ class D3DContext : public ClContext {
      * @return A remote tensor instance
      */
     D3DBufferTensor create_tensor(const element::Type type, const Shape& shape, ID3D11Buffer* buffer) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(DX_BUFFER)},
-                         {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast<gpu_handle_param>(buffer)}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::DX_BUFFER},
+                         {ov::intel_gpu::dev_object_handle.name(), static_cast<gpu_handle_param>(buffer)}};
         return create_tensor(type, shape, params).as<D3DBufferTensor>();
     }
 
@@ -188,9 +191,9 @@ class D3DContext : public ClContext {
                                       const Shape& shape,
                                       ID3D11Texture2D* surface,
                                       uint32_t plane = 0) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
-                         {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast<gpu_handle_param>(surface)},
-                         {GPU_PARAM_KEY(VA_PLANE), plane}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::VA_SURFACE},
+                         {ov::intel_gpu::dev_object_handle.name(), static_cast<gpu_handle_param>(surface)},
+                         {ov::intel_gpu::va_plane.name(), plane}};
         return create_tensor(type, shape, params).as<D3DSurface2DTensor>();
     }
 };

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
index 484b64c1c3169b..39e1452006822c 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -13,9 +13,9 @@
 #include <memory>
 #include <string>
 
-#include "gpu/gpu_params.hpp"
 #include "openvino/runtime/core.hpp"
 #include "openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
+#include "openvino/runtime/intel_gpu/properties.hpp"
 #include "openvino/runtime/remote_context.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 
@@ -55,8 +55,9 @@ class ClBufferTensor : public RemoteTensor {
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(
             tensor,
-            {{GPU_PARAM_KEY(MEM_HANDLE), {}},
-             {GPU_PARAM_KEY(SHARED_MEM_TYPE), {GPU_PARAM_VALUE(OCL_BUFFER), GPU_PARAM_VALUE(DX_BUFFER)}}});
+            {{ov::intel_gpu::mem_handle.name(), {}},
+             {ov::intel_gpu::shared_mem_type.name(),
+              {ov::intel_gpu::SharedMemType::OCL_BUFFER, ov::intel_gpu::SharedMemType::DX_BUFFER}}});
     }
 
     /**
@@ -64,7 +65,7 @@ class ClBufferTensor : public RemoteTensor {
      * @return underlying OpenCL memory object handle
      */
     cl_mem get() {
-        return static_cast<cl_mem>(get_params().at(GPU_PARAM_KEY(MEM_HANDLE)).as<gpu_handle_param>());
+        return static_cast<cl_mem>(get_params().at(ov::intel_gpu::mem_handle.name()).as<gpu_handle_param>());
     }
 
     /**
@@ -100,8 +101,9 @@ class ClImage2DTensor : public RemoteTensor {
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(
             tensor,
-            {{GPU_PARAM_KEY(MEM_HANDLE), {}},
-             {GPU_PARAM_KEY(SHARED_MEM_TYPE), {GPU_PARAM_VALUE(OCL_IMAGE2D), GPU_PARAM_VALUE(VA_SURFACE)}}});
+            {{ov::intel_gpu::mem_handle.name(), {}},
+             {ov::intel_gpu::shared_mem_type.name(),
+              {ov::intel_gpu::SharedMemType::OCL_IMAGE2D, ov::intel_gpu::SharedMemType::VA_SURFACE}}});
     }
 
     /**
@@ -109,7 +111,7 @@ class ClImage2DTensor : public RemoteTensor {
      * @return underlying OpenCL memory object handle
      */
     cl_mem get() {
-        return static_cast<cl_mem>(get_params().at(GPU_PARAM_KEY(MEM_HANDLE)).as<gpu_handle_param>());
+        return static_cast<cl_mem>(get_params().at(ov::intel_gpu::mem_handle.name()).as<gpu_handle_param>());
     }
 
     /**
@@ -144,11 +146,11 @@ class USMTensor : public RemoteTensor {
      */
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(tensor,
-                                 {{GPU_PARAM_KEY(MEM_HANDLE), {}},
-                                  {GPU_PARAM_KEY(SHARED_MEM_TYPE),
-                                   {GPU_PARAM_VALUE(USM_USER_BUFFER),
-                                    GPU_PARAM_VALUE(USM_HOST_BUFFER),
-                                    GPU_PARAM_VALUE(USM_DEVICE_BUFFER)}}});
+                                 {{ov::intel_gpu::mem_handle.name(), {}},
+                                  {ov::intel_gpu::shared_mem_type.name(),
+                                   {ov::intel_gpu::SharedMemType::USM_USER_BUFFER,
+                                    ov::intel_gpu::SharedMemType::USM_HOST_BUFFER,
+                                    ov::intel_gpu::SharedMemType::USM_DEVICE_BUFFER}}});
     }
 
     /**
@@ -156,7 +158,7 @@ class USMTensor : public RemoteTensor {
      * @return underlying USM pointer
      */
     void* get() {
-        return static_cast<void*>(get_params().at(GPU_PARAM_KEY(MEM_HANDLE)).as<gpu_handle_param>());
+        return static_cast<void*>(get_params().at(ov::intel_gpu::mem_handle.name()).as<gpu_handle_param>());
     }
 };
 
@@ -183,8 +185,9 @@ class ClContext : public RemoteContext {
      */
     static void type_check(const RemoteContext& remote_context) {
         RemoteContext::type_check(remote_context,
-                                  {{GPU_PARAM_KEY(OCL_CONTEXT), {}},
-                                   {GPU_PARAM_KEY(CONTEXT_TYPE), {GPU_PARAM_VALUE(OCL), GPU_PARAM_VALUE(VA_SHARED)}}});
+                                  {{ov::intel_gpu::ocl_context.name(), {}},
+                                   {ov::intel_gpu::context_type.name(),
+                                    {ov::intel_gpu::ContextType::OCL, ov::intel_gpu::ContextType::VA_SHARED}}});
     }
 
     /**
@@ -194,9 +197,9 @@ class ClContext : public RemoteContext {
      * @param ctx_device_id An ID of device to be used from ctx
      */
     ClContext(Core& core, cl_context ctx, int ctx_device_id = 0) {
-        AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)},
-                                 {GPU_PARAM_KEY(OCL_CONTEXT), static_cast<gpu_handle_param>(ctx)},
-                                 {GPU_PARAM_KEY(OCL_CONTEXT_DEVICE_ID), ctx_device_id}};
+        AnyMap context_params = {{ov::intel_gpu::context_type.name(), ov::intel_gpu::ContextType::OCL},
+                                 {ov::intel_gpu::ocl_context.name(), static_cast<gpu_handle_param>(ctx)},
+                                 {ov::intel_gpu::ocl_context_device_id.name(), ctx_device_id}};
         *this = core.create_context(device_name, context_params).as<ClContext>();
     }
 
@@ -210,9 +213,9 @@ class ClContext : public RemoteContext {
         cl_context ctx;
         auto res = clGetCommandQueueInfo(queue, CL_QUEUE_CONTEXT, sizeof(cl_context), &ctx, nullptr);
         OPENVINO_ASSERT(res == CL_SUCCESS, "Can't get context from given opencl queue");
-        AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)},
-                                 {GPU_PARAM_KEY(OCL_CONTEXT), static_cast<gpu_handle_param>(ctx)},
-                                 {GPU_PARAM_KEY(OCL_QUEUE), static_cast<gpu_handle_param>(queue)}};
+        AnyMap context_params = {{ov::intel_gpu::context_type.name(), ov::intel_gpu::ContextType::OCL},
+                                 {ov::intel_gpu::ocl_context.name(), static_cast<gpu_handle_param>(ctx)},
+                                 {ov::intel_gpu::ocl_queue.name(), static_cast<gpu_handle_param>(queue)}};
         *this = core.create_context(device_name, context_params).as<ClContext>();
     }
 
@@ -221,7 +224,7 @@ class ClContext : public RemoteContext {
      * @return `cl_context`
      */
     cl_context get() {
-        return static_cast<cl_context>(get_params().at(GPU_PARAM_KEY(OCL_CONTEXT)).as<gpu_handle_param>());
+        return static_cast<cl_context>(get_params().at(ov::intel_gpu::ocl_context.name()).as<gpu_handle_param>());
     }
 
     /**
@@ -251,10 +254,11 @@ class ClContext : public RemoteContext {
                                                                  const cl::Image2D& nv12_image_plane_uv) {
         size_t width = nv12_image_plane_y.getImageInfo<CL_IMAGE_WIDTH>();
         size_t height = nv12_image_plane_y.getImageInfo<CL_IMAGE_HEIGHT>();
-        AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)},
-                                {GPU_PARAM_KEY(MEM_HANDLE), static_cast<gpu_handle_param>(nv12_image_plane_y.get())}};
+        AnyMap tensor_params = {
+            {ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::OCL_IMAGE2D},
+            {ov::intel_gpu::mem_handle.name(), static_cast<gpu_handle_param>(nv12_image_plane_y.get())}};
         auto y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params);
-        tensor_params[GPU_PARAM_KEY(MEM_HANDLE)] = static_cast<gpu_handle_param>(nv12_image_plane_uv.get());
+        tensor_params[ov::intel_gpu::mem_handle.name()] = static_cast<gpu_handle_param>(nv12_image_plane_uv.get());
         auto uv_tensor = create_tensor(element::u8, {1, 2, height / 2, width / 2}, tensor_params);
         return std::make_pair(y_tensor.as<ClImage2DTensor>(), uv_tensor.as<ClImage2DTensor>());
     }
 
@@ -267,8 +271,8 @@ class ClContext : public RemoteContext {
      * @return A remote tensor instance
      */
     ClBufferTensor create_tensor(const element::Type type, const Shape& shape, const cl_mem buffer) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_BUFFER)},
-                         {GPU_PARAM_KEY(MEM_HANDLE), static_cast<gpu_handle_param>(buffer)}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::OCL_BUFFER},
+                         {ov::intel_gpu::mem_handle.name(), static_cast<gpu_handle_param>(buffer)}};
         return create_tensor(type, shape, params).as<ClBufferTensor>();
     }
 
@@ -291,8 +295,8 @@ class ClContext : public RemoteContext {
      * @return A remote tensor instance
      */
     ClImage2DTensor create_tensor(const element::Type type, const Shape& shape, const cl::Image2D& image) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)},
-                         {GPU_PARAM_KEY(MEM_HANDLE), static_cast<gpu_handle_param>(image.get())}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::OCL_IMAGE2D},
+                         {ov::intel_gpu::mem_handle.name(), static_cast<gpu_handle_param>(image.get())}};
         return create_tensor(type, shape, params).as<ClImage2DTensor>();
     }
@@ -304,8 +308,8 @@ class ClContext : public RemoteContext {
      * @return A remote tensor instance
      */
     USMTensor create_tensor(const element::Type type, const Shape& shape, void* usm_ptr) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_USER_BUFFER)},
-                         {GPU_PARAM_KEY(MEM_HANDLE), static_cast<gpu_handle_param>(usm_ptr)}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::USM_USER_BUFFER},
+                         {ov::intel_gpu::mem_handle.name(), static_cast<gpu_handle_param>(usm_ptr)}};
         return create_tensor(type, shape, params).as<USMTensor>();
     }
 
@@ -316,7 +320,7 @@ class ClContext : public RemoteContext {
      * @return A remote tensor instance
      */
     USMTensor create_usm_host_tensor(const element::Type type, const Shape& shape) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_HOST_BUFFER)}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::USM_HOST_BUFFER}};
         return create_tensor(type, shape, params).as<USMTensor>();
     }
 
@@ -327,7 +331,7 @@ class ClContext : public RemoteContext {
      * @return A remote tensor instance
      */
     USMTensor create_usm_device_tensor(const element::Type type, const Shape& shape) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_DEVICE_BUFFER)}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::USM_DEVICE_BUFFER}};
         return create_tensor(type, shape, params).as<USMTensor>();
     }
 };

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
index 06e5812ed165f1..196373caa15931 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
@@ -43,16 +43,16 @@ class VASurfaceTensor : public ClImage2DTensor {
      */
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(tensor,
-                                 {{GPU_PARAM_KEY(DEV_OBJECT_HANDLE), {}},
-                                  {GPU_PARAM_KEY(VA_PLANE), {}},
-                                  {GPU_PARAM_KEY(SHARED_MEM_TYPE), {GPU_PARAM_VALUE(VA_SURFACE)}}});
+                                 {{ov::intel_gpu::dev_object_handle.name(), {}},
+                                  {ov::intel_gpu::va_plane.name(), {}},
+                                  {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::VA_SURFACE}}});
     }
 
     /**
      * @brief VASurfaceID conversion operator for the VASurfaceTensor object.
      * @return `VASurfaceID` handle
      */
     operator VASurfaceID() {
-        return static_cast<VASurfaceID>(get_params().at(GPU_PARAM_KEY(DEV_OBJECT_HANDLE)).as<uint32_t>());
+        return static_cast<VASurfaceID>(get_params().at(ov::intel_gpu::dev_object_handle.name()).as<uint32_t>());
     }
 
     /**
@@ -60,7 +60,7 @@ class VASurfaceTensor : public ClImage2DTensor {
      * @return Plane ID
      */
     uint32_t plane() {
-        return get_params().at(GPU_PARAM_KEY(VA_PLANE)).as<uint32_t>();
+        return get_params().at(ov::intel_gpu::va_plane.name()).as<uint32_t>();
     }
 };
 
@@ -82,9 +82,9 @@ class VAContext : public ClContext {
      * @param remote_context A remote context to check
      */
     static void type_check(const RemoteContext& remote_context) {
-        RemoteContext::type_check(
-            remote_context,
-            {{GPU_PARAM_KEY(VA_DEVICE), {}}, {GPU_PARAM_KEY(CONTEXT_TYPE), {GPU_PARAM_VALUE(VA_SHARED)}}});
+        RemoteContext::type_check(remote_context,
+                                  {{ov::intel_gpu::va_device.name(), {}},
+                                   {ov::intel_gpu::context_type.name(), {ov::intel_gpu::ContextType::VA_SHARED}}});
     }
 
     /**
@@ -92,7 +92,7 @@ class VAContext : public ClContext {
      * @return Underlying `VADisplay` object handle
      */
     operator VADisplay() {
-        return static_cast<VADisplay>(get_params().at(GPU_PARAM_KEY(VA_DEVICE)).as<gpu_handle_param>());
+        return static_cast<VADisplay>(get_params().at(ov::intel_gpu::va_device.name()).as<gpu_handle_param>());
     }
 
     /**
@@ -103,9 +103,9 @@ class VAContext : public ClContext {
      * that root device should be used
      */
     VAContext(Core& core, VADisplay device, int target_tile_id = -1) : ClContext(core, (cl_context) nullptr) {
-        AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)},
-                                 {GPU_PARAM_KEY(VA_DEVICE), static_cast<gpu_handle_param>(device)},
-                                 {GPU_PARAM_KEY(TILE_ID), target_tile_id}};
+        AnyMap context_params = {{ov::intel_gpu::context_type.name(), ov::intel_gpu::ContextType::VA_SHARED},
+                                 {ov::intel_gpu::va_device.name(), static_cast<gpu_handle_param>(device)},
+                                 {ov::intel_gpu::tile_id.name(), target_tile_id}};
         *this = core.create_context(device_name, context_params).as<VAContext>();
     }
 
@@ -120,11 +120,11 @@ class VAContext : public ClContext {
     std::pair<VASurfaceTensor, VASurfaceTensor> create_tensor_nv12(const size_t height,
                                                                    const size_t width,
                                                                    const VASurfaceID nv12_surf) {
-        AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
-                                {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), nv12_surf},
-                                {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}};
+        AnyMap tensor_params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::VA_SURFACE},
+                                {ov::intel_gpu::dev_object_handle.name(), nv12_surf},
+                                {ov::intel_gpu::va_plane.name(), uint32_t(0)}};
         auto y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params);
-        tensor_params[GPU_PARAM_KEY(VA_PLANE)] = uint32_t(1);
+        tensor_params[ov::intel_gpu::va_plane.name()] = uint32_t(1);
         auto uv_tensor = create_tensor(element::u8, {1, 2, height / 2, width / 2}, tensor_params);
         return std::make_pair(y_tensor.as<VASurfaceTensor>(), uv_tensor.as<VASurfaceTensor>());
     }
 
@@ -141,9 +141,9 @@ class VAContext : public ClContext {
                                  const Shape& shape,
                                  const VASurfaceID surface,
                                  const uint32_t plane = 0) {
-        AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)},
-                         {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), surface},
-                         {GPU_PARAM_KEY(VA_PLANE), plane}};
+        AnyMap params = {{ov::intel_gpu::shared_mem_type.name(), ov::intel_gpu::SharedMemType::VA_SURFACE},
+                         {ov::intel_gpu::dev_object_handle.name(), surface},
+                         {ov::intel_gpu::va_plane.name(), plane}};
         return create_tensor(type, shape, params).as<VASurfaceTensor>();
     }
 };

From de884f00e7d794232dfcebae7f4505f621f88c7d Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Fri, 9 Jun 2023 17:41:10 +0400
Subject: [PATCH 3/4] Fixed include

---
 src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
index 39e1452006822c..9cca8c95e37d50 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -16,6 +16,7 @@
 #include "openvino/runtime/core.hpp"
 #include "openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
 #include "openvino/runtime/intel_gpu/properties.hpp"
+#include "openvino/runtime/intel_gpu/remote_properties.hpp"
 #include "openvino/runtime/remote_context.hpp"
 #include "openvino/runtime/remote_tensor.hpp"

From 752aa56c298e92209745b71832b08e5022190953 Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Fri, 9 Jun 2023 18:18:54 +0400
Subject: [PATCH 4/4] Fixed type_check

---
 .../openvino/runtime/intel_gpu/ocl/dx.hpp  | 17 +++++----
 .../openvino/runtime/intel_gpu/ocl/ocl.hpp | 37 ++++++++++---------
 .../openvino/runtime/intel_gpu/ocl/va.hpp  | 12 +++---
 3 files changed, 36 insertions(+), 30 deletions(-)

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
index 0648dcab1e3a1b..1e50b0dbd61440 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
@@ -46,8 +46,9 @@ class D3DBufferTensor : public ClBufferTensor {
      */
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(tensor,
-                                 {{ov::intel_gpu::dev_object_handle.name(), {}},
-                                  {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::DX_BUFFER}}});
+                                 {{std::string(ov::intel_gpu::dev_object_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::DX_BUFFER).as<std::string>()}}});
     }
 
     /**
@@ -75,9 +76,10 @@ class D3DSurface2DTensor : public ClImage2DTensor {
      */
     static void type_check(const Tensor& remote_tensor) {
         RemoteTensor::type_check(remote_tensor,
-                                 {{ov::intel_gpu::dev_object_handle.name(), {}},
-                                  {ov::intel_gpu::va_plane.name(), {}},
-                                  {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::VA_SURFACE}}});
+                                 {{std::string(ov::intel_gpu::dev_object_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::va_plane.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::VA_SURFACE).as<std::string>()}}});
     }
 
     /**
@@ -117,8 +119,9 @@ class D3DContext : public ClContext {
      */
     static void type_check(const RemoteContext& remote_context) {
         RemoteContext::type_check(remote_context,
-                                  {{ov::intel_gpu::va_device.name(), {}},
-                                   {ov::intel_gpu::context_type.name(), {ov::intel_gpu::ContextType::VA_SHARED}}});
+                                  {{std::string(ov::intel_gpu::va_device.name()), {}},
+                                   {std::string(ov::intel_gpu::context_type.name()),
+                                    {ov::Any(ov::intel_gpu::ContextType::VA_SHARED).as<std::string>()}}});
     }
 
     /**

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
index 9cca8c95e37d50..ff7d1b2b1290bc 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -54,11 +54,11 @@ class ClBufferTensor : public RemoteTensor {
      * @param tensor a tensor to check
      */
     static void type_check(const Tensor& tensor) {
-        RemoteTensor::type_check(
-            tensor,
-            {{ov::intel_gpu::mem_handle.name(), {}},
-             {ov::intel_gpu::shared_mem_type.name(),
-              {ov::intel_gpu::SharedMemType::OCL_BUFFER, ov::intel_gpu::SharedMemType::DX_BUFFER}}});
+        RemoteTensor::type_check(tensor,
+                                 {{std::string(ov::intel_gpu::mem_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::OCL_BUFFER).as<std::string>(),
+                                    ov::Any(ov::intel_gpu::SharedMemType::DX_BUFFER).as<std::string>()}}});
     }
 
     /**
@@ -100,11 +100,11 @@ class ClImage2DTensor : public RemoteTensor {
      * @param tensor a tensor to check
      */
     static void type_check(const Tensor& tensor) {
-        RemoteTensor::type_check(
-            tensor,
-            {{ov::intel_gpu::mem_handle.name(), {}},
-             {ov::intel_gpu::shared_mem_type.name(),
-              {ov::intel_gpu::SharedMemType::OCL_IMAGE2D, ov::intel_gpu::SharedMemType::VA_SURFACE}}});
+        RemoteTensor::type_check(tensor,
+                                 {{std::string(ov::intel_gpu::mem_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::OCL_IMAGE2D).as<std::string>(),
+                                    ov::Any(ov::intel_gpu::SharedMemType::VA_SURFACE).as<std::string>()}}});
     }
 
     /**
@@ -147,11 +147,11 @@ class USMTensor : public RemoteTensor {
      */
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(tensor,
-                                 {{ov::intel_gpu::mem_handle.name(), {}},
-                                  {ov::intel_gpu::shared_mem_type.name(),
-                                   {ov::intel_gpu::SharedMemType::USM_USER_BUFFER,
-                                    ov::intel_gpu::SharedMemType::USM_HOST_BUFFER,
-                                    ov::intel_gpu::SharedMemType::USM_DEVICE_BUFFER}}});
+                                 {{std::string(ov::intel_gpu::mem_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::USM_USER_BUFFER).as<std::string>(),
+                                    ov::Any(ov::intel_gpu::SharedMemType::USM_HOST_BUFFER).as<std::string>(),
+                                    ov::Any(ov::intel_gpu::SharedMemType::USM_DEVICE_BUFFER).as<std::string>()}}});
     }
 
     /**
@@ -186,9 +186,10 @@ class ClContext : public RemoteContext {
      */
     static void type_check(const RemoteContext& remote_context) {
         RemoteContext::type_check(remote_context,
-                                  {{ov::intel_gpu::ocl_context.name(), {}},
-                                   {ov::intel_gpu::context_type.name(),
-                                    {ov::intel_gpu::ContextType::OCL, ov::intel_gpu::ContextType::VA_SHARED}}});
+                                  {{std::string(ov::intel_gpu::ocl_context.name()), {}},
+                                   {std::string(ov::intel_gpu::context_type.name()),
+                                    {ov::Any(ov::intel_gpu::ContextType::OCL).as<std::string>(),
+                                     ov::Any(ov::intel_gpu::ContextType::VA_SHARED).as<std::string>()}}});
     }
 
     /**

diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
index 196373caa15931..91415dae081566 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
@@ -43,9 +43,10 @@ class VASurfaceTensor : public ClImage2DTensor {
      */
     static void type_check(const Tensor& tensor) {
         RemoteTensor::type_check(tensor,
-                                 {{ov::intel_gpu::dev_object_handle.name(), {}},
-                                  {ov::intel_gpu::va_plane.name(), {}},
-                                  {ov::intel_gpu::shared_mem_type.name(), {ov::intel_gpu::SharedMemType::VA_SURFACE}}});
+                                 {{std::string(ov::intel_gpu::dev_object_handle.name()), {}},
+                                  {std::string(ov::intel_gpu::va_plane.name()), {}},
+                                  {std::string(ov::intel_gpu::shared_mem_type.name()),
+                                   {ov::Any(ov::intel_gpu::SharedMemType::VA_SURFACE).as<std::string>()}}});
     }
 
     /**
     * @brief VASurfaceID conversion operator for the VASurfaceTensor object.
@@ -83,8 +84,9 @@ class VAContext : public ClContext {
      */
     static void type_check(const RemoteContext& remote_context) {
         RemoteContext::type_check(remote_context,
-                                  {{ov::intel_gpu::va_device.name(), {}},
-                                   {ov::intel_gpu::context_type.name(), {ov::intel_gpu::ContextType::VA_SHARED}}});
+                                  {{std::string(ov::intel_gpu::va_device.name()), {}},
+                                   {std::string(ov::intel_gpu::context_type.name()),
+                                    {ov::Any(ov::intel_gpu::ContextType::VA_SHARED).as<std::string>()}}});
     }
 
     /**
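
The extra std::string / ov::Any wrapping introduced by [PATCH 4/4] is there because the type_check helpers compare plain string keys and stringified values against the entries of the remote object's parameter map, so the typed property names and enum values have to be converted explicitly. A minimal sketch of the conversion this relies on; the printed value is an assumption based on the SharedMemType stream operator declared in remote_properties.hpp:

    // Sketch only, not part of the patch series.
    #include <iostream>

    #include "openvino/core/any.hpp"
    #include "openvino/runtime/intel_gpu/remote_properties.hpp"

    int main() {
        // Wrap the typed enum in ov::Any; as<std::string>() falls back to the
        // type's ostream operator, yielding a comparable string value.
        ov::Any value{ov::intel_gpu::SharedMemType::DX_BUFFER};
        std::cout << value.as<std::string>() << std::endl;  // expected: DX_BUFFER
        return 0;
    }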
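Taken together, patches 2-4 keep the public ocl wrapper classes source-compatible while removing the GPU_PARAM_KEY / GPU_PARAM_VALUE macros underneath. A minimal usage sketch under assumed conditions (a single-input model, a placeholder "model.xml" path, and an OpenCL context owned elsewhere by the application); this is an illustration, not code from the patch series:

    // Sketch only: shares an application-owned cl_context with OpenVINO.
    #include "openvino/openvino.hpp"
    #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

    void run_on_shared_context(cl_context app_ctx) {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder path

        // Wraps the application's cl_context; after patch 2 this fills the
        // AnyMap with ov::intel_gpu::context_type / ocl_context property
        // names instead of the removed GPU_PARAM_KEY macros.
        ov::intel_gpu::ocl::ClContext gpu_context(core, app_ctx);

        auto compiled = core.compile_model(model, gpu_context);

        // USM host allocation goes through the same property-based path
        // (shared_mem_type == SharedMemType::USM_HOST_BUFFER).
        auto input = compiled.input();  // assumes a single-input model
        auto tensor = gpu_context.create_usm_host_tensor(input.get_element_type(),
                                                         input.get_shape());

        auto request = compiled.create_infer_request();
        request.set_input_tensor(tensor);
        request.infer();
    }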