Deprecate plugins config keys #17974

Merged
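The deprecations below steer code toward OpenVINO API 2.0, per the transition guide linked in each warning. As a rough, hedged sketch of the replacement path (not part of this diff; it assumes the 2.0 CPU property header exposes ov::intel_cpu::denormals_optimization, and the model path is a placeholder):

```cpp
// Hedged sketch of the API 2.0 style that replaces the string config keys
// deprecated in this PR; see the transition guide linked in the warnings.
#include <openvino/runtime/core.hpp>
#include <openvino/runtime/intel_cpu/properties.hpp>  // assumed location of the CPU properties

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path

    // Typed property instead of the legacy pair {"CPU_DENORMALS_OPTIMIZATION", "YES"}:
    auto compiled = core.compile_model(model, "CPU",
                                       ov::intel_cpu::denormals_optimization(true));
    (void)compiled;
}
```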
src/inference/include/ie/cpu/cpu_config.hpp (14 changes: 12 additions & 2 deletions)
@@ -10,6 +10,16 @@
*/
#pragma once

+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+# define IE_LEGACY_HEADER_INCLUDED
+# ifdef _MSC_VER
+#  pragma message( \
+       "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# else
+#  warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# endif
+#endif
+
#include "ie_plugin_config.hpp"

namespace InferenceEngine {
@@ -38,9 +48,9 @@ namespace CPUConfigParams {
* PluginConfigParams::YES or PluginConfigParams::NO
* If not set explicitly, the behavior is kept in the runtime environment where compile_model is called.
*/
-DECLARE_CPU_CONFIG_KEY(DENORMALS_OPTIMIZATION);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CPU_CONFIG_KEY(DENORMALS_OPTIMIZATION);

-DECLARE_CPU_CONFIG_KEY(SPARSE_WEIGHTS_DECOMPRESSION_RATE);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CPU_CONFIG_KEY(SPARSE_WEIGHTS_DECOMPRESSION_RATE);

} // namespace CPUConfigParams
} // namespace InferenceEngine
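The diff relies on two mechanisms: a one-time #pragma message / #warning when the legacy header is included outside an OV component, and a per-symbol INFERENCE_ENGINE_1_0_DEPRECATED marker on each config key. A minimal self-contained sketch of how that marker plausibly works (the actual macro definition is not part of this diff; the expansion below is an assumption):

```cpp
// Sketch only: INFERENCE_ENGINE_1_0_DEPRECATED is assumed here to resolve to a
// compiler deprecation attribute; the real definition lives elsewhere in OpenVINO.
#include <iostream>

#if defined(_MSC_VER)
#    define INFERENCE_ENGINE_1_0_DEPRECATED __declspec(deprecated)
#else
#    define INFERENCE_ENGINE_1_0_DEPRECATED __attribute__((deprecated))
#endif

// Stand-in for what DECLARE_CPU_CONFIG_KEY(DENORMALS_OPTIMIZATION) might declare:
INFERENCE_ENGINE_1_0_DEPRECATED
static constexpr const char* KEY_CPU_DENORMALS_OPTIMIZATION = "CPU_DENORMALS_OPTIMIZATION";

int main() {
    // Any use of the marked key now triggers a build warning, e.g.
    // "'KEY_CPU_DENORMALS_OPTIMIZATION' is deprecated".
    std::cout << KEY_CPU_DENORMALS_OPTIMIZATION << "\n";
}
```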
src/inference/include/ie/gna/gna_config.hpp (79 changes: 38 additions & 41 deletions)
@@ -11,6 +11,16 @@

#pragma once

+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+# define IE_LEGACY_HEADER_INCLUDED
+# ifdef _MSC_VER
+#  pragma message( \
+       "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# else
+#  warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# endif
+#endif
+
#include "ie_plugin_config.hpp"

namespace InferenceEngine {
@@ -40,128 +50,119 @@ namespace GNAConfigParams {
* @details For multiple input case, individual scale factors can be passed, using
* KEY_GNA_SCALE_FACTOR[_input_layer_name] where input_layer can be obtained from CNNNetwork::GetInputsInfo
*/
-DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR);

/**
* @brief By default the GNA API works with Int16 weights precision; however, this can be adjusted if necessary.
* Currently supported values are I16 and I8.
*/
-DECLARE_GNA_CONFIG_KEY(PRECISION);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PRECISION);

/**
* @brief if turned on, dump GNA firmware model into specified file
*/
-DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE);

/**
* @brief GNA proc_type setting that should be one of GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_EXACT
*/
-DECLARE_GNA_CONFIG_KEY(DEVICE_MODE);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(DEVICE_MODE);

/**
* @brief Specific software acceleration mode.
* Uses Intel GNA if available, otherwise uses software execution mode on CPU.
*/
-DECLARE_GNA_CONFIG_VALUE(AUTO);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AUTO);

/**
* @brief Specific software acceleration mode.
* Uses Intel GNA if available, otherwise raises an error.
*/
-DECLARE_GNA_CONFIG_VALUE(HW);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(HW);

/**
* @brief Specific software acceleration mode.
* Uses Intel GNA if available, otherwise raises an error.
* If the hardware queue is not empty, automatically falls back to CPU in the bit-exact mode.
*/
-DECLARE_GNA_CONFIG_VALUE(HW_WITH_SW_FBACK);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(HW_WITH_SW_FBACK);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(SW);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW);

/**
* @brief Specific software acceleration mode.
* Executes the GNA-compiled graph on CPU performing calculations
* in the same precision as the Intel GNA in the bit-exact mode.
*/
-DECLARE_GNA_CONFIG_VALUE(SW_EXACT);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW_EXACT);

/**
* @brief Specific software acceleration mode.
* Executes the GNA-compiled graph on CPU but substitutes parameters and calculations
* from low precision to floating point
*/
-DECLARE_GNA_CONFIG_VALUE(SW_FP32);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SW_FP32);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(GEN);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(GEN);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(GEN_EXACT);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(GEN_EXACT);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(SSE);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SSE);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(SSE_EXACT);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(SSE_EXACT);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(AVX1);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX1);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(AVX1_EXACT);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX1_EXACT);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(AVX2);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX2);

/**
* @brief Specific software acceleration mode.
* @deprecated Mode is deprecated and will be removed in a future release.
* Use InferenceEngine::GNAConfigParams::SW_EXACT instead.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GNAConfigParams::SW_EXACT instead")
DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT);

/**
* @brief The option to override the GNA HW execution target. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0.
@@ -171,21 +172,21 @@ DECLARE_GNA_CONFIG_VALUE(AVX2_EXACT);
* A fully supported GNA HW generation means it must be supported by both the OV GNA Plugin and the core GNA Library.
* For the OV GNA Plugin 2022.1, the latest supported GNA HW generation corresponds to GNA_TARGET_3_0.
*/
-DECLARE_GNA_CONFIG_KEY(EXEC_TARGET);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(EXEC_TARGET);

-DECLARE_GNA_CONFIG_VALUE(TARGET_2_0);
-DECLARE_GNA_CONFIG_VALUE(TARGET_3_0);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(TARGET_2_0);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_VALUE(TARGET_3_0);

/**
* @brief The option to override the GNA HW compile target. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0.
* By default the same as GNA_EXEC_TARGET.
*/
-DECLARE_GNA_CONFIG_KEY(COMPILE_TARGET);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(COMPILE_TARGET);

/**
* @brief If enabled, produces a minimum memory footprint for the loaded network in GNA memory; the default value is YES
*/
-DECLARE_GNA_CONFIG_KEY(COMPACT_MODE);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(COMPACT_MODE);

/**
* @brief The option to enable/disable uniformly distributed PWL algorithm.
@@ -195,17 +196,15 @@ DECLARE_GNA_CONFIG_KEY(COMPACT_MODE);
* Uniform distribution usually gives poor approximation with same number of segments
* @deprecated The config key is deprecated and will be removed in a future release.
*/
INFERENCE_ENGINE_DEPRECATED("The config key is deprected and will be removed")
DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN);

/**
* @brief The option to specify the maximum error percent that the optimization algorithm
* will use to fit PWL functions.
* By default (in case of NO value set), 1.0 value is used.
* @deprecated The config key is deprecated and will be removed in a future release.
*/
INFERENCE_ENGINE_DEPRECATED("The config key is deprected and will be removed")
DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT);

/**
* @brief By default, the GNA plugin uses one worker thread for inference computations.
@@ -215,16 +214,15 @@ DECLARE_GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT);
* of issuing. Additionally, in this case, software modes do not implement any serializations.
* @deprecated The config key is deprecated and will be removed in a future release
*/
INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS);
} // namespace GNAConfigParams

namespace Metrics {
/**
* @brief Metric to get a std::string of GNA Library version, usually in the form
* <API_REVISION>.<RELEASE_LINE>.<RELEASE>.<BUILD>
*/
-DECLARE_METRIC_KEY(GNA_LIBRARY_FULL_VERSION, std::string);
+INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(GNA_LIBRARY_FULL_VERSION, std::string);
} // namespace Metrics

namespace PluginConfigParams {
@@ -236,8 +234,7 @@ namespace PluginConfigParams {
* PluginConfigParams::YES or PluginConfigParams::NO
* @deprecated The config key is deprecated and will be removed in a future release
*/
INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
DECLARE_CONFIG_KEY(SINGLE_THREAD);
INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_CONFIG_KEY(SINGLE_THREAD);

} // namespace PluginConfigParams
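All of the GNA keys above remain usable through the legacy 1.0 API after this change, but now warn at build time. A hedged sketch of the deprecated-but-working usage these keys cover (raw key/value strings are used to sidestep macro expansion details; the model path and scale factor are placeholders):

```cpp
// Hedged sketch of legacy (IE 1.0) usage; it still compiles after this PR but
// each deprecated key reference also surfaces a compiler deprecation warning.
#include <map>
#include <string>

#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder model

    std::map<std::string, std::string> config = {
        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},  // cf. KEY_GNA_DEVICE_MODE above
        {"GNA_SCALE_FACTOR", "2048"},         // cf. KEY_GNA_SCALE_FACTOR above
    };

    auto executable = core.LoadNetwork(network, "GNA", config);
    (void)executable;
}
```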

src/inference/include/ie/gpu/details/gpu_context_helpers.hpp (12 changes: 11 additions & 1 deletion)
@@ -9,6 +9,16 @@
*/
#pragma once

+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+# define IE_LEGACY_HEADER_INCLUDED
+# ifdef _MSC_VER
+#  pragma message( \
+       "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# else
+#  warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+# endif
+#endif
+
#include <string>

#include "ie_parameter.hpp"
@@ -22,7 +32,7 @@ namespace details {
* @brief This wrapper class is used to obtain low-level handles
* from remote blob or context object parameters.
*/
-class param_map_obj_getter {
+class INFERENCE_ENGINE_1_0_DEPRECATED param_map_obj_getter {
protected:
/**
* @brief Template function that returns specified
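The rest of the class body is collapsed in this view. For orientation, a hedged sketch of the pattern a param_map_obj_getter-style helper implements: looking a key up in a string-to-Parameter map and converting the value to the requested low-level handle type. The names and exact signature below are illustrative, not the class's actual code:

```cpp
// Illustrative only: extract a typed low-level handle from an
// InferenceEngine::ParamMap, the way the wrapper's template getters do.
#include <map>
#include <stdexcept>
#include <string>

#include <ie_parameter.hpp>  // assumed to provide InferenceEngine::Parameter / ParamMap

template <typename Result>
Result get_typed_param(const InferenceEngine::ParamMap& params, const std::string& key) {
    auto it = params.find(key);
    if (it == params.end())
        throw std::runtime_error("Parameter " + key + " not found");
    return it->second.as<Result>();  // checked conversion via Parameter::as<T>()
}

// Example (hypothetical key name):
//   auto handle = get_typed_param<cl_context>(context_params, "OCL_CONTEXT");
```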