diff --git a/.github/workflows/job_cxx_unit_tests.yml b/.github/workflows/job_cxx_unit_tests.yml index e484dc59696282..85df068644290c 100644 --- a/.github/workflows/job_cxx_unit_tests.yml +++ b/.github/workflows/job_cxx_unit_tests.yml @@ -211,13 +211,6 @@ jobs: --gtest_filter=*smoke* \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml - - name: Inference Engine C API tests - if: fromJSON(inputs.affected-components).C_API.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/InferenceEngineCAPITests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineCAPITests.xml - - name: OpenVINO C API tests if: fromJSON(inputs.affected-components).C_API.test run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 312bc1a4f7395f..d695a5dd150388 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -656,12 +656,6 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - - name: Inference Engine C API tests - if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test - shell: cmd - run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml - - name: OpenVINO C API tests if: ${{ 'false' }} # Ticket: 123594 shell: cmd diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst index 754d2b12d51583..8941c189c827d6 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst +++ 
b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst @@ -49,13 +49,6 @@ Based on the steps, the following code demonstrates how to change the applicatio :language: cpp :fragment: ie:create_core - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ie_common.c - :language: cpp - :fragment: ie:create_core - **API 2.0** .. tab-set:: @@ -107,14 +100,6 @@ to write extensions. However, you can also load the old extensions to the new Op :language: cpp :fragment: ie:load_old_extension - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ie_common.c - :language: cpp - :fragment: ie:load_old_extension - - **API 2.0** .. tab-set:: @@ -162,14 +147,6 @@ to write extensions. However, you can also load the old extensions to the new Op :language: cpp :fragment: ie:read_model - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ie_common.c - :language: cpp - :fragment: ie:read_model - - **API 2.0** .. tab-set:: @@ -229,14 +206,6 @@ preprocessing may be necessary. See :doc:`preprocessing in API 2.0 - -static void completion_callback(void *args) { - // Operations after infer -} - -int main() { - //! [ie:create_core] - ie_core_t *core = nullptr; - ie_core_create("", &core); - //! [ie:create_core] - - //! [ie:read_model] - ie_network_t *network = nullptr; - ie_core_read_network(core, "model.xml", nullptr, &network); - //! [ie:read_model] - - //! [ie:compile_model] - ie_executable_network_t *exe_network = nullptr; - ie_core_load_network(core, network, "CPU", nullptr, &exe_network); - //! [ie:compile_model] - - //! [ie:create_infer_request] - ie_infer_request_t *infer_request = nullptr; - ie_exec_network_create_infer_request(exe_network, &infer_request); - //! [ie:create_infer_request] - - char *input_name = nullptr; - ie_network_get_input_name(network, 0, &input_name); - //! 
[ie:get_input_tensor] - // fill first blob - ie_blob_t *input_blob1 = nullptr; - { - ie_infer_request_get_blob(infer_request, input_name, &input_blob1); - ie_blob_buffer_t buffer; - ie_blob_get_buffer(input_blob1, &buffer); - // Original I64 precision was converted to I32 - int32_t* blob_internal_buffer = (int32_t*)buffer.buffer; - // Fill data ... - } - // fill second blob - ie_blob_t *input_blob2 = nullptr; - { - ie_infer_request_get_blob(infer_request, "data2", &input_blob2); - ie_blob_buffer_t buffer; - ie_blob_get_buffer(input_blob2, &buffer); - // Original I64 precision was converted to I32 - int32_t* blob_internal_buffer = (int32_t*)buffer.buffer; - // Fill data ... - } - - //! [ie:get_input_tensor] - - //! [ie:inference] - ie_infer_request_infer(infer_request); - //! [ie:inference] - - //! [ie:start_async_and_wait] - // NOTE: For demonstration purposes we are trying to set callback - ie_complete_call_back_t callback; - callback.completeCallBackFunc = completion_callback; - callback.args = infer_request; - ie_infer_set_completion_callback(infer_request, &callback); - // Start inference without blocking current thread - ie_infer_request_infer_async(infer_request); - // Wait for 10 milisecond - IEStatusCode waitStatus = ie_infer_request_wait(infer_request, 10); - // Wait for inference completion - ie_infer_request_wait(infer_request, -1); - //! [ie:start_async_and_wait] - - //! [ie:get_output_tensor] - // get output blob by name - ie_blob_t *output_blob = nullptr; - ie_infer_request_get_blob(infer_request, "output_name", &output_blob); - // get blob buffer - ie_blob_buffer_t out_buffer; - ie_blob_get_buffer(output_blob, &out_buffer); - // get data - float *data = (float *)(out_buffer.buffer); - // process output data - //! [ie:get_output_tensor] - - //! [ie:load_old_extension] - ie_core_add_extension(core, "path_to_extension_library.so", "CPU"); - //! 
[ie:load_old_extension] - ie_blob_free(&output_blob); - ie_blob_free(&input_blob2); - ie_blob_free(&input_blob1); - ie_network_name_free(&input_name); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); - return 0; -} diff --git a/docs/snippets/ov_preprocessing_migration.c b/docs/snippets/ov_preprocessing_migration.c index dfad9bb9669efe..e34b892823c780 100644 --- a/docs/snippets/ov_preprocessing_migration.c +++ b/docs/snippets/ov_preprocessing_migration.c @@ -145,11 +145,3 @@ int main_new() { return 0; } -int main_old() { - { - //! [c_api_ppp] - // No preprocessing related interfaces provided by C API 1.0 - //! [c_api_ppp] - } - return 0; -} diff --git a/docs/snippets/ov_properties_migration.c b/docs/snippets/ov_properties_migration.c index 0881a791c5dfbb..745d7bf8bbeb95 100644 --- a/docs/snippets/ov_properties_migration.c +++ b/docs/snippets/ov_properties_migration.c @@ -1,4 +1,3 @@ -#include #include int main_new() { @@ -57,64 +56,3 @@ ov_core_free(core); return 0; } - -int main_old() { -ie_core_t *core = NULL; -ie_core_create("", &core); - -//! [core_get_metric] -ie_param_t full_device_name; -full_device_name.params = NULL; -ie_core_get_metric(core, "CPU", "FULL_DEVICE_NAME", &full_device_name); -ie_param_free(&full_device_name); -//! [core_get_metric] - -//! [core_get_config] -ie_param_t num_streams; -num_streams.params = NULL; -ie_core_get_config(core, "CPU", "CPU_THROUGHPUT_STREAMS", &num_streams); -ie_param_free(&num_streams); -//! [core_get_config] - -//! [core_set_config] -ie_config_t config = {"PERF_COUNT", "YES", NULL}; -ie_core_set_config(core, &config, "CPU"); -//! [core_set_config] - -ie_network_t *network = NULL; -ie_core_read_network(core, "sample.xml", "sample.bin", &network); -//! 
[core_load_network] -ie_config_t config_1 = {"DEVICE_PRIORITIES", "CPU, GPU", NULL}; -ie_config_t config_2 = {"PERFORMANCE_HINT", "THROUGHPUT", &config_1}; -ie_config_t config_3 = {"ENFORCE_BF16", "NO", &config_2}; -ie_executable_network_t *exe_network = NULL; -ie_core_load_network(core, network, "MULTI", &config_3, &exe_network); -//! [core_load_network] - -//! [executable_network_set_config] -// turn CPU off for multi-device executio -ie_config_t config_param = {"DEVICE_PRIORITIES", "GPU", NULL}; -ie_exec_network_set_config(exe_network, &config_param); -//! [executable_network_set_config] - -{ -//! [executable_network_get_metric] -ie_param_t nireq; -nireq.params = NULL; -ie_exec_network_get_metric(exe_network, "OPTIMAL_NUMBER_OF_INFER_REQUESTS", &nireq); -ie_param_free(&nireq); -//! [executable_network_get_metric] -} - -{ -//! [executable_network_get_config] -ie_param_t perf_model; -perf_model.params = NULL; -ie_exec_network_get_config(exe_network, "PERFORMANCE_HINT", &perf_model); -//! [executable_network_get_config] -} -ie_exec_network_free(&exe_network); -ie_network_free(&network); -ie_core_free(&core); -return 0; -} diff --git a/src/bindings/c/README.md b/src/bindings/c/README.md index 01c0a98a51aaf3..c96b7b4d729043 100644 --- a/src/bindings/c/README.md +++ b/src/bindings/c/README.md @@ -29,7 +29,7 @@ OpenVINO C API has the following structure: * [src](./src) contains the implementations of all C APIs. * [tests](./tests) contains all tests for OpenVINO C APIs. [Learn more](./docs/how_to_write_unit_test.md). -> **NOTE**: Using API 2.0 is strongly recommended. Legacy API (for C) [header file](./include/c_api/ie_c_api.h), [source file](./src/ie_c_api.cpp), [unit test](./tests/ie_c_api_test.cpp) are also included in the component, but the legacy API is no longer extended. +> **NOTE**: Using API 2.0 is strongly recommended. Legacy API (for C) has been removed from 2024.0. 
## Tutorials diff --git a/src/bindings/c/docs/api_overview.md b/src/bindings/c/docs/api_overview.md index ee8d166bbc8112..f73247f3f8a926 100644 --- a/src/bindings/c/docs/api_overview.md +++ b/src/bindings/c/docs/api_overview.md @@ -1,327 +1,410 @@ -# Overview of Inference Engine C* API +# Overview of OpenVINO C* API 2.0 -> **NOTE**: It is a preview version of the Inference Engine C* API for evaluation purpose only. -> Module structure and API itself may be changed in future releases. - -This API provides a simplified interface for Inference Engine functionality that allows to: +This API provides a simplified interface for OpenVINO functionality that allows to: - handle the models -- load and configure Inference Engine plugins based on device names +- load and configure OpenVINO plugins based on device names - perform inference in synchronous and asynchronous modes with arbitrary number of infer requests (the number of infer requests may be limited by target device capabilities) ## Supported OSes -Currently the Inference Engine C* API is supported on Ubuntu* 16.04, Microsoft Windows* 10 and CentOS* 7.3 OSes. +Currently the OpenVINO C API is supported on Ubuntu* 18.04/20.04/22.04 Microsoft Windows* 10/11 and CentOS* 7.3/10.15 and above OSes. 
Supported Python* versions: -- On Ubuntu 16.04: 2.7, 3.5, 3.6 -- On Windows 10: 3.5, 3.6 -- On CentOS 7.3: 3.4, 3.5, 3.6 + - Ubuntu 22.04 long-term support (LTS), 64-bit (Kernel 5.15+) + - Ubuntu 20.04 long-term support (LTS), 64-bit (Kernel 5.15+) + - Ubuntu 18.04 long-term support (LTS) with limitations, 64-bit (Kernel 5.4+) + - Windows* 10 + - Windows* 11 + - macOS* 10.15 and above, 64-bit + - macOS 11 and above, ARM64 + - Red Hat Enterprise Linux* 8, 64-bit + - Debian 9 ARM64 and ARM + - CentOS 7 64-bit ## Setting Up the Environment -To configure the environment for the Inference Engine C* API, run: +To configure the environment for the OpenVINO C* API, run: -- On Ubuntu 16.04: `source /setupvars.sh .` -- On Windows 10: XXXX +- On Ubuntu 20.04/22.04: `source /setupvars.sh .` +- On Windows 10/11: `\setupvars.bat ` The script automatically detects latest installed C* version and configures required environment if the version is supported. -If you want to use certain version of C*, set the environment variable XXXXX -after running the environment configuration script. 
+ ## Struct ``` -typedef struct ie_core_version { +typedef struct ov_version { + + const char* buildNumber; -​ size_t major; + const char* description; -​ size_t minor; +} ov_version_t; +``` + +``` +typedef struct { -​ const char *build_number; + const char* device_name; -​ const char *description; + ov_version_t version; -}ie_core_version_t; +} ov_core_version_t; ``` ``` -typedef struct ie_config { +typedef struct { -​ char *name; + ov_core_version_t* versions; -​ char *value; + size_t size; -}ie_config_t; +} ov_core_version_list_t; ``` ``` -typedef struct ie_param { +typedef struct { -​ union { //To be continue, to collect metric and config parameters + char** devices; -​ }; + size_t size; -}ie_param_t; +} ov_available_devices_t; ``` ``` -typedef struct ie_param_config { +typedef struct ov_dimension { -​ char *name; + int64_t min; + + int64_t max; -​ ie_param_t *param; - -}ie_param_config_t; +} ov_dimension_t; ``` ``` -typedef struct desc { +typedef struct { + + int64_t rank; -​ char msg[256]; + int64_t* dims; -}desc_t; +} ov_shape_t; ``` ``` -typedef struct dimensions { +typedef struct ov_partial_shape { + + ov_rank_t rank; -​ size_t dims[8]; + ov_dimension_t* dims; -}dimensions_t; +} ov_partial_shape_t; ``` ``` -struct tensor_desc { +typedef struct { + + enum Status { + + NOT_RUN, -​ layout_t layout; + OPTIMIZED_OUT, -​ dimensions_t dims; + EXECUTED -​ precision_e precision; + } status; -}; + int64_t real_time; + int64_t cpu_time; + const char* node_name; + + const char* exec_type; + + const char* node_type; + +} ov_profiling_info_t; ``` ``` -typedef void (*completeCallBackFunc)(ie_infer_request_t *infer_request, int *status); +typedef struct { + + ov_profiling_info_t* profiling_infos; + + size_t size; + +} ov_profiling_info_list_t; ``` ``` -enum precision_e{ +typedef struct { -​ UNSPECIFIED = 255, /**< Unspecified value. Used by default */ + void(OPENVINO_C_API_CALLBACK* callback_func)(void* args); -​ MIXED = 0, /**< Mixed value. Can be received from network. 
No applicable for tensors */ + void* args; -​ FP32 = 10, /**< 32bit floating point value */ +} ov_callback_t; +``` -​ FP16 = 11, /**< 16bit floating point value */ +``` +typedef enum { - BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/ + UNDEFINED = 0U, //!< Undefined element type - FP64 = 13, /**< 64bit floating point value */ + DYNAMIC, //!< Dynamic element type -​ Q78 = 20, /**< 16bit specific signed fixed point precision */ + BOOLEAN, //!< boolean element type -​ I16 = 30, /**< 16bit signed integer value */ + BF16, //!< bf16 element type -​ U4 = 39, /**< 4bit unsigned integer value */ + F16, //!< f16 element type -​ U8 = 40, /**< 8bit unsigned integer value */ + F32, //!< f32 element type -​ I4 = 49, /**< 4bit signed integer value */ + F64, //!< f64 element type -​ I8 = 50, /**< 8bit signed integer value */ + I4, //!< i4 element type -​ U16 = 60, /**< 16bit unsigned integer value */ + I8, //!< i8 element type -​ I32 = 70, /**< 32bit signed integer value */ + I16, //!< i16 element type -​ I64 = 72, /**< 64bit signed integer value */ + I32, //!< i32 element type -​ U64 = 73, /**< 64bit unsigned integer value */ + I64, //!< i64 element type -​ U32 = 74, /**< 32bit unsigned integer value */ + U1, //!< binary element type -​ BIN = 71, /**< 1bit integer value */ + U4, //!< u4 element type -​ CUSTOM = 80 /**< custom precision has it's own name and size of elements */ + U8, //!< u8 element type -}; -``` + U16, //!< u16 element type + + U32, //!< u32 element type + U64, //!< u64 element type + + NF4, //!< nf4 element type + +} ov_element_type_e; ``` -enum layout_t { -​ ANY = 0, // "any" layout +``` +typedef enum { -​ // I/O data layouts + OK = 0, //!< SUCCESS -​ NCHW = 1, + GENERAL_ERROR = -1, //!< GENERAL_ERROR -​ NHWC = 2, + NOT_IMPLEMENTED = -2, //!< NOT_IMPLEMENTED -​ NCDHW = 3, + NETWORK_NOT_LOADED = -3, //!< NETWORK_NOT_LOADED -​ NDHWC = 4, + PARAMETER_MISMATCH = -4, //!< PARAMETER_MISMATCH -​ // weight layouts + NOT_FOUND = -5, 
//!< NOT_FOUND -​ OIHW = 64, + OUT_OF_BOUNDS = -6, //!< OUT_OF_BOUNDS -​ // Scalar + UNEXPECTED = -7, //!< UNEXPECTED -​ SCALAR = 95, + REQUEST_BUSY = -8, //!< REQUEST_BUSY -​ // bias layouts + RESULT_NOT_READY = -9, //!< RESULT_NOT_READY -​ C = 96, + NOT_ALLOCATED = -10, //!< NOT_ALLOCATED -​ // Single image layout (for mean image) + INFER_NOT_STARTED = -11, //!< INFER_NOT_STARTED -​ CHW = 128, + NETWORK_NOT_READ = -12, //!< NETWORK_NOT_READ -​ // 2D + INFER_CANCELLED = -13, //!< INFER_CANCELLED -​ HW = 192, + INVALID_C_PARAM = -14, //!< INVALID_C_PARAM -​ NC = 193, + UNKNOWN_C_ERROR = -15, //!< UNKNOWN_C_ERROR -​ CN = 194, + NOT_IMPLEMENT_C_METHOD = -16, //!< NOT_IMPLEMENT_C_METHOD + UNKNOW_EXCEPTION = -17, //!< UNKNOW_EXCEPTION -​ BLOCKED = 200, +} ov_status_e; -}; ``` ``` -enum colorformat_e { +typedef enum { + + UNDEFINE = 0U, //!< Undefine color format + + NV12_SINGLE_PLANE, //!< Image in NV12 format as single tensor + + NV12_TWO_PLANES, //!< Image in NV12 format represented as separate tensors for Y and UV planes. -​ RAW = 0u, ///< Plain blob (default), no extra color processing required + I420_SINGLE_PLANE, //!< Image in I420 (YUV) format as single tensor -​ RGB, ///< RGB color format + I420_THREE_PLANES, //!< Image in I420 format represented as separate tensors for Y, U and V planes. 
-​ BGR, ///< BGR color format, default in OpenVINO + RGB, //!< Image in RGB interleaved format (3 channels) -​ GRAY, ///< GRAY color format + BGR, //!< Image in BGR interleaved format (3 channels) -​ RGBX, ///< RGBX color format with X ignored during inference + GRAY, //!< Image in GRAY format (1 channel) -​ BGRX, ///< BGRX color format with X ignored during inference -}; + RGBX, //!< Image in RGBX interleaved format (4 channels) + + BGRX //!< Image in BGRX interleaved format (4 channels) + +} ov_color_format_e; ``` ``` -enum resize_alg_e { +typedef enum { -​ NO_RESIZE = 0, + RESIZE_LINEAR, //!< linear algorithm -​ RESIZE_BILINEAR, + RESIZE_CUBIC, //!< cubic algorithm -​ RESIZE_AREA + RESIZE_NEAREST //!< nearest algorithm -}; +} ov_preprocess_resize_algorithm_e; ``` +## Properties + +### common properties ``` -struct roi_e { +OPENVINO_C_VAR(const char*) ov_property_key_supported_properties; -​ size_t id; // ID of a roi +OPENVINO_C_VAR(const char*) ov_property_key_available_devices; -​ size_t posX; // W upper left coordinate of roi +OPENVINO_C_VAR(const char*) ov_property_key_optimal_number_of_infer_requests; -​ size_t posY; // H upper left coordinate of roi +OPENVINO_C_VAR(const char*) ov_property_key_range_for_async_infer_requests; -​ size_t sizeX; // W size of roi +OPENVINO_C_VAR(const char*) ov_property_key_range_for_streams; -​ size_t sizeY; // H size of roi +OPENVINO_C_VAR(const char*) ov_property_key_device_full_name; -}; -``` +OPENVINO_C_VAR(const char*) ov_property_key_device_capabilities; -``` -enum IEStatusCode { +OPENVINO_C_VAR(const char*) ov_property_key_model_name; -​ OK = 0, +OPENVINO_C_VAR(const char*) ov_property_key_optimal_batch_size; -​ GENERAL_ERROR = -1, +OPENVINO_C_VAR(const char*) ov_property_key_max_batch_size; -​ NOT_IMPLEMENTED = -2, +OPENVINO_C_VAR(const char*) ov_property_key_cache_dir; -​ NETWORK_NOT_LOADED = -3, +OPENVINO_C_VAR(const char*) ov_property_key_num_streams; -​ PARAMETER_MISMATCH = -4, +OPENVINO_C_VAR(const char*) 
ov_property_key_affinity; -​ NOT_FOUND = -5, +OPENVINO_C_VAR(const char*) ov_property_key_inference_num_threads; -​ OUT_OF_BOUNDS = -6, +OPENVINO_C_VAR(const char*) ov_property_key_hint_enable_cpu_pinning; -​ /* +OPENVINO_C_VAR(const char*) ov_property_key_hint_enable_hyper_threading; -​ \* @brief exception not of std::exception derived type was thrown +OPENVINO_C_VAR(const char*) ov_property_key_hint_performance_mode; -​ */ +OPENVINO_C_VAR(const char*) ov_property_key_hint_scheduling_core_type; -​ UNEXPECTED = -7, +OPENVINO_C_VAR(const char*) ov_property_key_hint_inference_precision; -​ REQUEST_BUSY = -8, +OPENVINO_C_VAR(const char*) ov_property_key_hint_num_requests; -​ RESULT_NOT_READY = -9, +OPENVINO_C_VAR(const char*) ov_property_key_log_level; -​ NOT_ALLOCATED = -10, +OPENVINO_C_VAR(const char*) ov_property_key_hint_model_priority; -​ INFER_NOT_STARTED = -11, +OPENVINO_C_VAR(const char*) ov_property_key_enable_profiling; -​ NETWORK_NOT_READ = -12 +OPENVINO_C_VAR(const char*) ov_property_key_device_priorities; -}; +OPENVINO_C_VAR(const char*) ov_property_key_hint_execution_mode; + +OPENVINO_C_VAR(const char*) ov_property_key_force_tbb_terminate; + +OPENVINO_C_VAR(const char*) ov_property_key_enable_mmap; + +OPENVINO_C_VAR(const char*) ov_property_key_auto_batch_timeout; +``` + +### AUTO plugin specified properties ``` +OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_device_bind_buffer; +OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_enable_startup_fallback; +OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_enable_runtime_fallback; +``` -- `const char *ie_c_api_version(void)` +### GPU plugin specified properties +``` +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_context_type; - - Description: Returns number of version that is exported. +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_context; - - Parameters: None. 
+OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_context_device_id; - - Return value: Version number of the API. +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_tile_id; - - Usage example: +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_queue; - ``` - const char *ver_num=ie_c_api_version(); - ``` +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_va_device; -## IECore +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_shared_mem_type; -This strcut represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_mem_handle; + +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_dev_object_handle; + +OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_va_plane; +``` + + +## OV Core + +This struct represents OpenVINO entity and allows you to manipulate with plugins using unified interfaces. ### Create -- `IEStatusCode ie_core_create(char *xml_config_file, ie_core_t *core_result)` +- `ov_status_e ov_core_create(ov_core_t** core)` - > Note: create an ie_core_t instance with default configuration when xml_config_file=null. + > Note: Constructs OpenVINO Core instance by default. + + - Parameters: + + - `core` - A pointer to the newly created `ov_core_t`. + + - Return value: Status code of the operation: OK(0) for success. + + +- `ov_status_e ov_core_create_with_config(const char* xml_config_file, ov_core_t** core)` + + > Note: Constructs OpenVINO Core instance using XML configuration file with devices description. - Parameters: - `xml_config_file`- A full path to`.xml` file containing plugins configuration. If the parameter is not specified, the default configuration is handled automatically. - - `core_result` - A pointer to the newly created `ie_core_t`. + - `core` - A pointer to the newly created `ov_core_t`. - Return value: Status code of the operation: OK(0) for success. 
- Usage examples: - Create an `ie_core_t` t instance with a custom configuration location specified: + Create an `ov_core_t` t instance with a custom configuration location specified: ``` char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml"; - ie_core_t ie; - IEStatusCode status = ie_core_create(xml_config_file,ie); + ov_core_t* core; + ov_status_e status = ov_core_create_with_config(xml_config_file, &core); ``` .`xml` file has the following structure: @@ -342,453 +425,673 @@ This strcut represents an Inference Engine entity and allows you to manipulate w ``` -### Methods +### Methods -- `IEStatusCode ie_core_get_versions(ie_core_t *core, char *device_name, ie_core_version_t *version_result)` +- `ov_status_e ov_get_openvino_version(ov_version_t* version)` - - Description: Returns a `ie_core_version_t` with versions of the plugin specified. + - Description: Get version of OpenVINO. - Parameters: - - `core` -A pointer to `ie_core_t` instance. - - `device_name` - Name of the registered plugin. - - `version_result` - Dictionary mapping a plugin name . + - `ov_version_t` - a pointer to the version. - Return value: Status of the operation: OK(0) for success. 
- Usage example: ``` - char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml"; - char *device_name="CPU"; - ie_core_t *ie; - ie_core_version_t *version; - IEStatusCode status= ie_core_create(xml_config_file, ie); - IEStatusCode status2=ie_core_get_versions(ie,device_name, version); - print("description:%s, major:%d, minor:%d, build_number:%s.\n",version- >description, version->major, version->minor, version->build_number); + ov_version_t version = {.description = NULL, .buildNumber = NULL}; + ov_get_openvino_version(&version); + printf("description : %s \n", version.description); + printf("build number: %s \n", version.buildNumber); + ov_version_free(&version); ``` -- `IEStatusCode ie_core_load_network(ie_core_t *core, ie_network_t *network, const char *device_name, ie_config_t config, ie_executable_network_t *exec_network_result)` +- `ov_status_e ov_core_read_model(const ov_core_t* core, const char* model_path, const char* bin_path, ov_model_t** model)` - - Description: Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name and creates an `ie_executable_network_t` instance of the `ie_network_t` struct. - You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware resources). + - Description: Reads models from IR / ONNX / PDPD / TF / TFLite formats to create ov_model_t. + You can create as many ov_model_t as you need and use them simultaneously (up to the limitation of the hardware resources). - Parameters: - - `core` - A pointer to `ie_core_t` instance. - - `network` - A pointer to `ie_network_t` instance. - - `device_name` - A device name of a target plugin. - - `config` - A dictionary of plugin configuration keys and their values. - - `exec_network_result` - A pointer to the newly loaded network. + - `core` - A pointer to `ov_core_t` instance. + - `model_path` - Path to a model. + - `bin_path` - Path to a data file. 
+ - `model` - A pointer to the newly created model. - Return value: Status code of the operation: OK(0) for success. - Usage example: ``` - + ov_core_t* core = NULL; + ov_core_create(&core); + ov_model_t* model = NULL; + ov_core_read_model(core, "model.xml", "model.bin", &model); ``` -- `IEStatusCode ie_core_set_config(ie_core_t *core, ie_config_t *ie_core_config, const char *device_name)` +- `ov_status_e ov_core_compile_model(const ov_core_t* core, + const ov_model_t* model, + const char* device_name, + const size_t property_args_size, + ov_compiled_model_t** compiled_model, + ...);` - - Description: Sets a configuration for a plugin. + - Description: Creates a compiled model from a source model object. - Parameters: - - `core`- A pointer to `ie_core_t` instance. - - `ie_core_config` - A dictionary of configuration parameters as keys and their values. - - `device_name` - A device name of a target plugin. + - `core`- A pointer to `ov_core_t` instance. + - `model` - Model object acquired from ov_core_read_model. + - `device_name` - Name of a device to load a model to. + - `property_args_size` - How many properties args will be passed, each property contains 2 args: key and value. + - `compiled_model` - A pointer to the newly created compiled_model. + - `...` - property parameter, optional pack of pairs: relevant only for this load operation. + - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin, const char *device_name )` - - Description: Registers a new device and a plugin which implement this device inside Inference Engine. 
+ - Usage example: + + ``` + ov_core_t* core = nullptr; + ov_core_create(&core); + ov_model_t* model = nullptr; + ov_core_read_model(core, xml_file_name.c_str(), bin_file_name.c_str(), &model); + const char* key = ov_property_key_hint_performance_mode; + const char* num = "LATENCY"; + ov_compiled_model_t* compiled_model = nullptr; + ov_core_compile_model(core, model, "CPU", 2, &compiled_model, key, num); + ... + ov_compiled_model_free(compiled_model); + ov_model_free(model); + ov_core_free(core); + ``` + +- `ov_status_e ov_core_set_property(const ov_core_t* core, const char* device_name, ...)` + + - Description: Sets properties for a device, acceptable keys can be found in ov_property_key_xxx. - Parameters: - - `core` - A pointer to `ie_core_t` instance. - - `plugin` - A path (absolute or relative) or name of a plugin. Depending on platform, plugin is wrapped with shared library suffix and prefix to identify library full name - - `device_name` - A target device name for the plugin. If not specified, the method registers. - a plugin with the default name. + - `core` - A pointer to `ov_core_t` instance. + - `device_name` - Name of a device. + - `...` - property parameters, optional pack of pairs: . - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_file)` - - Description: Registers plugins specified in an `.xml` configuration file + - Usage example: + + ``` + ov_core_t* core = nullptr; + ov_core_create(&core); + const char* key_1 = ov_property_key_inference_num_threads; + const char* value_1 = "12"; + const char* key_2 = ov_property_key_num_streams; + const char* value_2 = "7"; + ov_core_set_property(core, "CPU", key_1, value_1, key_2, value_2); + ... 
+ ov_core_free(core); + ``` + +- `ov_status_e ov_core_get_property(const ov_core_t* core, const char* device_name, const char* property_key, char** property_value)` + + - Description: Gets properties related to device behaviour. - Parameters: - - `core` - A pointer to `ie_core_t` instance. - - `xml_config_file` - A full path to `.xml` file containing plugins configuration. + - `core` - A pointer to `ov_core_t` instance. + - `device_name` - Name of a device. + - `property_key` - Property key. + - `property_value` - A pointer to property value with string format. - Return value: Status code of the operation: 0 for success. -- `IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)` + - Usage example: + + ``` + ov_core_t* core = nullptr; + ov_core_create(&core); + const char* key = ov_property_key_hint_performance_mode; + const char* mode = "LATENCY"; + ov_core_set_property(core, "CPU", key, mode); + char* ret = nullptr; + ov_core_get_property(core, "CPU", key, &ret); + ov_free(ret); + ... + ov_core_free(core); + ``` - - Description: Unregisters a plugin with a specified device name +- `ov_status_e ov_core_import_model(const ov_core_t* core, + const char* content, + const size_t content_size, + const char* device_name, + ov_compiled_model_t** compiled_model);` + + - Description: Imports a compiled model from the previously exported one. - Parameters: - - `core` - A pointer `ie_core_t` instance. - - `device_name` - A device name of the plugin to unregister. + - `core` - A pointer `ov_core_t` instance. + - `content` - A pointer to content of the exported model. + - `content_size` - Number of bytes in the exported network. + - `device_name` - Name of a device to import a compiled model for. + - `compiled_model` - A pointer to the newly created compiled_model. - Return value: Status code of the operation: OK(0) for success. 
-- `IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name)` +- `ov_status_e ov_core_get_versions_by_device_name(const ov_core_t* core, const char* device_name, ov_core_version_list_t* versions)` - - Description: Loads extension library to the plugin with a specified device name. + - Description: Returns device plugins version information. - Parameters: - - `core` - A pointer `ie_core_t` instance. - - `extension_path` - Path to the extensions library file to load to a plugin. - - `device_name` - A device name of a plugin to load the extensions to. + - `core` - A pointer `ov_core_t` instance. + - `device_name` - A device name to identify a plugin. + - `versions` - A pointer to versions corresponding to device_name. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_core_get_metric(ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result)` - - Description: Gets a general runtime metric for dedicated hardware. Enables to request common device properties, which are `ie_executable_network_t` agnostic, such as device name, temperature, and other devices-specific values. +- `ov_status_e ov_core_create_context(const ov_core_t* core, + const char* device_name, + const size_t context_args_size, + ov_remote_context_t** context, + ...);` + + - Description: Creates a new remote shared context object on the specified accelerator device using specified plugin-specific low-level device API parameters (device handle, pointer, context, etc.). - Parameters: - - `core` - A pointer `ie_core_t` instance. - - `device_name` - A name of a device to get a metric value. - - `metric_name` - A metric name to request. - - `param_result` - A metric value corresponding to a metric key. + - `core` - A pointer `ov_core_t` instance. + - `device_name` - Device name to identify a plugin. + - `context_args_size` - How many property args will be for this remote context creation. 
+ - `context` - A pointer to the newly created remote context. + - `...` - variadic parameters, the actual property parameters for remote context creation - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_core_get_config(ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result)` - - Description: Gets a configuration dedicated to device behavior. The method targets to extract information which can be set via SetConfig method. +- `ov_status_e ov_core_compile_model_with_context(const ov_core_t* core, + const ov_model_t* model, + const ov_remote_context_t* context, + const size_t property_args_size, + ov_compiled_model_t** compiled_model, + ...);` + + - Description: Creates a compiled model from a source model within a specified remote context. - Parameters: - - `core` - A pointer `ie_core_t` instance. - - `device_name` - A name of a device to get a metric value. - - `config_name` - A configuration value corresponding to a configuration key. - - `param_result` - A metric value corresponding to a metric key. + - `core` - A pointer `ov_core_t` instance. + - `model` - Model object acquired from ov_core_read_model. + - `context` - A pointer to the remote context to compile the model within. + - `property_args_size` - How many args will be for this compiled model. + - `compiled_model` - A pointer to the newly created compiled_model. + - `...` - variadic parameters, the actual property parameters for the compilation - Return value: Status code of the operation: OK(0) for success. +## OV Model - -## IENetwork - -This struct contains the information about the network model read from IR and allows you to manipulate with some model parameters such as layers affinity and output layers. +This struct contains the information about the model read from IR and allows you to manipulate with some model parameters such as layers affinity and output layers.
### Methods -- `IEStatusCode ie_network_read(char *xml, char *weights_file, ie_network_t *network_result)` - - Description: Reads the model from the `.xml` and `.bin` files of the IR. +- `ov_status_e ov_model_free(ov_model_t* model)` + - Description: Release the memory allocated by ov_model_t. - Parameters: - - `xml_file` - `.xml` file's path of the IR. - - `weights_file` - `.bin` file's path of the IR. - - `network_result` - A pointer to the newly created network. + - `model` - A pointer to the ov_model_t to free memory.. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_free(ie_network_t *network)` - - Description: When network is loaded into the Inference Engine, it is not required anymore and should be released. + +- `ov_status_e ov_model_const_input(const ov_model_t* model, ov_output_const_port_t** input_port);` + - Description: Get a const input port of ov_model_t,which only support single input model. - Parameters: - - `network` - The pointer to the instance of the `ie_network_t` to free. + - `model` - A pointer to the ov_model_t. + - `input_port` - A pointer to the `ov_output_const_port_t`. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_input_numbers(ie_network_t *network, size_t *size_result)` - - Description: Gets number of inputs for the `IENetwork` instance. + +- `ov_status_e ov_model_input(const ov_model_t* model, ov_output_port_t** input_port);` + - Description: Get single input port of ov_model_t, which only support single input model. - Parameters: - - `network` - The instance of the `ie_network_t` to get size of input information for this instance. - - `size_result` - A number of the instance's input information. + - `model` - A pointer to the ov_model_t. + - `input_port` - A pointer to the `ov_output_port_t`. - Return value: Status code of the operation: OK(0) for success. 
-- `IEStatusCode ie_network_get_input_name(ie_network_t *network, size_t number, char *name_result)` - - Description: Gets name corresponding to the "number". + +- `ov_status_e ov_model_input_by_name(const ov_model_t* model, const char* tensor_name, ov_output_port_t** input_port)` + - Description: Get an input port of ov_model_t by name. - Parameters: - - `network` - The instance of the `ie_network_t` to get input information. - - `number` - An id of input information . - - `name_result` - Input name corresponding to the "number". + - `model` - A pointer to the ov_model_t. + - `tensor_name` - Input tensor name (char *). + - `input_port` - A pointer to the `ov_output_port_t`. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_output_numbers(ie_network_t *network, size_t size_result)` - - Description: Gets number of output for the `ie_network_t` instance. + +- `ov_status_e ov_model_input_by_index(const ov_model_t* model, const size_t index, ov_output_port_t** input_port)` + - Description: Get an input port of ov_model_t by index. - Parameters: - - `network` - The instance of the `ie_network_t` to get size of output information for this instance. - - `size_result` - A number of the instance's output information. + - `model` - A pointer to the ov_model_t. + - `index` - Input tensor index. + - `input_port` - A pointer to the `ov_output_port_t`. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_output_name(ie_network_t *network, size_t number, char *name_result)` - - Description: Gets output name corresponding to the "number". + +- `ov_status_e ov_model_const_output(const ov_model_t* model, ov_output_const_port_t** output_port);` + - Description: Get a single const output port of ov_model_t, which only support single output model. - Parameters: - - `network` - The instance of the `ie_network_t` to get out information of nth layer for this instance.
- - `number` - An id of output information. - - `name_result` - A output name corresponding to the "number". + - `model` - A pointer to the ov_model_t. + - `output_port` - A pointer to the ov_output_const_port_t. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_input_precision(ie_network_t *network, char *input_name, precision_e *prec_result)` - - Description: Gets a precision of the input data named "input_name". + +- `ov_status_e ov_model_output(const ov_model_t* model, ov_output_port_t** output_port);` + - Description: Get a single output port of ov_model_t, which only support single output model. - Parameters: - - `network` - A pointer to ie_network_t instance. - - `input_name` - Name of input data. - - `prec_result` - A pointer to the precision used for input blob creation. + - `model` - A pointer to the ov_model_t. + - `output_port` - A pointer to the ov_output_port_t. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_input_precision(ie_network_t *network, char *input_name, precision_e p)` - - Description: Changes the precision of the input data named "input_name". + +- `ov_status_e ov_model_inputs_size(const ov_model_t* model, size_t* input_size);` + - Description: Get the input size of ov_model_t. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `p` - A new precision of the input data to set (eg. precision_e.FP16). + - `model` - A pointer to the ov_model_t. + - `input_size` - The model's input size. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_input_layout(ie_network_t *network, char *input_name, layout_t *layout_result)` - - Description: Gets a layout of the input data named "input_name". + +- `ov_status_e ov_model_outputs_size(const ov_model_t* model, size_t* output_size);` + - Description: Get the output size of ov_model_t.
- Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `layout_result` - A pointer to the layout used for input blob creation. + - `model` - A pointer to the ov_model_t. + - `output_size` - The model's output size. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_input_layout(ie_network_t *network, char *input_name, layout_t l)` - - Description: Changes the layout of the input data named "input_name". This function should be called before loading the network to the plugin + +- `bool ov_model_is_dynamic(const ov_model_t* model)` + - Description: Returns true if any of the ops defined in the model is dynamic shape. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `layout` - Network layer layout (eg. layout_t.NCHW). - - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_input_dims(ie_network_t *network, char *input_name, dimensions_t *dims_result)` - - Description: Gets dimensions/shape of the input data with reversed order. + - `model` - A pointer to the ov_model_t. + - Return value: true if model contains dynamic shapes. + +- `ov_status_e ov_model_reshape(const ov_model_t* model, + const char** tensor_names, + const ov_partial_shape_t* partial_shapes, + size_t size)` + - Description: Do reshape in model with a list of . - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `dims_result` - A pointer to the dimensions used for input blob creation. + - `model` - A pointer to the ov_model_t. + - `tensor_names` - The list of input tensor names. + - `partialShape` - A PartialShape list. + - `size` - The item count in the list. - Return value: Status code of the operation: OK(0) for success. 
-- `IEStatusCode ie_network_get_input_resize_algorithm(ie_network_t *network, char *input_name, resize_alg_e *resize_alg_result)` - - Description: Gets pre-configured resize algorithm. + +- `ov_status_e ov_model_get_friendly_name(const ov_model_t* model, char** friendly_name)` + - Description: Gets the friendly name for a model. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `resize_alg_result` - The pointer to the resize algorithm used for input blob creation. + - `model` - A pointer to the ov_model_t. + - `friendly_name` - The model's friendly name. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t *network, char *input_name, resize_alg_e resize_algo)` - - Description: Sets resize algorithm to be used during pre-processing + + +## Node + +This struct contains the information about the model's port. + +### Methods + +- `ov_status_e ov_const_port_get_shape(const ov_output_const_port_t* port, ov_shape_t* tensor_shape)` + - Description: Get the shape of port object. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `resize_algo` - Resize algorithm. + - `port` - A pointer to ov_output_const_port_t. + - `tensor_shape` - Returned tensor shape. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_color_format(ie_network_t *network, char *input_name, colorformat_e *colformat_result)` - - Description: Gets color format of the input data named "input_name". + +- `ov_status_e ov_port_get_shape(const ov_output_port_t* port, ov_shape_t* tensor_shape)` + - Description: Get the shape of port object. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input` - Name of input data. - - `colformat_result` - Input color format of the input data named "input_name". + - `port` - A pointer to ov_output_port_t. 
+ - `tensor_shape` - Returned tensor shape. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_color_format(ie_network_t *network, char *input_name, colorformat_e color_format)` - - Description: Changes the color format of the input data named "input_name". + +- `ov_status_e ov_port_get_any_name(const ov_output_const_port_t* port, char** tensor_name)` + - Description: Get the tensor name of port. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `input_name` - Name of input data. - - `color_format` - Color format of the input data . + - `port` - A pointer to ov_output_port_t. + - `tensor_name` - Returned tensor name. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_output_precision(ie_network_t *network, char *output_name, precision_e *prec_result)` - - Description: Get output precision of the output data named "output_name". + +- `ov_status_e ov_port_get_partial_shape(const ov_output_const_port_t* port, ov_partial_shape_t* partial_shape)` + - Description: Get the partial shape of port. - Parameters: - - `network` - A pointer `ie_network_t` instance. - - `output_name` - Name of output date. - - `precision_e` - Output precision of the output data named "output_name". + - `port` - A pointer to ov_output_const_port_t. + - `partial_shape` - Partial shape. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_output_precision(ie_network_t *network, char *output_name, precision_e p)` - - Description: Sets a precision type of the output date named "output_name". + +- `ov_status_e ov_port_get_element_type(const ov_output_const_port_t* port, ov_element_type_e* tensor_type)` + - Description: Get the tensor type of port. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `outputName` - Name of output data. - - `p` - Precision of the output data (eg. precision_e.FP16). 
+ - `port` - A pointer to ov_output_const_port_t. + - `tensor_type` - Returned tensor type. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_output_layout(ie_network_t *network, char *output_name, layout_t *layout_result)` - - Description: Get output layout of the output date named "output_name" in the network. + +- `void ov_output_port_free(ov_output_port_t* port)` + - Description: free port object. + - Parameters: + - `port` - A pointer to ov_output_port_t. + - Return value: no return. + +- `void ov_output_const_port_free(ov_output_const_port_t* port)` + - Description: free const port object. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `output_name` - Name of output data. - - `layout_result` - Layout value of the output data named "output_name". + - `port` - A pointer to ov_output_const_port_t. + - Return value: no return. + +## CompiledModel + +This struct represents a compiled model instance loaded to plugin and ready for inference. + +### Methods + +- `ov_status_e ov_compiled_model_inputs_size(const ov_compiled_model_t* compiled_model, size_t* size)` + - Description: Get the input size of ov_compiled_model_t. + - Parameters: + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `input_size` - The compiled_model's input size. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_set_output_layout(ie_network_t *network, char *output_name, c l)` - - Description: Sets the layout value for output data named "output_name". + +- `ov_status_e ov_compiled_model_input(const ov_compiled_model_t* compiled_model, ov_output_const_port_t** input_port)` + - Description: - Get the single const input port of ov_compiled_model_t, which only support single input model. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `output_name` - Name of output data. - - `l` - Layout value to set (eg. output_name.NCHW). 
+ - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `input_port` - A pointer to the `ov_output_const_port_t` instance. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_network_get_output_dims(ie_network_t *network, char *output_name, dimensions_t *dims_result)` - - Description: Get output dimension of output data named "output_name" in the network. + +- `ov_status_e ov_compiled_model_input_by_index(const ov_compiled_model_t* compiled_model, + const size_t index, + ov_output_const_port_t** input_port)` + - Description: Get a const input port of ov_compiled_model_t by port index. - Parameters: - - `network` - A pointer to `ie_network_t` instance. - - `output_name` - Name of output data. - - `dims_result` - Dimensions value of the output data named "output_name". + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `index`: Input index. + - `input_port` - A pointer to the `ov_output_const_port_t` instance. - Return value: Status code of the operation: OK(0) for success. -## ExecutableNetwork +- `ov_status_e ov_compiled_model_input_by_name(const ov_compiled_model_t* compiled_model, + const char* name, + ov_output_const_port_t** input_port)` + - Description: - Get a const input port of ov_compiled_model_t by name. + - Parameters: + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `name` - input tensor name. + - `input_port` - A pointer to the `ov_output_const_port_t` instance. + - Return value: Status code of the operation: OK(0) for success. -This struct represents a network instance loaded to plugin and ready for inference. +- `ov_compiled_model_outputs_size(const ov_compiled_model_t* compiled_model, size_t* size)` + - Description: - Get the output size of ov_compiled_model_t. + - Parameters: + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `size` - The compiled_model's output size. 
+ - Return value: Status code of the operation: OK(0) for success. -### Methods +- `ov_status_e ov_compiled_model_output(const ov_compiled_model_t* compiled_model, ov_output_const_port_t** output_port)` + - Description: - Get the single const output port of ov_compiled_model_t, which only support single output model. + - Parameters: + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `output_port` - A pointer to the `ov_output_const_port_t` instance. + - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, desc_t *desc, ie_infer_request_t **req)` +- `ov_status_e ov_compiled_model_output_by_index(const ov_compiled_model_t* compiled_model, + const size_t index, + ov_output_const_port_t** output_port)` + - Description: Get a const output port of ov_compiled_model_t by port index. + - Parameters: + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `index`: Output index. + - `output_port` - A pointer to the `ov_output_const_port_t` instance. + - Return value: Status code of the operation: OK(0) for success. - - Description: Creates an inference request instance used to infer the network. The created request has allocated input and output blobs (that can be changed later). +- `ov_status_e ov_compiled_model_output_by_name(const ov_compiled_model_t* compiled_model, + const char* name, + ov_output_const_port_t** output_port)` + - Description: - Get a const output port of ov_compiled_model_t by name. - Parameters: - - `ie_exec_network` - A pointer to `ie_executable_network_t` instance. - - `desc` - A pointer to a `desc_t` instance. - - `req` - A pointer to the newly created `ie_infer_request_t` instance. + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `name` - output tensor name. + - `output_port` - A pointer to the `ov_output_const_port_t` instance.
- Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_exec_network_get_metric(ie_executable_network_t *ie_exec_network, const char *metric_name, ie_param_t *param_result)` - - Description: - Gets general runtime metric for an executable network. It can be network name, actual device ID on which executable network is running or all other properties which cannot be changed dynamically. +- `ov_status_e ov_compiled_model_get_runtime_model(const ov_compiled_model_t* compiled_model, ov_model_t** model)` + - Description: - Gets runtime model information from a device. - Parameters: - - `ie_exec_network`: A pointer to `ie_executable_network_t` instance. - - `metric_name` - A metric name to request. - - `param_result` - A metric value corresponding to a metric key. + - `compiled_model` - A pointer to the `ov_compiled_model_t` instance. + - `model` - A pointer to the `ov_model_t` instance. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, ie_param_config_t *param_config, desc_t *desc)` - - Description: Sets a configuration for current executable network. +- `ov_status_e ov_compiled_model_create_infer_request(const ov_compiled_model_t* compiled_model, ov_infer_request_t** infer_request)` + - Description: - Creates an inference request object used to infer the compiled model. - Parameters: - - `ie_exec_network`: A pointer to `ie_executable_network_t` instance. - - `config`: An config for current executable network. + - `compiled_model` - A pointer to `ov_compiled_model_t` instance. + - `infer_request` - A pointer to `ov_infer_request_t` instance. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_exec_network_get_config(ie_executable_network_t *ie_exec_network, const char *metric_config, ie_param_t *param_result)` - - Description: - Gets configuration for current executable network. 
The method is responsible to extract information - - which affects executable network execution +- `ov_status_e ov_compiled_model_set_property(const ov_compiled_model_t* compiled_model, ...)` + - Description: - Sets properties for a device, acceptable keys can be found in ov_property_key_xxx. - Parameters: - - `ie_exec_network` - A pointer to `ie_executable_network_t` instance. - - `metric_config` - A configuration parameter name to request. - - `param_result` - A configuration value corresponding to a configuration key. + - `compiled_model` - A pointer to `ov_compiled_model_t` instance. + - `...` variadic parameters, in pairs of property key followed by property value. - Return value: Status code of the operation: OK(0) for success. +- `ov_status_e ov_compiled_model_get_property(const ov_compiled_model_t* compiled_model, + const char* property_key, + char** property_value)` + - Description: - Gets properties for current compiled model. + - Parameters: + - `compiled_model` - A pointer to `ov_compiled_model_t` instance. + - `property_key` - Property key. + - `property_value` - A pointer to property value. + - Return value: Status code of the operation: OK(0) for success. +- `ov_status_e ov_compiled_model_export_model(const ov_compiled_model_t* compiled_model, const char* export_model_path)` + - Description: - Exports the current compiled model to an output stream `std::ostream`. + - Parameters: + - `compiled_model` - A pointer to `ov_compiled_model_t` instance. + - `export_model_path` - Path to the file. + - Return value: Status code of the operation: OK(0) for success. +- `void ov_compiled_model_free(ov_compiled_model_t* compiled_model)` + - Description: - Release the memory allocated by `ov_compiled_model_t`. + - Parameters: + - `compiled_model` - A pointer to `ov_compiled_model_t` instance. + - Return value: None ## InferRequest -This struct provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution and to set and get output data.
+This struct provides an interface to infer requests of `ov_compiled_model_t` and serves to handle infer requests execution and to set and get output data. ### Methods -- `IEStatusCode *ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob_result)` +- `ov_status_e ov_infer_request_set_tensor(ov_infer_request_t* infer_request, const char* tensor_name, const ov_tensor_t* tensor)` - - Description: Get a Blob corresponding to blob name. + - Description: Set an input/output tensor to infer on by the name of tensor. - Parameters: - - `infer_request` - A pointer to `ie_infer_request_t` instance - - `name` - Blob name. - - `blob_result` - A pointer to the blob corresponding to the blob name. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `tensor_name` - Name of the input or output tensor. + - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, ie_blob_t *blob)` +- `ov_status_e ov_infer_request_set_tensor_by_port(ov_infer_request_t* infer_request, + const ov_output_port_t* port, + const ov_tensor_t* tensor)` - - Description: Sets the blob in a inference request. + - Description: Set an input/output tensor to infer request for the port. - Parameters: - - `infer_request`: A pointer to `ie_infer_request_t` instance. - - `blob ` - A pointer to `ie_blob_t` instance. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `port ` - Port of the input or output tensor, which can be got by calling ov_model_t/ov_compiled_model_t interface. + - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. 
-- `IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request)` +- `ov_status_e ov_infer_request_set_input_tensor_by_index(ov_infer_request_t* infer_request, + const size_t idx, + const ov_tensor_t* tensor)` - - Description: Starts synchronous inference of the infer request and fill outputs array + - Description: Set an input tensor to infer on by the index of tensor. - Parameters: - - `infer_request`: A pointer to `ie_infer_request_t` instance. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `idx` - Index of the input port. + - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request)` +- `ov_status_e ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor)` - - Description: Starts asynchronous inference of the infer request and fill outputs array. + - Description: Set an input tensor for the model with single input to infer on. - Parameters: - - `infer_request` - A pointer to `ie_infer_request_t` instance. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,completeCallBackFunc callback)` +- `ov_status_e ov_infer_request_set_output_tensor_by_index(ov_infer_request_t* infer_request, + const size_t idx, + const ov_tensor_t* tensor)` - - Description: Sets a callback function that will be called on success or failure of asynchronous request. + - Description: Set an output tensor to infer by the index of output tensor. - Parameters: - - `infer_request` - A pointer to a `ie_infer_request_t` instance. - - `callback` - A function to be called. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `idx` - Index of the input port. 
+ - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, int64_t timeout)` +- `ov_status_e ov_infer_request_set_output_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor)` - - Description: Waits for the result to become available. Blocks until specified timeout elapses or the result becomes available, whichever comes first. + - Description: Set an output tensor to infer models with single output. + - Parameters: + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `tensor` - Reference to the tensor. + - Return value: Status code of the operation: OK(0) for success. - NOTE:** There are special values of the timeout parameter: +- `ov_status_e ov_infer_request_get_tensor(const ov_infer_request_t* infer_request, const char* tensor_name, ov_tensor_t** tensor)` - - 0 - Immediately returns the inference status. It does not block or interrupt execution. - ind statuses meaning. - - -1 - Waits until inference result becomes available (default value). + - Description: Get an input/output tensor by the name of tensor. + - Parameters: + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `tensor_name` - Name of the input or output tensor. + - `tensor` - Reference to the tensor. + - Return value: Status code of the operation: OK(0) for success. +- `ov_status_e ov_infer_request_get_tensor_by_port(const ov_infer_request_t* infer_request, + const ov_output_port_t* port, + ov_tensor_t** tensor)` + + - Description: Get an input/output tensor by port. - Parameters: + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `port ` - Port of the tensor to get. + - `tensor` - Reference to the tensor. + - Return value: Status code of the operation: OK(0) for success. - - `infer_request` -A pointer to a `ie_infer_request_t` instance. 
- - `timeout` - Time to wait in milliseconds or special (0, -1) cases described above. If not specified, `timeout` value is set to -1 by default. +- `ov_status_e ov_infer_request_get_input_tensor_by_index(const ov_infer_request_t* infer_request, + const size_t idx, + ov_tensor_t** tensor)` + - Description: Get an input tensor by the index of input tensor. + - Parameters: + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `idx ` - Index of the tensor to get. + - `tensor` - Reference to the tensor. - Return value: Status code of the operation: OK(0) for success. -## Blob +- `ov_status_e ov_infer_request_get_output_tensor_by_index(const ov_infer_request_t* infer_request, + const size_t idx, + ov_tensor_t** tensor)` -### Methods + - Description: Get an output tensor by the index of output tensor. + - Parameters: + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `idx ` - Index of the tensor to get. + - `tensor` - Reference to the tensor. + - Return value: Status code of the operation: OK(0) for success. -/*The structure of the blobs has complex structure, below functions represent creation of memory blobs from the scratch or on top of existing memory These functions return handle to the blob to be used in other ie_* functions*/ +- `ov_status_e ov_infer_request_infer(ov_infer_request_t* infer_request)` -- `IEStatusCode make_memory_blob(const tensor_desc *tensorDesc, ie_blob_t *blob_result)` - - Description: Creates a `ie_blob_t` instance with the specified dimensions and layout but does not allocate the memory. Use the allocate() method to allocate memory. `tensor_desc` Defines the layout and dims of the blob. + - Description: Infer specified input(s) in synchronous mode. - Parameters: - - `tensorDesc` - Defines the layout and dims of the blob. - - `blob_result` - A pointer to an empty ie_blob_t instance. - - Return value: Status code of the operation: OK(0) for success. 
+ - `infer_request` - A pointer to `ov_infer_request_t` instance. + - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode make_memory_blob_from_preallocated_memory(const tensor_desc *tensorDesc, void *ptr, size_t size = 0, ie_blob_t *blob_result)` - - Description: The constructor creates a `ie_blob_t` instance with the specified dimensions and layout on the pre-allocated memory. The allocate() call is not required. +- `ov_status_e ov_infer_request_start_async(ov_infer_request_t* infer_request)` + + - Description: Start inference of specified input(s) in asynchronous mode. - Parameters: - - `tensorDesc` - Tensor description for Blob creation. - - `ptr` - A pointer to the pre-allocated memory. - - `size` -Length of the pre-allocated array. If not set, size is assumed equal to the dot product of dims. - - `blob_result` - A pointer to the newly created ie_blob_t instance. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - Return value: Status code of the operation: OK(0) for success. + +- `ov_status_e ov_infer_request_cancel(ov_infer_request_t* infer_request)` -- `IEStatusCode make_memory_blob_with_roi(const ie_blob_t **inputBlob, const roi_e *roi, ie_blob_t *blob_result)` - - Description: Creates a blob describing given roi instance based on the given blob with pre-allocated memory. + - Description: Cancel inference request. - Parameters: - - `inputBlob` - Original blob with pre-allocated memory. - - `roi` - A roi object inside of the original blob. - - `blob_result` - A pointer to the newly created blob. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - Return value: Status code of the operation: OK(0) for success. 
+ +- `ov_status_e ov_infer_request_wait_for(ov_infer_request_t* infer_request, const int64_t timeout);` -- `IEStatusCode ie_blob_size(ie_blob_t *blob, int *size_result)` - - Description: Gets the total number of elements, which is a product of all the dimensions. + - Description: Waits for the result to become available. Blocks until the specified timeout has elapsed or the result becomes available, whichever comes first. - Parameters: - - `blob` - A pointer to the blob. - - `size_result` - The total number of elements. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `timeout` - Maximum duration, in milliseconds, to block for. + - Return value: Status code of the operation: OK(0) for success. + +- `ov_status_e ov_infer_request_set_callback(ov_infer_request_t* infer_request, const ov_callback_t* callback)` -- `IEStatusCode ie_blob_byte_size(ie_blob_t *blob, int *bsize_result)` - - Description: Gets the size of the current Blob in bytes. + - Description: Set callback function, which will be called when inference is done. - Parameters: - - `blob` - A pointer to the blob. - - `bsize_result` - The size of the current Blob in bytes. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `callback` A function to be called. + - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_blob_allocate(ie_blob_t *blob)` - - Description: Allocates memory for blob. +- `void ov_infer_request_free(ov_infer_request_t* infer_request)` + + - Description: Release the memory allocated by ov_infer_request_t. - Parameters: - - `blob` - A pointer to an empty blob. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - Return value: None. 
+ +- `void ov_infer_request_get_profiling_info(const ov_infer_request_t* infer_request, ov_profiling_info_list_t* profiling_infos)` -- `IEStatusCode ie_blob_deallocate(ie_blob_t *blob)` - - Description: Releases previously allocated data. + - Description: Query performance measures per layer to identify the most time consuming operation. - Parameters: - - `blob` - A pointer to the blob. - - Return value: Status code of the operation: OK(0) for success. + - `infer_request` - A pointer to `ov_infer_request_t` instance. + - `profiling_infos` - Vector of profiling information for operations in a model. + - Return value: None. + +## Tensor -- `IEStatusCode ie_blob_buffer(ie_blob_t *blob, void *buffer)` - - Description: Gets access to the allocated memory . +### Methods + +- `ov_status_e ov_tensor_create_from_host_ptr(const ov_element_type_e type, + const ov_shape_t shape, + void* host_ptr, + ov_tensor_t** tensor)` + - Description: Constructs Tensor using element type, shape and external host ptr. - Parameters: - - `blob` - A pointer to the blob. - - `buffer` - A pointer to the coped date from the given pointer to the blob. + - `type` - Tensor element type + - `shape` - Tensor shape + - `host_ptr` - Pointer to pre-allocated host memory + - `tensor` - A point to ov_tensor_t - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_blob_cbuffer(ie_blob_t *blob, const void *cbuffer)` - - Description: Gets read-only access to the allocated memory. +- `ov_status_e ov_tensor_create(const ov_element_type_e type, const ov_shape_t shape, ov_tensor_t** tensor)` + - Description: Constructs Tensor using element type and shape. Allocate internal host storage using default allocator. - Parameters: - - `blob` - A pointer to the blob. - - `cbuffer` - A pointer to the coped date from the given pointer to the blob and the date is read-only. 
+ - `type` - Tensor element type + - `shape` - Tensor shape + - `tensor` - A point to ov_tensor_t - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_blob_get_dims(ie_blob_t *blob, dimensions_t *dims_result)` - - Description: Gets dimensions of blob instance's tensor. +- `ov_status_e ov_tensor_get_shape(const ov_tensor_t* tensor, ov_shape_t* shape)` + - Description: Get shape for tensor. - Parameters: - - `blob` - A pointer to the blob. - - `dims_result` - A pointer to the dimensions of blob instance's tensor. - - Return value: Status code of the operation: OK(0) for success. + - `tensor` - A point to ov_tensor_t + - `shape` - Tensor shape + - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_blob_get_layout(ie_blob_t *blob, layout_t *layout_result)` - - Description: Gets layout of blob instance's tensor. +- `ov_status_e ov_tensor_get_element_type(const ov_tensor_t* tensor, ov_element_type_e* type)` + - Description: Get type for tensor. - Parameters: - - `blob` - A pointer to the blob. - - `layout_result` - A pointer to the layout of blob instance's tensor. + - `tensor` - A point to ov_tensor_t + - `type` - Tensor element type. - Return value: Status code of the operation: OK(0) for success. -- `IEStatusCode ie_blob_get_precision(ie_blob_t *blob, precision_e *prec_result)` - - Description: Gets precision of blob instance's tensor. +- `ov_status_e ov_tensor_get_byte_size(const ov_tensor_t* tensor, size_t* byte_size)` + - Description: Get byte size for tensor. - Parameters: - - `blob` - A pointer to the blob. - - `prec_result` - A pointer to the precision of blob instance's tensor. - - Return value: Status code of the operation: OK(0) for success. + - `tensor` - A point to ov_tensor_t + - `byte_size` - The size of the current Tensor in bytes. + - Return value: Status code of the operation: OK(0) for success. 
+ +- `ov_status_e ov_tensor_data(const ov_tensor_t* tensor, void** data)` + - Description: Provides an access to the underlaying host memory. + - Parameters: + - `tensor` - A point to ov_tensor_t + - `data` - A point to host memory. + - Return value: Status code of the operation: OK(0) for success. + +- `void ov_tensor_free(ov_tensor_t* tensor)` + - Description: Free ov_tensor_t. + - Parameters: + - `tensor` - A point to ov_tensor_t + - Return value: None. diff --git a/src/bindings/c/include/c_api/ie_c_api.h b/src/bindings/c/include/c_api/ie_c_api.h deleted file mode 100644 index 5683f583c4e7fb..00000000000000 --- a/src/bindings/c/include/c_api/ie_c_api.h +++ /dev/null @@ -1,1164 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @file ie_c_api.h - * C API of Inference Engine bridge unlocks using of OpenVINO Inference Engine - * library and all its plugins in native applications disabling usage - * of C++ API. The scope of API covers significant part of C++ API and includes - * an ability to read model from the disk, modify input and output information - * to correspond their runtime representation like data types or memory layout, - * load in-memory model to Inference Engine on different devices including - * heterogeneous and multi-device modes, manage memory where input and output - * is allocated and manage inference flow. - **/ - -/** - * @defgroup ie_c_api Inference Engine C API - * Inference Engine C API - */ - -#ifndef IE_C_API_H -#define IE_C_API_H - -#include -#include - -#include "openvino/c/deprecated.h" - -#ifdef __cplusplus -# define INFERENCE_ENGINE_C_API_EXTERN extern "C" -#else -# define INFERENCE_ENGINE_C_API_EXTERN -#endif - -#define IE_1_0_DEPRECATED \ - OPENVINO_DEPRECATED("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
" \ - "For instructions on transitioning to the new API, please refer to " \ - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") - -#if !defined(IN_OV_COMPONENT) && !defined(C_API_LEGACY_HEADER_INCLUDED) -# define C_API_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( - "The legacy C API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The legacy C API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#if defined(OPENVINO_STATIC_LIBRARY) || defined(__GNUC__) && (__GNUC__ < 4) -# define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __VA_ARGS__ IE_1_0_DEPRECATED -# define IE_NODISCARD -#else -# if defined(_WIN32) || defined(__CYGWIN__) -# define INFERENCE_ENGINE_C_API_CALLBACK __cdecl -# ifdef openvino_c_EXPORTS -# define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl -# else -# define INFERENCE_ENGINE_C_API(...) \ - INFERENCE_ENGINE_C_API_EXTERN __declspec(dllimport) __VA_ARGS__ IE_1_0_DEPRECATED __cdecl -# endif -# define IE_NODISCARD -# else -# define INFERENCE_ENGINE_C_API(...) 
\ - INFERENCE_ENGINE_C_API_EXTERN __attribute__((visibility("default"))) __VA_ARGS__ IE_1_0_DEPRECATED -# define IE_NODISCARD __attribute__((warn_unused_result)) -# endif -#endif - -#ifndef INFERENCE_ENGINE_C_API_CALLBACK -# define INFERENCE_ENGINE_C_API_CALLBACK -#endif - -typedef struct ie_core ie_core_t; -typedef struct ie_network ie_network_t; -typedef struct ie_executable ie_executable_network_t; -typedef struct ie_infer_request ie_infer_request_t; -typedef struct ie_blob ie_blob_t; - -/** - * @struct ie_version - * @brief Represents an API version information that reflects the set of supported features - */ -typedef struct ie_version { - char* api_version; //!< A string representing Inference Engine version -} ie_version_t; - -/** - * @struct ie_core_version - * @brief Represents version information that describes devices and the inference engine runtime library - */ -typedef struct ie_core_version { - size_t major; //!< A major version - size_t minor; //!< A minor version - const char* device_name; //!< A device name - const char* build_number; //!< A build number - const char* description; //!< A device description -} ie_core_version_t; - -/** - * @struct ie_core_versions - * @brief Represents all versions information that describes all devices and the inference engine runtime library - */ -typedef struct ie_core_versions { - ie_core_version_t* versions; //!< An array of device versions - size_t num_vers; //!< A number of versions in the array -} ie_core_versions_t; - -/** - * @struct ie_config - * @brief Represents configuration information that describes devices - */ -typedef struct ie_config { - const char* name; //!< A configuration key - const char* value; //!< A configuration value - struct ie_config* next; //!< A pointer to the next configuration value -} ie_config_t; - -/** - * @struct ie_param - * @brief metric and config parameters. 
- */ -typedef struct ie_param { - union { - char* params; - unsigned int number; - unsigned int range_for_async_infer_request[3]; - unsigned int range_for_streams[2]; - }; -} ie_param_t; - -/** - * @struct ie_param_config - * @brief Represents configuration parameter information - */ -typedef struct ie_param_config { - char* name; - ie_param_t* param; -} ie_param_config_t; - -/** - * @struct desc - * @brief Represents detailed information for an error - */ -typedef struct desc { - char msg[256]; //!< A description message -} desc_t; - -/** - * @struct dimensions - * @brief Represents dimensions for input or output data - */ -typedef struct dimensions { - size_t ranks; //!< A runk representing a number of dimensions - size_t dims[8]; //!< An array of dimensions -} dimensions_t; - -/** - * @enum layout_e - * @brief Layouts that the inference engine supports - */ -typedef enum { - ANY = 0, //!< "ANY" layout - - // I/O data layouts - NCHW = 1, //!< "NCHW" layout - NHWC = 2, //!< "NHWC" layout - NCDHW = 3, //!< "NCDHW" layout - NDHWC = 4, //!< "NDHWC" layout - - // weight layouts - OIHW = 64, //!< "OIHW" layout - - // Scalar - SCALAR = 95, //!< "SCALAR" layout - - // bias layouts - C = 96, //!< "C" layout - - // Single image layout (for mean image) - CHW = 128, //!< "CHW" layout - - // 2D - HW = 192, //!< "HW" layout - NC = 193, //!< "NC" layout - CN = 194, //!< "CN" layout - - BLOCKED = 200, //!< "BLOCKED" layout -} layout_e; - -/** - * @enum precision_e - * @brief Precisions that the inference engine supports - */ -typedef enum { - UNSPECIFIED = 255, /**< Unspecified value. Used by default */ - MIXED = 0, /**< Mixed value. Can be received from network. 
No applicable for tensors */ - FP32 = 10, /**< 32bit floating point value */ - FP16 = 11, /**< 16bit floating point value */ - FP64 = 13, /**< 64bit floating point value */ - Q78 = 20, /**< 16bit specific signed fixed point precision */ - I16 = 30, /**< 16bit signed integer value */ - U4 = 39, /**< 4bit unsigned integer value */ - U8 = 40, /**< 8bit unsigned integer value */ - I4 = 49, /**< 4bit signed integer value */ - I8 = 50, /**< 8bit signed integer value */ - U16 = 60, /**< 16bit unsigned integer value */ - I32 = 70, /**< 32bit signed integer value */ - I64 = 72, /**< 64bit signed integer value */ - U64 = 73, /**< 64bit unsigned integer value */ - U32 = 74, /**< 32bit unsigned integer value */ - BIN = 71, /**< 1bit integer value */ - CUSTOM = 80 /**< custom precision has it's own name and size of elements */ -} precision_e; - -/** - * @struct tensor_desc - * @brief Represents detailed information for a tensor - */ -typedef struct tensor_desc { - layout_e layout; - dimensions_t dims; - precision_e precision; -} tensor_desc_t; - -/** - * @enum colorformat_e - * @brief Extra information about input color format for preprocessing - */ -typedef enum { - RAW = 0u, //!< Plain blob (default), no extra color processing required - RGB, //!< RGB color format - BGR, //!< BGR color format, default in OpenVINO - RGBX, //!< RGBX color format with X ignored during inference - BGRX //!< BGRX color format with X ignored during inference -} colorformat_e; - -/** - * @enum resize_alg_e - * @brief Represents the list of supported resize algorithms. 
- */ -typedef enum { - NO_RESIZE = 0, //!< "No resize" mode - RESIZE_BILINEAR, //!< "Bilinear resize" mode - RESIZE_AREA //!< "Area resize" mode -} resize_alg_e; - -/** - * @enum IEStatusCode - * @brief This enum contains codes for all possible return values of the interface functions - */ -typedef enum { - OK = 0, - GENERAL_ERROR = -1, - NOT_IMPLEMENTED = -2, - NETWORK_NOT_LOADED = -3, - PARAMETER_MISMATCH = -4, - NOT_FOUND = -5, - OUT_OF_BOUNDS = -6, - /* - * @brief exception not of std::exception derived type was thrown - */ - UNEXPECTED = -7, - REQUEST_BUSY = -8, - RESULT_NOT_READY = -9, - NOT_ALLOCATED = -10, - INFER_NOT_STARTED = -11, - NETWORK_NOT_READ = -12, - INFER_CANCELLED = -13, -} IEStatusCode; - -/** - * @struct roi_t - * @brief This structure describes roi data. - */ -typedef struct roi { - size_t id; //!< ID of a roi - size_t posX; //!< W upper left coordinate of roi - size_t posY; //!< H upper left coordinate of roi - size_t sizeX; //!< W size of roi - size_t sizeY; //!< H size of roi -} roi_t; - -/** - * @struct input_shape - * @brief Represents shape for input data - */ -typedef struct input_shape { - char* name; - dimensions_t shape; -} input_shape_t; - -/** - * @struct input_shapes - * @brief Represents shapes for all input data - */ -typedef struct input_shapes { - input_shape_t* shapes; - size_t shape_num; -} input_shapes_t; - -/** - * @struct ie_blob_buffer - * @brief Represents copied data from the given blob. - */ -typedef struct ie_blob_buffer { - union { - void* buffer; //!< buffer can be written - const void* cbuffer; //!< cbuffer is read-only - }; -} ie_blob_buffer_t; - -/** - * @struct ie_complete_call_back - * @brief Completion callback definition about the function and args - */ -typedef struct ie_complete_call_back { - void(INFERENCE_ENGINE_C_API_CALLBACK* completeCallBackFunc)(void* args); - void* args; -} ie_complete_call_back_t; - -/** - * @struct ie_available_devices - * @brief Represent all available devices. 
- */ -typedef struct ie_available_devices { - char** devices; - size_t num_devices; -} ie_available_devices_t; - -/** - * @brief Returns number of version that is exported. Use the ie_version_free() to free memory. - * @return Version number of the API. - */ -INFERENCE_ENGINE_C_API(ie_version_t) ie_c_api_version(void); - -/** - * @brief Release the memory allocated by ie_c_api_version. - * @param version A pointer to the ie_version_t to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_version_free(ie_version_t* version); - -/** - * @brief Release the memory allocated by ie_param_t. - * @param param A pointer to the ie_param_t to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_param_free(ie_param_t* param); - -// Core - -/** - * @defgroup Core Core - * @ingroup ie_c_api - * Set of functions dedicated to working with registered plugins and loading - * network to the registered devices. - * @{ - */ - -/** - * @brief Constructs Inference Engine Core instance using XML configuration file with devices description. - * See RegisterPlugins for more details. Use the ie_core_free() method to free memory. - * @ingroup Core - * @param xml_config_file A path to .xml file with devices to load from. If XML configuration file is not specified, - * then default Inference Engine devices are loaded from the default plugin.xml file. - * @param core A pointer to the newly created ie_core_t. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_create(const char* xml_config_file, ie_core_t** core); - -/** - * @brief Releases memory occupied by core. - * @ingroup Core - * @param core A pointer to the core to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_core_free(ie_core_t** core); - -/** - * @brief Gets version information of the device specified. Use the ie_core_versions_free() method to free memory. - * @ingroup Core - * @param core A pointer to ie_core_t instance. 
- * @param device_name Name to identify device. - * @param versions A pointer to versions corresponding to device_name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_get_versions(const ie_core_t* core, const char* device_name, ie_core_versions_t* versions); - -/** - * @brief Releases memory occupied by ie_core_versions. - * @ingroup Core - * @param vers A pointer to the ie_core_versions to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_core_versions_free(ie_core_versions_t* vers); - -/** - * @brief Reads the model from the .xml and .bin files of the IR. Use the ie_network_free() method to free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param xml .xml file's path of the IR. - * @param weights_file .bin file's path of the IR, if path is empty, will try to read bin file with the same name as xml - * and if bin file with the same name was not found, will load IR without weights. - * @param network A pointer to the newly created network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_read_network(ie_core_t* core, const char* xml, const char* weights_file, ie_network_t** network); - -/** - * @brief Reads the model from an xml string and a blob of the bin part of the IR. Use the ie_network_free() method to - * free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param xml_content Xml content of the IR. - * @param xml_content_size Number of bytes in the xml content of the IR. - * @param weight_blob Blob containing the bin part of the IR. - * @param network A pointer to the newly created network. - * @return Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_read_network_from_memory(ie_core_t* core, - const uint8_t* xml_content, - size_t xml_content_size, - const ie_blob_t* weight_blob, - ie_network_t** network); - -/** - * @brief Creates an executable network from a network previously exported to a file. Users can create as many networks - * as they need and use them simultaneously (up to the limitation of the hardware resources). Use the - * ie_exec_network_free() method to free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param file_name A path to the location of the exported file. - * @param device_name A name of the device to load the network to. - * @param config Device configuration. - * @param exe_network A pointer to the newly created executable network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_import_network(ie_core_t* core, - const char* file_name, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network); - -/** - * @brief Creates an executable network from a network previously exported to memory. Users can create as many networks - * as they need and use them simultaneously (up to the limitation of the hardware resources). Use the - * ie_exec_network_free() method to free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param content A pointer to content of the exported network. - * @param content_size Number of bytes in the exported network. - * @param device_name A name of the device to load the network to. - * @param config Device configuration. - * @param exe_network A pointer to the newly created executable network. - * @return Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_import_network_from_memory(ie_core_t* core, - const uint8_t* content, - size_t content_size, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network); - -/** - * @brief Exports an executable network to a .bin file. - * @ingroup Core - * @param exe_network A pointer to the newly created executable network. - * @param file_name Path to the file to export the network to. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_export_network(ie_executable_network_t* exe_network, const char* file_name); - -/** - * @brief Creates an executable network from a given network object. Users can create as many networks as they need and - * use them simultaneously (up to the limitation of the hardware resources). Use the ie_exec_network_free() method to - * free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param network A pointer to the input ie_network instance to create the executable network from. - * @param device_name Name of the device to load the network to. - * @param config Device configuration. - * @param exe_network A pointer to the newly created executable network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_load_network(ie_core_t* core, - const ie_network_t* network, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network); - -/** - * @brief Reads model and creates an executable network from IR or ONNX file. Users can create as many networks as they - * need and use them simultaneously (up to the limitation of the hardware resources). Use the ie_exec_network_free() - * method to free memory. - * @ingroup Core - * @param core A pointer to the ie_core_t instance. - * @param xml .xml file's path of the IR. 
Weights file name will be calculated automatically - * @param device_name Name of device to load network to. - * @param config Device configuration. - * @param exe_network A pointer to the newly created executable network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_load_network_from_file(ie_core_t* core, - const char* xml, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network); - -/** - * @brief Sets configuration for device. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param ie_core_config Device configuration. - * @param device_name An optional name of a device. If device name is not specified, - * the config is set for all the registered devices. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_set_config(ie_core_t* core, const ie_config_t* ie_core_config, const char* device_name); - -/** - * @brief Registers a new device and a plugin which implement this device inside Inference Engine. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param plugin - A path (absolute or relative) or name of a plugin. Depending on platform, - * plugin is wrapped with shared library suffix and prefix to identify library full name - * @param device_name A device name to register plugin for. If not specified, the method registers - * a plugin with the default name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_register_plugin(ie_core_t* core, const char* plugin, const char* device_name); - -/** - * @brief Registers plugins specified in an ".xml" configuration file. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param xml_config_file A full path to ".xml" file containing plugins configuration. 
- * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_register_plugins(ie_core_t* core, const char* xml_config_file); - -/** - * @brief Unregisters a plugin with a specified device name. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param device_name A device name of the device to unregister. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_unregister_plugin(ie_core_t* core, const char* device_name); - -/** - * @brief Loads extension library to the device with a specified device name. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param extension_path Path to the extensions library file to load to a device. - * @param device_name A device name of a device to load the extensions to. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_add_extension(ie_core_t* core, const char* extension_path, const char* device_name); - -/** - * @brief Gets general runtime metric for dedicated hardware. The method is needed to request common device properties - * which are executable network agnostic. It can be device name, temperature, other devices-specific values. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param device_name A name of a device to get a metric value. - * @param metric_name A metric name to request. - * @param param_result A metric value corresponding to the metric_name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_get_metric(const ie_core_t* core, const char* device_name, const char* metric_name, ie_param_t* param_result); - -/** - * @brief Gets configuration dedicated to device behaviour. The method is targeted to extract information - * which can be set via SetConfig method. 
- * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param device_name A name of a device to get a configuration value. - * @param config_name Name of a configuration. - * @param param_result A configuration value corresponding to the config_name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_get_config(const ie_core_t* core, const char* device_name, const char* config_name, ie_param_t* param_result); - -/** - * @brief Gets available devices for neural network inference. - * @ingroup Core - * @param core A pointer to ie_core_t instance. - * @param avai_devices The devices are returned as { CPU, GPU.0, GPU.1 } - * If there more than one device of specific type, they are enumerated with .# suffix - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_core_get_available_devices(const ie_core_t* core, ie_available_devices_t* avai_devices); - -/** - * @brief Releases memory occpuied by ie_available_devices_t - * @ingroup Core - * @param avai_devices A pointer to the ie_available_devices_t to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_core_available_devices_free(ie_available_devices_t* avai_devices); - -/** @} */ // end of Core - -// ExecutableNetwork - -/** - * @defgroup ExecutableNetwork ExecutableNetwork - * @ingroup ie_c_api - * Set of functions representing of neural networks been loaded to device. - * @{ - */ - -/** - * @brief Releases memory occupied by ExecutableNetwork. - * @ingroup ExecutableNetwork - * @param ie_exec_network A pointer to the ExecutableNetwork to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_exec_network_free(ie_executable_network_t** ie_exec_network); - -/** - * @brief Creates an inference request instance used to infer the network. The created request has allocated input - * and output blobs (that can be changed later). 
Use the ie_infer_request_free() method to free memory. - * @ingroup ExecutableNetwork - * @param ie_exec_network A pointer to ie_executable_network_t instance. - * @param request A pointer to the newly created ie_infer_request_t instance - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_exec_network_create_infer_request(ie_executable_network_t* ie_exec_network, ie_infer_request_t** request); - -/** - * @brief Gets general runtime metric for an executable network. It can be network name, actual device ID on which - * executable network is running or all other properties which cannot be changed dynamically. - * @ingroup ExecutableNetwork - * @param ie_exec_network A pointer to ie_executable_network_t instance. - * @param metric_name A metric name to request. - * @param param_result A metric value corresponding to the metric_name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_exec_network_get_metric(const ie_executable_network_t* ie_exec_network, - const char* metric_name, - ie_param_t* param_result); - -/** - * @brief Sets configuration for current executable network. Currently, the method can be used - * when the network run on the Multi device and the configuration parameter is only can be "MULTI_DEVICE_PRIORITIES" - * @ingroup ExecutableNetwork - * @param ie_exec_network A pointer to ie_executable_network_t instance. - * @param param_config A pointer to device configuration.. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_exec_network_set_config(ie_executable_network_t* ie_exec_network, const ie_config_t* param_config); - -/** - * @brief Gets configuration for current executable network. The method is responsible to - * extract information which affects executable network execution. 
- * @ingroup ExecutableNetwork - * @param ie_exec_network A pointer to ie_executable_network_t instance. - * @param metric_config A configuration parameter name to request. - * @param param_result A configuration value corresponding to a configuration parameter name. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_exec_network_get_config(const ie_executable_network_t* ie_exec_network, - const char* metric_config, - ie_param_t* param_result); - -/** @} */ // end of ExecutableNetwork - -// InferRequest - -/** - * @defgroup InferRequest InferRequest - * @ingroup ie_c_api - * Set of functions responsible for dedicated inference for certain - * ExecutableNetwork. - * @{ - */ - -/** - * @brief Releases memory occupied by ie_infer_request_t instance. - * @ingroup InferRequest - * @param infer_request A pointer to the ie_infer_request_t to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_infer_request_free(ie_infer_request_t** infer_request); - -/** - * @brief Gets input/output data for inference - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @param name Name of input or output blob. - * @param blob A pointer to input or output blob. The type of Blob must match the network input precision and size. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_infer_request_get_blob(ie_infer_request_t* infer_request, const char* name, ie_blob_t** blob); - -/** - * @brief Sets input/output data to inference. - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @param name Name of input or output blob. - * @param blob Reference to input or output blob. The type of a blob must match the network input precision and size. - * @return Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_infer_request_set_blob(ie_infer_request_t* infer_request, const char* name, const ie_blob_t* blob); - -/** - * @brief Starts synchronous inference of the infer request and fill outputs. - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_infer(ie_infer_request_t* infer_request); - -/** - * @brief Starts asynchronous inference of the infer request and fill outputs. - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_infer_async(ie_infer_request_t* infer_request); - -/** - * @brief Sets a callback function that will be called on success or failure of asynchronous request - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @param callback A function to be called. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_infer_set_completion_callback(ie_infer_request_t* infer_request, ie_complete_call_back_t* callback); - -/** - * @brief Waits for the result to become available. Blocks until specified timeout elapses or the result becomes - * available, whichever comes first. - * @ingroup InferRequest - * @param infer_request A pointer to ie_infer_request_t instance. - * @param timeout Maximum duration in milliseconds to block for - * @note There are special cases when timeout is equal some value of the WaitMode enum: - * * 0 - Immediately returns the inference status. It does not block or interrupt execution. - * * -1 - waits until inference result becomes available - * @return Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_infer_request_wait(ie_infer_request_t* infer_request, const int64_t timeout); - -/** @} */ // end of InferRequest - -// Network - -/** - * @defgroup Network Network - * @ingroup ie_c_api - * Set of functions managing network been read from the IR before loading - * of it to the device. - * @{ - */ - -/** - * @brief When network is loaded into the Infernece Engine, it is not required anymore and should be released - * @ingroup Network - * @param network The pointer to the instance of the ie_network_t to free. - */ -INFERENCE_ENGINE_C_API(void) ie_network_free(ie_network_t** network); - -/** - * @brief Get name of network. - * @ingroup Network - * @param network A pointer to the instance of the ie_network_t to get a name from. - * @param name Name of the network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_name(const ie_network_t* network, char** name); - -/** - * @brief Gets number of inputs for the network. - * @ingroup Network - * @param network A pointer to the instance of the ie_network_t to get number of input information. - * @param size_result A number of the instance's input information. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_inputs_number(const ie_network_t* network, size_t* size_result); - -/** - * @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory. - * @ingroup Network - * @param network A pointer to theinstance of the ie_network_t to get input information. - * @param number An id of input information . - * @param name Input name corresponding to the number. - * @return status Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_name(const ie_network_t* network, size_t number, char** name); - -/** - * @brief Gets a precision of the input data provided by user. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param prec_result A pointer to the precision used for input blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_precision(const ie_network_t* network, const char* input_name, precision_e* prec_result); - -/** - * @brief Changes the precision of the input data provided by the user. - * This function should be called before loading the network to the device. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param p A new precision of the input data to set (eg. precision_e.FP16). - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_input_precision(ie_network_t* network, const char* input_name, const precision_e p); - -/** - * @brief Gets a layout of the input data. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param layout_result A pointer to the layout used for input blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_layout(const ie_network_t* network, const char* input_name, layout_e* layout_result); - -/** - * @brief Changes the layout of the input data named "input_name". - * This function should be called before loading the network to the device. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. 
- * @param l A new layout of the input data to set. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_input_layout(ie_network_t* network, const char* input_name, const layout_e l); - -/** - * @brief Gets dimensions/shape of the input data with reversed order. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param dims_result A pointer to the dimensions used for input blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_dims(const ie_network_t* network, const char* input_name, dimensions_t* dims_result); - -/** - * @brief Gets pre-configured resize algorithm. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param resize_alg_result The pointer to the resize algorithm used for input blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_resize_algorithm(const ie_network_t* network, - const char* input_name, - resize_alg_e* resize_alg_result); - -/** - * @brief Sets resize algorithm to be used during pre-processing - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param resize_algo Resize algorithm. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_input_resize_algorithm(ie_network_t* network, const char* input_name, const resize_alg_e resize_algo); - -/** - * @brief Gets color format of the input data. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. 
- * @param colformat_result The pointer to the color format used for input blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_color_format(const ie_network_t* network, const char* input_name, colorformat_e* colformat_result); - -/** - * @brief Changes the color format of the input data. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param input_name Name of input data. - * @param color_format Color format of the input data. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_color_format(ie_network_t* network, const char* input_name, const colorformat_e color_format); - -/** - * @brief Helper method collect all input shapes with input names of corresponding input data. - * Use the ie_network_input_shapes_free() method to free memory. - * @ingroup Network - * @param network A pointer to the instance of the ie_network_t to get input shapes. - * @param shapes A pointer to the input_shapes. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_input_shapes(ie_network_t* network, input_shapes_t* shapes); - -/** - * @brief Run shape inference with new input shapes for the network. - * @ingroup Network - * @param network A pointer to the instance of the ie_network_t to reshape. - * @param shapes A new input shapes to set for the network. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_reshape(ie_network_t* network, const input_shapes_t shapes); - -/** - * @brief Gets number of output for the network. - * @ingroup Network - * @param network A pointer to the instance of the ie_network_t to get number of output information. - * @param size_result A number of the network's output information. 
- * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_outputs_number(const ie_network_t* network, size_t* size_result); - -/** - * @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory. - * @ingroup Network - * @param network A pointer to theinstance of the ie_network_t to get output information. - * @param number An id of output information . - * @param name Output name corresponding to the number. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_output_name(const ie_network_t* network, const size_t number, char** name); - -/** - * @brief Gets a precision of the output data named "output_name". - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param output_name Name of output data. - * @param prec_result A pointer to the precision used for output blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_output_precision(const ie_network_t* network, const char* output_name, precision_e* prec_result); - -/** - * @brief Changes the precision of the output data named "output_name". - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param output_name Name of output data. - * @param p A new precision of the output data to set (eg. precision_e.FP16). - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_output_precision(ie_network_t* network, const char* output_name, const precision_e p); - -/** - * @brief Gets a layout of the output data. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param output_name Name of output data. - * @param layout_result A pointer to the layout used for output blob creation. 
- * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_output_layout(const ie_network_t* network, const char* output_name, layout_e* layout_result); - -/** - * @brief Changes the layout of the output data named "output_name". - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param output_name Name of output data. - * @param l A new layout of the output data to set. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_set_output_layout(ie_network_t* network, const char* output_name, const layout_e l); - -/** - * @brief Gets dimensions/shape of the output data with reversed order. - * @ingroup Network - * @param network A pointer to ie_network_t instance. - * @param output_name Name of output data. - * @param dims_result A pointer to the dimensions used for output blob creation. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_network_get_output_dims(const ie_network_t* network, const char* output_name, dimensions_t* dims_result); - -/** - * @brief Releases memory occupied by input_shapes. - * @ingroup Network - * @param inputShapes A pointer to the input_shapes to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_network_input_shapes_free(input_shapes_t* inputShapes); - -/** - * @brief Releases momory occupied by input_name or output_name. - * @ingroup Network - * @param name A pointer to the input_name or output_name to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_network_name_free(char** name); - -/** @} */ // end of InferRequest - -// Blob - -/** - * @defgroup Blob Blob - * @ingroup ie_c_api - * Set of functions allowing to research memory from infer requests or make new - * memory objects to be passed to InferRequests. 
- * @{ - */ - -/** - * @brief Creates a blob with the specified dimensions, layout and to allocate memory. - * @ingroup Blob - * @param tensorDesc Tensor descriptor for Blob creation. - * @param blob A pointer to the newly created blob. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_make_memory(const tensor_desc_t* tensorDesc, ie_blob_t** blob); - -/** - * @brief Creates a blob with the given tensor descriptor from the pointer to the pre-allocated memory. - * @ingroup Blob - * @param tensorDesc Tensor descriptor for Blob creation. - * @param ptr Pointer to the pre-allocated memory. - * @param size Length of the pre-allocated array. - * @param blob A pointer to the newly created blob. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_make_memory_from_preallocated(const tensor_desc_t* tensorDesc, void* ptr, size_t size, ie_blob_t** blob); - -/** - * @brief Creates a blob describing given roi_t instance based on the given blob with pre-allocated memory. - * @ingroup Blob - * @param inputBlob original blob with pre-allocated memory. - * @param roi A roi_tinstance inside of the original blob. - * @param blob A pointer to the newly created blob. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_make_memory_with_roi(const ie_blob_t* inputBlob, const roi_t* roi, ie_blob_t** blob); - -/** - * @brief Gets the total number of elements, which is a product of all the dimensions. - * @ingroup Blob - * @param blob A pointer to the blob. - * @param size_result The total number of elements. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_size(ie_blob_t* blob, int* size_result); - -/** - * @brief Gets the size of the current Blob in bytes. 
- * @ingroup Blob - * @param blob A pointer to the blob. - * @param bsize_result The size of the current blob in bytes. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_byte_size(ie_blob_t* blob, int* bsize_result); - -/** - * @brief Releases previously allocated data - * @ingroup Blob - * @param blob A pointer to the blob to free memory. - */ -INFERENCE_ENGINE_C_API(void) ie_blob_deallocate(ie_blob_t** blob); - -/** - * @brief Gets access to the allocated memory . - * @ingroup Blob - * @param blob A pointer to the blob. - * @param blob_buffer A pointer to the copied data from the given blob. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_get_buffer(const ie_blob_t* blob, ie_blob_buffer_t* blob_buffer); - -/** - * @brief Gets read-only access to the allocated memory. - * @ingroup Blob - * @param blob A pointer to the blob. - * @param blob_cbuffer A pointer to the coped data from the given pointer to the blob and the data is read-only. - * @return Status code of the operation: OK(0) for success - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_get_cbuffer(const ie_blob_t* blob, ie_blob_buffer_t* blob_cbuffer); - -/** - * @brief Gets dimensions of blob's tensor. - * @ingroup Blob - * @param blob A pointer to the blob. - * @param dims_result A pointer to the dimensions of blob's tensor. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_dims(const ie_blob_t* blob, dimensions_t* dims_result); - -/** - * @brief Gets layout of blob's tensor. - * @ingroup Blob - * @param blob A pointer to the blob. - * @param layout_result A pointer to the layout of blob's tensor. - * @return Status code of the operation: OK(0) for success. 
- */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_layout(const ie_blob_t* blob, layout_e* layout_result); - -/** - * @brief Gets precision of blob's tensor. - * @ingroup Blob - * @param blob A pointer to the blob. - * @param prec_result A pointer to the precision of blob's tensor. - * @return Status code of the operation: OK(0) for success. - */ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) -ie_blob_get_precision(const ie_blob_t* blob, precision_e* prec_result); - -/** - * @brief Releases the memory occupied by the ie_blob_t pointer. - * @ingroup Blob - * @param blob A pointer to the blob pointer to release memory. - */ -INFERENCE_ENGINE_C_API(void) ie_blob_free(ie_blob_t** blob); - -/** @} */ // end of Blob - -/** - * @brief Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing - * dependent resources - * - * @note This function should be used by advanced user to control unload the resources. - * - * You might want to use this function if you are developing a dynamically-loaded library which should clean up all - * resources after itself when the library is unloaded. - */ -INFERENCE_ENGINE_C_API(void) ie_shutdown(); - -#endif // IE_C_API_H diff --git a/src/bindings/c/include/openvino/c/ov_model.h b/src/bindings/c/include/openvino/c/ov_model.h index 842ece84dd8781..1ee78f6e528575 100644 --- a/src/bindings/c/include/openvino/c/ov_model.h +++ b/src/bindings/c/include/openvino/c/ov_model.h @@ -94,7 +94,7 @@ OPENVINO_C_API(ov_status_e) ov_model_input_by_index(const ov_model_t* model, const size_t index, ov_output_port_t** input_port); /** - * @brief Get a single const output port of ov_model_t, which only support single output model.. + * @brief Get a single const output port of ov_model_t, which only support single output model. * @ingroup ov_model_c_api * @param model A pointer to the ov_model_t. * @param output_port A pointer to the ov_output_const_port_t. 
@@ -126,7 +126,7 @@ OPENVINO_C_API(ov_status_e) ov_model_const_output_by_name(const ov_model_t* model, const char* tensor_name, ov_output_const_port_t** output_port); /** - * @brief Get an single output port of ov_model_t, which only support single output model. + * @brief Get a single output port of ov_model_t, which only support single output model. * @ingroup ov_model_c_api * @param model A pointer to the ov_model_t. * @param output_port A pointer to the ov_output_const_port_t. @@ -178,7 +178,7 @@ OPENVINO_C_API(ov_status_e) ov_model_outputs_size(const ov_model_t* model, size_t* output_size); /** - * @brief Returns true if any of the ops defined in the model is dynamic shape.. + * @brief Returns true if any of the ops defined in the model is dynamic shape. * @param model A pointer to the ov_model_t. * @return true if model contains dynamic shapes */ diff --git a/src/bindings/c/include/openvino/c/ov_tensor.h b/src/bindings/c/include/openvino/c/ov_tensor.h index 13e4769fb9c351..a2369f33260a24 100644 --- a/src/bindings/c/include/openvino/c/ov_tensor.h +++ b/src/bindings/c/include/openvino/c/ov_tensor.h @@ -22,7 +22,7 @@ typedef struct ov_tensor ov_tensor_t; /** - * @brief Constructs Tensor using element type and shape. Allocate internal host storage using default allocator + * @brief Constructs Tensor using element type, shape and external host ptr. 
* @ingroup ov_tensor_c_api * @param type Tensor element type * @param shape Tensor shape diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt index 94298cc80433d0..bae88412fd0600 100644 --- a/src/bindings/c/src/CMakeLists.txt +++ b/src/bindings/c/src/CMakeLists.txt @@ -10,7 +10,6 @@ ov_disable_deprecated_warnings() add_definitions(-DIN_OV_COMPONENT) file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) -file(GLOB_RECURSE LEGACY_HEADERS ${OpenVINO_C_API_SOURCE_DIR}/include/c_api/*.h) file(GLOB_RECURSE HEADERS ${OpenVINO_C_API_SOURCE_DIR}/include/openvino/*.h) # create library @@ -59,11 +58,6 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR} ${OV_CPACK_INCLUDEDIR}/ie) -install(DIRECTORY ${OpenVINO_C_API_SOURCE_DIR}/include/c_api - DESTINATION ${OV_CPACK_INCLUDEDIR}/ie - COMPONENT ${OV_CPACK_COMP_CORE_C_DEV} - ${OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL}) - install(DIRECTORY ${OpenVINO_C_API_SOURCE_DIR}/include/openvino/ DESTINATION ${OV_CPACK_INCLUDEDIR}/openvino COMPONENT ${OV_CPACK_COMP_CORE_C_DEV} diff --git a/src/bindings/c/src/ie_c_api.cpp b/src/bindings/c/src/ie_c_api.cpp deleted file mode 100644 index 7fab00614e816a..00000000000000 --- a/src/bindings/c/src/ie_c_api.cpp +++ /dev/null @@ -1,1709 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "c_api/ie_c_api.h" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ie_compound_blob.h" -#include "inference_engine.hpp" - -namespace IE = InferenceEngine; - -/** - * @struct ie_core - * @brief This struct represents Inference Engine Core entity. 
- */ -struct ie_core { - IE::Core object; -}; - -/** - * @struct ie_executable - * @brief This is an interface of an executable network - */ -struct ie_executable { - IE::ExecutableNetwork object; -}; - -/** - * @struct ie_infer_request - * @brief This is an interface of asynchronous infer request - */ -struct ie_infer_request { - IE::InferRequest object; -}; - -/** - * @struct ie_blob - * @brief This struct represents a universal container in the Inference Engine - */ -struct ie_blob { - IE::Blob::Ptr object; -}; - -/** - * @struct ie_network - * @brief This is the main interface to describe the NN topology - */ -struct ie_network { - IE::CNNNetwork object; -}; - -/** - * @struct mem_stringbuf - * @brief This struct puts memory buffer to stringbuf. - */ -struct mem_stringbuf : std::streambuf { - mem_stringbuf(const char* buffer, size_t sz) { - char* bptr(const_cast(buffer)); - setg(bptr, bptr, bptr + sz); - } - - pos_type seekoff(off_type off, - std::ios_base::seekdir dir, - std::ios_base::openmode which = std::ios_base::in) override { - switch (dir) { - case std::ios_base::beg: - setg(eback(), eback() + off, egptr()); - break; - case std::ios_base::end: - setg(eback(), egptr() + off, egptr()); - break; - case std::ios_base::cur: - setg(eback(), gptr() + off, egptr()); - break; - default: - return pos_type(off_type(-1)); - } - return (gptr() < eback() || gptr() > egptr()) ? pos_type(off_type(-1)) : pos_type(gptr() - eback()); - } - - pos_type seekpos(pos_type pos, std::ios_base::openmode which) override { - return seekoff(pos, std::ios_base::beg, which); - } -}; - -/** - * @struct mem_istream - * @brief This struct puts stringbuf buffer to istream. 
- */ -struct mem_istream : virtual mem_stringbuf, std::istream { - mem_istream(const char* buffer, size_t sz) - : mem_stringbuf(buffer, sz), - std::istream(static_cast(this)) {} -}; - -std::map status_map = { - {IE::StatusCode::GENERAL_ERROR, IEStatusCode::GENERAL_ERROR}, - {IE::StatusCode::INFER_NOT_STARTED, IEStatusCode::INFER_NOT_STARTED}, - {IE::StatusCode::NETWORK_NOT_LOADED, IEStatusCode::NETWORK_NOT_LOADED}, - {IE::StatusCode::NETWORK_NOT_READ, IEStatusCode::NETWORK_NOT_READ}, - {IE::StatusCode::NOT_ALLOCATED, IEStatusCode::NOT_ALLOCATED}, - {IE::StatusCode::NOT_FOUND, IEStatusCode::NOT_FOUND}, - {IE::StatusCode::NOT_IMPLEMENTED, IEStatusCode::NOT_IMPLEMENTED}, - {IE::StatusCode::OK, IEStatusCode::OK}, - {IE::StatusCode::OUT_OF_BOUNDS, IEStatusCode::OUT_OF_BOUNDS}, - {IE::StatusCode::PARAMETER_MISMATCH, IEStatusCode::PARAMETER_MISMATCH}, - {IE::StatusCode::REQUEST_BUSY, IEStatusCode::REQUEST_BUSY}, - {IE::StatusCode::RESULT_NOT_READY, IEStatusCode::RESULT_NOT_READY}, - {IE::StatusCode::UNEXPECTED, IEStatusCode::UNEXPECTED}}; - -std::map precision_map = {{IE::Precision::UNSPECIFIED, precision_e::UNSPECIFIED}, - {IE::Precision::MIXED, precision_e::MIXED}, - {IE::Precision::FP32, precision_e::FP32}, - {IE::Precision::FP16, precision_e::FP16}, - {IE::Precision::FP64, precision_e::FP64}, - {IE::Precision::Q78, precision_e::Q78}, - {IE::Precision::I16, precision_e::I16}, - {IE::Precision::U4, precision_e::U4}, - {IE::Precision::U8, precision_e::U8}, - {IE::Precision::I4, precision_e::I4}, - {IE::Precision::I8, precision_e::I8}, - {IE::Precision::U16, precision_e::U16}, - {IE::Precision::I32, precision_e::I32}, - {IE::Precision::U32, precision_e::U32}, - {IE::Precision::I64, precision_e::I64}, - {IE::Precision::U64, precision_e::U64}, - {IE::Precision::BIN, precision_e::BIN}, - {IE::Precision::CUSTOM, precision_e::CUSTOM}}; - -std::map layout_map = {{IE::Layout::ANY, layout_e::ANY}, - {IE::Layout::NCHW, layout_e::NCHW}, - {IE::Layout::NHWC, layout_e::NHWC}, - 
{IE::Layout::NCDHW, layout_e::NCDHW}, - {IE::Layout::NDHWC, layout_e::NDHWC}, - {IE::Layout::OIHW, layout_e::OIHW}, - {IE::Layout::SCALAR, layout_e::SCALAR}, - {IE::Layout::C, layout_e::C}, - {IE::Layout::CHW, layout_e::CHW}, - {IE::Layout::HW, layout_e::HW}, - {IE::Layout::NC, layout_e::NC}, - {IE::Layout::CN, layout_e::CN}, - {IE::Layout::BLOCKED, layout_e::BLOCKED}}; - -std::map resize_alg_map = { - {IE::ResizeAlgorithm::NO_RESIZE, resize_alg_e::NO_RESIZE}, - {IE::ResizeAlgorithm::RESIZE_AREA, resize_alg_e::RESIZE_AREA}, - {IE::ResizeAlgorithm::RESIZE_BILINEAR, resize_alg_e::RESIZE_BILINEAR}}; - -std::map colorformat_map = {{IE::ColorFormat::RAW, colorformat_e::RAW}, - {IE::ColorFormat::RGB, colorformat_e::RGB}, - {IE::ColorFormat::BGR, colorformat_e::BGR}, - {IE::ColorFormat::BGRX, colorformat_e::BGRX}, - {IE::ColorFormat::RGBX, colorformat_e::RGBX}}; - -#define CATCH_IE_EXCEPTION(StatusCode, ExceptionType) \ - catch (const IE::ExceptionType&) { \ - return IEStatusCode::StatusCode; \ - } - -#define CATCH_OV_EXCEPTION(StatusCode, ExceptionType) \ - catch (const ov::ExceptionType&) { \ - return IEStatusCode::StatusCode; \ - } - -#define CATCH_IE_EXCEPTIONS \ - CATCH_OV_EXCEPTION(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_OV_EXCEPTION(GENERAL_ERROR, Exception) \ - CATCH_IE_EXCEPTION(GENERAL_ERROR, GeneralError) \ - CATCH_IE_EXCEPTION(NOT_IMPLEMENTED, NotImplemented) \ - CATCH_IE_EXCEPTION(NETWORK_NOT_LOADED, NetworkNotLoaded) \ - CATCH_IE_EXCEPTION(PARAMETER_MISMATCH, ParameterMismatch) \ - CATCH_IE_EXCEPTION(NOT_FOUND, NotFound) \ - CATCH_IE_EXCEPTION(OUT_OF_BOUNDS, OutOfBounds) \ - CATCH_IE_EXCEPTION(UNEXPECTED, Unexpected) \ - CATCH_IE_EXCEPTION(REQUEST_BUSY, RequestBusy) \ - CATCH_IE_EXCEPTION(RESULT_NOT_READY, ResultNotReady) \ - CATCH_IE_EXCEPTION(NOT_ALLOCATED, NotAllocated) \ - CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \ - CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead) \ - CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) \ - catch 
(const std::exception&) { \ - return IEStatusCode::UNEXPECTED; \ - } - -/** - *@brief convert the config type data to map type data. - */ -inline std::map config2Map(const ie_config_t* config) { - std::map m; - const ie_config_t* tmp = config; - while (tmp && tmp->name && tmp->value) { - m[tmp->name] = tmp->value; - tmp = tmp->next; - } - return m; -} - -inline std::map config2ParamMap(const ie_config_t* config) { - std::map param_map; - const ie_config_t* tmp = config; - - while (tmp) { - IE::Parameter param = IE::Parameter(std::string(tmp->value)); - param_map[tmp->name] = param; - tmp = tmp->next; - } - return param_map; -} - -/** - *@brief convert the parameter. - */ -inline void parameter2IEparam(const IE::Parameter param, ie_param_t* ie_param) { - if (param.is()) { - std::unique_ptr params_temp(new char[param.as().length() + 1]); - ie_param->params = params_temp.release(); - memcpy(ie_param->params, param.as().c_str(), param.as().length() + 1); - } else if (param.is>()) { - auto val = param.as>(); - if (val.size() > 0) { - std::string tmp = val[0]; - for (size_t i = 1; i < val.size(); ++i) { - tmp = tmp + ", " + val[i]; - } - - std::unique_ptr params_temp(new char[tmp.length() + 1]); - ie_param->params = params_temp.release(); - memcpy(ie_param->params, tmp.c_str(), tmp.length() + 1); - } else { - std::unique_ptr params_temp(new char[1]); - ie_param->params = params_temp.release(); - memcpy(ie_param->params, "", sizeof(char)); - } - } else if (param.is>()) { - auto val = param.as>(); - ie_param->range_for_streams[0] = std::get<0>(val); - ie_param->range_for_streams[1] = std::get<1>(val); - } else if (param.is>()) { - auto val = param.as>(); - ie_param->range_for_async_infer_request[0] = std::get<0>(val); - ie_param->range_for_async_infer_request[1] = std::get<1>(val); - ie_param->range_for_async_infer_request[2] = std::get<2>(val); - } else if (param.is()) { - auto val = param.as(); - ie_param->number = val; - } -} - -ie_version_t ie_c_api_version(void) { - 
auto version = IE::GetInferenceEngineVersion(); - std::string version_str = version->buildNumber; - - ie_version_t version_res; - std::unique_ptr ver(new char[version_str.length() + 1]); - version_res.api_version = ver.release(); - memcpy(version_res.api_version, version_str.c_str(), version_str.length() + 1); - - return version_res; -} - -void ie_version_free(ie_version_t* version) { - if (version) { - delete[] version->api_version; - version->api_version = NULL; - } -} - -void ie_param_free(ie_param_t* param) { - if (param && param->params) { - delete[] param->params; - param->params = NULL; - } -} - -IEStatusCode ie_core_create(const char* xml_config_file, ie_core_t** core) { - if (xml_config_file == nullptr || core == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - try { - auto object = IE::Core(xml_config_file); - *core = new ie_core_t{std::move(object)}; - } - CATCH_IE_EXCEPTIONS - - return status; -} - -void ie_core_free(ie_core_t** core) { - if (core) { - delete *core; - *core = NULL; - } -} - -IEStatusCode ie_core_get_versions(const ie_core_t* core, const char* device_name, ie_core_versions_t* versions) { - if (core == nullptr || device_name == nullptr || versions == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - try { - std::map IEversions = core->object.GetVersions(device_name); - size_t num = IEversions.size(); - if (num == 0) { - return IEStatusCode::NOT_FOUND; - } - - std::unique_ptr vers_ptrs(new ie_core_version_t[num]); - assert(vers_ptrs); - - versions->num_vers = num; - - std::map::iterator iter = IEversions.begin(); - for (size_t i = 0; i < num; ++i, ++iter) { - std::unique_ptr deviceName(new char[iter->first.length() + 1]); - char* _deviceName = deviceName.release(); - memcpy(_deviceName, iter->first.c_str(), iter->first.length() + 1); - vers_ptrs[i].device_name = _deviceName; - IE_SUPPRESS_DEPRECATED_START - vers_ptrs[i].major = 
iter->second.apiVersion.major; - vers_ptrs[i].minor = iter->second.apiVersion.minor; - IE_SUPPRESS_DEPRECATED_END - vers_ptrs[i].build_number = iter->second.buildNumber; - vers_ptrs[i].description = iter->second.description; - } - versions->versions = vers_ptrs.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -void ie_core_versions_free(ie_core_versions_t* vers) { - if (vers) { - for (size_t i = 0; i < vers->num_vers; ++i) { - delete[] const_cast(vers->versions[i].device_name); - vers->versions[i].device_name = NULL; - } - delete[] vers->versions; - vers->versions = NULL; - } -} - -IEStatusCode ie_core_read_network(ie_core_t* core, const char* xml, const char* weights_file, ie_network_t** network) { - if (core == nullptr || xml == nullptr || network == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - - try { - std::unique_ptr network_result(new ie_network_t); - std::string bin = ""; - if (weights_file) { - bin = weights_file; - } - network_result->object = core->object.ReadNetwork(xml, bin); - *network = network_result.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_read_network_from_memory(ie_core_t* core, - const uint8_t* xml_content, - size_t xml_content_size, - const ie_blob_t* weight_blob, - ie_network_t** network) { - if (core == nullptr || xml_content == nullptr || network == nullptr || weight_blob == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - - try { - std::unique_ptr network_result(new ie_network_t); - network_result->object = - core->object.ReadNetwork(std::string(reinterpret_cast(xml_content), - reinterpret_cast(xml_content + xml_content_size)), - weight_blob->object); - *network = network_result.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_import_network(ie_core_t* core, - const char* file_name, - const char* device_name, - const ie_config_t* config, - 
ie_executable_network_t** exe_network) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || file_name == nullptr || device_name == nullptr || exe_network == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - std::map conf_map = config2Map(config); - std::unique_ptr exe_net(new ie_executable_network_t); - - exe_net->object = core->object.ImportNetwork(file_name, device_name, conf_map); - *exe_network = exe_net.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_import_network_from_memory(ie_core_t* core, - const uint8_t* content, - size_t content_size, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network) { - if (core == nullptr || content == nullptr || device_name == nullptr || exe_network == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - try { - mem_istream model_stream(reinterpret_cast(content), content_size); - - std::map conf_map = config2Map(config); - std::unique_ptr exe_net(new ie_executable_network_t); - - exe_net->object = core->object.ImportNetwork(model_stream, device_name, conf_map); - *exe_network = exe_net.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_export_network(ie_executable_network_t* exe_network, const char* file_name) { - IEStatusCode status = IEStatusCode::OK; - - if (file_name == nullptr || exe_network == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - try { - exe_network->object.Export(file_name); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_load_network(ie_core_t* core, - const ie_network_t* network, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || network == nullptr || device_name == nullptr || exe_network == nullptr) { - status = IEStatusCode::GENERAL_ERROR; 
- return status; - } - - try { - std::map conf_map = config2Map(config); - std::unique_ptr exe_net(new ie_executable_network_t); - - // create plugin in the registery and then create ExecutableNetwork. - exe_net->object = core->object.LoadNetwork(network->object, device_name, conf_map); - *exe_network = exe_net.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_load_network_from_file(ie_core_t* core, - const char* xml, - const char* device_name, - const ie_config_t* config, - ie_executable_network_t** exe_network) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || xml == nullptr || device_name == nullptr || exe_network == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - std::map conf_map = config2Map(config); - std::unique_ptr exe_net(new ie_executable_network_t); - - exe_net->object = core->object.LoadNetwork(xml, device_name, conf_map); - *exe_network = exe_net.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_set_config(ie_core_t* core, const ie_config_t* ie_core_config, const char* device_name) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || ie_core_config == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - const std::map conf_map = config2Map(ie_core_config); - std::string deviceName; - if (device_name != nullptr) { - deviceName = std::string(device_name); - } - - try { - core->object.SetConfig(conf_map, deviceName); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_register_plugin(ie_core_t* core, const char* plugin, const char* device_name) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || plugin == nullptr || device_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - core->object.RegisterPlugin(plugin, device_name); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode 
ie_core_register_plugins(ie_core_t* core, const char* xml_config_file) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || xml_config_file == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - core->object.RegisterPlugins(xml_config_file); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_unregister_plugin(ie_core_t* core, const char* device_name) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || device_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - core->object.UnregisterPlugin(device_name); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_add_extension(ie_core_t* core, const char* extension_path, const char* device_name) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || extension_path == nullptr || device_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - auto extension_ptr = std::make_shared(std::string{extension_path}); - auto extension = std::dynamic_pointer_cast(extension_ptr); - core->object.AddExtension(extension, device_name); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_get_metric(const ie_core_t* core, - const char* device_name, - const char* metric_name, - ie_param_t* param_result) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || device_name == nullptr || metric_name == nullptr || param_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::Parameter param = core->object.GetMetric(device_name, metric_name); - parameter2IEparam(param, param_result); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_get_config(const ie_core_t* core, - const char* device_name, - const char* config_name, - ie_param_t* param_result) { - IEStatusCode status = IEStatusCode::OK; - - if (core == nullptr || device_name == nullptr || config_name == 
nullptr || param_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::Parameter param = core->object.GetConfig(device_name, config_name); - - // convert the parameter to ie_param_t - parameter2IEparam(param, param_result); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_core_get_available_devices(const ie_core_t* core, ie_available_devices_t* avai_devices) { - if (core == nullptr || avai_devices == nullptr) - return IEStatusCode::GENERAL_ERROR; - - try { - std::vector _devices = core->object.GetAvailableDevices(); - avai_devices->num_devices = _devices.size(); - std::unique_ptr dev_ptrs(new char*[avai_devices->num_devices]); - assert(dev_ptrs); - - for (size_t i = 0; i < avai_devices->num_devices; ++i) { - std::unique_ptr device_name(new char[_devices[i].length() + 1]); - assert(device_name); - dev_ptrs[i] = device_name.release(); - memcpy(dev_ptrs[i], _devices[i].c_str(), _devices[i].length() + 1); - } - avai_devices->devices = dev_ptrs.release(); - } - CATCH_IE_EXCEPTIONS - - return IEStatusCode::OK; -} - -void ie_core_available_devices_free(ie_available_devices_t* avai_devices) { - if (avai_devices->devices) { - for (size_t i = 0; i < avai_devices->num_devices; ++i) { - if (avai_devices->devices[i]) { - delete[] avai_devices->devices[i]; - avai_devices->devices[i] = NULL; - } - } - delete[] avai_devices->devices; - avai_devices->devices = NULL; - avai_devices->num_devices = 0; - } -} - -void ie_exec_network_free(ie_executable_network_t** ie_exec_network) { - if (ie_exec_network) { - delete *ie_exec_network; - *ie_exec_network = NULL; - } -} - -IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t* ie_exec_network, - ie_infer_request_t** request) { - IEStatusCode status = IEStatusCode::OK; - if (ie_exec_network == nullptr || request == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return IEStatusCode::GENERAL_ERROR; - } - - try { - std::unique_ptr req(new 
ie_infer_request_t); - req->object = ie_exec_network->object.CreateInferRequest(); - *request = req.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_exec_network_get_metric(const ie_executable_network_t* ie_exec_network, - const char* metric_name, - ie_param_t* param_result) { - IEStatusCode status = IEStatusCode::OK; - - if (ie_exec_network == nullptr || metric_name == nullptr || param_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - InferenceEngine::Parameter parameter = ie_exec_network->object.GetMetric(metric_name); - parameter2IEparam(parameter, param_result); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_exec_network_set_config(ie_executable_network_t* ie_exec_network, const ie_config_t* param_config) { - IEStatusCode status = IEStatusCode::OK; - - if (ie_exec_network == nullptr || param_config == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - const std::map conf_map = config2ParamMap(param_config); - ie_exec_network->object.SetConfig(conf_map); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_exec_network_get_config(const ie_executable_network_t* ie_exec_network, - const char* metric_config, - ie_param_t* param_result) { - IEStatusCode status = IEStatusCode::OK; - - if (ie_exec_network == nullptr || metric_config == nullptr || param_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - InferenceEngine::Parameter parameter = ie_exec_network->object.GetConfig(metric_config); - parameter2IEparam(parameter, param_result); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -void ie_network_free(ie_network_t** network) { - if (network) { - delete *network; - *network = NULL; - } -} - -IEStatusCode ie_network_get_name(const ie_network_t* network, char** name) { - if (network == nullptr || name == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - try { - std::string _name = 
network->object.getName(); - std::unique_ptr netName(new char[_name.length() + 1]); - *name = netName.release(); - memcpy(*name, _name.c_str(), _name.length() + 1); - } - CATCH_IE_EXCEPTIONS - - return IEStatusCode::OK; -} - -IEStatusCode ie_network_get_inputs_number(const ie_network_t* network, size_t* size_result) { - IEStatusCode status = IEStatusCode::OK; - if (network == nullptr || size_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - *size_result = inputs.size(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_input_name(const ie_network_t* network, size_t number, char** name) { - if (network == nullptr || name == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - - // check if the number is out of bounds. - if (number >= inputs.size()) { - status = IEStatusCode::OUT_OF_BOUNDS; - } else { - IE::InputsDataMap::iterator iter = inputs.begin(); - for (size_t i = 0; i < number; ++i) { - ++iter; - } - std::unique_ptr inputName(new char[iter->first.length() + 1]); - *name = inputName.release(); - memcpy(*name, iter->first.c_str(), iter->first.length() + 1); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_input_precision(const ie_network_t* network, - const char* input_name, - precision_e* prec_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr || prec_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Precision p = inputs[input_name]->getPrecision(); - *prec_result = precision_map[p]; - } - } - CATCH_IE_EXCEPTIONS - - return 
status; -} - -IEStatusCode ie_network_set_input_precision(ie_network_t* network, const char* input_name, const precision_e p) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Precision precision; - for (auto const& it : precision_map) { - if (it.second == p) { - precision = it.first; - break; - } - } - inputs[input_name]->setPrecision(precision); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_input_layout(const ie_network_t* network, const char* input_name, layout_e* layout_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr || layout_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Layout l = inputs[input_name]->getLayout(); - *layout_result = layout_map[l]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_set_input_layout(ie_network_t* network, const char* input_name, const layout_e l) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Layout layout = IE::Layout::NCHW; - for (auto const& it : layout_map) { - if (it.second == l) { - layout = it.first; - break; - } - } - inputs[input_name]->setLayout(layout); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode 
ie_network_get_input_dims(const ie_network_t* network, const char* input_name, dimensions_t* dims_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr || dims_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::SizeVector dims = inputs[input_name]->getTensorDesc().getDims(); - dims_result->ranks = dims.size(); - for (size_t i = 0; i < dims_result->ranks; ++i) { - dims_result->dims[i] = dims[i]; - } - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_input_resize_algorithm(const ie_network_t* network, - const char* input_name, - resize_alg_e* resize_alg_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr || resize_alg_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::ResizeAlgorithm resize = inputs[input_name]->getPreProcess().getResizeAlgorithm(); - *resize_alg_result = resize_alg_map[resize]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t* network, - const char* input_name, - const resize_alg_e resize_algo) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::ResizeAlgorithm resize = IE::ResizeAlgorithm::NO_RESIZE; - for (auto const& it : resize_alg_map) { - if (it.second == 
resize_algo) { - resize = it.first; - break; - } - } - inputs[input_name]->getPreProcess().setResizeAlgorithm(resize); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_color_format(const ie_network_t* network, - const char* input_name, - colorformat_e* colformat_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr || colformat_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::ColorFormat color = inputs[input_name]->getPreProcess().getColorFormat(); - *colformat_result = colorformat_map[color]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_set_color_format(ie_network_t* network, - const char* input_name, - const colorformat_e color_format) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || input_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::InputsDataMap inputs = network->object.getInputsInfo(); - if (inputs.find(input_name) == inputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::ColorFormat color = IE::ColorFormat::RGB; - for (auto const& it : colorformat_map) { - if (it.second == color_format) { - color = it.first; - break; - } - } - inputs[input_name]->getPreProcess().setColorFormat(color); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_input_shapes(ie_network* network, input_shapes_t* shapes) { - if (network == nullptr || shapes == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - try { - IE::ICNNNetwork::InputShapes net_shapes = network->object.getInputShapes(); - size_t num = net_shapes.size(); - - std::unique_ptr shape_ptrs(new input_shape[num]); - - assert(shape_ptrs); 
- - shapes->shape_num = num; - - IE::ICNNNetwork::InputShapes::iterator iter = net_shapes.begin(); - for (size_t i = 0; i < num; ++i, ++iter) { - IE::SizeVector net_dim = iter->second; - - std::unique_ptr _name(new char[iter->first.length() + 1]); - shape_ptrs[i].name = _name.release(); - memcpy(shape_ptrs[i].name, iter->first.c_str(), iter->first.length() + 1); - - shape_ptrs[i].shape.ranks = net_dim.size(); - for (size_t j = 0; j < shape_ptrs[i].shape.ranks; ++j) { - shape_ptrs[i].shape.dims[j] = net_dim[j]; - } - } - shapes->shapes = shape_ptrs.release(); - status = IEStatusCode::OK; - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_reshape(ie_network_t* network, const input_shapes_t shapes) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::ICNNNetwork::InputShapes net_shapes; - for (size_t i = 0; i < shapes.shape_num; ++i) { - IE::SizeVector net_dim; - for (size_t j = 0; j < shapes.shapes[i].shape.ranks; ++j) { - net_dim.push_back(shapes.shapes[i].shape.dims[j]); - } - - net_shapes[shapes.shapes[i].name] = net_dim; - } - - network->object.reshape(net_shapes); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_outputs_number(const ie_network_t* network, size_t* size_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || size_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - *size_result = outputs.size(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_output_name(const ie_network_t* network, const size_t number, char** name) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = 
network->object.getOutputsInfo(); - // check if the number is out of bounds. - if (number >= outputs.size()) { - status = IEStatusCode::OUT_OF_BOUNDS; - } else { - IE::OutputsDataMap::iterator iter = outputs.begin(); - for (size_t i = 0; i < number; ++i) { - ++iter; - } - std::unique_ptr outputName(new char[iter->first.length() + 1]); - *name = outputName.release(); - memcpy(*name, iter->first.c_str(), iter->first.length() + 1); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_output_precision(const ie_network_t* network, - const char* output_name, - precision_e* prec_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || output_name == nullptr || prec_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - if (outputs.find(output_name) == outputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Precision p = outputs[output_name]->getPrecision(); - *prec_result = precision_map[p]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_set_output_precision(ie_network_t* network, const char* output_name, const precision_e p) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || output_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - if (outputs.find(output_name) == outputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Precision precision; - for (auto const& it : precision_map) { - if (it.second == p) { - precision = it.first; - break; - } - } - outputs[output_name]->setPrecision(precision); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_output_layout(const ie_network_t* network, - const char* output_name, - layout_e* layout_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == 
nullptr || output_name == nullptr || layout_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - if (outputs.find(output_name) == outputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Layout l = outputs[output_name]->getLayout(); - *layout_result = layout_map[l]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_set_output_layout(ie_network_t* network, const char* output_name, const layout_e l) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || output_name == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - if (outputs.find(output_name) == outputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::Layout layout = IE::Layout::NCHW; - for (auto const& it : layout_map) { - if (it.second == l) { - layout = it.first; - break; - } - } - outputs[output_name]->setLayout(layout); - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_network_get_output_dims(const ie_network_t* network, - const char* output_name, - dimensions_t* dims_result) { - IEStatusCode status = IEStatusCode::OK; - - if (network == nullptr || output_name == nullptr || dims_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::OutputsDataMap outputs = network->object.getOutputsInfo(); - if (outputs.find(output_name) == outputs.end()) { - status = IEStatusCode::NOT_FOUND; - } else { - IE::SizeVector dims = outputs[output_name]->getTensorDesc().getDims(); - dims_result->ranks = dims.size(); - for (size_t i = 0; i < dims_result->ranks; ++i) { - dims_result->dims[i] = dims[i]; - } - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -void ie_network_input_shapes_free(input_shapes_t* inputShapes) { - if (inputShapes) { - for (size_t i = 0; i < inputShapes->shape_num; 
++i) { - delete[] inputShapes->shapes[i].name; - inputShapes->shapes[i].name = NULL; - } - delete[] inputShapes->shapes; - inputShapes->shapes = NULL; - } -} - -void ie_network_name_free(char** name) { - if (*name) { - delete[] * name; - *name = NULL; - } -} - -void ie_infer_request_free(ie_infer_request_t** infer_request) { - if (infer_request) { - delete *infer_request; - *infer_request = NULL; - } -} - -IEStatusCode ie_infer_request_get_blob(ie_infer_request_t* infer_request, const char* name, ie_blob_t** blob) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr || name == nullptr || blob == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::Blob::Ptr blob_ptr = infer_request->object.GetBlob(name); - std::unique_ptr blob_result(new ie_blob_t); - blob_result->object = blob_ptr; - *blob = blob_result.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_infer_request_set_blob(ie_infer_request_t* infer_request, const char* name, const ie_blob_t* blob) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr || name == nullptr || blob == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - infer_request->object.SetBlob(name, blob->object); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_infer_request_infer(ie_infer_request_t* infer_request) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - infer_request->object.Infer(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_infer_request_infer_async(ie_infer_request_t* infer_request) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - infer_request->object.StartAsync(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode 
ie_infer_set_completion_callback(ie_infer_request_t* infer_request, ie_complete_call_back_t* callback) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr || callback == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - auto fun = [=]() { - callback->completeCallBackFunc(callback->args); - }; - infer_request->object.SetCompletionCallback(fun); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_infer_request_wait(ie_infer_request_t* infer_request, const int64_t timeout) { - IEStatusCode status = IEStatusCode::OK; - - if (infer_request == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::StatusCode status_code = infer_request->object.Wait(timeout); - status = status_map[status_code]; - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_make_memory(const tensor_desc_t* tensorDesc, ie_blob_t** blob) { - if (tensorDesc == nullptr || blob == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IE::Precision prec; - for (auto const& it : precision_map) { - if (it.second == tensorDesc->precision) { - prec = it.first; - break; - } - } - - IE::Layout l = IE::Layout::NCHW; - for (auto const& it : layout_map) { - if (it.second == tensorDesc->layout) { - l = it.first; - break; - } - } - - IE::SizeVector dims_vector; - for (size_t i = 0; i < tensorDesc->dims.ranks; ++i) { - dims_vector.push_back(tensorDesc->dims.dims[i]); - } - - IEStatusCode status = IEStatusCode::OK; - try { - std::unique_ptr _blob(new ie_blob_t); - IE::TensorDesc tensor(prec, dims_vector, l); - - if (prec == IE::Precision::U8) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::U16) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::I8 || prec == IE::Precision::BIN || prec == IE::Precision::I4 || - prec == IE::Precision::U4) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == 
IE::Precision::I16 || prec == IE::Precision::FP16 || prec == IE::Precision::Q78) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::I32) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::U32) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::I64) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::U64) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::FP32) { - _blob->object = IE::make_shared_blob(tensor); - } else if (prec == IE::Precision::FP64) { - _blob->object = IE::make_shared_blob(tensor); - } else { - _blob->object = IE::make_shared_blob(tensor); - } - - _blob->object->allocate(); - *blob = _blob.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t* tensorDesc, - void* ptr, - size_t size, - ie_blob_t** blob) { - if (tensorDesc == nullptr || ptr == nullptr || blob == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IE::Precision prec; - for (auto const& it : precision_map) { - if (it.second == tensorDesc->precision) { - prec = it.first; - break; - } - } - - IE::Layout l = IE::Layout::NCHW; - for (auto const& it : layout_map) { - if (it.second == tensorDesc->layout) { - l = it.first; - break; - } - } - - IE::SizeVector dims_vector; - for (size_t i = 0; i < tensorDesc->dims.ranks; ++i) { - dims_vector.push_back(tensorDesc->dims.dims[i]); - } - - IEStatusCode status = IEStatusCode::OK; - try { - IE::TensorDesc tensor(prec, dims_vector, l); - std::unique_ptr _blob(new ie_blob_t); - if (prec == IE::Precision::U8) { - uint8_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::U16) { - uint16_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::I8 || prec == 
IE::Precision::BIN || prec == IE::Precision::I4 || - prec == IE::Precision::U4) { - int8_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::I16 || prec == IE::Precision::FP16 || prec == IE::Precision::Q78) { - int16_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::I32) { - int32_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::U32) { - uint32_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::I64) { - int64_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::U64) { - uint64_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::FP32) { - float* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else if (prec == IE::Precision::FP64) { - double* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } else { - uint8_t* p = reinterpret_cast(ptr); - _blob->object = IE::make_shared_blob(tensor, p, size); - } - *blob = _blob.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_make_memory_with_roi(const ie_blob_t* inputBlob, const roi_t* roi, ie_blob_t** blob) { - if (inputBlob == nullptr || roi == nullptr || blob == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - IEStatusCode status = IEStatusCode::OK; - try { - std::unique_ptr _blob(new ie_blob_t); - IE::ROI roi_d = {roi->id, roi->posX, roi->posY, roi->sizeX, roi->sizeY}; - _blob->object = IE::make_shared_blob(inputBlob->object, roi_d); - *blob = _blob.release(); - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_size(ie_blob_t* blob, int* size_result) { - IEStatusCode 
status = IEStatusCode::OK; - - if (blob == nullptr || size_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - *size_result = static_cast(blob->object->size()); - - return status; -} - -IEStatusCode ie_blob_byte_size(ie_blob_t* blob, int* bsize_result) { - IEStatusCode status = IEStatusCode::OK; - - if (blob == nullptr || bsize_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - *bsize_result = static_cast(blob->object->byteSize()); - - return status; -} - -void ie_blob_deallocate(ie_blob_t** blob) { - if (*blob) { - (*blob)->object->deallocate(); - delete *blob; - *blob = NULL; - } -} - -IEStatusCode ie_blob_get_buffer(const ie_blob_t* blob, ie_blob_buffer_t* blob_buffer) { - if (blob == nullptr || blob_buffer == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - blob_buffer->buffer = blob->object->buffer(); - - return IEStatusCode::OK; -} - -IEStatusCode ie_blob_get_cbuffer(const ie_blob_t* blob, ie_blob_buffer_t* blob_cbuffer) { - if (blob == nullptr || blob_cbuffer == nullptr) { - return IEStatusCode::GENERAL_ERROR; - } - - blob_cbuffer->cbuffer = blob->object->cbuffer(); - - return IEStatusCode::OK; -} - -IEStatusCode ie_blob_get_dims(const ie_blob_t* blob, dimensions_t* dims_result) { - IEStatusCode status = IEStatusCode::OK; - - if (blob == nullptr || dims_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::SizeVector size_vector = blob->object->getTensorDesc().getDims(); - dims_result->ranks = size_vector.size(); - for (size_t i = 0; i < dims_result->ranks; ++i) { - dims_result->dims[i] = size_vector[i]; - } - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_get_layout(const ie_blob_t* blob, layout_e* layout_result) { - IEStatusCode status = IEStatusCode::OK; - - if (blob == nullptr || layout_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::Layout l = 
blob->object->getTensorDesc().getLayout(); - *layout_result = layout_map[l]; - } - CATCH_IE_EXCEPTIONS - - return status; -} - -IEStatusCode ie_blob_get_precision(const ie_blob_t* blob, precision_e* prec_result) { - IEStatusCode status = IEStatusCode::OK; - - if (blob == nullptr || prec_result == nullptr) { - status = IEStatusCode::GENERAL_ERROR; - return status; - } - - try { - IE::Precision p = blob->object->getTensorDesc().getPrecision(); - *prec_result = precision_map[p]; - } - CATCH_IE_EXCEPTIONS - - return status; -} - -void ie_blob_free(ie_blob_t** blob) { - if (blob) { - delete *blob; - *blob = NULL; - } -} - -void ie_shutdown() { - InferenceEngine::shutdown(); -} \ No newline at end of file diff --git a/src/bindings/c/tests/CMakeLists.txt b/src/bindings/c/tests/CMakeLists.txt index 072d0e314dd3d7..3ceb40ee2e80ae 100644 --- a/src/bindings/c/tests/CMakeLists.txt +++ b/src/bindings/c/tests/CMakeLists.txt @@ -2,27 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# OpenVINO Legacy C API test sample -set(TARGET_NAME "InferenceEngineCAPITests") - -add_executable(${TARGET_NAME} ie_c_api_test.cpp test_model_repo.hpp test_model_repo.cpp) - -target_link_libraries(${TARGET_NAME} PRIVATE openvino_c common_test_utils gtest_main) - -if(ENABLE_AUTO OR ENABLE_MULTI) - add_dependencies(${TARGET_NAME} openvino_auto_plugin) -endif() - -add_dependencies(${TARGET_NAME} mock_engine) - -add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) - -install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION tests - COMPONENT tests - EXCLUDE_FROM_ALL) - -# OpenVINO 2.0 and Legacy C API test sample +# OpenVINO C API 2.0 test sample set(TARGET_NAME "ov_capi_test") file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/ov_*.cpp test_model_repo.cpp) diff --git a/src/bindings/c/tests/ie_c_api_test.cpp b/src/bindings/c/tests/ie_c_api_test.cpp deleted file mode 100644 index feb30b1917ec47..00000000000000 --- a/src/bindings/c/tests/ie_c_api_test.cpp +++ /dev/null @@ -1,1512 +0,0 @@ -// 
Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include -#include -#include "inference_engine.hpp" -#include - -#include "test_model_repo.hpp" - -#define IE_EXPECT_OK(...) EXPECT_EQ(IEStatusCode::OK, __VA_ARGS__) -#define IE_ASSERT_OK(...) ASSERT_EQ(IEStatusCode::OK, __VA_ARGS__) -#define IE_EXPECT_NOT_OK(...) EXPECT_NE(IEStatusCode::OK, __VA_ARGS__) - -OPENVINO_SUPPRESS_DEPRECATED_START - -#include - -static std::mutex m; -static bool ready = false; -static std::condition_variable condVar; - -static void completion_callback(void* args) { - ie_infer_request_t* infer_request = (ie_infer_request_t*)args; - ie_blob_t* output_blob = nullptr; - - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "Relu_1", &output_blob)); - - ie_blob_buffer_t buffer; - IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer)); - float* output_data = (float*)(buffer.buffer); - EXPECT_NEAR(output_data[9], 0.f, 1.e-5); - - ie_blob_free(&output_blob); - - std::lock_guard lock(m); - ready = true; - condVar.notify_one(); -} - -class ie_c_api_test : public ::testing::TestWithParam { -public: - void SetUp() override { - TestDataHelpers::generate_test_model(); - xml_file_name = TestDataHelpers::get_model_xml_file_name(); - bin_file_name = TestDataHelpers::get_model_bin_file_name(); - } - - void TearDown() override { - TestDataHelpers::release_test_model(); - } - -public: - size_t find_device(ie_available_devices_t avai_devices, const char* device_name) { - for (size_t i = 0; i < avai_devices.num_devices; ++i) { - if (strstr(avai_devices.devices[i], device_name)) - return i; - } - - return -1; - } - - std::vector content_from_file(const char* filename, bool is_binary) { - std::vector result; - { - std::ifstream is(filename, is_binary ? 
std::ifstream::binary | std::ifstream::in : std::ifstream::in); - if (is) { - is.seekg(0, std::ifstream::end); - result.resize(is.tellg()); - if (result.size() > 0) { - is.seekg(0, std::ifstream::beg); - is.read(reinterpret_cast(&result[0]), result.size()); - } - } - } - return result; - } - - std::string xml_file_name, bin_file_name; - const char* input_port_name = "Param_1"; - const char* output_port_name = "Relu_1"; -}; - -INSTANTIATE_TEST_SUITE_P(ie_c_api, ie_c_api_test, ::testing::Values("")); - -TEST_P(ie_c_api_test, ie_c_api_version) { - ie_version_t version = ie_c_api_version(); - auto ver = InferenceEngine::GetInferenceEngineVersion(); - std::string ver_str = ver->buildNumber; - - EXPECT_EQ(strcmp(version.api_version, ver_str.c_str()), 0); - ie_version_free(&version); -} - -TEST_P(ie_c_api_test, ie_core_create_coreCreatewithConfig) { - std::string plugins_xml = TestDataHelpers::generate_test_xml_file(); - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create(plugins_xml.c_str(), &core)); - ASSERT_NE(nullptr, core); - - ie_core_free(&core); - TestDataHelpers::delete_test_xml_file(); -} - -TEST_P(ie_c_api_test, ie_core_create_coreCreateNoConfig) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_get_available_devices) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - - ie_available_devices_t avai_devices = {0}; - IE_EXPECT_OK(ie_core_get_available_devices(core, &avai_devices)); - - ie_core_available_devices_free(&avai_devices); - ie_core_free(&core); -} - -// TODO: CVS-68982 -#ifndef OPENVINO_STATIC_LIBRARY - -TEST_P(ie_c_api_test, ie_core_register_plugin) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - const char *plugin_name = "test_plugin"; - const char *device_name = "BLA"; - IE_EXPECT_OK(ie_core_register_plugin(core, plugin_name, device_name)); - - 
ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_register_plugins) { - std::string plugins_xml = TestDataHelpers::generate_test_xml_file(); - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - IE_EXPECT_OK(ie_core_register_plugins(core, plugins_xml.c_str())); - - // Trigger plugin loading - ie_core_versions_t versions = {0}; - IE_EXPECT_OK(ie_core_get_versions(core, "CUSTOM", &versions)); - ie_core_versions_free(&versions); - - ie_core_free(&core); - TestDataHelpers::delete_test_xml_file(); -} - -TEST_P(ie_c_api_test, ie_core_unload_plugin) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - const char *device_name = "CPU"; - ie_core_versions_t versions = {0}; - // Trigger plugin loading - IE_EXPECT_OK(ie_core_get_versions(core, device_name, &versions)); - // Unload plugin - IE_EXPECT_OK(ie_core_unregister_plugin(core, device_name)); - - ie_core_versions_free(&versions); - ie_core_free(&core); -} - -#endif // !OPENVINO_STATIC_LIBRARY - -TEST_P(ie_c_api_test, ie_core_set_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - const char *device_name = "CPU"; - ie_config_t config = {"CPU_THREADS_NUM", "3", nullptr}; - IE_EXPECT_OK(ie_core_set_config(core, &config, device_name)); - - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_get_metric) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - const char *device_name = "CPU"; - const char *metric_name = "SUPPORTED_CONFIG_KEYS"; - ie_param_t param; - param.params = nullptr; - IE_EXPECT_OK(ie_core_get_metric(core, device_name, metric_name, ¶m)); - - ie_param_free(¶m); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_get_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - const char *device_name = "CPU"; - const char 
*config_name = "CPU_THREADS_NUM"; - ie_param_t param; - param.params = nullptr; - IE_EXPECT_OK(ie_core_get_config(core, device_name, config_name, ¶m)); - EXPECT_STREQ(param.params, "0"); - - ie_param_free(¶m); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_get_versions) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_core_versions_t versions = {0}; - IE_EXPECT_OK(ie_core_get_versions(core, "CPU", &versions)); - EXPECT_EQ(versions.num_vers, 1); - - ie_core_versions_free(&versions); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_read_network) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_read_network_from_memory) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - std::vector weights_content(content_from_file(bin_file_name.c_str(), true)); - - tensor_desc_t weights_desc { ANY, { 1, { weights_content.size() } }, U8 }; - ie_blob_t *weights_blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&weights_desc, weights_content.data(), weights_content.size(), &weights_blob)); - EXPECT_NE(nullptr, weights_blob); - - if (weights_blob != nullptr) { - std::vector xml_content(content_from_file(xml_file_name.c_str(), false)); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network_from_memory(core, xml_content.data(), xml_content.size(), weights_blob, &network)); - EXPECT_NE(nullptr, network); - if (network != nullptr) { - ie_network_free(&network); - } - ie_blob_free(&weights_blob); - } - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_export_network_to_file) { - ie_core_t *core = nullptr; - 
IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "HETERO:CPU", &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - std::string export_path = TestDataHelpers::get_exported_blob_file_name(); - IE_EXPECT_OK(ie_core_export_network(exe_network, export_path.c_str())); - std::ifstream file(export_path.c_str()); - EXPECT_NE(file.peek(), std::ifstream::traits_type::eof()); - - EXPECT_NE(nullptr, exe_network); - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_import_network_from_memory) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_executable_network_t *exe_network = nullptr; - - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "HETERO:CPU", nullptr, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - std::string export_path = TestDataHelpers::get_exported_blob_file_name(); - IE_EXPECT_OK(ie_core_export_network(exe_network, export_path.c_str())); - - std::vector buffer(content_from_file(export_path.c_str(), true)); - ie_executable_network_t *network = nullptr; - - IE_EXPECT_OK(ie_core_import_network_from_memory(core, buffer.data(), buffer.size(), "HETERO:CPU", nullptr, &network)); - EXPECT_NE(nullptr, network); - - ie_exec_network_free(&network); - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_import_network_from_file) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_config_t conf = {nullptr, nullptr, nullptr}; - - ie_executable_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "HETERO:CPU", &conf, &network)); - EXPECT_NE(nullptr, network); - - std::string exported_model 
= TestDataHelpers::get_exported_blob_file_name(); - IE_EXPECT_OK(ie_core_export_network(network, exported_model.c_str())); - std::ifstream file(exported_model); - EXPECT_NE(file.peek(), std::ifstream::traits_type::eof()); - - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_import_network(core, exported_model.c_str(), "HETERO:CPU", &conf, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&network); - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_import_network_from_file_errorHandling) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_config_t config = {nullptr, nullptr, nullptr}; - - ie_executable_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "HETERO:CPU", &config, &network)); - EXPECT_NE(nullptr, network); - - std::string exported_model = TestDataHelpers::get_exported_blob_file_name(); - IE_EXPECT_OK(ie_core_export_network(network, exported_model.c_str())); - - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_NOT_OK(ie_core_import_network(core, nullptr, "HETERO:CPU", &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_import_network(core, exported_model.c_str(), nullptr, &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_import_network(core, exported_model.c_str(), "HETERO:CPU", &config, nullptr)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_import_network(core, exported_model.c_str(), "UnregisteredDevice", &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_OK(ie_core_import_network(core, exported_model.c_str(), "HETERO:CPU", nullptr, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&network); - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, 
ie_core_load_network_with_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - IE_EXPECT_OK(ie_network_set_input_layout(network, input_port_name, layout_e::NHWC)); - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, precision_e::U8)); - - ie_config_t config = {"CPU_THREADS_NUM", "3", nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_load_network_no_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_load_network_null_Config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", nullptr, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&exe_network); - 
ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_load_network_from_file_no_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "CPU", &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_core_load_network_from_file_null_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "CPU", nullptr, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_exec_network_free(&exe_network); - ie_core_free(&core); -} - - -TEST_P(ie_c_api_test, ie_core_load_network_from_file_errorHandling) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_NOT_OK(ie_core_load_network_from_file(nullptr, xml_file_name.c_str(), "CPU", &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_load_network_from_file(core, nullptr, "CPU", &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), nullptr, &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "CPU", &config, nullptr)); - EXPECT_EQ(nullptr, exe_network); - - IE_EXPECT_NOT_OK(ie_core_load_network_from_file(core, xml_file_name.c_str(), "UnregisteredDevice", &config, &exe_network)); - EXPECT_EQ(nullptr, exe_network); - - 
ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_name) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - char *network_name = nullptr; - IE_EXPECT_OK(ie_network_get_name(network, &network_name)); - - EXPECT_NE(network_name, nullptr); - - ie_network_name_free(&network_name); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_inputs_number) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - size_t size; - IEStatusCode status_result = ie_network_get_inputs_number(network, &size); - EXPECT_EQ(status_result, IEStatusCode::OK); - EXPECT_EQ(size, 1); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_name) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - char *input_name = nullptr; - IE_EXPECT_OK(ie_network_get_input_name(network, 0, &input_name)); - - EXPECT_STREQ(input_name, input_port_name); - - ie_network_name_free(&input_name); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_precision) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, 
network); - - precision_e p; - IE_EXPECT_OK(ie_network_get_input_precision(network, input_port_name, &p)); - EXPECT_EQ(p, precision_e::FP32); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_precision_incorrectName) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const char *name = "model"; - precision_e p; - EXPECT_EQ(IEStatusCode::NOT_FOUND, ie_network_get_input_precision(network, name, &p)); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_input_precision) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const precision_e p = precision_e::FP16; - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, p)); - precision_e p2; - IE_EXPECT_OK(ie_network_get_input_precision(network, input_port_name, &p2)); - EXPECT_EQ(p, p2); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_layout) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - layout_e l; - IE_EXPECT_OK(ie_network_get_input_layout(network, input_port_name, &l)); - EXPECT_EQ(l, layout_e::NCHW); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_input_layout) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - 
ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const layout_e l = layout_e ::NHWC; - IE_EXPECT_OK(ie_network_set_input_layout(network, input_port_name, l)); - layout_e l2; - IE_EXPECT_OK(ie_network_get_input_layout(network, input_port_name, &l2)); - EXPECT_EQ(l, l2); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_dims) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - dimensions_t dims_res; - IE_EXPECT_OK(ie_network_get_input_dims(network, input_port_name, &dims_res)); - EXPECT_EQ(dims_res.dims[0], 1); - EXPECT_EQ(dims_res.dims[1], 3); - EXPECT_EQ(dims_res.dims[2], 227); - EXPECT_EQ(dims_res.dims[3], 227); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_resize_algorithm_resize_algo) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - resize_alg_e resizeAlg; - IE_EXPECT_OK(ie_network_get_input_resize_algorithm(network, input_port_name, &resizeAlg)); - EXPECT_EQ(resizeAlg, resize_alg_e::NO_RESIZE); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_input_resize_algorithm_resize_algo) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - 
EXPECT_NE(nullptr, network); - - resize_alg_e resizeAlg = resize_alg_e::RESIZE_BILINEAR; - IE_EXPECT_OK(ie_network_set_input_resize_algorithm(network, input_port_name, resizeAlg)); - - resize_alg_e resizeAlg2; - IE_EXPECT_OK(ie_network_get_input_resize_algorithm(network, input_port_name, &resizeAlg2)); - EXPECT_EQ(resizeAlg, resizeAlg2); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_color_format) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - colorformat_e color; - IE_EXPECT_OK(ie_network_get_color_format(network, input_port_name, &color)); - EXPECT_EQ(color, colorformat_e::RAW); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_color_format) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const colorformat_e color = colorformat_e::BGR; - IE_EXPECT_OK(ie_network_set_color_format(network, input_port_name, color)); - - colorformat_e color2; - IE_EXPECT_OK(ie_network_get_color_format(network, input_port_name, &color2)); - EXPECT_EQ(color2, colorformat_e::BGR); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_input_shapes) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - input_shapes_t shapes; - IE_EXPECT_OK(ie_network_get_input_shapes(network, &shapes)); - 
EXPECT_EQ(shapes.shape_num, 1); - - ie_network_input_shapes_free(&shapes); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_reshape) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - input_shapes_t inputShapes; - IE_EXPECT_OK(ie_network_get_input_shapes(network, &inputShapes)); - - inputShapes.shapes[0].shape.dims[0] = 2; - - IE_EXPECT_OK(ie_network_reshape(network, inputShapes)); - - input_shapes_t inputShapes2; - IE_EXPECT_OK(ie_network_get_input_shapes(network, &inputShapes2)); - EXPECT_EQ(inputShapes2.shapes[0].shape.dims[0], 2); - - ie_network_input_shapes_free(&inputShapes2); - ie_network_input_shapes_free(&inputShapes); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_outputs_number) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - size_t size; - IE_EXPECT_OK(ie_network_get_outputs_number(network, &size)); - EXPECT_EQ(size, 1); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_output_name) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - char *output_name = nullptr; - IE_EXPECT_OK(ie_network_get_output_name(network, 0, &output_name)); - EXPECT_STREQ(output_name, output_port_name); - - ie_network_name_free(&output_name); - ie_network_free(&network); - 
ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_output_name_incorrectNumber) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - char *output_name = nullptr; - EXPECT_EQ(IEStatusCode::OUT_OF_BOUNDS, ie_network_get_output_name(network, 3, &output_name)); - - ie_network_name_free(&output_name); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_output_precision) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - precision_e p; - IE_EXPECT_OK(ie_network_get_output_precision(network, output_port_name, &p)); - EXPECT_EQ(p, precision_e::FP32); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_output_precision) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - precision_e p = precision_e::FP16; - IE_EXPECT_OK(ie_network_set_output_precision(network, output_port_name, p)); - - precision_e precision_res; - IE_EXPECT_OK(ie_network_get_output_precision(network, output_port_name, &precision_res)); - EXPECT_EQ(p, precision_res); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_output_layout) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, 
xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - layout_e l; - IE_EXPECT_OK(ie_network_get_output_layout(network, output_port_name, &l)); - EXPECT_EQ(l, layout_e::NCHW); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_set_output_layout) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - layout_e l = layout_e::NCHW; - IE_EXPECT_OK(ie_network_set_output_layout(network, output_port_name, l)); - layout_e l_res; - IE_EXPECT_OK(ie_network_get_output_layout(network, output_port_name, &l_res)); - EXPECT_EQ(l, l_res); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_network_get_output_dims) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - dimensions_t dims_res; - IE_EXPECT_OK(ie_network_get_output_dims(network, output_port_name, &dims_res)); - EXPECT_EQ(dims_res.dims[0], 1); - EXPECT_EQ(dims_res.dims[1], 4); - - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_exec_network_create_infer_request) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network,device_name, &config, 
&exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_exec_network_get_config) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network,device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_param_t param; - param.params = nullptr; - IE_EXPECT_OK(ie_exec_network_get_config(exe_network, "CPU_THREADS_NUM", &param)); - - ie_param_free(&param); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_exec_network_get_metric) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_param_t param; - param.params = nullptr; - IE_EXPECT_OK(ie_exec_network_get_metric(exe_network, "SUPPORTED_CONFIG_KEYS", &param)); - - ie_param_free(&param); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - 
ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_infer_request_get_blob) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - char *input_name = nullptr; - IE_EXPECT_OK(ie_network_get_input_name(network, 0, &input_name)); - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_name, &blob)); - - ie_blob_free(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_name_free(&input_name); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_infer_request_set_blob) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - dimensions_t dim_t; - precision_e p = precision_e::U8; - layout_e l = layout_e::NCHW; - IE_EXPECT_OK(ie_network_get_input_dims(network, input_port_name, &dim_t)); - IE_EXPECT_OK(ie_network_set_input_layout(network, input_port_name, l)); - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, p)); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - 
IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = p; - tensor.layout = l; - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - - IE_EXPECT_OK(ie_infer_request_set_blob(infer_request, input_port_name, blob)); - - ie_blob_deallocate(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_infer_request_infer) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, precision_e::U8)); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_port_name, &blob)); - - dimensions_t dims; - IE_EXPECT_OK(ie_blob_get_dims(blob, &dims)); - const size_t blob_elems_count = dims.dims[0] * dims.dims[1] * dims.dims[2] * dims.dims[3]; - - ie_blob_buffer_t buffer; - IE_EXPECT_OK(ie_blob_get_buffer(blob, &buffer)); - auto* blob_internal_buffer = (uint8_t*)buffer.buffer; - 
std::fill(blob_internal_buffer, blob_internal_buffer + blob_elems_count, uint8_t{0}); - - IE_EXPECT_OK(ie_infer_request_infer(infer_request)); - - ie_blob_t *output_blob = nullptr; - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, output_port_name, &output_blob)); - dimensions_t dim_res; - IE_EXPECT_OK(ie_blob_get_dims(output_blob, &dim_res)); - EXPECT_EQ(dim_res.ranks, 4); - EXPECT_EQ(dim_res.dims[1], 4); - - ie_blob_buffer_t out_buffer; - IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &out_buffer)); - float *output_data = (float *)(out_buffer.buffer); - EXPECT_NEAR(output_data[9], 0.f, 1.e-5); - - ie_blob_free(&output_blob); - ie_blob_free(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_infer_request_infer_async_wait_finish) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, precision_e::U8)); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_port_name, &blob)); - - dimensions_t dims; - IE_EXPECT_OK(ie_blob_get_dims(blob, &dims)); - const size_t blob_elems_count = dims.dims[0] * dims.dims[1] * dims.dims[2] * dims.dims[3]; - - ie_blob_buffer_t buffer; - 
IE_EXPECT_OK(ie_blob_get_buffer(blob, &buffer)); - auto* blob_internal_buffer = (uint8_t*)buffer.buffer; - std::fill(blob_internal_buffer, blob_internal_buffer + blob_elems_count, uint8_t{0}); - - IE_EXPECT_OK(ie_infer_request_infer_async(infer_request)); - - ie_blob_t *output_blob = nullptr; - if (!HasFatalFailure()) { - IE_EXPECT_OK(ie_infer_request_wait(infer_request, -1)); - - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, output_port_name, &output_blob)); - EXPECT_NE(nullptr, output_blob); - - ie_blob_buffer_t out_buffer; - IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &out_buffer)); - float *output_data = (float *)(out_buffer.buffer); - EXPECT_NEAR(output_data[9], 0.f, 1.e-5); - } - - ie_blob_free(&output_blob); - ie_blob_free(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_infer_request_infer_async_wait_time) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, precision_e::U8)); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_port_name, &blob)); - EXPECT_NE(nullptr, blob); - - dimensions_t dims; - IE_EXPECT_OK(ie_blob_get_dims(blob, &dims)); - const size_t blob_elems_count = 
dims.dims[0] * dims.dims[1] * dims.dims[2] * dims.dims[3]; - - ie_blob_buffer_t buffer; - IE_EXPECT_OK(ie_blob_get_buffer(blob, &buffer)); - auto* blob_internal_buffer = (uint8_t*)buffer.buffer; - std::fill(blob_internal_buffer, blob_internal_buffer + blob_elems_count, uint8_t{0}); - - IE_EXPECT_OK(ie_infer_request_infer_async(infer_request)); - - ie_blob_t *output_blob = nullptr; - if (!HasFatalFailure()) { - auto waitStatus = ie_infer_request_wait(infer_request, 10); - EXPECT_TRUE((IEStatusCode::OK == waitStatus) || (IEStatusCode::RESULT_NOT_READY == waitStatus)); - if (IEStatusCode::RESULT_NOT_READY == waitStatus) { - IE_EXPECT_OK(ie_infer_request_wait(infer_request, -1)); - } - - IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, output_port_name, &output_blob)); - - ie_blob_buffer_t out_buffer; - IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &out_buffer)); - float *output_data = (float *)(out_buffer.buffer); - EXPECT_NEAR(output_data[9], 0.f, 1.e-5); - } - - ie_blob_free(&output_blob); - ie_blob_free(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -TEST_P(ie_c_api_test, ie_blob_make_memory) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_make_memory_from_preallocated) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - uint8_t array[1][3][4][4]= {{{{0}}}}; - - size_t size = 48; - ie_blob_t *blob = nullptr; - 
IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor, &array, size, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_free(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_make_memory_with_roi) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *input_blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &input_blob)); - EXPECT_NE(nullptr, input_blob); - - roi_t roi = {0, 0, 0, 1, 1}; - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory_with_roi(input_blob, &roi, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_deallocate(&blob); - ie_blob_free(&input_blob); -} - -TEST_P(ie_c_api_test, ie_blob_deallocate) { - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_get_dims) { - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - dimensions_t dim_res; - IE_EXPECT_OK(ie_blob_get_dims(blob, &dim_res)); - EXPECT_EQ(dim_t.ranks, dim_res.ranks); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_get_layout) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - 
tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - layout_e l; - IE_EXPECT_OK(ie_blob_get_layout(blob, &l)); - EXPECT_EQ(tensor.layout, l); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_get_precision) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - precision_e p; - IEStatusCode status3 = ie_blob_get_precision(blob, &p); - EXPECT_EQ(status3, IEStatusCode::OK); - EXPECT_EQ(tensor.precision, p); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_size) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::I16; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - int size_res; - IE_EXPECT_OK(ie_blob_size(blob, &size_res)); - EXPECT_EQ(size_res, 48); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_byte_size) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::I16; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - int size_res; - IE_EXPECT_OK(ie_blob_byte_size(blob, &size_res)); - EXPECT_EQ(size_res, 96); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_get_buffer) { - - 
dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_buffer_t blob_buffer; - IE_EXPECT_OK(ie_blob_get_buffer(blob, &blob_buffer)); - EXPECT_NE(nullptr, blob_buffer.buffer); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_blob_get_cbuffer) { - - dimensions_t dim_t; - dim_t.ranks = 4 ; - dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4; - tensor_desc tensor; - tensor.dims = dim_t ; - tensor.precision = precision_e::U8; - tensor.layout = layout_e::NCHW; - - ie_blob_t *blob = nullptr; - IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob)); - EXPECT_NE(nullptr, blob); - - ie_blob_buffer_t blob_cbuffer; - IE_EXPECT_OK(ie_blob_get_buffer(blob, &blob_cbuffer)); - EXPECT_NE(nullptr, blob_cbuffer.cbuffer); - - ie_blob_deallocate(&blob); -} - -TEST_P(ie_c_api_test, ie_infer_set_completion_callback) { - ie_core_t *core = nullptr; - IE_ASSERT_OK(ie_core_create("", &core)); - ASSERT_NE(nullptr, core); - - ie_network_t *network = nullptr; - IE_EXPECT_OK(ie_core_read_network(core, xml_file_name.c_str(), bin_file_name.c_str(), &network)); - EXPECT_NE(nullptr, network); - - IE_EXPECT_OK(ie_network_set_input_precision(network, input_port_name, precision_e::U8)); - - const char *device_name = "CPU"; - ie_config_t config = {nullptr, nullptr, nullptr}; - ie_executable_network_t *exe_network = nullptr; - IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network)); - EXPECT_NE(nullptr, exe_network); - - ie_infer_request_t *infer_request = nullptr; - IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request)); - EXPECT_NE(nullptr, infer_request); - - ie_blob_t *blob = nullptr; - 
IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_port_name, &blob)); - - dimensions_t dims; - IE_EXPECT_OK(ie_blob_get_dims(blob, &dims)); - const size_t blob_elems_count = dims.dims[0] * dims.dims[1] * dims.dims[2] * dims.dims[3]; - - ie_blob_buffer_t buffer; - IE_EXPECT_OK(ie_blob_get_buffer(blob, &buffer)); - auto* blob_internal_buffer = (uint8_t*)buffer.buffer; - std::fill(blob_internal_buffer, blob_internal_buffer + blob_elems_count, uint8_t{0}); - - ie_complete_call_back_t callback; - callback.completeCallBackFunc = completion_callback; - callback.args = infer_request; - - IE_EXPECT_OK(ie_infer_set_completion_callback(infer_request, &callback)); - - IE_EXPECT_OK(ie_infer_request_infer_async(infer_request)); - - if (!HasFatalFailure()) { - std::unique_lock<std::mutex> lock(m); - condVar.wait(lock, []{ return ready; }); - } - - ie_blob_free(&blob); - ie_infer_request_free(&infer_request); - ie_exec_network_free(&exe_network); - ie_network_free(&network); - ie_core_free(&core); -} - -OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file