From ae7219943cd728ab60037b07aad0046a210483ab Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Wed, 10 Nov 2021 16:27:23 +0300
Subject: [PATCH] Updated common migration pipeline (#8176)

* Updated common migration pipeline

* Fixed merge issue

* Added new model and extended example

* Fixed typo

* Added v10-v11 comparison
---
 .../docs/common_inference_pipeline.md         | 108 +++++++++++++++++-
 .../docs/graph_construction.md                |  12 ++
 docs/migration_ov_2_0/docs/intro.md           |   1 +
 docs/snippets/ie_common.cpp                   |  36 +++++-
 docs/snippets/ngraph.cpp                      |  48 ++++++++
 docs/snippets/ov_common.cpp                   |  75 ++++++++++--
 docs/snippets/ov_graph.cpp                    |  47 ++++++++
 ngraph/core/include/openvino/core/version.hpp |   1 -
 8 files changed, 313 insertions(+), 15 deletions(-)
 create mode 100644 docs/migration_ov_2_0/docs/graph_construction.md
 create mode 100644 docs/snippets/ngraph.cpp
 create mode 100644 docs/snippets/ov_graph.cpp

diff --git a/docs/migration_ov_2_0/docs/common_inference_pipeline.md b/docs/migration_ov_2_0/docs/common_inference_pipeline.md
index af2dbf25304116..2b842e2e9e125a 100644
--- a/docs/migration_ov_2_0/docs/common_inference_pipeline.md
+++ b/docs/migration_ov_2_0/docs/common_inference_pipeline.md
@@ -32,6 +32,8 @@ OpenVINO™ 2.0 API:
 
 @snippet snippets/ov_common.cpp ov_api_2_0:read_model
 
+The model read from the file has the same structure as the model in the [OpenVINO™ Graph Construction](@ref ov_graph_construction) guide.
+
 ### 2.1 Configure Input and Output of the Model
 
 Inference Engine API:
@@ -52,4 +54,108 @@ OpenVINO™ 2.0 API:
 
 @snippet snippets/ov_common.cpp ov_api_2_0:compile_model
 
-## 5. TBD
+## 4. Create an Inference Request
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:create_infer_request
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:create_infer_request
+
+## 5. Prepare Input
+
+### IR v10
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_v10
+
+### IR v11
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
+### ONNX
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
+### From Function
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
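+The Inference Engine snippet is the same in all four cases; they differ only in the element type that the OpenVINO™ 2.0 tensor reports. A minimal sketch of the difference (mirroring the snippets above and assuming the two-input `i64` model from the [OpenVINO™ Graph Construction](@ref ov_graph_construction) guide):
+
+```cpp
+// IR v10: precisions were converted when the IR was generated, so i64 became i32
+auto v10_data = infer_request.get_input_tensor(0).data<int32_t>();
+// IR v11, ONNX or ov::Function: the original i64 element type is preserved
+auto v11_data = infer_request.get_input_tensor(0).data<int64_t>();
+```
+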
+## 6. Start Inference
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:inference
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:inference
+
+
+## 7. Process the Inference Results
+
+### IR v10
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_v10
+
+### IR v11
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
+### ONNX
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
+### From Function
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
diff --git a/docs/migration_ov_2_0/docs/graph_construction.md b/docs/migration_ov_2_0/docs/graph_construction.md
new file mode 100644
index 00000000000000..f3fe2c7a1dce34
--- /dev/null
+++ b/docs/migration_ov_2_0/docs/graph_construction.md
@@ -0,0 +1,12 @@
+# OpenVINO™ Graph Construction {#ov_graph_construction}
+
+OpenVINO™ 2.0 includes the nGraph engine as a common part, and the `ngraph` namespace was changed to `ov`.
+The code snippets below show how application code should be changed to migrate to OpenVINO™ 2.0.
+
+nGraph API:
+
+@snippet snippets/ngraph.cpp ngraph:graph
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_graph.cpp ov:graph
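+
+The constructed function can be passed straight to the common inference pipeline. A minimal sketch (assuming the runtime API from the [OpenVINO™ Common Inference pipeline](@ref ov_inference_pipeline) guide and the `f` function built in the snippet above):
+
+```cpp
+ov::runtime::Core core;
+ov::runtime::ExecutableNetwork exec_network = core.compile_model(f, "CPU");
+ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
+```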
diff --git a/docs/migration_ov_2_0/docs/intro.md b/docs/migration_ov_2_0/docs/intro.md
index 5d89b7aff3d809..5afc4a11304ffb 100644
--- a/docs/migration_ov_2_0/docs/intro.md
+++ b/docs/migration_ov_2_0/docs/intro.md
@@ -9,4 +9,5 @@ The list with differences between APIs below:
 - Namespaces were aligned between components.
 
 Please look at next transition guides to understand how transit own application to OpenVINO™ API 2.0.
+- [OpenVINO™ Graph Construction](@ref ov_graph_construction)
 - [OpenVINO™ Common Inference pipeline](@ref ov_inference_pipeline)
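+
+For example, most graph construction code migrates by renaming the namespace. A short sketch based on the snippets in this patch:
+
+```cpp
+// nGraph API
+auto p1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 2, 2});
+// OpenVINO™ 2.0 API
+auto p2 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
+```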
diff --git a/docs/snippets/ie_common.cpp b/docs/snippets/ie_common.cpp
index 6a558129243082..25cf78a0c43582 100644
--- a/docs/snippets/ie_common.cpp
+++ b/docs/snippets/ie_common.cpp
@@ -27,8 +27,29 @@ int main() {
     //! [ie:create_infer_request]
 
     //! [ie:get_input_tensor]
-    InferenceEngine::Blob::Ptr input_blob = infer_request.GetBlob(inputs.begin()->first);
-    // fill input blob
+    InferenceEngine::Blob::Ptr input_blob1 = infer_request.GetBlob(inputs.begin()->first);
+    // fill first blob
+    InferenceEngine::SizeVector dims1 = input_blob1->getTensorDesc().getDims();
+    InferenceEngine::MemoryBlob::Ptr minput1 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob1);
+    if (minput1) {
+        // locked memory holder should be alive all time while access to its
+        // buffer happens
+        auto minputHolder = minput1->wmap();
+        // Original I64 precision was converted to I32
+        auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
+        // Fill data ...
+    }
+    InferenceEngine::Blob::Ptr input_blob2 = infer_request.GetBlob("data2");
+    // fill second blob
+    InferenceEngine::MemoryBlob::Ptr minput2 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob2);
+    if (minput2) {
+        // locked memory holder should be alive all time while access to its
+        // buffer happens
+        auto minputHolder = minput2->wmap();
+        // Original I64 precision was converted to I32
+        auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
+        // Fill data ...
+    }
     //! [ie:get_input_tensor]
 
     //! [ie:inference]
@@ -37,7 +58,16 @@
 
     //! [ie:get_output_tensor]
     InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
-    // process output data
+    InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob);
+    if (moutput) {
+        // locked memory holder should be alive all time while access to its
+        // buffer happens
+        auto moutputHolder = moutput->rmap();
+        // Original I64 precision was converted to I32
+        auto data =
+            moutputHolder.as<const InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
+        // process output data
+    }
     //! [ie:get_output_tensor]
     return 0;
 }
diff --git a/docs/snippets/ngraph.cpp b/docs/snippets/ngraph.cpp
new file mode 100644
index 00000000000000..931140f99d9519
--- /dev/null
+++ b/docs/snippets/ngraph.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset8.hpp>
+
+int main() {
+    //! [ngraph:graph]
+    //  _____________    _____________
+    // |  Parameter  |  |  Parameter  |
+    // |    data1    |  |    data2    |
+    // |_____________|  |_____________|
+    //         |                |
+    //  data1_t|                |data2_t
+    //          \              /
+    //           \            /
+    //            \          /
+    //        _____\________/_____
+    //       |       Concat       |
+    //       |       concat       |
+    //       |____________________|
+    //                 |
+    //                 |concat_t
+    //                 |
+    //          _______|________
+    //         |     Result     |
+    //         |     result     |
+    //         |________________|
+    auto data1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 2, 2});
+    data1->set_friendly_name("data1");       // operation name
+    data1->output(0).set_names({"data1_t"}); // tensor names
+    auto data2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 2, 2, 2});
+    data2->set_friendly_name("data2");       // operation name
+    data2->output(0).set_names({"data2_t"}); // tensor names
+
+    auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{data1, data2}, 1);
+    concat->set_friendly_name("concat");       // operation name
+    concat->output(0).set_names({"concat_t"}); // tensor name
+
+    auto result = std::make_shared<ngraph::opset8::Result>(concat);
+    result->set_friendly_name("result"); // operation name
+
+    auto f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
+                                                ngraph::ParameterVector{data1, data2},
+                                                "function_name");
+    //! [ngraph:graph]
+    return 0;
+}
diff --git a/docs/snippets/ov_common.cpp b/docs/snippets/ov_common.cpp
index 7cb9e344f7cbaa..1392e3a509850f 100644
--- a/docs/snippets/ov_common.cpp
+++ b/docs/snippets/ov_common.cpp
@@ -1,9 +1,61 @@
 // Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ie_core.hpp>
+#include <openvino/core/function.hpp>
 #include <openvino/runtime/core.hpp>
 
+void inputs_v10(ov::runtime::InferRequest& infer_request) {
+    //! [ov_api_2_0:get_input_tensor_v10]
+    // Get input tensor by index
+    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
+    // IR v10 works with converted precisions (i64 -> i32)
+    auto data1 = input_tensor1.data<int32_t>();
+    // Fill first data ...
+
+    // Get input tensor by tensor name
+    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
+    // IR v10 works with converted precisions (i64 -> i32)
+    auto data2 = input_tensor2.data<int32_t>();
+    // Fill second data ...
+    //! [ov_api_2_0:get_input_tensor_v10]
+}
+
+void inputs_aligned(ov::runtime::InferRequest& infer_request) {
+    //! [ov_api_2_0:get_input_tensor_aligned]
+    // Get input tensor by index
+    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
+    // Element types, names and layouts are aligned with the framework
+    auto data1 = input_tensor1.data<int64_t>();
+    // Fill first data ...
+
+    // Get input tensor by tensor name
+    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
+    // Element types, names and layouts are aligned with the framework
+    auto data2 = input_tensor2.data<int64_t>();
+    // Fill second data ...
+    //! [ov_api_2_0:get_input_tensor_aligned]
+}
+
+void outputs_v10(ov::runtime::InferRequest& infer_request) {
+    //! [ov_api_2_0:get_output_tensor_v10]
+    // The model has only one output
+    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
+    // IR v10 works with converted precisions (i64 -> i32)
+    auto out_data = output_tensor.data<int32_t>();
+    // process output data
+    //! [ov_api_2_0:get_output_tensor_v10]
+}
+
+void outputs_aligned(ov::runtime::InferRequest& infer_request) {
+    //! [ov_api_2_0:get_output_tensor_aligned]
+    // The model has only one output
+    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
+    // Element types, names and layouts are aligned with the framework
+    auto out_data = output_tensor.data<int64_t>();
+    // process output data
+    //! [ov_api_2_0:get_output_tensor_aligned]
+}
+
 int main() {
     //! [ov_api_2_0:create_core]
     ov::runtime::Core core;
@@ -14,21 +66,24 @@
     //! [ov_api_2_0:read_model]
 
     //! [ov_api_2_0:get_inputs_outputs]
-    ov::ParameterVector inputs = network->get_parameters();
-    ov::ResultVector outputs = network->get_results();
+    std::vector<ov::Output<ov::Node>> inputs = network->inputs();
+    std::vector<ov::Output<ov::Node>> outputs = network->outputs();
     //! [ov_api_2_0:get_inputs_outputs]
 
     //! [ov_api_2_0:compile_model]
     ov::runtime::ExecutableNetwork exec_network = core.compile_model(network, "CPU");
     //! [ov_api_2_0:compile_model]
 
+    //! [ov_api_2_0:create_infer_request]
     ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
-    //
-    // InferenceEngine::Blob::Ptr input_blob = infer_request.GetBlob(inputs.begin()->first);
-    // // fill input blob
-    // infer_request.Infer();
-    //
-    // InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
-    // process output data
+    //! [ov_api_2_0:create_infer_request]
+
+    inputs_aligned(infer_request);
+    //! [ov_api_2_0:inference]
+    infer_request.infer();
+    //! [ov_api_2_0:inference]
+
+    outputs_aligned(infer_request);
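+
+    // Note: an IR v10 model would use the v10 helpers defined above instead
+    // (inputs_v10(infer_request), outputs_v10(infer_request)), because its
+    // tensors report the converted (i32) element types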
+
     return 0;
 }
diff --git a/docs/snippets/ov_graph.cpp b/docs/snippets/ov_graph.cpp
new file mode 100644
index 00000000000000..b47abce44514cb
--- /dev/null
+++ b/docs/snippets/ov_graph.cpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <openvino/core/function.hpp>
+#include <openvino/opsets/opset8.hpp>
+
+int main() {
+    //! [ov:graph]
+    //  _____________    _____________
+    // |  Parameter  |  |  Parameter  |
+    // |    data1    |  |    data2    |
+    // |_____________|  |_____________|
+    //         |                |
+    //  data1_t|                |data2_t
+    //          \              /
+    //           \            /
+    //            \          /
+    //        _____\________/_____
+    //       |       Concat       |
+    //       |       concat       |
+    //       |____________________|
+    //                 |
+    //                 |concat_t
+    //                 |
+    //          _______|________
+    //         |     Result     |
+    //         |     result     |
+    //         |________________|
+    auto data1 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
+    data1->set_friendly_name("data1");       // operation name
+    data1->output(0).set_names({"data1_t"}); // tensor names
+    auto data2 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 2, 2, 2});
+    data2->set_friendly_name("data2");       // operation name
+    data2->output(0).set_names({"data2_t"}); // tensor names
+
+    auto concat = std::make_shared<ov::opset8::Concat>(ov::OutputVector{data1, data2}, 1);
+    concat->set_friendly_name("concat");       // operation name
+    concat->output(0).set_names({"concat_t"}); // tensor name
+
+    auto result = std::make_shared<ov::opset8::Result>(concat);
+    result->set_friendly_name("result"); // operation name
+
+    auto f =
+        std::make_shared<ov::Function>(ov::ResultVector{result}, ov::ParameterVector{data1, data2}, "function_name");
+    //! [ov:graph]
+    return 0;
+}
diff --git a/ngraph/core/include/openvino/core/version.hpp b/ngraph/core/include/openvino/core/version.hpp
index 97f82366f9705c..368398ba129352 100644
--- a/ngraph/core/include/openvino/core/version.hpp
+++ b/ngraph/core/include/openvino/core/version.hpp
@@ -1,7 +1,6 @@
 // Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-
 #pragma once
 
 #include "openvino/core/core_visibility.hpp"