Merge pull request #4 from openvinotoolkit/master
Update forked branch
evolosen authored Feb 5, 2021
2 parents 0cf84d4 + 65e2b4a commit 0df8350
Showing 791 changed files with 36,667 additions and 9,023 deletions.
4 changes: 2 additions & 2 deletions .ci/azure/mac.yml
@@ -105,11 +105,11 @@ jobs:
workingDirectory: $(BUILD_DIR)
displayName: 'Install'

- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid --gtest_output=xml:TEST-NGraphUT.xml
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false

- script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
- script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
displayName: 'IE UT old'
continueOnError: false

3 changes: 2 additions & 1 deletion CMakeLists.txt
@@ -1,4 +1,4 @@
# Copyright (C) 2018-2020 Intel Corporation
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

@@ -142,6 +142,7 @@ function(openvino_developer_export_targets)
"A list of OpenVINO exported components" FORCE)
endfunction()

add_subdirectory(thirdparty)
add_subdirectory(openvino)
build_ngraph()
add_subdirectory(inference-engine)
27 changes: 27 additions & 0 deletions cmake/toolchains/ia32.linux.toolchain.cmake
@@ -0,0 +1,27 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

set(CMAKE_CXX_FLAGS_INIT "-m32")
set(CMAKE_C_FLAGS_INIT "-m32")

set(CMAKE_SHARED_LINKER_FLAGS_INIT "-m32")
set(CMAKE_MODULE_LINKER_FLAGS_INIT "-m32")
set(CMAKE_EXE_LINKER_FLAGS_INIT "-m32")

# Hints for OpenVINO

macro(_set_if_not_defined var val)
if(NOT DEFINED ${var})
set(${var} ${val} CACHE BOOL "" FORCE)
endif()
endmacro()

# need libusb 32-bits version
_set_if_not_defined(ENABLE_VPU OFF)

# _mm_loadl_epi64 is not defined
_set_if_not_defined(ENABLE_SSE42 OFF)

# fix conversion from uint64_t / int64_t to size_t
_set_if_not_defined(NGRAPH_ONNX_IMPORT_ENABLE OFF)
39 changes: 39 additions & 0 deletions cmake/toolchains/mt.runtime.win32.toolchain.cmake
@@ -0,0 +1,39 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

#
# Flags for 3rd party projects
#

set(use_static_runtime ON)

if(use_static_runtime)
foreach(lang C CXX)
foreach(build_type "" "_DEBUG" "_MINSIZEREL" "_RELEASE" "_RELWITHDEBINFO")
set(flag_var "CMAKE_${lang}_FLAGS${build_type}")
string(REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endforeach()
endforeach()
endif()

function(onecoreuap_set_runtime var)
set(${var} ${use_static_runtime} CACHE BOOL "" FORCE)
endfunction()

# ONNX
onecoreuap_set_runtime(ONNX_USE_MSVC_STATIC_RUNTIME)
# pugixml
onecoreuap_set_runtime(STATIC_CRT)
# protobuf
onecoreuap_set_runtime(protobuf_MSVC_STATIC_RUNTIME)
# clDNN
onecoreuap_set_runtime(CLDNN__COMPILE_LINK_USE_STATIC_RUNTIME)
# google-test
if(use_static_runtime)
set(gtest_force_shared_crt OFF CACHE BOOL "" FORCE)
else()
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
endif()

unset(use_static_runtime)
30 changes: 3 additions & 27 deletions cmake/toolchains/onecoreuap.toolchain.cmake
@@ -1,4 +1,4 @@
# Copyright (C) 2018-2020 Intel Corporation
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

@@ -68,31 +68,7 @@ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${linker_flags}")
unset(linker_flags)

#
# Flags for 3rd party projects
# Static runtime to overcome apiValidator tool restrictions
#

set(use_static_runtime ON)

if(use_static_runtime)
foreach(lang C CXX)
foreach(build_type "" "_DEBUG" "_MINSIZEREL" "_RELEASE" "_RELWITHDEBINFO")
set(flag_var "CMAKE_${lang}_FLAGS${build_type}")
string(REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endforeach()
endforeach()
endif()

function(onecoreuap_set_runtime var)
set(${var} ${use_static_runtime} CACHE BOOL "" FORCE)
endfunction()

# ONNX
onecoreuap_set_runtime(ONNX_USE_MSVC_STATIC_RUNTIME)
# pugixml
onecoreuap_set_runtime(STATIC_CRT)
# protobuf
onecoreuap_set_runtime(protobuf_MSVC_STATIC_RUNTIME)
# clDNN
onecoreuap_set_runtime(CLDNN__COMPILE_LINK_USE_STATIC_RUNTIME)

unset(use_static_runtime)
include("${CMAKE_CURRENT_LIST_DIR}/mt.runtime.win32.toolchain.cmake")
1 change: 0 additions & 1 deletion docs/HOWTO/Custom_Layers_Guide.md
@@ -369,7 +369,6 @@ python3 mri_reconstruction_demo.py \
- [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md)
- [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
- [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index)
- [Inference Engine Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic)
- For IoT Libraries and Code Samples see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit).

## Converting Models:
2 changes: 1 addition & 1 deletion docs/IE_DG/API_Changes.md
@@ -156,7 +156,7 @@ The sections below contain detailed list of changes made to the Inference Engine

### Deprecated API

**Myriad Plugin API:**
**MYRIAD Plugin API:**

* VPU_CONFIG_KEY(IGNORE_IR_STATISTIC)

4 changes: 2 additions & 2 deletions docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
@@ -24,11 +24,11 @@ The `ngraph::onnx_import::Node` class represents a node in ONNX model. It provid
New operator registration must happen before the ONNX model is read, for example, if an ONNX model uses the 'CustomRelu' operator, `register_operator("CustomRelu", ...)` must be called before InferenceEngine::Core::ReadNetwork.
Re-registering ONNX operators within the same process is supported. During registration of the existing operator, a warning is printed.

The example below demonstrates an examplary model that requires previously created 'CustomRelu' operator:
The example below demonstrates an exemplary model that requires previously created 'CustomRelu' operator:
@snippet onnx_custom_op/onnx_custom_op.cpp onnx_custom_op:model


For a reference on how to create a graph with nGraph operations, visit [nGraph tutorial](../nGraphTutorial.md).
For a reference on how to create a graph with nGraph operations, visit [Custom nGraph Operations](AddingNGraphOps.md).
For a complete list of predefined nGraph operators, visit [available operations sets](../../ops/opset.md).

If operator is no longer needed, it can be unregistered by calling `unregister_operator`. The function takes three arguments `op_type`, `version`, and `domain`.
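The following is a rough sketch of the registration flow described above, assuming the 2021-era `ngraph::onnx_import` C++ API; the header path, the `com.example` domain, and the model file name are illustrative placeholders, not values from this commit:

```cpp
#include <memory>

#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include <onnx_import/onnx_utils.hpp>  // assumed header exposing register_operator/unregister_operator

int main() {
    using namespace ngraph;

    // Register "CustomRelu" (version 1, hypothetical domain "com.example")
    // before any ONNX model that uses it is read.
    onnx_import::register_operator(
        "CustomRelu", 1, "com.example",
        [](const onnx_import::Node& node) -> OutputVector {
            OutputVector inputs = node.get_ng_inputs();
            // Express the custom operator with existing nGraph operations.
            return {std::make_shared<op::v0::Relu>(inputs.at(0))};
        });

    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model_with_custom_relu.onnx");  // read only after registration

    // Drop the operator once it is no longer needed.
    onnx_import::unregister_operator("CustomRelu", 1, "com.example");
    return 0;
}
```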
3 changes: 2 additions & 1 deletion docs/IE_DG/InferenceEngine_QueryAPI.md
@@ -32,7 +32,8 @@ MYRIAD.1.4-ma2480
FPGA.0
FPGA.1
CPU
GPU
GPU.0
GPU.1
...
```
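The device names above come from the Query API. As a minimal sketch (assuming the 2021-era Inference Engine C++ API), the list can be reproduced like this:

```cpp
#include <inference_engine.hpp>
#include <iostream>

int main() {
    InferenceEngine::Core core;
    // Each entry corresponds to one line in the list above;
    // multiple GPUs are reported as GPU.0, GPU.1, and so on.
    for (const std::string& device : core.GetAvailableDevices()) {
        std::cout << device << std::endl;
    }
    return 0;
}
```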

5 changes: 1 addition & 4 deletions docs/IE_DG/Introduction.md
@@ -122,7 +122,4 @@ The open source version is available in the [OpenVINO™ toolkit GitHub reposito
- [Intel® Deep Learning Deployment Toolkit Web Page](https://software.intel.com/en-us/computer-vision-sdk)


[scheme]: img/workflow_steps.png

#### Optimization Notice
<sup>For complete information about compiler optimizations, see our [Optimization Notice](https://software.intel.com/en-us/articles/optimization-notice#opt-en).</sup>
[scheme]: img/workflow_steps.png
3 changes: 0 additions & 3 deletions docs/IE_DG/Optimization_notice.md

This file was deleted.

4 changes: 2 additions & 2 deletions docs/IE_DG/Samples_Overview.md
@@ -43,7 +43,7 @@ To run the sample applications, you can use images and videos from the media fil

## Samples that Support Pre-Trained Models

You can download the [pre-trained models](@ref omz_models_intel_index) using the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
To run the sample, you can use [public](@ref omz_models_public_index) or [Intel's](@ref omz_models_intel_index) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader_README).

## Build the Sample Applications

@@ -127,7 +127,7 @@ You can also build a generated solution manually. For example, if you want to bu
Microsoft Visual Studio and open the generated solution file from the `C:\Users\<user>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\Samples.sln`
directory.

### <a name="build_samples_linux"></a>Build the Sample Applications on macOS*
### <a name="build_samples_macos"></a>Build the Sample Applications on macOS*

The officially supported macOS* build environment is the following:

62 changes: 33 additions & 29 deletions docs/IE_DG/ShapeInference.md
@@ -1,6 +1,36 @@
Using Shape Inference {#openvino_docs_IE_DG_ShapeInference}
==========================================

OpenVINO™ provides the following methods for runtime model reshaping:

* **Set a new input shape** with the `InferenceEngine::CNNNetwork::reshape` method.<br>
The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.

> **NOTES**:
> - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping in most cases.
> - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.<br>
> - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.
* **Set a new batch dimension value** with the `InferenceEngine::CNNNetwork::setBatchSize` method.<br>
The meaning of a model batch may vary depending on the model design.
This method does not deduce batch placement for inputs from the model architecture.
It assumes that the batch is placed at the zero index in the shape for all inputs and uses the `InferenceEngine::CNNNetwork::reshape` method to propagate updated shapes through the model.

The method transforms the model before a new shape propagation to relax a hard-coded batch dimension in the model, if any.

Use `InferenceEngine::CNNNetwork::reshape` instead of `InferenceEngine::CNNNetwork::setBatchSize` to set new input shapes for the model in case the model has:
* Multiple inputs with different zero-index dimension meanings
* Input without a batch dimension
* 0D, 1D, or 3D shape

The `InferenceEngine::CNNNetwork::setBatchSize` method is a high-level API method that wraps the `InferenceEngine::CNNNetwork::reshape` method call and works for trivial models from the batch placement standpoint.
Use `InferenceEngine::CNNNetwork::reshape` for other models.

Using the `InferenceEngine::CNNNetwork::setBatchSize` method for models with a non-zero index batch placement or for models with inputs that do not have a batch dimension may lead to undefined behaviour.

You can change input shapes multiple times using the `InferenceEngine::CNNNetwork::reshape` and `InferenceEngine::CNNNetwork::setBatchSize` methods in any order.
If a model has a hard-coded batch dimension, use `InferenceEngine::CNNNetwork::setBatchSize` first to change the batch, then call `InferenceEngine::CNNNetwork::reshape` to update other dimensions, if needed.
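A minimal sketch of the two approaches, assuming the 2021-era Inference Engine C++ API; the IR path and the `data` input name are illustrative assumptions:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");  // illustrative IR path

    // Approach 1: set a new shape for a named input; reshape() propagates it
    // through all intermediate layers down to the outputs.
    auto shapes = network.getInputShapes();
    shapes["data"] = {4, 3, 224, 224};  // "data" is an assumed input name, NCHW with batch at index 0
    network.reshape(shapes);

    // Approach 2: for models with the batch at the zero index of every input,
    // change only the batch dimension.
    // network.setBatchSize(4);

    // Load the reshaped network to run inference with the updated shapes.
    auto executable = core.LoadNetwork(network, "CPU");
    (void)executable;
    return 0;
}
```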

Inference Engine takes three kinds of a model description as an input, which are converted into an `InferenceEngine::CNNNetwork` object:
1. [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) through `InferenceEngine::Core::ReadNetwork`
2. [ONNX model](../IE_DG/OnnxImporterTutorial.md) through `InferenceEngine::Core::ReadNetwork`
@@ -23,33 +23,7 @@ for (const auto & parameter : parameters) {

To feed input data of a shape that is different from the model input shape, reshape the model first.

OpenVINO™ provides the following methods for runtime model reshaping:

* **Set a new input shape** with the `InferenceEngine::CNNNetwork::reshape` method.<br>
The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.
You can reshape a model multiple times like in this application scheme:
```
ReadNetwork -> reshape(input_1_shape) -> LoadNetwork -> infer(input_1)
               \
                -> reshape(input_2_shape) -> LoadNetwork -> infer(input_2)
```
> **NOTES**:
> - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping.
> - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.<br>
> - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.
* **Set a new batch dimension value** with the `InferenceEngine::CNNNetwork::setBatchSize` method.<br>
The meaning of a model batch may vary depending on the model design.
The `InferenceEngine::CNNNetwork::setBatchSize` method deduces the index of a batch dimension based only on the input rank.
This method does not work for models with a non-zero index batch placement or models with inputs without a batch dimension.
The batch-setting algorithm does not involve the shape inference mechanism.
Batch of input and output shapes for all layers is set to a new batch value without layer validation.
It may cause both positive and negative side effects.
Due to the limitations described above, the current method is not recommended to use.
If you need to set a new batch size for the model, use the `CNNNetwork::reshape` method instead.

Do not use runtime reshaping methods simultaneously, especially do not call the `CNNNetwork::reshape` method after you use `InferenceEngine::CNNNetwork::setBatchSize`.
The `InferenceEngine::CNNNetwork::setBatchSize` method causes irreversible conversion of the internal model representation into the legacy model representation.
The method does not use nGraph for shape inference which leads to reduced reshape opportunities and may affect the performance of the model.
Once the input shape of `InferenceEngine::CNNNetwork` is set, call the `InferenceEngine::Core::LoadNetwork` method to get an `InferenceEngine::ExecutableNetwork` object for inference with updated shapes.

There are other approaches to reshape the model during the stage of <a href="_docs_MO_DG_prepare_model_convert_model_Converting_Model_General.html#when_to_specify_input_shapes">IR generation</a> or [nGraph::Function creation](../nGraph_DG/build_function.md).

@@ -62,8 +66,8 @@ Shape collision during shape propagation may be a sign that a new shape does not
Changing the model input shape may result in intermediate operations shape collision.

Examples of such operations:
- <a href="_docs_MO_DG_prepare_model_convert_model_IR_V10_opset1.html#Reshape">`Reshape` operation</a> with a hard-coded output shape value
- <a href="_docs_MO_DG_prepare_model_convert_model_IR_V10_opset1.html#MatMul">`MatMul` operation</a> with the `Const` second input cannot be resized by spatial dimensions due to operation semantics
- [`Reshape` operation](../ops/shape/Reshape_1.md) with a hard-coded output shape value
- [`MatMul` operation](../ops/matrix/MatMul_1.md) with the `Const` second input cannot be resized by spatial dimensions due to operation semantics

Model structure and logic should not change significantly after model reshaping.
- The Global Pooling operation is commonly used to reduce output feature map of classification models output.
1 change: 0 additions & 1 deletion docs/IE_DG/protecting_model_guide.md
Expand Up @@ -59,5 +59,4 @@ should be called with `weights` passed as an empty `Blob`.
- Inference Engine Developer Guide: [Inference Engine Developer Guide](Deep_Learning_Inference_Engine_DevGuide.md)
- For more information on Sample Applications, see the [Inference Engine Samples Overview](Samples_Overview.md)
- For information on a set of pre-trained models, see the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index)
- For information on Inference Engine Tutorials, see the [Inference Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic)
- For IoT Libraries and Code Samples see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit).