diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index e153030e8b692a..a3cba475e2ca38 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -30,6 +30,8 @@ jobs:
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
+ INSTALL_DIR: $(WORK_DIR)/install_pkg
+ SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
steps:
- script: |
@@ -52,10 +54,10 @@ jobs:
displayName: 'System info'
- script: |
- echo TargetBranch: $(System.PullRequest.TargetBranch)
- echo SourceBranch: $(Build.SourceBranch)
rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR)
rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR)
+ echo TargetBranch: $(System.PullRequest.TargetBranch)
+ echo SourceBranch: $(Build.SourceBranch)
displayName: 'Make dir'
- checkout: self
@@ -112,6 +114,10 @@ jobs:
- script: ls -alR $(REPO_DIR)/bin/
displayName: 'List files'
+ - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
+ workingDirectory: $(BUILD_DIR)
+ displayName: 'Install'
+
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml
index 30032ddd25a745..e2e03690ca614f 100644
--- a/.ci/azure/mac.yml
+++ b/.ci/azure/mac.yml
@@ -30,6 +30,8 @@ jobs:
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
+ INSTALL_DIR: $(WORK_DIR)/install_pkg
+ SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
steps:
- script: |
@@ -99,7 +101,11 @@ jobs:
- script: ls -alR $(REPO_DIR)/bin/
displayName: 'List files'
- - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid --gtest_output=xml:TEST-NGraphUT.xml
+ - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
+ workingDirectory: $(BUILD_DIR)
+ displayName: 'Install'
+
+ - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml
index 3f3e12d1b0b72e..c94153df5fbbe3 100644
--- a/.ci/azure/windows.yml
+++ b/.ci/azure/windows.yml
@@ -33,6 +33,8 @@ jobs:
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.1\opencv\bin;%PATH%
+ INSTALL_DIR: $(WORK_DIR)\install_pkg
+ SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
steps:
- script: |
@@ -79,16 +81,11 @@ jobs:
displayName: 'Install dependencies'
- script: |
- certutil -urlcache -split -f https://incredibuilddiag1wu2.blob.core.windows.net/incredibuild/IBSetupConsole_9_5_0.exe IBSetupConsole_9_5_0.exe
- call IBSetupConsole_9_5_0.exe /Install /Components=Agent,oneuse /Coordinator=11.1.0.4 /AGENT:OPENFIREWALL=ON /AGENT:AUTOSELECTPORTS=ON /ADDTOPATH=ON /AGENT:INSTALLADDINS=OFF
+ certutil -urlcache -split -f https://incredibuilddiag1wu2.blob.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat
+ call install_ib_console.bat
workingDirectory: $(WORK_DIR)
displayName: 'Install IncrediBuild'
- - script: |
- echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent
- reg add HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Xoreax\IncrediBuild\Builder /f /v LastEnabled /d 0 && echo Start IncrediBuild_Agent && net start IncrediBuild_Agent
- displayName: 'Start IncrediBuild'
-
- script: |
set PATH=$(WORK_DIR)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && cmake -GNinja -DENABLE_FASTER_BUILD=ON -DENABLE_TEMPLATE_PLUGIN=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
@@ -104,9 +101,14 @@ jobs:
- script: echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent
displayName: Stop IncrediBuild
continueOnError: true
+
- script: dir $(REPO_DIR)\bin\ /s
displayName: 'List files'
+ - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
+ workingDirectory: $(BUILD_DIR)
+ displayName: 'Install'
+
- script: |
set PATH=$(TEST_ENV_PATH)
$(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake
index a28f77099b6ba8..76324e9aa6fbb1 100644
--- a/cmake/developer_package/IEDevScriptsConfig.cmake
+++ b/cmake/developer_package/IEDevScriptsConfig.cmake
@@ -46,13 +46,7 @@ endif()
function(set_temp_directory temp_variable source_tree_dir)
if (DEFINED ENV{DL_SDK_TEMP} AND NOT $ENV{DL_SDK_TEMP} STREQUAL "")
message(STATUS "DL_SDK_TEMP environment is set : $ENV{DL_SDK_TEMP}")
-
- if (WIN32)
- string(REPLACE "\\" "\\\\" temp $ENV{DL_SDK_TEMP})
- else()
- set(temp $ENV{DL_SDK_TEMP})
- endif()
-
+ file(TO_CMAKE_PATH "$ENV{DL_SDK_TEMP}" temp)
if (ENABLE_ALTERNATIVE_TEMP)
set(ALTERNATIVE_PATH ${source_tree_dir}/temp)
endif()
diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake
index 8e3a5606ab78c5..87359245b541e9 100644
--- a/cmake/developer_package/compile_flags/os_flags.cmake
+++ b/cmake/developer_package/compile_flags/os_flags.cmake
@@ -271,6 +271,7 @@ else()
ie_add_compiler_flags(-fdiagnostics-show-option)
ie_add_compiler_flags(-Wundef)
ie_add_compiler_flags(-Wreturn-type)
+ ie_add_compiler_flags(-Wunused-variable)
# Disable noisy warnings
diff --git a/cmake/developer_package/compile_flags/sanitizer.cmake b/cmake/developer_package/compile_flags/sanitizer.cmake
index e303b203100f7a..a9b8a47c72a171 100644
--- a/cmake/developer_package/compile_flags/sanitizer.cmake
+++ b/cmake/developer_package/compile_flags/sanitizer.cmake
@@ -4,6 +4,14 @@
include(CheckCXXCompilerFlag)
+if (ENABLE_SANITIZER OR ENABLE_THREAD_SANITIZER)
+ # This is a workaround for https://gitlab.kitware.com/cmake/cmake/-/issues/16609.
+ # It ensures pthread is searched for without ASAN linking.
+ # The line below must come before adding -fsanitize=address or -fsanitize=thread to
+ # the build options for the trick to work.
+ find_package(Threads REQUIRED)
+endif()
+
if (ENABLE_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=address -fno-omit-frame-pointer")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_SUPPORTED)
diff --git a/cmake/developer_package/message.cmake b/cmake/developer_package/message.cmake
index eb6a1af60035ad..26912b05566599 100644
--- a/cmake/developer_package/message.cmake
+++ b/cmake/developer_package/message.cmake
@@ -11,12 +11,17 @@ if(UNIX AND ENABLE_ERROR_HIGHLIGHT)
list(GET ARGV 0 MessageType)
list(REMOVE_AT ARGV 0)
+
+ foreach(arg IN LISTS ARGV)
+ set(_msg "${_msg}${arg}")
+ endforeach()
+
if(MessageType STREQUAL FATAL_ERROR OR MessageType STREQUAL SEND_ERROR)
- _message(${MessageType} "${RED}${ARGV}${RESET}")
+ _message(${MessageType} "${RED}${_msg}${RESET}")
elseif(MessageType STREQUAL WARNING)
- _message(${MessageType} "${YELLOW}${ARGV}${RESET}")
+ _message(${MessageType} "${YELLOW}${_msg}${RESET}")
else()
- _message(${MessageType} "${ARGV}")
+ _message(${MessageType} "${_msg}")
endif()
endfunction()
endif()
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index a4ee2f62aa5851..e34e8fe3ade2e2 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -3,6 +3,10 @@
#
if(NOT ENABLE_DOCKER)
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ ie_add_compiler_flags(-Wall)
+ endif()
+
add_subdirectory(snippets)
# Detect nGraph
diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md
index 23437de247aabb..0cacca13451ad7 100644
--- a/docs/HOWTO/Custom_Layers_Guide.md
+++ b/docs/HOWTO/Custom_Layers_Guide.md
@@ -1,200 +1,371 @@
-# Custom Layers Guide {#openvino_docs_HOWTO_Custom_Layers_Guide}
+# Custom Operations Guide {#openvino_docs_HOWTO_Custom_Layers_Guide}
+
+The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with multiple frameworks, including
+TensorFlow*, Caffe*, MXNet* and Kaldi*, as well as models in the ONNX* file format. The list of supported operations (layers) differs for
+each of the supported frameworks. To see the operations supported by your framework, refer to
+[Supported Framework Layers](../MO_DG/prepare_model/Supported_Frameworks_Layers.md).
+
+Custom operations are operations that are not included in the list of known operations. If your model contains any
+operation that is not in this list, the Model Optimizer is not able to generate an Intermediate
+Representation (IR) for the model.
+
+This guide illustrates the workflow for running inference on topologies featuring custom operations, allowing you to
+plug in your own implementation for existing or completely new operations.
+
+> **NOTE:** *Layer* is the legacy term for an *operation*, which came from the Caffe\* framework and is no longer used.
+> Refer to [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../MO_DG/IR_and_opsets.md)
+> for more information on the topic.
+
+## Terms Used in This Guide
+
+- *Intermediate Representation (IR)* — Neural network representation used only by the Inference Engine in OpenVINO, abstracting the
+ different frameworks and describing the model topology, operation parameters and weights.
+
+- *Operation* — The abstract concept of a math function that is selected for a specific purpose. Operations supported by
+ OpenVINO™ are listed in the supported operation set provided in the [Available Operations Sets](../ops/opset.md).
+ Examples of the operations are: [ReLU](../ops/activation/ReLU_1.md), [Convolution](../ops/convolution/Convolution_1.md),
+ [Add](../ops/arithmetic/Add_1.md), etc.
+
+- *Kernel* — The implementation of an operation function in the OpenVINO™ plugin, in this case, the math programmed (in
+ C++ and OpenCL) to perform the operation for a target hardware (CPU or GPU).
+
+- *Inference Engine Extension* — Device-specific module implementing custom operations (a set of kernels).
+
+## Custom Operation Support Overview
+
+There are three steps to support inference of a model with custom operation(s):
+1. Add support for a custom operation in the [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) so
+the Model Optimizer can generate the IR with the operation.
+2. Create an operation set and implement a custom nGraph operation in it as described in the
+[Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md).
+3. Implement the custom operation in one of the [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
+plugins to support inference of this operation using a particular target hardware (CPU, GPU or VPU).
+
+To see the operations that are supported by each device plugin for the Inference Engine, refer to the
+[Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md).
+
+> **NOTE:** If a device doesn't support a particular operation, an alternative to creating a new operation is to target
+> an additional device using the HETERO plugin. The [Heterogeneous Plugin](../IE_DG/supported_plugins/HETERO.md) may be
+> used to run an inference model on multiple devices allowing the unsupported operations on one device to "fallback" to
+> run on another device (e.g., CPU) that does support those operations.
+
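+For example, a model with operations unsupported by the GPU plugin can be executed through the HETERO plugin with CPU
+fallback. Below is a minimal Python\* sketch; the IR file names and the device list are assumptions for illustration:
+
+```py
+from openvino.inference_engine import IECore
+
+ie = IECore()
+net = ie.read_network("model.xml", "model.bin")  # hypothetical IR file names
+# Operations unsupported by the GPU plugin automatically fall back to the CPU plugin
+exec_net = ie.load_network(net, "HETERO:GPU,CPU")
+```
+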
+### Custom Operation Support for the Model Optimizer
+
+The Model Optimizer model conversion pipeline is described in detail in the "Model Conversion Pipeline" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
+It is recommended to read that article first for a better understanding of the following material.
+
+The Model Optimizer provides an extension mechanism to support new operations and implement custom model transformations to
+generate an optimized IR. This mechanism is described in the "Model Optimizer Extensions" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
+
+At a minimum, two types of Model Optimizer extensions should be implemented to support a custom operation:
+1. Operation class for the new operation. This class stores information about the operation, its attributes, shape
+inference function, attributes to be saved to an IR and some other internally used attributes. Refer to the
+"Model Optimizer Operation" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
+detailed instructions on how to implement it.
+2. Operation attributes extractor. The extractor is responsible for parsing the framework-specific representation of the
+operation and using the corresponding operation class to update graph node attributes with the necessary attributes of the
+operation. Refer to the "Operation Extractor" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
+detailed instructions on how to implement it.
+
+> **NOTE:** In some cases you may also need to implement a graph transformation to support the operation. This topic is covered
+> in the "Graph Transformation Extensions" section of
+> [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
+
+## Custom Operations Extensions for the Inference Engine
+
+The Inference Engine provides an extension mechanism to support new operations. This mechanism is described in
+[Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).
+
+Each device plugin includes a library of optimized implementations to execute known operations which must be extended to
+execute a custom operation. The custom operation extension is implemented according to the target device:
+
+- Custom Operation CPU Extension
+ - A compiled shared library (`.so`, `.dylib` or `.dll`) needed by the CPU Plugin for executing the custom operation
+ on a CPU. Refer to the [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more
+ details.
+- Custom Operation GPU Extension
+ - OpenCL source code (.cl) for the custom operation kernel that will be compiled to execute on the GPU along with an
+ operation description file (.xml) needed by the GPU Plugin for the custom operation kernel. Refer to the
+ [How to Implement Custom GPU Operations](../IE_DG/Extensibility_DG/GPU_Kernel.md) for more details.
+- Custom Operation VPU Extension
+ - OpenCL source code (.cl) for the custom operation kernel that will be compiled to execute on the VPU along with an
+ operation description file (.xml) needed by the VPU Plugin for the custom operation kernel. Refer to the
+ [How to Implement Custom Operations for VPU](../IE_DG/Extensibility_DG/VPU_Kernel.md) for more details.
+
+Also, it is necessary to implement an nGraph custom operation according to the
+[Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) so the Inference Engine can read an IR with this
+operation and correctly infer output tensor shapes and types.
+
+## Enabling Magnetic Resonance Image Reconstruction Model
+This chapter provides step-by-step instructions on how to enable the magnetic resonance image reconstruction model
+implemented in the [repository](https://github.com/rmsouza01/Hybrid-CS-Model-MRI/) using a custom operation on CPU. The
+example is prepared for a model generated from the repository with hash `2ede2f96161ce70dcdc922371fe6b6b254aafcc8`.
+
+### Download and Convert the Model to a Frozen TensorFlow\* Model Format
+The original pre-trained model is provided in the hdf5 format, which is not supported by OpenVINO directly, and needs to
+be converted to the TensorFlow\* frozen model format first.
+
+1. Download the repository `https://github.com/rmsouza01/Hybrid-CS-Model-MRI` and convert the pre-trained model to the TensorFlow\* frozen format with the following script (run from the repository root):
+```py
+ import keras as K
+ import numpy as np
+ import Modules.frequency_spatial_network as fsnet
+ import tensorflow as tf
-Custom layers are layers that are not included in the list of known layers. If your topology contains any layers that are not in the list of known layers, the Model Optimizer classifies them as custom.
+ under_rate = '20'
-This guide illustrates the workflow for running inference on topologies featuring custom layers, allowing you to plug in your own implementation for existing or completely new layers.
-For a step-by-step example of creating and executing a custom layer, see the [Custom Layer Implementation Tutorials for Linux and Windows.](https://github.com/david-drew/OpenVINO-Custom-Layers/tree/master/2019.r2.0)
+ stats = np.load("Data/stats_fs_unet_norm_" + under_rate + ".npy")
+ var_sampling_mask = np.load("Data/sampling_mask_" + under_rate + "perc.npy")
-## Terms used in this guide
+ model = fsnet.wnet(stats[0], stats[1], stats[2], stats[3], kshape = (5,5), kshape2=(3,3))
+ model_name = "Models/wnet_" + under_rate + ".hdf5"
+ model.load_weights(model_name)
-- *Layer* — The abstract concept of a math function that is selected for a specific purpose (relu, sigmoid, tanh, convolutional). This is one of a sequential series of building blocks within the neural network.
-- *Kernel* — The implementation of a layer function, in this case, the math programmed (in C++ and Python) to perform the layer operation for target hardware (CPU or GPU).
-- *Intermediate Representation (IR)* — Neural Network used only by the Inference Engine in OpenVINO abstracting the different frameworks and describing topology, layer parameters and weights.
-The original format will be a supported framework such as TensorFlow, Caffe, or MXNet.
+ inp = np.random.standard_normal([1, 256, 256, 2]).astype(np.float32)
+ np.save('inp', inp)
-- *Model Extension Generator* — Generates template source code files for each of the extensions needed by the Model Optimizer and the Inference Engine.
+ sess = K.backend.get_session()
+ sess.as_default()
+ graph_def = sess.graph.as_graph_def()
+ graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['conv2d_44/BiasAdd'])
+ with tf.gfile.FastGFile('wnet_20.pb', 'wb') as f:
+ f.write(graph_def.SerializeToString())
+```
-- *Inference Engine Extension* — Device-specific module implementing custom layers (a set of kernels).
-
-
-## Custom Layer Overview
-
-The [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) searches the list of known layers for each layer contained in the input model topology before building the model's internal representation, optimizing the model, and producing the Intermediate Representation files.
-
-The [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) loads the layers from the input model IR files into the specified device plugin, which will search a list of known layer implementations for the device. If your topology contains layers that are not in the list of known layers for the device, the Inference Engine considers the layer to be unsupported and reports an error. To see the layers that are supported by each device plugin for the Inference Engine, refer to the [Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md) documentation.
-
-> **NOTE:** If a device doesn't support a particular layer, an alternative to creating a new custom layer is to target an additional device using the HETERO plugin. The [Heterogeneous Plugin](../IE_DG/supported_plugins/HETERO.md) may be used to run an inference model on multiple devices allowing the unsupported layers on one device to "fallback" to run on another device (e.g., CPU) that does support those layers.
-
-## Custom Layer Implementation Workflow
-
-When implementing a custom layer for your pre-trained model in the Intel® Distribution of OpenVINO™ toolkit, you will need to add extensions to both the Model Optimizer and the Inference Engine.
-
-## Custom Layer Extensions for the Model Optimizer
-
-The following figure shows the basic processing steps for the Model Optimizer highlighting the two necessary custom layer extensions, the Custom Layer Extractor and the Custom Layer Operation.
-
-![](img/MO_extensions_flow.png)
-
-
-The Model Optimizer first extracts information from the input model which includes the topology of the model layers along with parameters, input and output format, etc., for each layer. The model is then optimized from the various known characteristics of the layers, interconnects, and data flow which partly comes from the layer operation providing details including the shape of the output for each layer. Finally, the optimized model is output to the model IR files needed by the Inference Engine to run the model.
-
-The Model Optimizer starts with a library of known extractors and operations for each [supported model framework](../MO_DG/prepare_model/Supported_Frameworks_Layers.md) which must be extended to use each unknown custom layer. The custom layer extensions needed by the Model Optimizer are:
-
-- Custom Layer Extractor
- - Responsible for identifying the custom layer operation and extracting the parameters for each instance of the custom layer. The layer parameters are stored per instance and used by the layer operation before finally appearing in the output IR. Typically the input layer parameters are unchanged, which is the case covered by this tutorial.
-- Custom Layer Operation
- - Responsible for specifying the attributes that are supported by the custom layer and computing the output shape for each instance of the custom layer from its parameters. The `--mo-op` command-line argument shown in the examples below generates a custom layer operation for the Model Optimizer.
-
-## Custom Layer Extensions for the Inference Engine
-
-The following figure shows the basic flow for the Inference Engine highlighting two custom layer extensions for the CPU and GPU Plugins, the Custom Layer CPU extension and the Custom Layer GPU Extension.
-
-![](img/IE_extensions_flow.png)
-
-Each device plugin includes a library of optimized implementations to execute known layer operations which must be extended to execute a custom layer. The custom layer extension is implemented according to the target device:
-
-- Custom Layer CPU Extension
- - A compiled shared library (.so or .dll binary) needed by the CPU Plugin for executing the custom layer on the CPU.
-- Custom Layer GPU Extension
- - OpenCL source code (.cl) for the custom layer kernel that will be compiled to execute on the GPU along with a layer description file (.xml) needed by the GPU Plugin for the custom layer kernel.
-
-## Model Extension Generator
+As a result, the TensorFlow\* frozen model file "wnet_20.pb" is generated.
-Using answers to interactive questions or a *.json* configuration file, the Model Extension Generator tool generates template source code files for each of the extensions needed by the Model Optimizer and the Inference Engine. To complete the implementation of each extension, the template functions may need to be edited to fill-in details specific to the custom layer or the actual custom layer functionality itself.
+### Convert the Frozen TensorFlow\* Model to Intermediate Representation
-### Command-line
-
-The Model Extension Generator is included in the Intel® Distribution of OpenVINO™ toolkit installation and is run using the command (here with the "--help" option):
+First, open the model in TensorBoard or another TensorFlow* model visualization tool. The model supports a dynamic
+batch dimension because the value for the batch dimension is not hardcoded in the model. The Model Optimizer needs to set all
+dynamic dimensions to specific values to create the IR, so specify the command-line parameter `-b 1` to set
+the batch dimension equal to 1. The actual batch size dimension can be changed at runtime using the Inference Engine API
+described in [Using Shape Inference](../IE_DG/ShapeInference.md). Also refer to
+[Converting a Model Using General Conversion Parameters](../MO_DG/prepare_model/convert_model/Converting_Model_General.md)
+and [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
+for more details and command line parameters used for the model conversion.
```bash
-python3 /opt/intel/openvino/deployment_tools/tools/extension_generator/extgen.py new --help
+.//mo.py --input_model /wnet_20.pb -b 1
```
-where the output will appear similar to:
-
-```
-usage: You can use any combination of the following arguments:
-
-Arguments to configure extension generation in the interactive mode:
-
-optional arguments:
- -h, --help show this help message and exit
- --mo-caffe-ext generate a Model Optimizer Caffe* extractor
- --mo-mxnet-ext generate a Model Optimizer MXNet* extractor
- --mo-tf-ext generate a Model Optimizer TensorFlow* extractor
- --mo-op generate a Model Optimizer operation
- --ie-cpu-ext generate an Inference Engine CPU extension
- --ie-gpu-ext generate an Inference Engine GPU extension
- --output_dir OUTPUT_DIR
- set an output directory. If not specified, the current
- directory is used by default.
+Model Optimizer produces the following error:
+```bash
+[ ERROR ] List of operations that cannot be converted to Inference Engine IR:
+[ ERROR ] Complex (1)
+[ ERROR ] lambda_2/Complex
+[ ERROR ] IFFT2D (1)
+[ ERROR ] lambda_2/IFFT2D
+[ ERROR ] ComplexAbs (1)
+[ ERROR ] lambda_2/Abs
+[ ERROR ] Part of the nodes was not converted to IR. Stopped.
```
-The available command-line arguments are used to specify which extension(s) to generate templates for the Model Optimizer or Inference Engine. The generated extension files for each argument will appear starting from the top of the output directory as follows:
-
-Command-line Argument | Output Directory Location |
---------------------- | ------------------------------ |
-`--mo-caffe-ext` | user_mo_extensions/front/caffe |
-`--mo-mxnet-ext` | user_mo_extensions/front/mxnet |
-`--mo-tf-ext` | user_mo_extensions/front/tf |
-`--mo-op` | user_mo_extensions/ops |
-`--ie-cpu-ext` | user_ie_extensions/cpu |
-`--ie-gpu-ext` | user_ie_extensions/gpu |
-
-### Extension Workflow
-
-The workflow for each generated extension follows the same basic steps:
-
-![](img/MEG_generic_flow.png)
-
-**Step 1: Generate:** Use the Model Extension Generator to generate the Custom Layer Template Files.
-
-**Step 2: Edit:** Edit the Custom Layer Template Files as necessary to create the specialized Custom Layer Extension Source Code.
-
-**Step 3: Specify:** Specify the custom layer extension locations to be used by the Model Optimizer or Inference Engine.
+The error means that the Model Optimizer doesn't know how to handle three types of TensorFlow\* operations: "Complex",
+"IFFT2D" and "ComplexAbs". To see more details about the conversion process, run the model conversion with the
+additional parameter `--log_level DEBUG`. The following lines from the detailed output are worth mentioning:
-## Caffe\* Models with Custom Layers
+```bash
+[ INFO ] Called "tf_native_tf_node_infer" for node "lambda_2/Complex"
+[ ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_3/strided_slice' with input 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:249 ] Replacing input '0' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_4/strided_slice' with input 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:249 ] Replacing input '1' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:148 ] Inferred shape of the output tensor with index '0' of the node 'lambda_2/Complex': '[ 1 256 256]'
+[ ] [ DEBUG ] [ infer:145 ] Outputs:
+[ ] [ DEBUG ] [ infer:32 ] output[0]: shape = [ 1 256 256], value =
+[ ] [ DEBUG ] [ infer:129 ] --------------------
+[ ] [ DEBUG ] [ infer:130 ] Partial infer for lambda_2/IFFT2D
+[ ] [ DEBUG ] [ infer:131 ] Op: IFFT2D
+[ ] [ DEBUG ] [ infer:132 ] Inputs:
+[ ] [ DEBUG ] [ infer:32 ] input[0]: shape = [ 1 256 256], value =
+```
-If your Caffe\* model has custom layers:
+This is part of the log of the partial inference phase of the model conversion. See the "Partial Inference" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
+more information about this phase. The Model Optimizer inferred the output shape for the unknown operation of type "Complex"
+using a "fallback" to TensorFlow\*. However, this is not enough to generate the IR because the Model Optimizer doesn't know
+which attributes of the operation should be saved to the IR. So it is necessary to implement Model Optimizer extensions to
+support these operations.
+
+Before going into the extension development it is necessary to understand what these unsupported operations do according
+to the TensorFlow\* framework specification.
+
+* "Complex" - returns a tensor of complex type constructed from two real input tensors specifying real and imaginary
+part of a complex number.
+* "IFFT2D" - returns a tensor with inverse 2-dimensional discrete Fourier transform over the inner-most 2 dimensions of
+ an input.
+* "ComplexAbs" - returns a tensor with absolute values of input tensor with complex numbers.
+
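+In NumPy\* terms, the chain of these three operations is roughly equivalent to the following (an illustrative sketch only,
+not code used by the Model Optimizer or the Inference Engine):
+
+```py
+import numpy as np
+
+real = np.random.rand(1, 256, 256).astype(np.float32)
+imag = np.random.rand(1, 256, 256).astype(np.float32)
+
+z = real + 1j * imag       # "Complex": pack two real tensors into one complex tensor
+spatial = np.fft.ifft2(z)  # "IFFT2D": inverse FFT over the two innermost dimensions
+result = np.abs(spatial)   # "ComplexAbs": absolute value of each complex element
+```
+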
+The part of the model with all three unsupported operations is depicted below:
+
+![Unsupported sub-graph](img/unsupported_subgraph.png)
+
+This model uses complex numbers during inference, but the Inference Engine does not support tensors of this data type, so
+it is necessary to find a way to avoid using such tensors in the model. Fortunately, the complex tensor
+appears only as the result of the "Complex" operation, is used as input to the "IFFT2D" operation, and is then passed to "ComplexAbs",
+which produces a real-valued tensor as output. So there are just three operations consuming or producing complex tensors in the
+model.
+
+Let's design an OpenVINO operation "FFT" which gets a single real-valued tensor encoding the complex input and
+produces a single real-valued tensor encoding the complex output. This way, the fact that the model uses complex
+numbers is hidden inside the "FFT" operation implementation. The operation gets a tensor of shape `[N, H, W, 2]` and
+produces an output tensor of the same shape, where the innermost dimension contains pairs of real numbers describing
+the complex number (its real and imaginary parts). As we will see further, this operation allows us to support the
+model. The implementation of the Model Optimizer operation should be saved to the `mo_extensions/ops/FFT.py` file:
+
+@snippet FFT.py fft:operation
+
+The attribute `inverse` is a flag specifying the type of FFT to apply: forward or inverse.
+
+See the "Model Optimizer Operation" section on the
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for the
+detailed instruction on how to implement the operation.
+
+Now it is necessary to implement an extractor for the "IFFT2D" operation according to the
+"Operation Extractor" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md). The
+following snippet provides two extractors: one for "IFFT2D" and another for "FFT2D"; however, only one of them is used
+in this example. The implementation should be saved to the file `mo_extensions/front/tf/FFT_ext.py`.
+
+@snippet FFT_ext.py fft_ext:extractor
+
+> **NOTE:** The graph is in an inconsistent state after extracting node attributes because, according to the original
+> "IFFT2D" operation semantics, it should consume a tensor of complex numbers, but the extractor instantiated the
+> "FFT" operation which expects a real tensor with a specific layout. This inconsistency will be resolved when the
+> front phase transformations discussed below are applied.
+
+The output shape of the operation "AddV2" from the picture above is `[N, H, W, 2]`, where the innermost dimension
+contains pairs of real numbers describing the complex number (its real and imaginary parts). The following "StridedSlice"
+operations split the input tensor into two parts to get a tensor of real parts and a tensor of imaginary parts, which are then
+consumed by the "Complex" operation to produce a tensor of complex numbers. These "StridedSlice" and "Complex"
+operations can be removed so the "FFT" operation receives a real-valued tensor encoding complex numbers. To achieve this,
+we implement a front phase transformation which searches for the pattern of two "StridedSlice" operations with specific
+attributes feeding the "Complex" operation and removes it from the graph. Refer to the
+"Pattern-Defined Front Phase Transformations" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for more
+information on how this type of transformation works. The code snippet should be saved to the file
+`mo_extensions/front/tf/Complex.py`.
+
+@snippet Complex.py complex:transformation
+
+> **NOTE:** The graph is in an inconsistent state because the "ComplexAbs" operation consumes a complex-valued tensor, but
+> "FFT" produces a real-valued tensor.
+
+Now let's implement a transformation which replaces the "ComplexAbs" operation with a sub-graph of primitive operations
+which calculate the result using the following formula: \f$|z| = \sqrt{real(z) \cdot real(z) + imag(z) \cdot imag(z)}\f$.
+The original "IFFT2D" operation produces a tensor of complex values, but the "FFT" operation produces a real-valued tensor with
+the same format and shape as its input. So the input shape for "ComplexAbs" will be `[N, H, W, 2]`,
+with the innermost dimension containing the real and imaginary parts of a complex number. To calculate the
+absolute values for the complex tensor, we do the following:
+1. Raise all elements to the power of 2.
+2. Calculate a reduced sum over the innermost dimension.
+3. Calculate a square root.
+
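+A quick NumPy\* check of this decomposition on the packed `[N, H, W, 2]` layout (an illustrative sketch only, not part of
+the extension code):
+
+```py
+import numpy as np
+
+packed = np.random.rand(1, 256, 256, 2).astype(np.float32)    # last dimension holds (real, imag)
+
+reference = np.abs(packed[..., 0] + 1j * packed[..., 1])      # true |z|
+decomposed = np.sqrt(np.sum(np.power(packed, 2.0), axis=-1))  # Pow(2) -> ReduceSum -> Pow(0.5)
+
+assert np.allclose(reference, decomposed, atol=1e-5)
+```
+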
+The implementation, provided below, should be saved to the file `mo_extensions/front/tf/ComplexAbs.py`:
+
+@snippet ComplexAbs.py complex_abs:transformation
+
+Now it is possible to convert the model using the following command line:
+```bash
+.//mo.py --input_model /wnet_20.pb -b 1 --extensions mo_extensions/
+```
-**Register the custom layers as extensions to the Model Optimizer**. For instructions, see [Extending Model Optimizer with New Primitives](../MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You will need a bit of Python\* code that lets the Model Optimizer;
+The sub-graph corresponding to the originally unsupported one is depicted in the image below:
-- Generate a valid Intermediate Representation according to the rules you specified.
-- Be independent from the availability of Caffe on your computer.
-
-If your model contains Custom Layers, it is important to understand the internal workflow of the Model Optimizer. Consider the following example.
+![Converted sub-graph](img/converted_subgraph.png)
-**Example**:
+> **NOTE:** The Model Optimizer converted the model from NHWC to NCHW layout, which is why the dimension with
+> the value 2 moved to another position.
-The network has:
+### Inference Engine Extension Implementation
+Now it is necessary to implement the extension for the CPU plugin with the "FFT" operation introduced previously. The code
+below is based on the template extension described in
+[Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).
-* One input layer (#1)
-* One output Layer (#5)
-* Three internal layers (#2, 3, 4)
+#### CMake Build File
+The first step is to create a CMake configuration file which builds the extension. The content of the "CMakeLists.txt"
+file is the following:
-The custom and standard layer types are:
+@snippet ../template_extension/CMakeLists.txt cmake:extension
-* Layers #2 and #5 are implemented as Model Optimizer extensions.
-* Layers #1 and #4 are supported in Model Optimizer out-of-the box.
-* Layer #3 is neither in the list of supported layers nor in extensions, but is specified in CustomLayersMapping.xml.
+The CPU FFT kernel implementation uses OpenCV to perform the FFT, which is why the extension library is linked with
+"opencv_core", which comes with OpenVINO.
-> **NOTE**: If any of the layers are not in one of three categories described above, the Model Optimizer fails with an appropriate message and a link to the corresponding question in [Model Optimizer FAQ](../MO_DG/prepare_model/Model_Optimizer_FAQ.md).
+#### Custom nGraph Operation "FFT" Implementation
+The next step is to create the nGraph operation FFT. The header file "fft_op.hpp" has the following content:
-The general process is as shown:
+@snippet ../template_extension/fft_op.hpp fft_op:header
-![Example custom layer network](img/mo_caffe_priorities.png)
-
+The operation has just one boolean attribute, `inverse`. The implementation of the necessary nGraph operation functions is
+in the "fft_op.cpp" file with the following content:
-**Step 1:** The example model is fed to the Model Optimizer that **loads the model** with the special parser built on top of the `caffe.proto` file. In case of failure, the Model Optimizer asks you to prepare the parser that can read the model. For more information, refer to the Model Optimizer, FAQ #1.
+@snippet ../template_extension/fft_op.cpp fft_op:implementation
-**Step 2:** The Model Optimizer **extracts the attributes of all layers** by going through the list of layers and attempting to find the appropriate extractor. In order of priority, the Model Optimizer checks if the layer is:
-
-* A. Registered as a Model Optimizer extension
-* B. Registered as a standard Model Optimizer layer
-
-When the Model Optimizer finds a satisfying condition from the list above, it extracts the attributes according to the following rules:
-
-* For A. - takes only the parameters specified in the extension
-* For B. - takes only the parameters specified in the standard extractor
-
+Refer to the [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) for more details.
-**Step 3:** The Model Optimizer **calculates the output shape of all layers**. The logic is the same as it is for the priorities. **Important:** the Model Optimizer always takes the first available option.
+#### CPU FFT Kernel Implementation
+The operation implementation for the CPU plugin uses OpenCV to perform the FFT. The header file "fft_kernel.hpp" has the
+following content:
-**Step 4:** The Model Optimizer **optimizes the original model and produces the two Intermediate Representation (IR) files in .xml and .bin**.
-
+@snippet ../template_extension/fft_kernel.hpp fft_kernel:header
-## TensorFlow\* Models with Custom Layers
+The "fft_kernel.cpp" with the implementation of the CPU has the following content:
-You have two options for TensorFlow\* models with custom layers:
-
+@snippet ../template_extension/fft_kernel.cpp fft_kernel:implementation
-* **Register those layers as extensions to the Model Optimizer.** In this case, the Model Optimizer generates a valid and optimized Intermediate Representation.
-* **If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option.** This feature is helpful for many TensorFlow models. To read more, see [Sub-graph Replacement in the Model Optimizer](../MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md).
-
-## MXNet\* Models with Custom Layers
+Refer to the [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more details.
-There are two options to convert your MXNet* model that contains custom layers:
+#### Extension Library Implementation
+The last step is to create the extension library ("extension.cpp" and "extension.hpp") which includes the FFT
+operation for the CPU plugin. The code of the library is described in [Extension Library](../IE_DG/Extensibility_DG/Extension.md).
-1. Register the custom layers as extensions to the Model Optimizer. For instructions, see [Extending MXNet Model Optimizer with New Primitives](../MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You can create Model Optimizer extensions for both MXNet layers with op `Custom` and layers which are not standard MXNet layers.
+### Building and Running the Custom Extension
+To build the extension, run the following commands:
+```bash
+mkdir build && cd build
+source /opt/intel/openvino/bin/setupvars.sh
+cmake .. -DCMAKE_BUILD_TYPE=Release
+make --jobs=$(nproc)
+```
-2. If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option. In MXNet the function is actively used for ssd models provides an opportunity to for the necessary subgraph sequences and replace them. To read more, see [Sub-graph Replacement in the Model Optimizer](../MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md).
+The result of these commands is a compiled shared library (`.so`, `.dylib` or `.dll`). It should be loaded in the
+application using the `Core` class instance method `AddExtension` like this:
+`core.AddExtension(make_so_pointer(compiled_library_file_name), "CPU");`.
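+
+With the Python\* API, the extension can be loaded through `IECore.add_extension`, as the demo script below also does.
+A minimal sketch (the library path and IR file names are assumptions):
+
+```py
+from openvino.inference_engine import IECore
+
+ie = IECore()
+ie.add_extension("/path/to/libtemplate_extension.so", "CPU")  # hypothetical path to the built library
+net = ie.read_network("wnet_20.xml", "wnet_20.bin")
+exec_net = ie.load_network(net, "CPU")
+```
+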
-## Kaldi\* Models with Custom Layers
-For information on converting your Kaldi* model containing custom layers see [Converting a Kaldi Model in the Model Optimizer Developer Guide](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md).
+To test that the extension is implemented correctly, we can run the "mri_reconstruction_demo.py" script with the following content:
-## ONNX\* Models with Custom Layers
-For information on converting your ONNX* model containing custom layers see [Converting an ONNX Model in the Model Optimizer Developer Guide](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md).
+@snippet mri_reconstruction_demo.py mri_demo:demo
-## Step-by-Step Custom Layers Tutorial
-For a step-by-step walk-through creating and executing a custom layer, see [Custom Layer Implementation Tutorial for Linux and Windows.](https://github.com/david-drew/OpenVINO-Custom-Layers/tree/master/2019.r2.0)
+The script can be executed using the following command line:
+```bash
+python3 mri_reconstruction_demo.py \
+ -m /wnet_20.xml \
+ -i .npy \
+ -p /Data/sampling_mask_20perc.npy \
+ -l /libtemplate_extension.so \
+ -d CPU
+```
## Additional Resources
- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
+- [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
- [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md)
- [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
- [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index)
@@ -204,9 +375,7 @@ For a step-by-step walk-through creating and executing a custom layer, see [Cust
## Converting Models:
- [Convert Your Caffe* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md)
+- [Convert Your Kaldi* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md)
- [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
- [Convert Your MXNet* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md)
- [Convert Your ONNX* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md)
-
-
-
diff --git a/docs/HOWTO/img/IE_extensions_flow.png b/docs/HOWTO/img/IE_extensions_flow.png
deleted file mode 100644
index ca665ca3298bbb..00000000000000
--- a/docs/HOWTO/img/IE_extensions_flow.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c2f362a39ae6c2af080e4f055b6fdba4954f918f85731545d1df3d687d9213d5
-size 421056
diff --git a/docs/HOWTO/img/MEG_generic_flow.png b/docs/HOWTO/img/MEG_generic_flow.png
deleted file mode 100644
index a492c3fff5026b..00000000000000
--- a/docs/HOWTO/img/MEG_generic_flow.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cb5c700d003936779455353bfa4ed9432410c0975c46e2dfd30c6a1abccd1727
-size 23320
diff --git a/docs/HOWTO/img/MO_extensions_flow.png b/docs/HOWTO/img/MO_extensions_flow.png
deleted file mode 100644
index 5009c0ce2604ad..00000000000000
--- a/docs/HOWTO/img/MO_extensions_flow.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:99d6b5146be85fa408dc5432883c3e2745cffe890133854a97dcf22f5c5962d4
-size 47564
diff --git a/docs/HOWTO/img/converted_subgraph.png b/docs/HOWTO/img/converted_subgraph.png
new file mode 100644
index 00000000000000..6a5b7220777d54
--- /dev/null
+++ b/docs/HOWTO/img/converted_subgraph.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7c8ab4f15874d235968471bcf876c89c795d601e69891208107b8b72aa58eb1
+size 70014
diff --git a/docs/HOWTO/img/mo_caffe_priorities.png b/docs/HOWTO/img/mo_caffe_priorities.png
deleted file mode 100644
index 665892316c17fc..00000000000000
--- a/docs/HOWTO/img/mo_caffe_priorities.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0a4de6e502cae7542f1f311bcdbea6bb145f960f0d27d86a03160d1a60133778
-size 301310
diff --git a/docs/HOWTO/img/unsupported_subgraph.png b/docs/HOWTO/img/unsupported_subgraph.png
new file mode 100644
index 00000000000000..80f7084a78a859
--- /dev/null
+++ b/docs/HOWTO/img/unsupported_subgraph.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d5ccf51fe1babb93d96d042494695a6a6e055d1f8ebf7eef5083d54d8987a23
+size 58789
diff --git a/docs/HOWTO/mo_extensions/front/tf/Complex.py b/docs/HOWTO/mo_extensions/front/tf/Complex.py
new file mode 100644
index 00000000000000..465608dfaba644
--- /dev/null
+++ b/docs/HOWTO/mo_extensions/front/tf/Complex.py
@@ -0,0 +1,57 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+#! [complex:transformation]
+import logging as log
+
+import numpy as np
+
+from mo.front.common.replacement import FrontReplacementSubgraph
+from mo.graph.graph import Graph
+
+
+class Complex(FrontReplacementSubgraph):
+ enabled = True
+
+ def pattern(self):
+ return dict(
+ nodes=[
+ ('strided_slice_real', dict(op='StridedSlice')),
+ ('strided_slice_imag', dict(op='StridedSlice')),
+ ('complex', dict(op='Complex')),
+ ],
+ edges=[
+ ('strided_slice_real', 'complex', {'in': 0}),
+ ('strided_slice_imag', 'complex', {'in': 1}),
+ ])
+
+ @staticmethod
+ def replace_sub_graph(graph: Graph, match: dict):
+ strided_slice_real = match['strided_slice_real']
+ strided_slice_imag = match['strided_slice_imag']
+ complex_node = match['complex']
+
+ # make sure that both strided slice operations get the same data as input
+ assert strided_slice_real.in_port(0).get_source() == strided_slice_imag.in_port(0).get_source()
+
+ # identify the output port of the operation producing data for the strided slice nodes
+ input_node_output_port = strided_slice_real.in_port(0).get_source()
+ input_node_output_port.disconnect()
+
+ # change the connection so that all consumers of "complex_node" get data from the input node of the strided slice nodes
+ complex_node.out_port(0).get_connection().set_source(input_node_output_port)
+#! [complex:transformation]
+
diff --git a/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py b/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py
new file mode 100644
index 00000000000000..bac4140d732f91
--- /dev/null
+++ b/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py
@@ -0,0 +1,40 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+#! [complex_abs:transformation]
+import numpy as np
+
+from extensions.ops.elementwise import Pow
+from extensions.ops.ReduceOps import ReduceSum
+from mo.front.common.replacement import FrontReplacementOp
+from mo.graph.graph import Graph, Node
+from mo.ops.const import Const
+
+
+class ComplexAbs(FrontReplacementOp):
+ op = "ComplexAbs"
+ enabled = True
+
+ def replace_op(self, graph: Graph, node: Node):
+ pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node()
+ reduce_axis = Const(graph, {'value': np.int32(-1)}).create_node()
+ pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node()
+
+ sq = Pow(graph, dict(name=node.in_node(0).name + '/sq', power=2.0)).create_node([node.in_node(0), pow_2])
+ sum = ReduceSum(graph, dict(name=sq.name + '/sum')).create_node([sq, reduce_axis])
+ sqrt = Pow(graph, dict(name=sum.name + '/sqrt', power=0.5)).create_node([sum, pow_0_5])
+ return [sqrt.id]
+#! [complex_abs:transformation]
diff --git a/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py b/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py
new file mode 100644
index 00000000000000..283c87ba838f80
--- /dev/null
+++ b/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py
@@ -0,0 +1,47 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# ! [fft_ext:extractor]
+from ...ops.FFT import FFT
+from mo.front.extractor import FrontExtractorOp
+from mo.utils.error import Error
+
+
+class FFT2DFrontExtractor(FrontExtractorOp):
+ op = 'FFT2D'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node):
+ attrs = {
+ 'inverse': 0
+ }
+ FFT.update_node_stat(node, attrs)
+ return cls.enabled
+
+
+class IFFT2DFrontExtractor(FrontExtractorOp):
+ op = 'IFFT2D'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node):
+ attrs = {
+ 'inverse': 1
+ }
+ FFT.update_node_stat(node, attrs)
+ return cls.enabled
+# ! [fft_ext:extractor]
diff --git a/docs/HOWTO/mo_extensions/ops/FFT.py b/docs/HOWTO/mo_extensions/ops/FFT.py
new file mode 100644
index 00000000000000..c3f37f7d6d6919
--- /dev/null
+++ b/docs/HOWTO/mo_extensions/ops/FFT.py
@@ -0,0 +1,40 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+#! [fft:operation]
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.graph.graph import Node, Graph
+from mo.ops.op import Op
+
+
+class FFT(Op):
+ op = 'FFT'
+ enabled = False
+
+ def __init__(self, graph: Graph, attrs: dict):
+ super().__init__(graph, {
+ 'type': self.op,
+ 'op': self.op,
+ 'version': 'custom_opset',
+ 'inverse': None,
+ 'in_ports_count': 1,
+ 'out_ports_count': 1,
+ 'infer': copy_shape_infer
+ }, attrs)
+
+ def backend_attrs(self):
+ return ['inverse']
+#! [fft:operation]
diff --git a/docs/HOWTO/mri_reconstruction_demo.py b/docs/HOWTO/mri_reconstruction_demo.py
new file mode 100644
index 00000000000000..74ce15721fc68a
--- /dev/null
+++ b/docs/HOWTO/mri_reconstruction_demo.py
@@ -0,0 +1,119 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+#! [mri_demo:demo]
+import numpy as np
+import cv2 as cv
+import argparse
+import time
+from openvino.inference_engine import IECore
+
+
+def kspace_to_image(kspace):
+ assert(len(kspace.shape) == 3 and kspace.shape[-1] == 2)
+ fft = cv.idft(kspace, flags=cv.DFT_SCALE)
+ img = cv.magnitude(fft[:,:,0], fft[:,:,1])
+ return cv.normalize(img, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='MRI reconstruction demo for network from https://github.com/rmsouza01/Hybrid-CS-Model-MRI (https://arxiv.org/abs/1810.12473)')
+ parser.add_argument('-i', '--input', dest='input', help='Path to input .npy file with MRI scan data.')
+ parser.add_argument('-p', '--pattern', dest='pattern', help='Path to sampling mask in .npy format.')
+ parser.add_argument('-m', '--model', dest='model', help='Path to .xml file of OpenVINO IR.')
+ parser.add_argument('-l', '--cpu_extension', dest='cpu_extension', help='Path to extensions library with FFT implementation.')
+ parser.add_argument('-d', '--device', dest='device', default='CPU',
+ help='Optional. Specify the target device to infer on; CPU, '
+ 'GPU, HDDL or MYRIAD is acceptable. For non-CPU targets, '
+ 'HETERO plugin is used with CPU fallbacks to FFT implementation. '
+ 'Default value is CPU')
+ args = parser.parse_args()
+
+ xml_path = args.model
+ assert(xml_path.endswith('.xml'))
+ bin_path = xml_path[:xml_path.rfind('.xml')] + '.bin'
+
+ ie = IECore()
+ ie.add_extension(args.cpu_extension, "CPU")
+
+ net = ie.read_network(xml_path, bin_path)
+
+ device = 'CPU' if args.device == 'CPU' else ('HETERO:' + args.device + ',CPU')
+ exec_net = ie.load_network(net, device)
+
+ # Hybrid-CS-Model-MRI/Data/stats_fs_unet_norm_20.npy
+ stats = np.array([2.20295299e-01, 1.11048916e+03, 4.16997984e+00, 4.71741395e+00], dtype=np.float32)
+ # Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
+ var_sampling_mask = np.load(args.pattern) # TODO: can we generate it in runtime?
+ print('Sampling ratio:', 1.0 - var_sampling_mask.sum() / var_sampling_mask.size)
+
+ data = np.load(args.input)
+ num_slices, height, width = data.shape[0], data.shape[1], data.shape[2]
+ pred = np.zeros((num_slices, height, width), dtype=np.uint8)
+ data /= np.sqrt(height * width)
+
+ print('Compute...')
+ start = time.time()
+ for slice_id, kspace in enumerate(data):
+ kspace = kspace.copy()
+
+ # Apply sampling
+ kspace[var_sampling_mask] = 0
+ kspace = (kspace - stats[0]) / stats[1]
+
+ # Forward through network
+ input = np.expand_dims(kspace.transpose(2, 0, 1), axis=0)
+ outputs = exec_net.infer(inputs={'input_1': input})
+ output = next(iter(outputs.values()))
+ output = output.reshape(height, width)
+
+ # Save predictions
+ pred[slice_id] = cv.normalize(output, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
+
+ print('Elapsed time: %.1f seconds' % (time.time() - start))
+
+ WIN_NAME = 'MRI reconstruction with OpenVINO'
+
+ slice_id = 0
+ def callback(pos):
+ global slice_id
+ slice_id = pos
+
+ kspace = data[slice_id]
+ img = kspace_to_image(kspace)
+
+ kspace[var_sampling_mask] = 0
+ masked = kspace_to_image(kspace)
+
+ rec = pred[slice_id]
+
+ # Add a header
+ border_size = 20
+ render = cv.hconcat((img, masked, rec))
+ render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255)
+ cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
+ cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
+ cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
+
+ cv.imshow(WIN_NAME, render)
+ cv.waitKey(1)
+
+ cv.namedWindow(WIN_NAME, cv.WINDOW_NORMAL)
+ print(num_slices)
+ cv.createTrackbar('Slice', WIN_NAME, num_slices // 2, num_slices - 1, callback)
+ callback(num_slices // 2) # Trigger initial visualization
+ cv.waitKey()
+#! [mri_demo:demo]
diff --git a/docs/IE_DG/Bfloat16Inference.md b/docs/IE_DG/Bfloat16Inference.md
index e814a8948c44bb..136607af8ad435 100644
--- a/docs/IE_DG/Bfloat16Inference.md
+++ b/docs/IE_DG/Bfloat16Inference.md
@@ -2,7 +2,8 @@
## Disclaimer
-Inference Engine with the bfloat16 inference implemented on CPU must support the `avx512_bf16` instruction and therefore the bfloat16 data format.
+Inference Engine bfloat16 inference on CPU requires the CPU to support the native `avx512_bf16` instruction and therefore the bfloat16 data format.
+It is possible to use bfloat16 inference in simulation mode on platforms with Intel® Advanced Vector Extensions 512 (Intel® AVX-512), but it leads to significant performance degradation in comparison with FP32 or with native `avx512_bf16` instruction usage.
## Introduction
@@ -12,7 +13,7 @@ Bfloat16 computations (referred to as BF16) is the Brain Floating-Point format w
Preserving the exponent bits keeps BF16 to the same range as the FP32 (~1e-38 to ~3e38). This simplifies conversion between two data types: you just need to skip or flush to zero 16 low bits.
Truncated mantissa leads to occasionally less precision, but according to [investigations](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus), neural networks are more sensitive to the size of the exponent than the mantissa size. Also, in lots of models, precision is needed close to zero but not so much at the maximum range.
-Another useful feature of BF16 is possibility to encode an INT8 in BF16 without loss of accuracy, because INT8 range completely fits in BF16 mantissa field. It reduces data flow in conversion from INT8 input image data to BF16 directly without intermediate representation in FP32, or in combination of [INT8 inference](Int8Inference.md) and BF16 layers.
+Another useful feature of BF16 is possibility to encode INT8 in BF16 without loss of accuracy, because INT8 range completely fits in BF16 mantissa field. It reduces data flow in conversion from INT8 input image data to BF16 directly without intermediate representation in FP32, or in combination of [INT8 inference](Int8Inference.md) and BF16 layers.
See the [Intel's site](https://software.intel.com/sites/default/files/managed/40/8b/bf16-hardware-numerics-definition-white-paper.pdf) for more bfloat16 format details.
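+
+As a quick illustration of this truncation (a sketch, not part of the Inference Engine), the following Python snippet converts FP32 values to BF16-representable values by zeroing the 16 low mantissa bits:
+
+```python
+import numpy as np
+
+def fp32_to_bf16_truncate(values):
+    # View FP32 bits as uint32 and zero the 16 low mantissa bits (truncation, no rounding)
+    bits = np.asarray(values, dtype=np.float32).view(np.uint32)
+    return (bits & np.uint32(0xFFFF0000)).view(np.float32)
+
+print(fp32_to_bf16_truncate([3.14159265]))  # [3.140625]: same exponent range, shorter mantissa
+```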
@@ -22,14 +23,7 @@ There are two ways to check if CPU device can support bfloat16 computations for
@snippet snippets/Bfloat16Inference0.cpp part0
-Current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of the following layers in BF16 computation mode:
-* Convolution
-* FullyConnected
-* InnerProduct
-* LRN
-* Pooling
-
-This means that BF16 inference can only be performed with the CPU plugin on the layers listed above. All other layers are executed in FP32.
+The current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of a significant number of layers in BF16 computation mode.
## Lowering Inference Precision
@@ -43,18 +37,36 @@ Bfloat16 data usage provides the following benefits that increase performance:
4. Reduced size of data in memory, as a result, larger models fit in the same memory bounds.
5. Reduced amount of data that must be transferred, as a result, reduced data transition time.
-For default optimization on CPU, source model converts from FP32 or FP16 to BF16 and executes internally on platforms with native BF16 support. In that case, `KEY_ENFORCE_BF16` is set to `YES`.
+For default optimization on CPU, the source model is converted from FP32 or FP16 to BF16 and executed internally on platforms with native BF16 support. In this case, `KEY_ENFORCE_BF16` is set to `YES`.
The code below demonstrates how to check if the key is set:
@snippet snippets/Bfloat16Inference1.cpp part1
-To disable BF16 internal transformations, set the `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers AS IS without modifications with precisions that were set on each layer edge.
+To disable BF16 internal transformations, set `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers as is, without modifications, using the precisions that were set on each layer edge.
@snippet snippets/Bfloat16Inference2.cpp part2
+To disable BF16 via the C API:
+
+```
+ie_config_t config = { "ENFORCE_BF16", "NO", NULL};
+ie_core_load_network(core, network, device_name, &config, &exe_network);
+```
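+
+The same setting can be applied from the Python API (a sketch; the model file names are placeholders):
+
+```python
+from openvino.inference_engine import IECore
+
+ie = IECore()
+net = ie.read_network('model.xml', 'model.bin')
+# Keep the original FP32/FP16 precisions by disabling the BF16 transformations
+exec_net = ie.load_network(net, 'CPU', config={'ENFORCE_BF16': 'NO'})
+```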
-An exception with message `Platform doesn't support BF16 format` is formed in case of setting `KEY_ENFORCE_BF16` to `YES` on CPU without native BF16 support.
+An exception with the message `Platform doesn't support BF16 format` is thrown if `KEY_ENFORCE_BF16` is set to `YES` on a CPU without native BF16 support or BF16 simulation mode.
-Low-Precision 8-bit integer models do not convert to BF16, even if bfloat16 optimization is set by default.
+Low-Precision 8-bit integer models cannot be converted to BF16, even if bfloat16 optimization is set by default.
+
+## Bfloat16 Simulation Mode
+
+Bfloat16 simulation mode is available on the CPU device for Intel® AVX-512 platforms that do not support the native `avx512_bf16` instruction. The simulator does not guarantee adequate performance.
+To enable the bfloat16 simulator:
+* In the [Benchmark App](../../inference-engine/samples/benchmark_app/README.md), add the `-enforcebf16=true` option
+* In the C++ API, set `KEY_ENFORCE_BF16` to `YES`
+* In the C API:
+```
+ie_config_t config = { "ENFORCE_BF16", "YES", NULL};
+ie_core_load_network(core, network, device_name, &config, &exe_network);
+```
## Performance Counters
@@ -77,4 +89,4 @@ prob EXECUTED layerType: SoftMax realT
The `execType` column of the table includes inference primitives with specific suffixes.
-[bf16_format]: img/bf16_format.png
\ No newline at end of file
+[bf16_format]: img/bf16_format.png
diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
index 42eda8f83c0fa4..9717b08f1c427d 100644
--- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
+++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
@@ -1,4 +1,4 @@
-# Add Custom nGraph Operations {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps}
+# Custom nGraph Operation {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps}
Inference Engine Extension API allows you to register operation sets (opsets) with custom nGraph operations, which makes it possible to support networks with unknown operations.
@@ -71,10 +71,9 @@ nGraph provides opsets mechanism for operation versioning. Different opsets dist
When specifying opset names, follow the rules below:
* Use unique opset names.
-* Do not use the following built-in opset names: `extension`, `experimental`, `opset1`, `opest2`.
+* Do not use the following built-in opset names: `extension`, `experimental`, `opset1`, `opset2`, `opset3`, ... , `opsetN`.
* Make sure that the Model Optimizer and your extension use the same opset names.
-* IR v10 layers have the mandatory `version` attribute specifying the opset.
-* `opset1` is the name of default operations set.
+* IR v10 operations have the mandatory `version` attribute specifying the opset.
Operations from the default opset cannot be redefined.
Use a custom opset to create a new operation or extend functionality of an existing operation from another opset.
diff --git a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
index 205ae64a6e1825..0e2adca76a8775 100644
--- a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
@@ -1,4 +1,4 @@
-# How to Implement Custom CPU Layers {#openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel}
+# How to Implement Custom CPU Operations {#openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel}
The primary vehicle for the performance of the CPU codepath in the Inference Engine is the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN), and new CPU kernels extend the Inference Engine plugin for the Intel MKL-DNN. Implementing the InferenceEngine::ILayerExecImpl defines a general CPU-side extension. There are no Intel MKL-DNN specifics in the way you need to implement a kernel.
diff --git a/docs/IE_DG/Extensibility_DG/Extension.md b/docs/IE_DG/Extensibility_DG/Extension.md
index 6df3a1424ec0e4..69bb614e605681 100644
--- a/docs/IE_DG/Extensibility_DG/Extension.md
+++ b/docs/IE_DG/Extensibility_DG/Extension.md
@@ -1,7 +1,10 @@
# Extension Library {#openvino_docs_IE_DG_Extensibility_DG_Extension}
Inference Engine provides an InferenceEngine::IExtension interface, which defines the interface for Inference Engine Extension libraries.
-All extension libraries should be inherited from this interface.
+All extension libraries should inherit from this interface. The example below contains the implementation of two operations: `Template`,
+used as an example in this document, and `FFT`, used as a more complex example from the [Custom Operations Guide](../../HOWTO/Custom_Layers_Guide.md).
+
+> **NOTE**: The `FFT` operation is implemented using the OpenCV library functions `cv::dft` and `cv::idft`.
Based on that, declaration of an extension class can look as follows:
diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
index a918076e756112..59c0f070cf0693 100644
--- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
@@ -1,16 +1,16 @@
-# How to Implement Custom GPU Layers {#openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel}
+# How to Implement Custom GPU Operations {#openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel}
-The GPU codepath abstracts many details about OpenCL™. You need to provide the kernel code in OpenCL C and the configuration file that connects the kernel and its parameters to the parameters of the layer.
+The GPU codepath abstracts many details about OpenCL™. You need to provide the kernel code in OpenCL C and the configuration file that connects the kernel and its parameters to the parameters of the operation.
-There are two options of using custom layer configuration file:
+There are two options for using a custom operation configuration file:
* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder
-* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom layers to the plugin:
+* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin:
@snippet snippets/GPU_Kernel.cpp part0
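+
+A similar configuration can be applied from the Python API (a sketch; the configuration file path is a placeholder):
+
+```python
+from openvino.inference_engine import IECore
+
+ie = IECore()
+# Point the GPU plugin to the custom operation configuration file before loading the network
+ie.set_config({'CONFIG_FILE': '/path/to/custom_operations.xml'}, 'GPU')
+```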
All Inference Engine samples, except trivial `hello_classification`,
-feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom layers for the classification sample, run the command below:
+feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom operations for the classification sample, run the command below:
```sh
$ ./classification_sample -m /bvlc_alexnet_fp16.xml -i ./validation_set/daily/227x227/apron.bmp -d GPU
-c /custom_layer_example.xml
@@ -19,7 +19,7 @@ $ ./classification_sample -m /bvlc_alexnet_fp16.xml -i ./validati
## Configuration File Format
The configuration file is expected to follow the `.xml` file structure
-with a node of the type `CustomLayer` for every custom layer you provide.
+with a node of the type `CustomLayer` for every custom operation you provide.
The definitions described in the sections below use the following notations:
@@ -32,14 +32,13 @@ Notation | Description
### CustomLayer Node and Sub-node Structure
-`CustomLayer` node contains the entire configuration for a single custom
-layer.
+`CustomLayer` node contains the entire configuration for a single custom operation.
| Attribute Name |\# | Description |
|-----|-----|-----|
-| `name` | (1) | The name of the layer type to be used. This name should be identical to the type used in the IR.|
-| `type` | (1) | Must be `SimpleGPU`. |
-| `version` | (1) | Must be `1`. |
+| `name` | (1) | The name of the operation type to be used. This name should be identical to the type used in the IR.|
+| `type` | (1) | Must be `SimpleGPU`. |
+| `version` | (1) | Must be `1`. |
**Sub-nodes**: `Kernel` (1), `Buffers` (1), `CompilerOptions` (0+),
`WorkSizes` (0/1)
@@ -69,9 +68,9 @@ the sources during compilation (JIT).
| Attribute Name | \# | Description |
|------|-------|------|
| `name` | (1) | The name of the defined JIT. For static constants, this can include the value as well (taken as a string). |
-| `param` | (0/1) | This parameter value is used as the value of this JIT definition. |
+| `param` | (0/1) | This parameter value is used as the value of this JIT definition. |
| `type` | (0/1) | The parameter type. Accepted values: `int`, `float`, and `int[]`, `float[]` for arrays. |
-| `default` | (0/1) | The default value to be used if the specified parameters is missing from the layer in the IR. |
+| `default` | (0/1) | The default value to be used if the specified parameter is missing from the operation in the IR. |
**Sub-nodes:** None
@@ -92,7 +91,7 @@ weights or biases).
| Attribute Name | \# | Description |
|----|-----|------|
-| `name` | (1) | Name of a blob attached to a layer in the IR |
+| `name` | (1) | Name of a blob attached to an operation in the IR |
| `arg-index` | (1) | 0-based index in the entry function arguments to be bound to |
**Sub-nodes**: None
@@ -105,7 +104,7 @@ weights or biases).
|------|-------|-------|
| `arg-index` | (1) | 0-based index in the entry function arguments to be bound to. |
| `type` | (1) | `input` or `output` |
-| `port-index` | (1) | 0-based index in the layer’s input/output ports in the IR |
+| `port-index` | (1) | 0-based index in the operation's input/output ports in the IR |
| `format` | (0/1) | Data layout declaration for the tensor. Accepted values: `BFYX`, `BYXF`, `YXFB`, `FYXB` (also in all lowercase). Default value: `BFYX` |
### CompilerOptions Node and Sub-node Structure
@@ -178,7 +177,7 @@ For an example, see [Example Kernel](#example-kernel).
| `_PITCHES_SIZE`| The size of the `_PITCHES` array |
| `_OFFSET`| The number of elements from the start of the tensor to the first valid element (bypassing the lower padding) |
All `` values are automatically defined for every tensor
-bound to this layer (`INPUT0`, `INPUT1`, `OUTPUT0`, and so on), as shown
+bound to this operation (`INPUT0`, `INPUT1`, `OUTPUT0`, and so on), as shown
in the following for example:
```sh
diff --git a/docs/IE_DG/Extensibility_DG/Intro.md b/docs/IE_DG/Extensibility_DG/Intro.md
index b5d90cba061ad3..06d030fc710294 100644
--- a/docs/IE_DG/Extensibility_DG/Intro.md
+++ b/docs/IE_DG/Extensibility_DG/Intro.md
@@ -2,19 +2,22 @@
Inference Engine Extensibility API allows to add support of custom operations to the Inference Engine.
Extension should contain operation sets with custom operations and execution kernels for custom operations.
-Physically, an extension library can be represented as a dynamic library exporting the single `CreateExtension` function that allows to create a new extension instance.
+Physically, an extension library can be represented as a dynamic library exporting the single `CreateExtension` function
+that is used to create a new extension instance.
-Extensibility library can be loaded to the InferenceEngine::Core object using the InferenceEngine::Core::AddExtension method.
+The extensibility library can be loaded into the `InferenceEngine::Core` object using the
+`InferenceEngine::Core::AddExtension` method.
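+
+For example, the same can be done from the Python API (a sketch; the library path is a placeholder):
+
+```python
+from openvino.inference_engine import IECore
+
+ie = IECore()
+# Load the compiled extension library into the CPU plugin
+ie.add_extension('/path/to/libtemplate_extension.so', 'CPU')
+```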
## Inference Engine Extension Library
-Inference Engine Extension dynamic library contains several main components:
+Inference Engine Extension dynamic library contains several components:
- * [Extension class](Extension.md):
+ * [Extension Library](Extension.md):
- Contains custom operation sets
- Provides CPU implementations for custom operations
- * [Custom operations](Intro.md):
- - Allows to use InferenceEngine::Core::ReadNetwork to read Intermediate Representation (IR) with unsupported operations
+ * [Custom nGraph Operation](AddingNGraphOps.md):
+ - Allows to use `InferenceEngine::Core::ReadNetwork` to read Intermediate Representation (IR) with unsupported
+ operations
- Allows to create `ngraph::Function` with unsupported operations
- Provides shape inference mechanism for custom operations
@@ -26,13 +29,13 @@ at `/docs/template_extension`.
The Inference Engine workflow involves the creation of custom kernels and either custom or existing operations.
-An _Operation_ is a Network building block implemented in the training framework, for example, `Convolution` in Caffe*.
+An _Operation_ is a network building block implemented in the training framework, for example, `Convolution` in Caffe*.
A _Kernel_ is defined as the corresponding implementation in the Inference Engine.
-Refer to the [Custom Layers in the Model Optimizer](../../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) section for details on how
-mapping between framework layers and Inference Engine kernels is registered.
+Refer to the [Model Optimizer Extensibility](../../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
+for details on how a mapping between framework operations and Inference Engine kernels is registered.
-In short, you can plug your own kernel implementations into the Inference Engine and map them to the layers in the original framework.
+In short, you can plug your own kernel implementations into the Inference Engine and map them to the operations in the original framework.
The following pages describe how to integrate custom _kernels_ into the Inference Engine:
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index d63a310de0b06b..245fa68e900e80 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -127,6 +127,63 @@ You can also build a generated solution manually. For example, if you want to bu
Microsoft Visual Studio and open the generated solution file from the `C:\Users\\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\Samples.sln`
directory.
+### Build the Sample Applications on macOS*
+
+The officially supported macOS* build environment is the following:
+
+* macOS* 10.15 64-bit
+* Clang* compiler from Xcode* 10.1 or higher
+* CMake* version 3.13 or higher
+
+> **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode).
+
+To build the C or C++ sample applications for macOS, go to the `/inference_engine/samples/c` or `/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
+```sh
+build_samples.sh
+```
+
+Once the build is completed, you can find sample binaries in the following folders:
+* C samples: `~/inference_engine_c_samples_build/intel64/Release`
+* C++ samples: `~/inference_engine_cpp_samples_build/intel64/Release`
+
+You can also build the sample applications manually:
+
+> **NOTE**: If you have installed the product as a root user, switch to root mode before you continue: `sudo -i`
+
+> **NOTE**: Before proceeding, make sure you have the OpenVINO™ environment set correctly. This can be done manually by running:
+```sh
+cd /bin
+source setupvars.sh
+```
+
+1. Navigate to a directory that you have write access to and create a samples build directory. This example uses a directory named `build`:
+```sh
+mkdir build
+```
+> **NOTE**: If you ran the Image Classification verification script during the installation, the C++ samples build directory was already created in your home directory: `~/inference_engine_samples_build/`
+
+2. Go to the created directory:
+```sh
+cd build
+```
+
+3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples:
+ - For release configuration:
+ ```sh
+ cmake -DCMAKE_BUILD_TYPE=Release /inference_engine/samples/cpp
+ ```
+ - For debug configuration:
+ ```sh
+ cmake -DCMAKE_BUILD_TYPE=Debug /inference_engine/samples/cpp
+ ```
+4. Run `make` to build the samples:
+```sh
+make
+```
+
+For the release configuration, the sample application binaries are in `/intel64/Release/`;
+for the debug configuration, they are in `/intel64/Debug/`.
+
## Get Ready for Running the Sample Applications
### Get Ready for Running the Sample Applications on Linux*
diff --git a/docs/IE_DG/ShapeInference.md b/docs/IE_DG/ShapeInference.md
index a7cdddb784d676..ea86911ff397e0 100644
--- a/docs/IE_DG/ShapeInference.md
+++ b/docs/IE_DG/ShapeInference.md
@@ -1,6 +1,36 @@
Using Shape Inference {#openvino_docs_IE_DG_ShapeInference}
==========================================
+OpenVINO™ provides the following methods for runtime model reshaping:
+
+* **Set a new input shape** with the `InferenceEngine::CNNNetwork::reshape` method.
+ The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.
+
+> **NOTES**:
+> - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping in most cases.
+> - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.
+> - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.
+
+* **Set a new batch dimension value** with the `InferenceEngine::CNNNetwork::setBatchSize` method.
+ The meaning of a model batch may vary depending on the model design.
+ This method does not deduce batch placement for inputs from the model architecture.
+ It assumes that the batch is placed at the zero index in the shape for all inputs and uses the `InferenceEngine::CNNNetwork::reshape` method to propagate updated shapes through the model.
+
+ The method transforms the model before a new shape propagation to relax a hard-coded batch dimension in the model, if any.
+
+ Use `InferenceEngine::CNNNetwork::reshape` instead of `InferenceEngine::CNNNetwork::setBatchSize` to set new input shapes for the model in case the model has:
+ * Multiple inputs with different zero-index dimension meanings
+ * Input without a batch dimension
+ * 0D, 1D, or 3D shape
+
+ The `InferenceEngine::CNNNetwork::setBatchSize` method is a high-level API method that wraps the `InferenceEngine::CNNNetwork::reshape` method call and works for trivial models from the batch placement standpoint.
+ Use `InferenceEngine::CNNNetwork::reshape` for other models.
+
+ Using the `InferenceEngine::CNNNetwork::setBatchSize` method for models with a non-zero index batch placement or for models with inputs that do not have a batch dimension may lead to undefined behaviour.
+
+You can change input shapes multiple times using the `InferenceEngine::CNNNetwork::reshape` and `InferenceEngine::CNNNetwork::setBatchSize` methods in any order.
+If a model has a hard-coded batch dimension, use `InferenceEngine::CNNNetwork::setBatchSize` first to change the batch, then call `InferenceEngine::CNNNetwork::reshape` to update other dimensions, if needed.
+
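+A minimal sketch of this flow using the Python API (the input name, file names, and shape are placeholders):
+
+```python
+from openvino.inference_engine import IECore
+
+ie = IECore()
+net = ie.read_network('model.xml', 'model.bin')
+# Update the shape of the assumed input 'data', then load the network with the new shapes
+net.reshape({'data': [1, 3, 448, 448]})
+exec_net = ie.load_network(net, 'CPU')
+```
+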
Inference Engine takes three kinds of a model description as an input, which are converted into an `InferenceEngine::CNNNetwork` object:
1. [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) through `InferenceEngine::Core::ReadNetwork`
2. [ONNX model](../IE_DG/OnnxImporterTutorial.md) through `InferenceEngine::Core::ReadNetwork`
@@ -23,33 +53,7 @@ for (const auto & parameter : parameters) {
To feed input data of a shape that is different from the model input shape, reshape the model first.
-OpenVINO™ provides the following methods for runtime model reshaping:
-
-* **Set a new input shape** with the `InferenceEngine::CNNNetwork::reshape` method.
- The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.
- You can reshape a model multiple times like in this application scheme:
- ```
- ReadNetwork -> reshape(input_1_shape) -> LoadNetwork -> infer(input_1)
- \
- -> reshape(input_2_shape) -> LoadNetwork -> infer(input_2)
- ```
- > **NOTES**:
- > - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping.
- > - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.
- > - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.
-* **Set a new batch dimension value** with the `InferenceEngine::CNNNetwork::setBatchSize` method.
- The meaning of a model batch may vary depending on the model design.
- The `InferenceEngine::CNNNetwork::setBatchSize` method deduces the index of a batch dimension based only on the input rank.
- This method does not work for models with a non-zero index batch placement or models with inputs without a batch dimension.
- The batch-setting algorithm does not involve the shape inference mechanism.
- Batch of input and output shapes for all layers is set to a new batch value without layer validation.
- It may cause both positive and negative side effects.
- Due to the limitations described above, the current method is not recommended to use.
- If you need to set a new batch size for the model, use the `CNNNetwork::reshape` method instead.
-
-Do not use runtime reshaping methods simultaneously, especially do not call the `CNNNetwork::reshape` method after you use `InferenceEngine::CNNNetwork::setBatchSize`.
-The `InferenceEngine::CNNNetwork::setBatchSize` method causes irreversible conversion of the internal model representation into the legacy model representation.
-The method does not use nGraph for shape inference which leads to reduced reshape opportunities and may affect the performance of the model.
+Once the input shape of `InferenceEngine::CNNNetwork` is set, call the `InferenceEngine::Core::LoadNetwork` method to get an `InferenceEngine::ExecutableNetwork` object for inference with updated shapes.
There are other approaches to reshape the model during the stage of IR generation or [nGraph::Function creation](../nGraph_DG/build_function.md).
@@ -62,8 +66,8 @@ Shape collision during shape propagation may be a sign that a new shape does not
Changing the model input shape may result in intermediate operations shape collision.
Examples of such operations:
-- `Reshape` operation with a hard-coded output shape value
-- `MatMul` operation with the `Const` second input cannot be resized by spatial dimensions due to operation semantics
+- [`Reshape` operation](../ops/shape/Reshape_1.md) with a hard-coded output shape value
+- [`MatMul` operation](../ops/matrix/MatMul_1.md) with the `Const` second input cannot be resized by spatial dimensions due to operation semantics
Model structure and logic should not change significantly after model reshaping.
- The Global Pooling operation is commonly used to reduce output feature map of classification models output.
diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md
index 98de8d014145c7..8ce80da1d1579b 100644
--- a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md
+++ b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md
@@ -77,7 +77,6 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi
* [Converting DeepSpeech from TensorFlow](prepare_model/convert_model/tf_specific/Convert_DeepSpeech_From_Tensorflow.md)
* [Converting Language Model on One Billion Word Benchmark from TensorFlow](prepare_model/convert_model/tf_specific/Convert_lm_1b_From_Tensorflow.md)
* [Converting Neural Collaborative Filtering Model from TensorFlow*](prepare_model/convert_model/tf_specific/Convert_NCF_From_Tensorflow.md)
-
* [Converting TensorFlow* Object Detection API Models](prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md)
* [Converting TensorFlow*-Slim Image Classification Model Library Models](prepare_model/convert_model/tf_specific/Convert_Slim_Library_Models.md)
* [Converting CRNN Model from TensorFlow*](prepare_model/convert_model/tf_specific/Convert_CRNN_From_Tensorflow.md)
@@ -91,17 +90,15 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi
* [Model Optimizations Techniques](prepare_model/Model_Optimization_Techniques.md)
* [Cutting parts of the model](prepare_model/convert_model/Cutting_Model.md)
* [Sub-graph Replacement in Model Optimizer](prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md)
- * [(Deprecated) Case-Study: Converting SSD models created with the TensorFlow* Object Detection API](prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md)
- * [(Deprecated) Case-Study: Converting Faster R-CNN models created with the TensorFlow* Object Detection API](prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md)
* [Supported Framework Layers](prepare_model/Supported_Frameworks_Layers.md)
* [Intermediate Representation and Operation Sets](IR_and_opsets.md)
* [Operations Specification](../ops/opset.md)
* [Intermediate Representation suitable for INT8 inference](prepare_model/convert_model/IR_suitable_for_INT8_inference.md)
-
- * [Custom Layers in Model Optimizer](prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
+ * [Model Optimizer Extensibility](prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
* [Extending Model Optimizer with New Primitives](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md)
+ * [Extending Model Optimizer with Caffe Python Layers](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md)
+ * [Extending Model Optimizer with Custom MXNet* Operations](prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md)
* [Legacy Mode for Caffe* Custom Layers](prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md)
-
* [Model Optimizer Frequently Asked Questions](prepare_model/Model_Optimizer_FAQ.md)
* [Known Issues](Known_Issues_Limitations.md)
diff --git a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
index 869cfa49d5e942..e938848a679444 100644
--- a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
+++ b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
@@ -108,6 +108,7 @@ Standard MXNet\* symbols:
| SoftmaxActivation | No |
| SoftmaxOutput | No |
| SoftSign | No |
+| Take | The attribute 'mode' is not supported |
| Tile | No |
| UpSampling | No |
| Where | No |
diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
index b523897a773c57..2df7773b8ad57d 100644
--- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
+++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
@@ -38,5 +38,5 @@ Framework-specific parameters for:
## See Also
* [Configuring the Model Optimizer](../Config_Model_Optimizer.md)
* [IR Notation Reference](../../IR_and_opsets.md)
-* [Custom Layers in Model Optimizer](../customize_model_optimizer/Customize_Model_Optimizer.md)
-* [Model Cutting](Cutting_Model.md)
\ No newline at end of file
+* [Model Optimizer Extensibility](../customize_model_optimizer/Customize_Model_Optimizer.md)
+* [Model Cutting](Cutting_Model.md)
diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md
index b208a5f5b5c307..a4bb4e98017276 100644
--- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md
+++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md
@@ -9,7 +9,6 @@ The following examples are the situations when model cutting is useful or even r
* model has pre- or post-processing parts that cannot be translated to existing Inference Engine layers.
* model has a training part that is convenient to be kept in the model, but not used during inference.
* model is too complex (contains lots of unsupported operations that cannot be easily implemented as custom layers), so the complete model cannot be converted in one shot.
-* model is one of the supported [SSD models](../customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md). In this case, you need to cut a post-processing part off.
* problem with model conversion in the Model Optimizer or inference in the Inference Engine occurred. To localize the issue, limit the scope for conversion by iteratively searching for problematic places in the model.
* single custom layer or a combination of custom layers is isolated for debugging purposes.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md
index 2eb6b1717a58f5..73e439d83fee39 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md
@@ -1,82 +1,1300 @@
-# Custom Layers in the Model Optimizer {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer}
+# Model Optimizer Extensibility {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer}
-Model Optimizer searches for each layer of the input model in the list of known layers before building the model's internal representation, optimizing the model, and producing the Intermediate Representation.
+* [Model Representation in Memory](#model-representation-in-memory)
+* [Model Conversion Pipeline](#model-conversion-pipeline)
+ * [Model Loading](#model-loading)
+ * [Operations Attributes Extracting](#operations-attributes-extracting)
+ * [Front Phase](#front-phase)
+ * [Partial Inference](#partial-inference)
+ * [Middle Phase](#middle-phase)
+ * [NHWC to NCHW Layout Change](#layout-change)
+ * [Back Phase](#back-phase)
+ * [Intermediate Representation Emitting](#ir-emitting)
+* [Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-conneсtions)
+ * [Ports](#intro-ports)
+ * [Connections](#intro-connections)
+* [Model Optimizer Extensions](#extensions)
+ * [Model Optimizer Operation](#extension-operation)
+ * [Operation Extractor](#operation-extractor)
+ * [Graph Transformation Extensions](#graph-transformations)
+ * [Front Phase Transformations](#front-phase-transformations)
+ * [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations)
+ * [Specific Operation Front Phase Transformations](#specific-operation-front-phase-transformations)
+ * [Generic Front Phase Transformations](#generic-front-phase-transformations)
+ * [Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformations)
+ * [Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations)
+ * [Generic Front Phase Transformations Enabled with Transformations Configuration File](#generic-transformations-config-front-phase-transformations)
+ * [Middle Phase Transformations](#middle-phase-transformations)
+ * [Pattern-Defined Middle Phase Transformations](#pattern-defined-middle-phase-transformations)
+ * [Generic Middle Phase Transformations](#generic-middle-phase-transformations)
+ * [Back Phase Transformations](#back-phase-transformations)
+ * [Pattern-Defined Back Phase Transformations](#pattern-defined-back-phase-transformations)
+ * [Generic Back Phase Transformations](#generic-back-phase-transformations)
-The list of known layers is different for each of supported frameworks. To see the layers supported by your framework, refer to the [corresponding section](../Supported_Frameworks_Layers.md).
+The Model Optimizer extensibility mechanism allows you to support new operations and custom transformations in order to generate an
+optimized Intermediate Representation (IR) as described in
+[Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md). This
+mechanism is a core part of the Model Optimizer, which uses it under the hood, so the Model Optimizer
+itself is a large set of examples of how to add custom logic to support your model.
-Custom layers are layers that are not included into a list of known layers. If your topology contains any layers that are not in the list of known layers, the Model Optimizer classifies them as custom.
+There are several cases when customization is needed:
-## Caffe\* Models with Custom Layers
+* A model contains operation(s) unknown to the Model Optimizer, but these operation(s) can be expressed as a
+combination of supported operations. In this case, a custom transformation should be implemented to replace the unsupported
+operation(s) with supported ones.
+* A model contains a sub-graph of operations that can be replaced with a smaller number of operations to get better
+performance. This corresponds to so-called fusing transformations. For example, a sub-graph performing
+the calculation \f$x / (1.0 + e^{-(beta * x)})\f$ can be replaced with a single operation of type
+[Swish](../../../ops/activation/Swish_4.md), as illustrated right after this list.
+* A model contains a custom framework operation (an operation that is not a part of the official operation set of the
+framework) which was developed using the framework extensibility mechanism. In this case, the Model Optimizer should know
+how to handle the operation and generate a corresponding section in an IR for it.
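+
+The following small NumPy check (an illustration only, not Model Optimizer code) shows why the fusing in the second case above is valid:
+
+```python
+import numpy as np
+
+def sigmoid(z):
+    return 1.0 / (1.0 + np.exp(-z))
+
+x = np.linspace(-3.0, 3.0, 7, dtype=np.float32)
+beta = 1.5
+sub_graph_output = x / (1.0 + np.exp(-(beta * x)))  # the sub-graph pattern from the text
+swish_output = x * sigmoid(beta * x)                # a single Swish operation
+assert np.allclose(sub_graph_output, swish_output)
+```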
-You have two options if your Caffe\* model has custom layers:
+Before going into the details of the Model Optimizer extensibility mechanism, it is necessary to understand how the Model
+Optimizer represents a model in memory and converts it to an IR.
-* **Register the custom layers as extensions to the Model Optimizer**. For instructions, see [Extending Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You only need to write a small chunk of Python\* code that lets the Model Optimizer:
+> **NOTE**: All paths in this document are provided relative to the Model Optimizer installation directory if not
+> stated otherwise.
- * Generate a valid Intermediate Representation according to the rules you specified
- * Be independent from the availability of Caffe on your computer
-
-* **Register the custom layers as Custom and use the system Caffe to calculate the output shape of each Custom Layer**, which is required by the Intermediate Representation format. For this method, the Model Optimizer requires the Caffe Python interface on your system. When registering the custom layer in the `CustomLayersMapping.xml` file, you can specify if layer parameters should appear in Intermediate Representation or if they should be skipped. To read more about the expected format and general structure of this file, see [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md). This approach has several limitations:
+## Model Representation in Memory
+The model can be represented as a directed graph where nodes are operations and edges correspond to data passing from a
+producer operation (node) to a consumer operation (node).
- * If your layer output shape depends on dynamic parameters, input data or previous layers parameters, calculation of output shape of the layer via Caffe can be incorrect. In this case, you need to patch Caffe on your own.
-
- * If the calculation of output shape of the layer via Caffe fails inside the framework, Model Optimizer is unable to produce any correct Intermediate Representation and you also need to investigate the issue in the implementation of layers in the Caffe and patch it.
-
- * You are not able to produce Intermediate Representation on any machine that does not have Caffe installed. If you want to use Model Optimizer on multiple machines, your topology contains Custom Layers and you use `CustomLayersMapping.xml` to fallback on Caffe, you need to configure Caffe on each new machine.
-
- For these reasons, it is best to use the Model Optimizer extensions for Custom Layers: you do not depend on the framework and fully control the workflow.
+Model Optimizer uses an instance of the Python class `mo.graph.graph.Graph` to represent the computation graph in memory during
+model conversion. This class is inherited from the `networkx.MultiDiGraph` class of the standard `networkx` Python
+library and provides many convenient methods to traverse and modify the graph. Refer to the `mo/graph/graph.py` file for
+examples.
-If your model contains Custom Layers, it is important to understand the internal workflow of Model Optimizer. Consider the following example.
+Model Optimizer keeps all necessary information about an operation in node attributes. Model Optimizer uses the class
+`mo.graph.graph.Node` defined in the `mo/graph/graph.py` file, which is a wrapper on top of a `networkx` node attributes
+dictionary and provides many convenient methods to work with the node. For example, the attribute named `'my_attr'` of the
+node `my_node` can be retrieved with the code `my_node.my_attr`, which is equivalent to obtaining the
+attribute named `'my_attr'` from the `graph.node['my_node']` dictionary. Refer to `mo/graph/graph.py` for the
+class implementation details.
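+
+For example, a minimal sketch (the node identifier and attribute name are hypothetical):
+
+```python
+from mo.graph.graph import Graph, Node
+
+def read_my_attr(graph: Graph):
+    # 'my_node' and 'my_attr' are hypothetical names used for illustration only
+    node = Node(graph, 'my_node')
+    print(node.my_attr)                        # access through the Node wrapper
+    print(graph.node['my_node']['my_attr'])    # equivalent low-level access
+```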
-**Example**:
+An operation may have several inputs and outputs. For example, the operation [Split](../../../ops/movement/Split_1.md) has
+two inputs: the data to split and the axis to split along, and a variable number of outputs depending on the value of the attribute
+`num_splits`. Each input to the operation is passed to a specific operation **input port**. An operation produces
+output data from an **output port**. Input and output ports are numbered from 0 independently. Model Optimizer uses the
+classes `mo.graph.port.Port` and `mo.graph.connection.Connection`, which are useful abstractions for performing graph
+modifications like connecting/re-connecting nodes and traversing the graph. These classes are widely used in the Model
+Optimizer code, so it is easy to find a lot of usage examples.
-The network has:
+There is no dedicated class corresponding to an edge, so low-level graph manipulation is needed to access
+edge attributes if required. Meanwhile, most manipulations with node connections should be done with the help of the
+`mo.graph.connection.Connection` and `mo.graph.port.Port` classes, because low-level graph manipulation is error-prone and
+strongly not recommended.
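+
+For example, a typical re-connection through the port API may look like the following sketch (the function and node names are hypothetical):
+
+```python
+from mo.graph.graph import Node
+
+def attach_extra_consumer(consumer: Node, extra_consumer: Node):
+    # Take the producer output port feeding input port 0 of `consumer`
+    # and additionally connect it to input port 1 of `extra_consumer`
+    source_port = consumer.in_port(0).get_source()
+    extra_consumer.in_port(1).connect(source_port)
+```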
-* One input layer (#1)
-* One output Layer (#5)
-* Three internal layers (#2, 3, 4)
+Further details and examples related to model representation in memory are provided in the sections below, where they can be
+explained in context. Also, refer to the [Graph Traversal and Modification Using `Port`s and
+`Connection`s](#graph-ports-and-conneсtions) for more information on how to use ports and connections.
-The custom and standard layer types are:
+## Model Conversion Pipeline
+A model conversion pipeline can be represented with the following diagram:
-* Layers #2 and #5 are implemented as Model Optimizer extensions.
-* Layers #1 and #4 are supported in Model Optimizer out-of-the box.
-* Layer #3 is neither in the list of supported layers nor in extensions, but is specified in CustomLayersMapping.xml.
+![Model Conversion pipeline](../../../img/MO_conversion_pipeline.png)
-> **NOTE**: If any of the layers are not in one of three categories described above, the Model Optimizer fails with an appropriate message and a link to the corresponding question in [Model Optimizer FAQ](../Model_Optimizer_FAQ.md).
+Let's review each conversion step in detail.
-The general process is as shown:
+### Model Loading
+Model Optimizer gets a trained model file as input. The model loader component of the Model Optimizer reads the model file
+using Python bindings provided with the framework and builds an in-memory representation of a computation graph. There
+is a separate loader for each supported framework. These loaders are implemented in the
+`extensions/load//loader.py` files of the Model Optimizer.
-![Example custom layer network](../../img/mo_caffe_priorities.png)
+> **NOTE**: Model Optimizer uses a special parser for Caffe\* models built on top of the `caffe.proto` file. In case of a
+> model loading failure, the Model Optimizer throws an error and asks you to prepare a parser that can read the model.
+> For more information on how to prepare the custom Caffe\* parser, refer to the [Model Optimizer Frequently Asked Questions #1](../Model_Optimizer_FAQ.md).
-1. The example model is fed to the Model Optimizer that **loads the model** with the special parser, built on top of `caffe.proto` file. In case of failure, Model Optimizer asks you to prepare the parser that can read the model. For more information, refer to Model Optimizer, FAQ #1.
+The result of the model loading step is a `Graph` object, which can be depicted as in the following example:
-2. Model Optimizer **extracts the attributes of all layers**. In particular, it goes through the list of layers and attempts to find the appropriate extractor. In order of priority, Model Optimizer checks if the layer is:
-
- * Registered in `CustomLayersMapping.xml`
- * Registered as a Model Optimizer extension
- * Registered as a standard Model Optimizer layer
-
- When the Model Optimizer finds a satisfying condition from the list above, it extracts the attributes according to the following rules:
-
- * For bullet #1 - either takes all parameters or no parameters, according to the content of `CustomLayersMapping.xml`
- * For bullet #2 - takes only the parameters specified in the extension
- * For bullet #3 - takes only the parameters specified in the standard extractor
-
-3. Model Optimizer **calculates the output shape of all layers**. The logic is the same as it is for the priorities. **Important:** the Model Optimizer always takes the first available option.
+![Graph After Load](../../../img/MO_graph_after_loader.png)
-4. Model Optimizer **optimizes the original model and produces the Intermediate Representation**.
+The Model Optimizer loader saves the framework description of an operation instance (usually a Protobuf message) into a node
+attribute, usually named `pb`, for each operation of an input model. It is important that this is a
+**framework-specific** description of an operation. This means that an operation, for example,
+[Convolution](../../../ops/convolution/Convolution_1.md), may be represented differently in, for example, the Caffe\* and
+TensorFlow\* frameworks, but performs exactly the same calculations from a mathematical point of view.
-## TensorFlow\* Models with Custom Layers
+In the example above, "Operation 2" has one input and two outputs. The tensor produced from output port 0 is
+consumed by "Operation 5" (input port 0) and "Operation 3" (input port 1). The tensor produced from
+output port 1 is consumed by "Operation 4" (input port 0).
-You have two options for TensorFlow\* models with custom layers:
+Each edge has two attributes, `in` and `out`, containing the input port number of the consumer node and the output port
+number of the producer node. These attributes describe the fact that nodes are operations consuming some input tensors
+and producing some output tensors. However, the nodes themselves are "black boxes" from the Model Optimizer perspective because
+they do not contain the required information about the operation they perform.
-* **Register those layers as extensions to the Model Optimizer.** In this case, the Model Optimizer generates a valid and optimized Intermediate Representation.
-* **If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option.** This feature is helpful for many TensorFlow models. To read more, see [Sub-graph Replacement in the Model Optimizer](Subgraph_Replacement_Model_Optimizer.md).
-
-## MXNet\* Models with Custom Layers
+### Operations Attributes Extracting
+The next step is to parse the framework-dependent operation representation saved in a node attribute and update the node
+attributes with operation-specific attributes. There are three options to do this.
-There are two options to convert your MXNet* model that contains custom layers:
+1. The extractor extension approach. This is the recommended way to extract attributes for an operation, and it is
+explained in detail in the [Operation Extractor](#extension-extractor) section.
-1. Register the custom layers as extensions to the Model Optimizer. For instructions, see [Extending MXNet Model Optimizer with New Primitives](Extending_MXNet_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You can create Model Optimizer extensions for both MXNet layers with op `Custom` and layers which are not standard MXNet layers.
+2. The legacy approach with a built-in extractor. The file `mo/front//extractor.py` (for example, the one
+for Caffe) defines a dictionary with extractors for specific operation types. A key in the dictionary is the type of the
+operation to trigger the extracting function for, and the value is the function. The function has one parameter, a node
+to extract attributes from. This is a legacy and non-extensible approach, so it should be avoided. This mechanism will be
+removed in future versions of the Model Optimizer.
-2. If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option. In MXNet the function is actively used for ssd models provides an opportunity to for the necessary subgraph sequences and replace them. To read more, see [Sub-graph Replacement in the Model Optimizer](Subgraph_Replacement_Model_Optimizer.md).
+3. The Caffe-specific extractor using the `CustomLayersMapping.xml` file described in the
+[Legacy Mode for Caffe\* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md). This approach is deprecated and will be
+removed in future versions of the Model Optimizer.
+The extractors are executed in the following order:
+* `CustomLayersMapping.xml` (for Caffe models only).
+* Model Optimizer extension.
+* Built-in Model Optimizer extractor.
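+
+For illustration, a minimal extractor extension (option 1) may look like the following sketch; the import path of the `FFT` operation and the extracted attribute value are assumptions:
+
+```python
+from mo.front.extractor import FrontExtractorOp
+from mo_extensions.ops.FFT import FFT  # assumed location of the FFT operation shown earlier
+
+class FFTFrontExtractor(FrontExtractorOp):
+    op = 'FFT'       # framework operation type that triggers this extractor (assumed)
+    enabled = True
+
+    @classmethod
+    def extract(cls, node):
+        # Update the node with the Model Optimizer attributes of the FFT operation
+        FFT.update_node_stat(node, {'inverse': 0})
+        return cls.enabled
+```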
+
+The result of the operation attributes extracting step can be depicted as in the following example:
+
+![Graph After Attributes Extraction](../../../img/MO_graph_after_extractors.png)
+
+The only difference in the graph from the previous step is that nodes contain a dictionary with extracted attributes and
+operation-specific attributes needed by the Model Optimizer. Starting from this step, the Model Optimizer does not
+need the original representation of the operation/model and uses just the Model Optimizer representation (there are some
+very specific cases when the Model Optimizer still uses the `pb` attribute, and they are partially covered in this
+document). A detailed list of common node attributes and their values is provided below in the
+[Model Optimizer Operation](#extension-operation) section.
+
+### Front Phase
+For legacy reasons, a user must specify shapes for all inputs of the model that are not fully defined. In contrast, other
+machine learning frameworks like TensorFlow\* let the user create a model with undefined or partially defined input shapes.
+As an example, an undefined dimension is marked with the integer value `-1` in a TensorFlow\* model or has some string name
+in an ONNX\* model.
+
+During the front phase, the Model Optimizer knows the shapes of the model inputs and constants only and does not know the shapes
+(and even ranks) of the intermediate tensors. However, information about shapes may not be needed to implement a particular
+transformation. For example, the transformation `extensions/front/TopKNormalize.py` removes the attribute `k` from a
+`TopK` node and adds an input constant with the value `k`. The transformation is needed to convert a `TopK` operation
+that comes from frameworks where the number of output elements is defined as an attribute of the operation to the
+OpenVINO™ [TopK](../../../ops/sort/TopK_3.md) operation semantics, which requires this value to be a separate input.
+
+It is important to mention that sometimes it seems that a transformation cannot be implemented during the front phase
+because the actual values of inputs or shapes are needed. In fact, shape or value manipulations can be implemented
+using operations that are added to the graph. Consider the transformation
+`extensions/front/onnx/flattenONNX_to_reshape.py`, which replaces the ONNX\* operation
+[Flatten](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Flatten) with a sub-graph of operations performing
+the following (for the case when `axis` is not equal to 0 or 1):
+
+1. Calculate the shape of the `Flatten` input tensor using the [ShapeOf](../../../ops/shape/ShapeOf_3.md) operation.
+2. Get the first `axis` elements from the output of the `ShapeOf` operation and calculate their product using the
+[ReduceProd](../../../ops/reduction/ReduceProd_1.md) operation.
+3. Concatenate the output of the `ReduceProd` operation and a constant with the value `-1` (refer to the
+[Reshape](../../../ops/shape/Reshape_1.md) specification for an explanation of this value).
+4. Use the concatenated value as the second input to the `Reshape` operation.
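+
+The shape arithmetic performed by this sub-graph can be illustrated with NumPy (an illustration only, not Model Optimizer code):
+
+```python
+import numpy as np
+
+data = np.zeros((2, 3, 4, 5), dtype=np.float32)  # hypothetical input tensor
+axis = 2
+first_dim = int(np.prod(data.shape[:axis]))      # product of the first `axis` dimensions -> 6
+flattened = data.reshape((first_dim, -1))        # the Reshape target is [first_dim, -1]
+assert flattened.shape == (6, 20)
+```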
+
+It is highly recommended to write shape-agnostic transformations to avoid model reshape-ability issues. Refer to
+[Using Shape Inference](../../../IE_DG/ShapeInference.md) for more information related to the reshaping of a model.
+
+More information on how to develop front phase transformations and a dedicated API description are provided in the
+[Front Phase Transformations](#front-phase-transformations) section.
+
+### Partial Inference
+Model Optimizer performs a partial inference of a model during model conversion. This procedure includes calculation of the output
+shapes of all operations in a model and constant folding (value calculation for constant sub-graphs). Constant
+folding is needed for the shape inference because in some cases evaluation of a constant sub-graph is needed to calculate
+output shapes. For example, the output shape of the [Reshape](../../../ops/shape/Reshape_1.md) operation may be
+defined as a mathematical expression using the [ShapeOf](../../../ops/shape/ShapeOf_3.md) operation output.
+
+> **NOTE**: Model Optimizer does not fold sub-graphs starting from the [ShapeOf](../../../ops/shape/ShapeOf_3.md)
+> operation by default because this leads to a model non-reshape-ability (the command line parameter `--static_shape`
+> can override this behavior). Refer to [Using Shape Inference](../../../IE_DG/ShapeInference.md) for more information
+> related to reshaping of a model.
+
+Model Optimizer calculates output shapes for all operations in a model to write them to Intermediate Representation
+files.
+
+> **NOTE**: This is a legacy requirement because starting from IR version 10 Inference Engine needs to know shapes of
+> the [Const](../../../ops/infrastructure/Constant_1.md) and the [Parameter](../../../ops/infrastructure/Parameter_1.md)
+> operations only. The nGraph component of the Inference Engine calculates output shapes for all operations in a model
+> using shapes of [Parameter](../../../ops/infrastructure/Parameter_1.md) and
+> [Const](../../../ops/infrastructure/Constant_1.md) operations defined with respective operation attributes.
+
+Model Optimizer inserts "data" nodes into the computation graph before starting the partial inference phase. A data node
+corresponds to a specific tensor produced by an operation. Each data node contains two attributes: `shape`, containing
+the shape of the tensor, and `value`, which may contain the actual value of the tensor. The value of the `value`
+attribute is equal to `None` if this tensor value cannot be calculated. This happens in two cases: when a tensor value
+depends on values passed to the [Parameter](../../../ops/infrastructure/Parameter_1.md) operation of a model or when the
+Model Optimizer does not have a value propagation implementation for the operation.
+
+The graph before running the partial inference can be depicted as in the following example:
+
+![Graph Before Partial Inference](../../../img/MO_graph_before_partial_inference.png)
+
+The difference in the graph structure compared to the front phase is not only in the data nodes, but also in the
+edge attributes. Note that an `out` attribute is specified for edges **from operation** nodes only, while an `in`
+attribute is specified for edges **from data** nodes only. This corresponds to the fact that a tensor (data node) is
+produced from a specific output port of an operation and is consumed with a specific input port of an operation. Also,
+a unique data node is created for each output port of an operation and may be used as an input node for several
+operation nodes, like the data node "data2_0" which is consumed with the input port 1 of the operation "Operation 3" and
+input port 0 of the operation "Operation 5".
+
+Now consider how the Model Optimizer performs shape and value propagation. Model Optimizer performs a topological sort of
+the graph nodes. An error is raised if a graph contains a cycle. Then shape inference functions are called for
+each node in the graph according to the topological order. Each node of the graph must have an attribute called `infer`
+with a shape inference function, which is a function with one parameter, an instance of the `Node` class. The `infer`
+attribute is usually set in the operation extractor or when a node is added in some transformation using the Model
+Optimizer operation class inherited from the `mo.ops.Op` class. Refer to the [Model Optimizer Operation](#extension-operation)
+and [Operation Extractor](#operation-extractor) sections for more information on how to specify a shape inference function.
+
+A shape inference function should calculate an operation (node) output shape(s) based on input shape(s) and operation
+(node) attribute(s) and update `shape` and optionally `value` attributes of the corresponding data node(s). A simplified
+example of the shape infer function for the [Reshape](../../../ops/shape/Reshape_1.md) operation (the full version is
+available in the file `mo/ops/reshape.py`):
+
+```py
+ @staticmethod
+ def infer(node: Node):
+ name = node.soft_get('name', node.id)
+
+ input_shape = node.in_port(0).data.get_shape() # get the input tensor shape
+ new_shape = node.in_port(1).data.get_value() # get the value defining the output tensor shape. This tensor may
+ # have special values like 0 and -1
+
+ output_shape = ... # calculate output shape without special values like 0 and -1
+
+ if node.in_port(0).data.get_value() is not None: # if the input value is defined then calculate output value;
+ # shape will be updated automatically with the value shape
+ node.out_port(0).data.set_value(node.in_port(0).data.get_value().reshape(output_shape))
+ else: # in the opposite case calculate the output shape only
+ node.out_port(0).data.set_shape(output_shape)
+```
+
+Methods `in_port()` and `out_port()` of the `Node` class are used to get and set data node attributes. Refer to the
+[Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-conneсtions) section on how to use
+them.
+
+> **NOTE**: A shape inference function should perform output shape calculation in the original model layout. For
+> example, OpenVINO™ supports Convolution operations in NCHW layout only but TensorFlow\* supports NHWC layout as
+> well. The Model Optimizer shape inference function calculates output shapes for NHWC Convolutions in the NHWC layout,
+> and only during the layout change phase is the shape converted to NCHW.
+
+> **NOTE**: There is a legacy approach to read data node attributes like `input_shape = op_node.in_node(0).shape` and
+> modify data node attributes like `op_node.out_node(0).shape = some_value`. This approach is still used in the Model
+> Optimizer code but is not recommended. Instead, use the approach described in the [Ports](#intro-ports) section.
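+
+For example, the two snippets below read and set the same shapes for some operation node `op_node`; the second,
+port-based variant is the recommended one:
+
+```py
+# legacy approach (still present in the code base, not recommended)
+input_shape = op_node.in_node(0).shape
+op_node.out_node(0).shape = input_shape.copy()
+
+# recommended approach using ports
+input_shape = op_node.in_port(0).data.get_shape()
+op_node.out_port(0).data.set_shape(input_shape.copy())
+```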
+
+### Middle Phase
+The middle phase starts after the partial inference. At this phase a graph contains data nodes, and output shapes of all
+operations in the graph have been calculated. Any transformation implemented at this stage must update the `shape`
+attribute for all newly added operations. It is highly recommended to use the API described in the
+[Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-conneсtions) because modification of
+a graph using this API causes automatic re-inference of affected nodes as well as necessary data nodes creation.
+
+More information on how to develop middle transformations and a dedicated API description are provided in the
+[Middle Phase Transformations](#middle-phase-transformations) section.
+
+### NHWC to NCHW Layout Change
+There are several middle transformations responsible for changing model layout from NHWC to NCHW. These transformations
+are triggered by default for TensorFlow\* models only because it is the only framework with Convolution operations in
+NHWC layout.
+
+> **NOTE**: If a TensorFlow\* model is in NCHW layout then a user should specify the `--disable_nhwc_to_nchw` command
+> line parameter to disable these transformations.
+
+The layout change is a complex problem and a detailed explanation of it is out of the scope of this document. A very
+brief explanation of this process is provided below:
+
+1. Model Optimizer changes output shapes of most of the operations producing 4D and 5D (four dimensional and five
+dimensional) tensors as if they were in NHWC layout to NCHW layout: `nchw_shape = np.array(nhwc_shape)[[0, 3, 1, 2]]` for
+4D and `nchw_shape = np.array(nhwc_shape)[[0, 4, 1, 2, 3]]` for 5D. This permutation does not happen for some operations
+with specific conditions identified during a model conversion.
+2. Model Optimizer inserts [Gather](../../../ops/movement/Gather_1.md) operations into the sub-graphs related to shape
+calculation to perform the shape calculation in the correct layout.
+3. Model Optimizer inserts [Transpose](../../../ops/movement/Transpose_1.md) operations for some operations with
+specific conditions identified during a model conversion to produce correct inference results.
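+
+A minimal numpy illustration of the shape permutation from step 1 (the actual transformations listed below also handle
+many special cases):
+
+```py
+import numpy as np
+
+nhwc_shape = np.array([1, 224, 224, 3], dtype=np.int64)       # N, H, W, C
+nchw_shape = nhwc_shape[[0, 3, 1, 2]]                         # N, C, H, W -> [1, 3, 224, 224]
+
+ndhwc_shape = np.array([1, 16, 224, 224, 3], dtype=np.int64)  # N, D, H, W, C
+ncdhw_shape = ndhwc_shape[[0, 4, 1, 2, 3]]                    # N, C, D, H, W -> [1, 3, 16, 224, 224]
+```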
+
+The main transformations responsible for the layout change are: `extensions/middle/ApplyPermutations.py`,
+`extensions/middle/InsertLayoutPropagationTransposes.py`, `extensions/middle/MarkSubgraphsWithCorrectLayout.py`,
+`extensions/middle/ApplyNHWCtoNCHWpermutation.py` and `extensions/middle/LayoutChangeForConstantShapePaths.py`.
+Refer to the source code of these transformations for more details on how the layout change works.
+
+### Back Phase
+The back phase starts after the layout change to NCHW. This phase contains mostly the following transformations:
+
+1. Transformations which should work with a graph in the NCHW layout and thus cannot be implemented in the middle
+phase.
+2. Transformations which replace nodes corresponding to internal Model Optimizer operations with nodes corresponding to
+[opset](@ref openvino_docs_ops_opset) operations.
+3. Transformations which normalize operations inputs according to the specification.
+4. Final optimization transformations.
+
+A graph structure during the back phase is the same as during the middle phase. There is no difference in writing middle
+and back transformations.
+
+More information on how to develop back transformations and a dedicated API description are provided in the
+[Back Phase Transformations](#back-phase-transformations) section.
+
+### Intermediate Representation Emitting
+The last phase of a model conversion is the Intermediate Representation emitting. Model Optimizer performs the following
+steps:
+
+1. Iterates over all operation nodes in the graph and checks that all nodes have the attribute `type` set. This attribute
+defines the operation type and is used in the Inference Engine to instantiate a proper operation from the
+[opset](@ref openvino_docs_ops_opset) specified in the `version` attribute of the node. If some node does not have the
+attribute `type` or its value is equal to `None` then the Model Optimizer exits with an error.
+2. Performs type inference of graph operations similar to the shape inference. Inferred data types are saved to port
+attributes in the IR.
+3. Performs topological sort of the graph and changes `id` attribute of all operation nodes to be sequential integer
+values starting from 0.
+4. Saves all Constant values to the `.bin` file. Constants with the same value are shared among different operations.
+5. Generates the `.xml` file defining a graph structure. The information about operation inputs and outputs is prepared
+uniformly for all operations regardless of their type. A list of attributes to be saved to the `.xml` file is defined
+with the `backend_attrs()` or `supported_attrs()` methods of the `Op` class used for a graph node instantiation. For more
+information on how the operation attributes are saved to XML refer to the function `prepare_emit_ir()` in
+the `mo/pipeline/common.py` file and [Model Optimizer Operation](#extension-operation).
+
+## Graph Traversal and Modification Using `Port`s and `Connection`s
+There are three APIs for a graph traversal and transformation used in the Model Optimizer:
+1. The API provided with the `networkx` Python library for the `networkx.MultiDiGraph` class which is the base class for
+the `mo.graph.graph.Graph` object. Refer to the [Model Representation in Memory](#model-representation-in-memory) for
+more details. For example, the following methods belong to this API level: `graph.add_edges_from([list])`,
+`graph.add_node(x, attrs)`, `graph.out_edges(node_id)` etc., where `graph` is an instance of the `networkx.MultiDiGraph`
+class. **This is the lowest-level API and its usage should be avoided in the Model Optimizer transformations**.
+2. The API built around the `mo.graph.graph.Node` class. The `Node` class is the primary class to work with graph nodes
+and their attributes. **There are some `Node` class methods which are not recommended for use, and some functions defined
+in the `mo.graph.graph` module have been deprecated**. Examples of such methods and functions are:
+`node.in_node(y)`, `node.out_node(x)`, `node.get_outputs()`, `node.insert_node_after(n1, y)`, `create_edge(n1, n2)` etc.
+Refer to the `mo/graph/graph.py` for more details.
+3. The high-level API called Model Optimizer Graph API which uses `mo.graph.graph.Graph`, `mo.graph.port.Port` and
+`mo.graph.connection.Connection` classes. For example, the following methods belong to this API level:
+`node.in_port(x)`, `node.out_port(y)`, `port.get_connection()`, `connection.get_source()`,
+`connection.set_destination(dest_port)` etc. **This is the recommended API to be used in the Model Optimizer
+transformations and operations implementation**.
+
+The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact
+that the graph contains data nodes), provides an API to perform safe and predictable graph manipulations, and adds
+operation semantics to the graph. This is achieved by introducing the concepts of ports and connections. This chapter is
+dedicated to the Model Optimizer Graph API and does not cover the other two non-recommended APIs.
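+
+For example, finding the consumers of a node output can be expressed at each API level roughly as follows (a sketch for
+some `graph` and `node` objects; only the last variant is recommended):
+
+```py
+# 1. networkx level (avoid in transformations): raw edges of the underlying MultiDiGraph
+raw_out_edges = graph.out_edges(node.id)
+
+# 2. Node level (legacy, not recommended): data nodes are visible to the caller
+data_node = node.out_node(0)
+
+# 3. Model Optimizer Graph API (recommended): ports and connections hide data nodes
+consumer_ports = node.out_port(0).get_destinations()
+```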
+
+### Ports
+An operation semantic describes how many inputs and outputs the operation has. For example, operations
+[Parameter](../../../ops/infrastructure/Parameter_1.md) and [Const](../../../ops/infrastructure/Constant_1.md) have no
+inputs and have one output, operation [ReLU](../../../ops/activation/ReLU_1.md) has one input and one output, operation
+[Split](../../../ops/movement/Split_1.md) has 2 inputs and a variable number of outputs depending on the value of the
+attribute `num_splits`.
+
+Each operation node in the graph (an instance of the `Node` class) has 0 or more input and output ports (instances of
+the `mo.graph.port.Port` class). A `Port` object has several attributes:
+* `node` - the instance of the `Node` object the port belongs to.
+* `idx` - the port number. Input and output ports are numbered independently starting from `0`. Thus operation
+[ReLU](../../../ops/activation/ReLU_1.md) has one input port (with index `0`) and one output port (with index `0`).
+* `type` - the type of the port. Could be equal to either `"in"` or `"out"`.
+* `data` - the object which should be used to get attributes of the corresponding data node. This object has methods
+`get_shape()` / `set_shape()` and `get_value()` / `set_value()` to get/set shape/value of the corresponding data node.
+For example, `in_port.data.get_shape()` returns an input shape of a tensor connected to input port `in_port`
+(`in_port.type == 'in'`), `out_port.data.get_value()` returns a value of a tensor produced from output port `out_port`
+(`out_port.type == 'out'`).
+
+> **NOTE**: Functions `get_shape()` and `get_value()` return `None` until the partial inference phase. Refer to the
+> [Model Conversion Pipeline](#model-conversion-pipeline) for more information about model conversion phases and
+> [Partial Inference](#partial-inference) about partial inference phase.
+
+There are several methods of the `Node` class to get the instance of a corresponding port:
+* `in_port(x)` and `out_port(x)` to get the input/output port with number `x`.
+* `in_ports()` and `out_ports()` to get a dictionary where key is a port number and the value is the corresponding
+input/output port.
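+
+A short sketch of these methods for some operation node `node` (as stated in the note above, shapes and values are
+available only after the partial inference):
+
+```py
+in_shape = node.in_port(0).data.get_shape()    # shape of the tensor coming into input port 0
+out_value = node.out_port(0).data.get_value()  # value of the tensor produced from output port 0 (may be None)
+
+# iterate over all input ports; keys are port indices, values are Port objects
+for idx, port in node.in_ports().items():
+    print('node "{}" has input port {}'.format(node.soft_get('name', node.id), idx))
+```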
+
+Attributes `in_ports_count` and `out_ports_count` of the `Op` class instance define the default number of input and output
+ports to be created for the `Node`. However, additional input/output ports can be added using the methods
+`add_input_port()` and `add_output_port()`. A port can also be removed using the `delete_input_port()` and
+`delete_output_port()` methods.
+
+The `Port` class is just an abstraction which works with edges incoming/outgoing to/from a specific `Node` instance. For
+example, the output port with `idx = 1` corresponds to the outgoing edge of a node with the attribute `out = 1`, while the
+input port with `idx = 2` corresponds to the incoming edge of a node with the attribute `in = 2`.
+
+Consider an example of a graph part with 4 operation nodes "Op1", "Op2", "Op3" and "Op4" and a number of data nodes
+depicted with light green boxes.
+
+![Ports example 1](../../../img/MO_ports_example_1.png)
+
+Operation nodes have input ports (yellow squares) and output ports (light purple squares). An input port may not be
+connected. For example, the input port 2 of the node "Op1" does not have an incoming edge, while an output port always has
+an associated data node (after the partial inference when the data nodes are added to the graph), which may have no
+consumers.
+
+Ports can be used to traverse a graph. The method `get_source()` of an input port returns an output port producing the
+tensor the input port consumes. It is important that the method works the same during front, middle and back phases of a
+model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase).
+
+Let's assume that there are 4 instances of the `Node` object, `op1`, `op2`, `op3` and `op4`, corresponding to the nodes
+"Op1", "Op2", "Op3" and "Op4", respectively. The result of both `op2.in_port(0).get_source()` and
+`op4.in_port(1).get_source()` is the same object `op1.out_port(1)` of type `Port`.
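+
+In code this looks as follows (a sketch assuming `op1` ... `op4` are the `Node` instances described above):
+
+```py
+source_for_op2 = op2.in_port(0).get_source()  # output port 1 of the node "Op1"
+source_for_op4 = op4.in_port(1).get_source()  # the same port, op1.out_port(1)
+```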
+
+The method `get_destination()` of an output port returns the input port of the node consuming this tensor. If there are
+multiple consumers of this tensor then an error is raised. The method `get_destinations()` of an output port returns a
+list of input ports consuming the tensor.
+
+The method `disconnect()` removes the incoming edge of a node corresponding to the specific input port. The method removes
+several edges if it is applied during the front phase for a node output port connected with multiple nodes.
+
+The method `port.connect(another_port)` connects output port `port` and input port `another_port`. The method handles
+situations when the graph contains data nodes (middle and back phases): it does not just create an edge between two nodes
+but also automatically creates a data node or re-uses an existing one. If the method is used during the front phase, when
+data nodes do not exist, the method creates an edge and properly sets the `in` and `out` edge attributes.
+
+For example, applying the following two methods to the graph above will result in the graph depicted below:
+
+```py
+op4.in_port(1).disconnect()
+op3.out_port(0).connect(op4.in_port(1))
+```
+
+![Ports example 2](../../../img/MO_ports_example_2.png)
+
+> **NOTE**: Refer to the `Node` class implementation in the `mo/graph/graph.py` and `Port` class implementation in the
+> `mo/graph/port.py` for a full list of available methods.
+
+### Connections
+Connection is a concept introduced to perform graph modifications easily and reliably. A connection corresponds to a
+link between a source output port and one or more destination input ports, or a link between a destination input port
+and the source output port producing data. So each port is connected with one or more ports with the help of a connection.
+Model Optimizer uses the `mo.graph.connection.Connection` class to represent a connection.
+
+There is only one method `get_connection()` of the `Port` class to get the instance of the corresponding `Connection`
+object. If the port is not connected then the returned value is `None`.
+
+For example, the method `op3.out_port(0).get_connection()` returns a `Connection` object encapsulating edges from node
+"Op3" to data node "data_3_0" and two edges from data node "data_3_0" to two ports of the node "Op4".
+
+The `Connection` class provides methods to get source and destination(s) ports the connection corresponds to:
+* `connection.get_source()` - returns an output `Port` object producing the tensor.
+* `connection.get_destinations()` - returns a list of input `Port`s consuming the data.
+* `connection.get_destination()` - returns a single input `Port` consuming the data. If there are multiple consumers
+then an exception is raised.
+
+The `Connection` class provides methods to modify a graph by changing a source or destination(s) of a connection. For
+example, the function call `op3.out_port(0).get_connection().set_source(op1.out_port(0))` changes source port of edges
+consuming data from port `op3.out_port(0)` to `op1.out_port(0)`. The transformed graph from the sample above is depicted
+below:
+
+![Connection example 1](../../../img/MO_connection_example_1.png)
+
+Another example is the method `connection.set_destination(dest_port)`. It disconnects `dest_port` and all input ports
+the connection is currently connected to and connects the connection source port to the `dest_port`.
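+
+Both methods can be sketched as follows; the node and port names are taken from the example above, and the particular
+port choices are for illustration only:
+
+```py
+# change the producer for all consumers of the "Op3" output tensor
+op3.out_port(0).get_connection().set_source(op1.out_port(0))
+
+# independently of the line above: make op4.in_port(0) the only destination of the "Op3" output connection
+op3.out_port(0).get_connection().set_destination(op4.in_port(0))
+```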
+
+Note that connections work seamlessly during the front, middle and back phases and hide the fact that the graph structure
+is different.
+
+> **NOTE**: Refer to the `Connection` class implementation in the `mo/graph/connection.py` for a full list of available
+> methods.
+
+## Model Optimizer Extensions
+Model Optimizer extensions allow injecting some logic into the model conversion pipeline without changing the Model
+Optimizer core code. There are three types of Model Optimizer extensions:
+
+1. Model Optimizer operation.
+2. A framework operation extractor.
+3. A model transformation which can be executed during front, middle or back phase of the model conversion.
+
+An extension is just a plain text file with Python code. The file should contain a class (or classes) inherited from
+one of the extension base classes. Extension files should be saved to a directory with the following structure:
+
+```sh
+./<my_extensions_dir>/
+       ops/                 - custom operations
+       front/               - framework independent front transformations
+             <framework_1>/ - front transformations for <framework_1> models only and extractors for operations
+             <framework_2>/ - front transformations for <framework_2> models only and extractors for operations
+             ...
+       middle/              - middle transformations
+       back/                - back transformations
+```
+
+Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the directory
+`mo/ops/` is also used as a source of the Model Optimizer operations due to historical reasons.
+
+> **NOTE**: The name of a root directory with extensions should not be equal to "extensions" because it will result in a
+> name collision with the built-in Model Optimizer extensions.
+
+> **NOTE**: Model Optimizer itself is built using these extensions so there is a huge number of examples on how to use
+> them in the Model Optimizer code.
+
+### Model Optimizer Operation
+Model Optimizer defines a class `mo.ops.Op` (`Op` is used later in this document for brevity), which is a base class
+for an operation used in the Model Optimizer. The instance of the `Op` class serves several purposes:
+
+1. Stores the operation attributes.
+2. Stores the operation shape/value and type inference functions.
+3. Defines operation attributes to be saved to the corresponding IR section.
+4. Contains convenient methods to create a graph node from an `Op` object instance and connect it with the existing
+graph.
+5. Used in the extractors to store parsed attributes and operation specific attributes in the dedicated graph node.
+
+It is important to mention that there is no connection between the instance of the `Op` class and the `Node` object
+created from it. The `Op` class is just an attributes container describing the operation. Model Optimizer uses the `Op`
+class during a model conversion to create a node of the graph with attributes copied from the `Op` class instance. Graph
+manipulations are performed with graph `Node`s and their attributes and do not involve `Op`s.
+
+There are a number of common attributes used in the operations. Here is the list of these attributes with descriptions.
+
+* `id` — unique identifier of a node in a graph. Generated automatically equal to the number of nodes in the graph plus
+1 if not specified. **Mandatory**.
+* `name` — name of the operation. Generated automatically equal to the `id` if not specified. **Mandatory**.
+* `type` — type of the operation according to the [opset specification](@ref openvino_docs_ops_opset). For the internal
+Model Optimizer operations this attribute should be set to `None`. The model conversion fails if an operation with
+`type` equal to `None` comes to the IR emitting phase. **Mandatory**.
+* `version` — the operation set (opset) name the operation belongs to. If not specified then the Model Optimizer sets it
+equal to `experimental`. Refer to [nGraph Basic Concepts](@ref openvino_docs_nGraph_DG_basic_concepts) for more
+information about operation sets. **Mandatory**.
+* `op` — Model Optimizer type of the operation. In many cases the value of `type` is equal to the value of `op`. But
+when the Model Optimizer cannot instantiate opset operation during model loading it creates an instance of an internal
+operation and the attribute `op` is used as a type of this internal operation. Later in the pipeline the node created
+from an internal operation will be replaced during front, middle or back phase with node(s) created from the opset.
+* `infer` — the attribute defines a function calculating output tensor(s) shape and optionally value(s). The attribute
+may be set to `None` for internal Model Optimizer operations used during the front phase only. Refer to the
+[Partial Inference](#partial-inference) for more information about the shape inference function.
+* `type_infer` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not
+defined then the default function is used. The function checks if the node attribute `data_type` is set and then
+propagates this type to the output tensor from the port 0, otherwise it propagates the data type of the tensor coming
+into the input port 0 to the output tensor from the port 0.
+* `in_ports_count` — default number of input ports to be created for the operation. Additional ports can be created or
+redundant ports can be removed using dedicated `Node` class API methods.
+* `out_ports_count` — default number of output ports to be created for the operation. Additional ports can be created or
+redundant ports can be removed using dedicated `Node` class API methods.
+
+Here is an example of the Model Optimizer class for the operation [SoftMax](../../../ops/activation/SoftMax_1.md) from
+the file `mo/ops/softmax.py` with in-code comments.
+
+```py
+class Softmax(Op):
+ # the class attribute defines a name of the operation so the operation class can be obtained using the
+ # "Op.get_op_class_by_name()" static method
+ op = 'SoftMax'
+
+ # the operation works as an extractor by default. This is a legacy behaviour not recommended for using currently,
+ # thus "enabled" class attribute is set to False. The recommended approach is to use dedicated extractor extension
+ enabled = False
+
+ def __init__(self, graph: Graph, attrs: dict):
+ super().__init__(graph, { # the constructor of the base class Op is called with additional default attributes
+ 'type': __class__.op, # the operation is from the opset so the type is set to 'SoftMax'
+ 'op': __class__.op, # internal Model Optimizer operation has the same type
+ 'version': 'opset1', # the operation corresponds to opset1
+ 'infer': Softmax.infer, # shape inference function is defined below
+ 'axis': 1, # default value for the "axis" attribute of the operation SoftMax
+ 'in_ports_count': 1, # the operation has one input
+ 'out_ports_count': 1, # the operation produces one output
+ }, attrs)
+
+ # the method returns operation specific attributes list. This method is important for the case when implementing
+ # extractor inherited from CaffePythonFrontExtractorOp class to extract attribute for Caffe Python operation.
+ # But currently it is used interchangeably with the "backend_attrs()" method. If the "backend_attrs()" is not used
+ # then the "supported_attrs()" is used instead. In this particular case the operation has just one attribute "axis"
+ def supported_attrs(self):
+ return ['axis']
+
+ @staticmethod
+ def infer(node: Node):
+ "some code calculating output shape and values"
+```
+
+There is a dedicated method called `backend_attrs()` defining a list of attributes to be saved to the IR. Consider an
+example from the `mo/ops/pooling.py` file:
+```py
+ def backend_attrs(self):
+ return [
+ ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
+ ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))),
+
+ ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
+ ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
+
+ ('pool-method', 'pool_method'),
+ ('exclude-pad', 'exclude_pad'),
+
+ 'rounding_type',
+ 'auto_pad',
+ ]
+```
+
+The `backend_attrs()` function returns a list of records. A record can be of one of the following formats:
+1. A string defining the attribute to be saved to the IR. If the value of the attribute is `None` then the attribute is
+not saved. Examples of this case are `rounding_type` and `auto_pad`.
+2. A tuple where the first element is a string defining the name of the attribute as it will appear in the IR and the
+second element is a function to produce the value for this attribute. The function gets an instance of the `Node` as the
+only parameter and returns a string with the value to be saved to the IR. Examples of this case are `strides`, `kernel`,
+`pads_begin` and `pads_end`.
+3. A tuple where the first element is a string defining the name of the attribute as it will appear in the IR and the
+second element is the name of the `Node` attribute to get the value from. Examples of this case are `pool-method` and
+`exclude-pad`.
+
+### Operation Extractor
+Model Optimizer runs a specific extractor for each operation in the model during the model loading. Refer to the
+[operations-attributes-extracting](#operations-attributes-extracting) section for more information about this process.
+
+There are several types of Model Optimizer extractor extensions:
+1. The generic one which is described in this section.
+2. The special extractor for Caffe\* models with Python layers. This kind of extractor is described in the
+[Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md).
+3. The special extractor for MXNet\* models with custom operations. This kind of extractor is described in the
+[Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md).
+4. The special extractor and fallback to Caffe\* for shape inference is described in the
+[Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md).
+
+This chapter is focused on option #1, which provides a generic mechanism for the operation extractor applicable for
+all frameworks. Model Optimizer provides the `mo.front.extractor.FrontExtractorOp` class as a base class to implement the
+extractor. It has the class method `extract`, which gets a single parameter, the `Node` corresponding to the graph node to
+extract data from. The operation description in the original framework format is stored in the attribute `pb` of the
+node. The extractor goal is to parse this attribute and save the necessary attributes to the corresponding node of the
+graph. Consider the extractor for the TensorFlow\* operation `Const` (refer to the file
+`extensions/front/tf/const_ext.py`):
+
+```py
+from mo.front.extractor import FrontExtractorOp
+from mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content
+from mo.ops.const import Const
+
+
+class ConstExtractor(FrontExtractorOp):
+ # the "op" class attribute defines a type of the operation in the framework (in this case it is a TensorFlow) for
+ # which the extractor should be triggered
+ op = 'Const'
+ enabled = True # the flag that indicates that this extractor is enabled
+
+ @classmethod
+ def extract(cls, node): # the entry point of the extractor
+ # node.pb attribute stores the TensorFlow representation of the operation which is a Protobuf message of the
+ # specific format. In particular the message contains the attribute called "value" containing the description of
+ # the constant. The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing
+ pb_tensor = node.pb.attr["value"].tensor
+ # get the shape of the tensor from the protobuf message using the helper function "tf_tensor_shape"
+ shape = tf_tensor_shape(pb_tensor.tensor_shape)
+ # create a dictionary with necessary attributes
+ attrs = {
+ 'shape': shape,
+ # get the tensor value using "tf_tensor_content" helper function
+ 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor),
+ # get the tensor data type using "tf_dtype_extractor" helper function
+ 'data_type': tf_dtype_extractor(pb_tensor.dtype),
+ }
+ # update the node attributes using default attributes from the "Const" operation and attributes saved to the
+ # "attrs" dictionary
+ Const.update_node_stat(node, attrs)
+ return cls.enabled
+```
+
+Consider another example with an extractor of ONNX\* operation `Constant` (refer to the file
+`extensions/front/onnx/const_ext.py`):
+
+```py
+from onnx import numpy_helper
+from onnx.numpy_helper import to_array
+
+from mo.front.extractor import FrontExtractorOp
+from mo.front.onnx.extractors.utils import onnx_attr
+from mo.ops.const import Const
+
+
+class ConstantExtractor(FrontExtractorOp):
+ op = 'Constant'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node):
+ # use helper method "onnx_attr" which parses the Protobuf representation of the operation saved in the "node"
+ # gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t")
+ pb_value = onnx_attr(node, 'value', 't')
+ # use ONNX helper method "numpy_helper.to_array()" to convert "TensorProto" object to a numpy array
+ value = numpy_helper.to_array(pb_value)
+
+ attrs = {
+ 'data_type': value.dtype,
+ 'value': value,
+ }
+ # update the node attributes using default attributes from the "Const" operation and attributes saved to the
+ # "attrs" dictionary
+ Const.update_node_stat(node, attrs)
+ return cls.enabled
+```
+
+The extractors for operations from different frameworks work similarly. The only difference is in the helper methods
+used to parse operation attributes encoded with a framework-specific representation.
+
+A common practice is to use the `update_node_stat()` method of the dedicated `Op` class to update the node attributes. This
+method does the following:
+
+1. Sets values for common attributes like `op`, `type`, `infer`, `in_ports_count`, `out_ports_count`, `version` etc. to
+values specific to the dedicated operation (`Const` operation in this case).
+2. Uses methods `supported_attrs()` and `backend_attrs()` defined in the `Op` class to update the specific node attribute
+`IE`. The IR emitter uses the value stored in the `IE` attribute to pre-process attribute values and save them to the IR.
+3. Optionally sets additional attributes provided to the `update_node_stat()` function as a second parameter. Usually
+these attributes are parsed from the particular instance of the operation.
+
+> **NOTE**: Model Optimizer uses numpy arrays to store values and numpy arrays of type `np.int64` to store shapes in the
+> graph.
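+
+For example, the helper `int64_array` from `mo.front.common.partial_infer.utils`, which is also used in the
+transformation examples below, produces shapes in the expected format:
+
+```py
+from mo.front.common.partial_infer.utils import int64_array
+
+shape = int64_array([1, 3, 224, 224])  # a numpy array of type np.int64, the format used for shapes in the graph
+```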
+
+### Graph Transformation Extensions
+Model Optimizer provides various base classes to implement [Front Phase Transformations](#front-phase-transformations),
+[Middle Phase Transformations](#middle-phase-transformations) and [Back Phase Transformations](#back-phase-transformations).
+All classes have the following common class attributes and methods:
+1. Attribute `enabled` specifies whether the transformation is enabled or not. The value can be changed during runtime
+to enable or disable execution of the transformation during a model conversion. Default value is `True`.
+2. Attribute `id` specifies a unique transformation string identifier. This transformation identifier can be used to
+enable (disable) the transformation by setting the environment variable `MO_ENABLED_TRANSFORMS` (`MO_DISABLED_TRANSFORMS`)
+with a comma-separated list of `id`s. The environment variables override the value of the `enabled` attribute of the
+transformation. Instead of using the `id` attribute value, you can add a fully defined class name to the `MO_ENABLED_TRANSFORMS`
+(`MO_DISABLED_TRANSFORMS`) variable, for example, `extensions.back.NormalizeToNormalizeL2.NormalizeToNormalizeL2`. Optional attribute.
+3. Attribute `run_not_recursively` specifies whether the transformation should be executed in the sub-graphs, for
+example, body of the [TensorIterator](../../../ops/infrastructure/TensorIterator_1.md) and
+[Loop](../../../ops/infrastructure/Loop_5.md). Default value is `True`.
+4. Attribute `force_clean_up` specifies whether the graph clean up should be executed after the transformation. The
+graph cleanup removes nodes of the graph not reachable from the model inputs. Default value is `False`.
+5. Attribute `force_shape_inference` specifies whether the nodes marked with the attribute `need_shape_inference` equal to
+`True` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes whose
+input(s) were changed during the transformation, or a developer can set this attribute manually in the transformation for
+specific nodes. Default value is `False`.
+6. Attribute `graph_condition` specifies a list of functions with one parameter, the `Graph` object. The transformation
+is executed if and only if all functions return `True`. If the attribute is not set then no check is performed.
+7. Method `run_before()` returns a list of transformation classes which this transformation should be executed before.
+8. Method `run_after()` returns a list of transformation classes which this transformation should be executed after.
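+
+A minimal sketch of a transformation class setting some of these attributes and methods is shown below; the class name
+is illustrative and does not correspond to an existing Model Optimizer transformation:
+
+```py
+from extensions.front.Softplus_fusion import SoftplusFusion
+from mo.front.common.replacement import FrontReplacementPattern
+from mo.graph.graph import Graph
+
+
+class MyFrontTransformation(FrontReplacementPattern):  # illustrative class name
+    enabled = True                          # the transformation is executed by default
+    force_clean_up = True                   # remove nodes unreachable from the model inputs afterwards
+    graph_condition = [lambda graph: True]  # real transformations check graph properties here
+
+    def run_after(self):  # this transformation is executed after the SoftplusFusion transformation
+        return [SoftplusFusion]
+
+    def find_and_replace_pattern(self, graph: Graph):
+        pass  # the transformation body
+```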
+
+> **NOTE**: Some of the transformation types have specific class attributes and methods which are explained in the
+> corresponding sections of this document.
+
+Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological
+order. In order to execute a transformation during a proper model conversion phase, the Model Optimizer defines several
+anchor transformations which do nothing. All transformations are ordered with respect to these anchor transformations.
+The diagram below shows anchor transformations, some of the built-in transformations, and dependencies between them:
+
+![Transformations Graph](../../../img/MO_transformations_graph.png)
+
+User defined transformations are executed after corresponding `Start` and before corresponding `Finish` anchor
+transformations by default (if `run_before()` and `run_after()` methods have not been overridden).
+
+> **NOTE**: The `PreMiddleStart` and `PostMiddleStart` anchors were introduced due to historical reasons to refactor
+> the Model Optimizer pipeline which initially had a hardcoded order of transformations.
+
+#### Front Phase Transformations
+There are several types of front phase transformations:
+
+1. [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Specific Operation Front Phase Transformations](#specific-operation-front-phase-transformations) triggered for the
+node with a specific `op` attribute value.
+3. [Generic Front Phase Transformations](#generic-front-phase-transformations).
+4. Manually enabled transformations defined with a JSON configuration file (for TensorFlow\*, ONNX\* and MXNet\* models
+only) specified using the `--transformations_config` command line parameter:
+ 1. [Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformation).
+ 2. [Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations).
+ 3. [Generic Front Phase Transformations Enabled with Transformations Configuration File](#generic-transformations-config-front-phase-transformations).
+
+##### Pattern-Defined Front Phase Transformations
+This type of transformation is implemented using `mo.front.common.replacement.FrontReplacementSubgraph` and
+`mo.front.common.replacement.FrontReplacementPattern` as base classes and works in the following way.
+1. Developer defines a sub-graph to be matched using a list of nodes with attributes and edges connecting them (edges
+may also have attributes).
+2. Model Optimizer searches for all sub-graphs of the original graph isomorphic to the specified sub-graph (pattern).
+3. Model Optimizer executes the developer-defined function performing graph transformation for each instance of a
+matched sub-graph. A developer can override different functions in the base transformation class so the Model Optimizer
+behaves differently:
+    1. Override the method `replace_sub_graph(self, graph, match)`. In this case Model Optimizer only executes the
+    overridden function, passing the `graph` object and a dictionary describing the matched sub-graph. A developer is
+    responsible for writing the transformation and connecting the newly created nodes to the rest of the graph.
+    2. Override the method `generate_sub_graph(self, graph, match)`. This case is not recommended because it is
+    the most complicated approach; it can be effectively replaced with the previous approach, so it is not
+    explained in this section. The explanation of this function is provided in the
+    [Node Name Defined Sub-Graph Transformations](#node-name-defined-sub-graph-transformations) section.
+
+The sub-graph pattern is defined in the `pattern()` function. This function should return a dictionary with two keys:
+`nodes` and `edges`:
+* The value for the `nodes` key is a list of tuples with two elements.
+ * The first element is an alias name for a node which will be used to define edges between nodes and in the
+ transformation function.
+ * The second element is a dictionary with attributes. The key is a name of an attribute which should exist in the
+ node. The value for the attribute can be some specific value to match or a function which gets a single parameter -
+ the attribute value from the node. The function should return the result of attribute comparison with a dedicated
+ value.
+* The value for the `edges` key is a list of tuples with two or three elements.
+ * The first element is the alias name of the node producing a tensor.
+ * The second element is the alias name of the node consuming the tensor.
+ * The third element (optional) is the dictionary with expected edge attributes. Usually this dictionary contains
+ attributes like `in` and `out` defining input and output ports.
+
+Consider the example of a front transformation implemented in the `extensions/front/Mish_fusion.py` file performing
+fusing of the sub-graph defining the [Mish](../../../ops/activation/Mish_4.md) activation function into a single
+operation:
+
+```py
+from extensions.front.Softplus_fusion import SoftplusFusion
+from extensions.ops.activation_ops import Mish
+from mo.front.common.replacement import FrontReplacementSubgraph
+from mo.front.subgraph_matcher import SubgraphMatch
+from mo.graph.graph import Graph, rename_nodes
+
+
+class MishFusion(FrontReplacementSubgraph):
+ """
+ The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)).
+ """
+ enabled = True # transformation is enabled
+
+ def run_after(self): # run this transformation after "SoftplusFusion" transformation
+ return [SoftplusFusion]
+
+ def pattern(self): # define pattern according to formulae x * tanh(SoftPlus(x)).
+ return dict(
+ nodes=[
+ ('mul', dict(op='Mul')),
+ ('tanh', dict(op='Tanh')),
+ ('softplus', dict(op='SoftPlus')),
+ ],
+ edges=[
+ ('softplus', 'tanh'),
+ ('tanh', 'mul'),
+ ])
+
+ def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): # entry point for the transformation
+ mul = match['mul'] # get the Node corresponding to matched "mul" node
+ mul_name = mul.soft_get('name', mul.id)
+ softplus = match['softplus'] # get the Node corresponding to the matched "softplus" node
+
+ # determine the input port of Mul which gets the 'input' node output
+ input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh')
+
+ # check that the same tensor provided as input to Mul and SoftPlus
+ if mul.in_port(input_port_idx).get_source() != softplus.in_port(0).get_source():
+ return
+
+ mish = Mish(graph, {}).create_node() # create Mish operation
+ mish.in_port(0).connect(mul.in_port(input_port_idx).get_source()) # connect input to the Mish
+ mul.out_port(0).get_connection().set_source(mish.out_port(0)) # reconnect outgoing edge from "mul" to Mish
+
+ # rename the created Mish operation to have the name of the "mul" node which produced the value equal to the
+ # Mish output
+ rename_nodes([(mul, mul_name + '/TBR'), (mish, mul_name)])
+```
+
+##### Specific Operation Front Phase Transformations
+This type of transformation is implemented using `mo.front.common.replacement.FrontReplacementOp` as a base class and
+works in the following way.
+1. Developer defines an operation type to trigger the transformation.
+2. Model Optimizer searches for all nodes in the graph with the attribute `op` equal to the specified value.
+3. Model Optimizer executes the developer-defined function performing graph transformation for each instance of a matched
+node. A developer can override different functions in the base transformation class so the Model Optimizer behaves
+differently:
+    1. Override the method `replace_sub_graph(self, graph, match)`. In this case Model Optimizer only executes the
+    overridden function, passing the `graph` object and a dictionary with the single key `op` whose value is the matched
+    node. A developer is responsible for writing the transformation and connecting the newly created nodes to the rest
+    of the graph.
+    2. Override the method `replace_op(self, graph, node)`. In this case Model Optimizer executes the overridden function,
+    passing the `graph` object and the matched node as the `node` parameter. If the function returns an `id` of some node
+    then the `Node` with this `id` is connected to the consumers of the matched node. After applying the transformation
+    the matched node is removed from the graph.
+
+The `FrontReplacementOp` class provides a simpler mechanism to match a single operation with a specific value of the `op`
+attribute (write the attribute `op` in the class instead of defining a `pattern()` function) and perform the
+transformation.
+
+Consider an example transformation from the file `extensions/front/Pack.py` which replaces the operation `Pack` from
+TensorFlow\*:
+```py
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementOp
+from mo.front.tf.graph_utils import create_op_with_const_inputs
+from mo.graph.graph import Node, Graph, rename_nodes
+from mo.ops.concat import Concat
+from mo.ops.unsqueeze import Unsqueeze
+
+
+class Pack(FrontReplacementOp):
+ op = "Pack" # trigger transformation for all nodes in the graph with attribute op = "Pack"
+ enabled = True # transformation is enabled
+
+ def replace_op(self, graph: Graph, node: Node): # entry point for the transformation
+ # create a Concat operation with a number of inputs equal to a number of inputs to Pack
+ out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node()
+ pack_name = node.soft_get('name', node.id)
+
+ for ind in node.in_ports():
+ # add dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs
+ unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])},
+ {'name': node.soft_get('name', node.id) + '/Unsqueeze'})
+ node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0))
+ unsqueeze_node.out_port(0).connect(out_node.in_port(ind))
+
+ # rename the created Concat operation to have the name of the "pack" node which produced the value equal to the
+ # Concat output
+ rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)])
+ return [out_node.id] # reconnect the Pack operation consumers to get input from Concat instead
+```
+
+##### Generic Front Phase Transformations
+Model Optimizer provides a mechanism to implement generic front phase transformations. This type of transformation is
+implemented using `mo.front.common.replacement.FrontReplacementSubgraph` or
+`mo.front.common.replacement.FrontReplacementPattern` as base classes. The only condition to execute the transformation
+is to check that it is enabled. Then the Model Optimizer executes the method `find_and_replace_pattern(self, graph)` and
+provides a `Graph` object as an input.
+
+Consider the example of a generic front transformation from the file `extensions/front/SqueezeNormalize.py` performing
+normalization of the [Squeeze](../../../ops/shape/Squeeze_1.md) operation. The older version of the operation had a list
+of axes to squeeze as an attribute, but now it is a separate input. For backward compatibility, the Model Optimizer
+operation supports both semantics, but before IR generation the operation should be normalized according to the
+specification.
+
+```py
+import logging as log
+
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementPattern
+from mo.graph.graph import Graph
+from mo.ops.const import Const
+from mo.utils.error import Error
+
+
+class SqueezeNormalize(FrontReplacementPattern):
+ """
+ Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the
+ dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed.
+ """
+ enabled = True # the transformation is enabled
+
+ def find_and_replace_pattern(self, graph: Graph): # the function is called unconditionally
+ for squeeze_node in graph.get_op_nodes(op='Squeeze'): # iterate over all nodes with op='Squeeze'
+ # if the operation has only 1 input node and non None 'squeeze_dims' attribute then convert the attribute to
+ # the operation input
+ if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'):
+ dims_node = Const(graph, {'name': squeeze_node.id + '/Dims',
+ 'value': int64_array(squeeze_node.squeeze_dims)}).create_node()
+ squeeze_node.in_port(1).connect(dims_node.out_port(0))
+ del squeeze_node['squeeze_dims']
+            # if two inputs already exist, that means that the operation is already normalized
+ elif len(squeeze_node.in_nodes()) == 2:
+ log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name))
+ # in all other cases raise an error
+ else:
+ raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and an "squeeze_dims" '
+ 'attribute'.format(squeeze_node.soft_get('name')))
+```
+
+Refer to the `mo/front/common/replacement.py` for the implementation details on how these front phase transformations
+work.
+
+##### Node Name Pattern Front Phase Transformations
+Let's review a real life example before going into the details of how this type of transformation works.
+
+TensorFlow\* uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing
+particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in the
+TensorBoard\*. The scope, in fact, just defines a common name prefix for the nodes belonging to it.
+
+For example, Inception topologies contain several types of so-called "Inception blocks". Some of them are equal to each
+other, but located in different places of the network. For example, Inception V4 from the
+[TensorFlow-Slim image classification model library](https://github.com/tensorflow/models/tree/master/research/slim) has
+inception blocks `Mixed_5b`, `Mixed_5c` and `Mixed_5d` with exactly the same nodes with the same set of attributes.
+
+Consider a situation when someone implemented these Inception blocks extremely efficiently using a single Inference
+Engine operation called `InceptionBlock` and needs to replace these blocks in the model with instances of this operation.
+Model Optimizer provides a mechanism to trigger the transformation for a sub-graph of operations defined by the node name
+regular expressions (scope). In this particular case, some of the patterns are: `.*InceptionV4/Mixed_5b`,
+`.*InceptionV4/Mixed_5c` and `.*InceptionV4/Mixed_5d`. Each pattern starts with `.*`, because the prefix `InceptionV4`
+is added to all node names during a model freeze.
+
+This type of transformation is implemented using `mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph` as a
+base class and works in the following way.
+1. Developer prepares a JSON configuration file template defining node names patterns.
+2. Developer runs the Model Optimizer with a command line parameter `--tensorflow_custom_operations_config_update` and
+Model Optimizer adds information about input and output nodes of the specified sub-graphs.
+3. Model Optimizer executes the developer-defined transformation **only** when a user specifies the path to the
+configuration file updated in step 2 using the command line parameter `--transformations_config`.
+
+Consider the following possible configuration file template for the Inception Block transformation:
+```json
+[
+ {
+ "custom_attributes": {
+ "attr1_key": "attr1_value",
+ "attr2_key": 123456
+ },
+ "id": "InceptionBlockTransformation",
+ "instances": [
+ ".*InceptionV4/Mixed_5b",
+ ".*InceptionV4/Mixed_5c",
+ ".*InceptionV4/Mixed_5d"
+ ],
+ "match_kind": "scope"
+ }
+]
+```
+
+The configuration file contains a list of dictionaries. Each dictionary defines one transformation. Each transformation
+is defined with several parameters:
+
+* `id` (mandatory) is a unique identifier of the transformation. It is used in the Python\* code that implements the
+transformation to link the class and the transformation description from the configuration file.
+* `match_kind` (mandatory) is a string that specifies the matching algorithm. For the node name pattern case the value
+should be equal to `scope`. Other possible values are described in the dedicated sections below.
+* `instances` (mandatory) specifies instances of the sub-graph to be matched. It contains a list of node name prefix
+patterns for the match kind of type `scope`.
+* `custom_attributes` (optional) is a dictionary with attributes that can be used in the transformation code.
+
+After running the Model Optimizer with the additional parameter `--tensorflow_custom_operations_config_update` pointing to
+the template configuration file, the content of the file is updated with the two new sections `inputs` and `outputs`.
+The file content after the update is the following:
+```json
+[
+ {
+ "id": "InceptionBlockTransformation",
+ "custom_attributes": {
+ "attr1_key": "attr1_value",
+ "attr2_key": 123456
+ },
+ "instances": [
+ ".*InceptionV4/Mixed_5b",
+ ".*InceptionV4/Mixed_5c",
+ ".*InceptionV4/Mixed_5d"
+ ],
+ "match_kind": "scope",
+ "inputs": [
+ [
+ {
+ "node": "Branch_2/Conv2d_0a_1x1/Conv2D$",
+ "port": 0
+ },
+ {
+ "node": "Branch_3/AvgPool_0a_3x3/AvgPool$",
+ "port": 0
+ },
+ {
+ "node": "Branch_1/Conv2d_0a_1x1/Conv2D$",
+ "port": 0
+ },
+ {
+ "node": "Branch_0/Conv2d_0a_1x1/Conv2D$",
+ "port": 0
+ }
+ ]
+ ],
+ "outputs": [
+ {
+ "node": "concat$",
+ "port": 0
+ }
+ ]
+ }
+]
+```
+
+The value for key `inputs` is a list of lists describing input tensors of the sub-graph. Each element of the top-level
+list corresponds to one unique input tensor of the sub-graph. Each internal list describes a list of nodes consuming
+this tensor and port numbers where the tensor is consumed. Model Optimizer generates regular expressions for the input
+node names to uniquely identify them in each instance of the sub-graph defined by the `instances`. Denote these nodes
+as input nodes of the sub-graph.
+
+In the InceptionV4 topology, the `InceptionV4/Mixed_5b` block has four input tensors from outside of the sub-graph,
+but all of them are produced by the node `InceptionV4/Mixed_5a/concat`. Therefore, the top-level list of the `inputs`
+contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by
+`InceptionV4/Mixed_5a/concat` node. In this case, all four input nodes consume input tensor into port 0.
+
+The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level
+list is important. This order defines the order in which the Model Optimizer attaches input tensors to a new generated
+node if the sub-graph is replaced with a single node. The `i`-th input node of the sub-graph is obtained using the call
+`match.single_input_node(i)` in the sub-graph transformation code. More information about the API is given below. If it is
+necessary to change the order of input tensors, the configuration file can be edited in a text editor.
+
+The value for the key `outputs` is a list describing the nodes of the sub-graph producing tensors that either go
+outside of the sub-graph or do not have child nodes. These nodes are referred to as output nodes of the sub-graph. The
+order of elements in the list is important. The `i`-th element of the list describes the `i`-th output tensor of the
+sub-graph, which can be obtained using the `match.output_node(i)` call. The order of elements can be manually changed in
+the configuration file. Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a
+single node.
+
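+For illustration only, below is a minimal sketch of a transformation class that could consume the configuration entry
+shown above. The class and the body of the method are hypothetical; the entry point name `generate_sub_graph` and the
+placeholder return value are assumptions based on existing replacers, so refer to the base class and the existing
+transformations for the exact contract:
+```py
+import logging as log
+
+from mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph
+from mo.graph.graph import Graph
+
+
+class InceptionBlockTransformation(FrontReplacementFromConfigFileSubGraph):
+    replacement_id = 'InceptionBlockTransformation'  # must match the "id" value from the JSON configuration file
+
+    def generate_sub_graph(self, graph: Graph, match):  # assumed entry point for this type of transformation
+        # the 0-th input node of the matched sub-graph; the index follows the order of the "inputs" list
+        input_node = match.single_input_node(0)
+        # the 0-th output node of the matched sub-graph; the index follows the order of the "outputs" list
+        output_node = match.output_node(0)
+        log.debug('Matched sub-graph with input {} and output {}'.format(input_node, output_node))
+        # ... create the replacement nodes here; an empty dictionary is returned as a placeholder
+        return {}
+```
+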
+Refer to [Converting TensorFlow\* Object Detection API Models](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md)
+for more examples of this type of transformation.
+
+##### Front Phase Transformations Using Start and End Points
+This type of transformation is implemented using `mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph` as a
+base class and works in the following way.
+1. A developer prepares a JSON configuration file that defines the sub-graph to match, using two lists of node names:
+"start" and "end" nodes.
+2. Model Optimizer executes the developer-defined transformation **only** when a user specifies the path to the
+configuration file using the command line parameter `--transformations_config`. Model Optimizer performs the following
+steps to match the sub-graph:
+ 1. Starts a graph traversal from every start node following the direction of the graph edges. The search stops at an
+ end node or at a node without consumers. All visited nodes are added to the matched sub-graph.
+ 2. Starts another graph traversal from each non-start node of the sub-graph, that is, every node except the nodes from
+ the "start" list. In this step, the edges are traversed in the reverse direction. All newly visited nodes are added
+ to the matched sub-graph. This step is needed to add the nodes required for calculating the values of internal nodes
+ of the matched sub-graph.
+ 3. Checks that all "end" nodes were reached from the "start" nodes. If not, the Model Optimizer exits with an error.
+ 4. Checks that there are no [Parameter](../../../ops/infrastructure/Parameter_1.md) operations among the added nodes.
+ If they exist, the sub-graph depends on the inputs of the model. Such a configuration is considered incorrect, so the
+ Model Optimizer exits with an error.
+
+This algorithm finds all nodes "between" the start and end nodes, as well as the nodes needed to calculate the values
+of non-input nodes of the matched sub-graph.
+
+An example of a JSON configuration file for a transformation with start and end points is
+`extensions/front/tf/ssd_support_api_v1.15.json`:
+
+```json
+[
+ {
+ "custom_attributes": {
+ "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
+ "pad_mode": "caffe.ResizeParameter.CONSTANT",
+ "resize_mode": "caffe.ResizeParameter.WARP",
+ "clip_before_nms": false,
+ "clip_after_nms": true
+ },
+ "id": "ObjectDetectionAPISSDPostprocessorReplacement",
+ "include_inputs_to_sub_graph": true,
+ "include_outputs_to_sub_graph": true,
+ "instances": {
+ "end_points": [
+ "detection_boxes",
+ "detection_scores",
+ "num_detections"
+ ],
+ "start_points": [
+ "Postprocessor/Shape",
+ "Postprocessor/scale_logits",
+ "Postprocessor/Tile",
+ "Postprocessor/Reshape_1",
+ "Postprocessor/Cast_1"
+ ]
+ },
+ "match_kind": "points"
+ }
+]
+```
+
+The format of the file is similar to the one provided as an example in the
+[Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformations). The difference is in
+the value of the `match_kind` parameter, which should be equal to `points`, and in the format of the `instances`
+parameter, which should be a dictionary with two keys, `start_points` and `end_points`, defining the start and end node
+names respectively.
+
+> **NOTE**: The `include_inputs_to_sub_graph` and `include_outputs_to_sub_graph` parameters are redundant and should
+> always be equal to `true`.
+
+> **NOTE**: This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore,
+> it is not possible to specify, for example, a [Convolution](../../../ops/convolution/Convolution_1.md) node as a start
+> point because it has two inputs: the data tensor and the tensor with weights.
+
+For other examples of transformations with points, please refer to the
+[Converting TensorFlow\* Object Detection API Models](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md).
+
+##### Generic Front Phase Transformations Enabled with Transformations Configuration File
+This type of transformation works similarly to the [Generic Front Phase Transformations](#generic-front-phase-transformations)
+but requires a JSON configuration file to enable it, similarly to the
+[Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformations) and
+[Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations).
+
+The base class for this type of transformation is
+`mo.front.common.replacement.FrontReplacementFromConfigFileGeneral`. The Model Optimizer executes the method
+`transform_graph(self, graph, replacement_descriptions)` and provides the `Graph` object and a dictionary with values
+parsed from the `custom_attributes` attribute of the provided JSON configuration file.
+
+An example of a configuration file for this type of transformation is `extensions/front/tf/yolo_v1_tiny.json`:
+
+```json
+[
+ {
+ "id": "TFYOLO",
+ "match_kind": "general",
+ "custom_attributes": {
+ "classes": 20,
+ "coords": 4,
+ "num": 2,
+ "do_softmax": 0
+ }
+ }
+]
+```
+and the corresponding transformation file is `./extensions/front/YOLO.py`:
+
+```py
+from extensions.front.no_op_eraser import NoOpEraser
+from extensions.front.standalone_const_eraser import StandaloneConstEraser
+from extensions.ops.regionyolo import RegionYoloOp
+from mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
+from mo.graph.graph import Node, Graph
+from mo.ops.result import Result
+from mo.utils.error import Error
+
+
+class YoloRegionAddon(FrontReplacementFromConfigFileGeneral):
+ """
+ Replaces all Result nodes in graph with YoloRegion->Result nodes chain.
+ YoloRegion node attributes are taken from configuration file
+ """
+ replacement_id = 'TFYOLO' # the identifier matching the "id" attribute in the JSON file
+
+ def run_after(self):
+ return [NoOpEraser, StandaloneConstEraser]
+
+ def transform_graph(self, graph: Graph, replacement_descriptions):
+ op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result']
+ for op_output in op_outputs:
+ last_node = Node(graph, op_output).in_node(0)
+ op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1)
+ op_params.update(replacement_descriptions)
+ region_layer = RegionYoloOp(graph, op_params)
+ region_layer_node = region_layer.create_node([last_node])
+ # here we remove 'axis' from 'dim_attrs' to avoid permutation from axis = 1 to axis = 2
+ region_layer_node.dim_attrs.remove('axis')
+ Result(graph).create_node([region_layer_node])
+ graph.remove_node(op_output)
+```
+
+The configuration file has only three parameters: the transformation identifier `id`, the `match_kind` (which should be
+equal to `general`), and the `custom_attributes` dictionary with custom attributes accessible in the transformation.
+
+#### Middle Phase Transformations
+There are two types of middle phase transformations:
+
+1. [Pattern-Defined Middle Phase Transformations](#pattern-defined-middle-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Generic Middle Phase Transformations](#generic-middle-phase-transformations).
+
+##### Pattern-Defined Middle Phase Transformations
+This type of transformation is implemented using `mo.middle.replacement.MiddleReplacementPattern` as a base class and
+works similarly to the [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations).
+There are two differences:
+1. The transformation entry function name is `replace_pattern(self, graph, match)`.
+2. The pattern defining the graph should contain data nodes because the structure of the graph is different between
+front and middle phases. Refer to the [Partial Inference](#partial-inference) section for more information about the
+graph structure changes.
+
+Refer to `extensions/middle/L2NormToNorm.py` for an example of a pattern-defined middle phase transformation.
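+
+Below is a minimal, illustrative sketch of a pattern-defined middle phase transformation. The transformation name and
+the operation types in the pattern (`SomeOp`, `AnotherOp`) are hypothetical; note that the pattern includes a data node
+between the two operation nodes, as required for the middle phase:
+```py
+import logging as log
+
+from mo.graph.graph import Graph
+from mo.middle.replacement import MiddleReplacementPattern
+
+
+class SomeOpFusing(MiddleReplacementPattern):  # hypothetical transformation
+    enabled = True
+
+    def pattern(self):
+        return dict(
+            nodes=[
+                ('some_op', dict(kind='op', op='SomeOp')),
+                ('some_op_data', dict(kind='data')),  # data node produced by 'some_op'
+                ('another_op', dict(kind='op', op='AnotherOp')),
+            ],
+            edges=[
+                ('some_op', 'some_op_data'),
+                ('some_op_data', 'another_op'),
+            ])
+
+    def replace_pattern(self, graph: Graph, match: dict):
+        # 'match' maps the aliases from the pattern to the matched nodes
+        log.debug('Matched nodes: {} -> {}'.format(match['some_op'].id, match['another_op'].id))
+        # ... modify the matched sub-graph here
+```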
+
+##### Generic Middle Phase Transformations
+Model Optimizer provides a mechanism to implement generic middle phase transformations. This type of transformation is
+implemented using `mo.middle.replacement.MiddleReplacementPattern` as a base class and works similarly to the
+[Generic Front Phase Transformations](#generic-front-phase-transformations). The only difference is that the
+transformation entry function name is `find_and_replace_pattern(self, graph: Graph)`.
+
+Refer to `extensions/middle/CheckForCycle.py` for an example of such a transformation.
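+
+The following is a small illustrative sketch of a generic middle phase transformation (the class name is hypothetical).
+It only counts the operation nodes of the graph using the same node-iteration style as the YOLO example above:
+```py
+import logging as log
+
+from mo.graph.graph import Graph
+from mo.middle.replacement import MiddleReplacementPattern
+
+
+class CountOperations(MiddleReplacementPattern):  # hypothetical transformation
+    enabled = True
+
+    def find_and_replace_pattern(self, graph: Graph):
+        # iterate over all graph nodes and keep only operation nodes
+        op_nodes = [n for n, d in graph.nodes(data=True) if d.get('kind') == 'op']
+        log.debug('The graph contains {} operation nodes'.format(len(op_nodes)))
+```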
+
+#### Back Phase Transformations
+There are two types of back phase transformations:
+
+1. [Pattern-Defined Back Phase Transformations](#pattern-defined-back-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Generic Back Phase Transformations](#generic-back-phase-transformations).
+
+> **NOTE**: The graph layout during the back phase is always NCHW. However, during the front and middle phases it can
+> be NHWC if the original model uses it. Refer to [Model Conversion Pipeline](#model-conversion-pipeline) for more
+> details.
+
+##### Pattern-Defined Back Phase Transformations
+This type of transformation is implemented using `mo.back.replacement.BackReplacementPattern` as a base class and
+works the same way as [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations).
+
+Refer to `extensions/back/ShufflenetReLUReorder.py` for an example of a pattern-defined back phase transformation.
+
+##### Generic Back Phase Transformations
+Model Optimizer provides a mechanism to implement generic back phase transformations. This type of transformation is
+implemented using `mo.back.replacement.BackReplacementPattern` as a base class and works the same way as
+[Generic Middle Phase Transformations](#generic-middle-phase-transformations).
+
+Refer to `extensions/back/GatherNormalizer.py` for an example of such a transformation.
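+
+For illustration, the skeleton of a generic back phase transformation differs from the middle phase one only in the
+base class (the transformation name below is hypothetical):
+```py
+from mo.back.replacement import BackReplacementPattern
+from mo.graph.graph import Graph
+
+
+class MyBackTransformation(BackReplacementPattern):  # hypothetical transformation
+    enabled = True
+
+    def find_and_replace_pattern(self, graph: Graph):
+        pass  # modify the graph here; the layout at this point is always NCHW
+```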
+
+## See Also
+* [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md)
+* [Converting a Model to Intermediate Representation (IR)](../convert_model/Converting_Model.md)
+* [nGraph Basic Concepts](@ref openvino_docs_nGraph_DG_basic_concepts)
+* [Inference Engine Extensibility Mechanism](../../../IE_DG/Extensibility_DG/Intro.md)
+* [Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md)
+* [Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md)
+* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
index 4203a1f74114de..aa3b5697242657 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
@@ -1,45 +1,41 @@
-# Extending the MXNet Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_MXNet_Model_Optimizer_with_New_Primitives}
+# Extending Model Optimizer for Custom MXNet* Operations {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_MXNet_Model_Optimizer_with_New_Primitives}
-This section describes how you can create a Model Optimizer extension for a custom layer from your MXNet* model. It supplements the main document [Extending Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md) and provides a step-by-step procedure. To create an extension for a particular layer, perform the following steps:
+This section provides instructions on how to support a custom MXNet operation (called an "operator" or "layer" in the
+MXNet documentation) which is not a part of the MXNet operation set, for example, an operator implemented using
+this [guide](https://mxnet.apache.org/versions/1.7.0/api/faq/new_op.html).
+
+This section describes the procedure for extracting operator attributes in the Model Optimizer. The rest of the
+operation enabling pipeline and the documentation on how to support MXNet operations from the standard MXNet operation
+set are described in the main document [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
+
+## Writing Extractor for Custom MXNet Operation
+Custom MXNet operations have an attribute `op` (defining the type of the operation) equal to `Custom` and an attribute
+`op_type` which is an operation type defined by a user. In order to extract attributes for such operations, implement an
+extractor class inherited from the `MXNetCustomFrontExtractorOp` class instead of the `FrontExtractorOp` class used for
+standard framework operations. The `op` class attribute value should be set to the `op_type` value so the extractor is
+triggered for this kind of operation.
+
+The following is an example of an extractor for a custom operation registered with type (`op_type` value) equal to
+`MyCustomOp` and having an attribute `my_attribute` of floating point type with default value `5.6`. The sample assumes
+that the `CustomOp` class (inherited from the `Op` class) for the corresponding Model Optimizer operation has already
+been created as described in [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
-1. Create the file `custom_proposal_ext.py` in the folder `/deployment_tools/model_optimizer/extensions/front/mxnet`
-If your MXNet layer has op `Custom`, create the `CustomProposalFrontExtractor` class inherited from `MXNetCustomFrontExtractorOp`:
-```py
-from mo.front.extractor import MXNetCustomFrontExtractorOp
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
- pass
-```
-Otherwise, for layers that are not standard MXNet layers, create the `ProposalFrontExtractor` class inherited from `FrontExtractorOp`:
-```py
- from mo.front.extractor import FrontExtractorOp
- class ProposalFrontExtractor(FrontExtractorOp):
- pass
-```
-2. Specify the operation that the extractor refers to and a specific flag. The flag represents whether the operation should be used by the Model Optimizer or should be excluded from processing:
-```py
-from mo.front.extractor import MXNetCustomFrontExtractorOp
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
- op = '_contrib_Proposal'
- enabled = True
-```
-3. Register a mapping rule between the original model and the `PythonProposalOp` attributes by overriding the following function:
```py
+from extensions.ops.custom_op import CustomOp # implementation of the MO operation class
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.front.extractor import MXNetCustomFrontExtractorOp
-from mo.ops.op import Op
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
- op = '_contrib_Proposal'
- enabled = True
+class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp): # inherit from specific base class
+ op = 'MyCustomOp' # the value corresponding to the `op_type` value of the MXNet operation
+ enabled = True # the extractor is enabled
+
@staticmethod
def extract(node):
- attrs = get_mxnet_layer_attrs(node.symbol_dict)
+ attrs = get_mxnet_layer_attrs(node.symbol_dict) # parse the attributes to a dictionary with string values
node_attrs = {
- 'feat_stride': attrs.float('feat_stride', 16)
+ 'my_attribute': attrs.float('my_attribute', 5.6)
}
-
- # update the attributes of the node
- Op.get_op_class_by_name('Proposal').update_node_stat(node, node_attrs) # <------ here goes the name ('Proposal') of the Operation that was implemented before
- return __class__.enabled
-```
+ CustomOp.update_node_stat(node, node_attrs) # update the attributes of the node
+ return CustomProposalFrontExtractor.enabled # 'self' is not available in a static method, so reference the class
+```
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
new file mode 100644
index 00000000000000..c79da3ef0efaa0
--- /dev/null
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
@@ -0,0 +1,89 @@
+# Extending Model Optimizer with Caffe* Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
+
+This section provides instructions on how to support a custom Caffe operation written only in Python. For example, the
+[Faster-R-CNN model](http://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0) implemented in
+Caffe contains a custom layer Proposal written in Python. The layer is described in the
+[Faster-R-CNN prototxt](https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt)
+in the following way:
+```sh
+layer {
+ name: 'proposal'
+ type: 'Python'
+ bottom: 'rpn_cls_prob_reshape'
+ bottom: 'rpn_bbox_pred'
+ bottom: 'im_info'
+ top: 'rois'
+ python_param {
+ module: 'rpn.proposal_layer'
+ layer: 'ProposalLayer'
+ param_str: "'feat_stride': 16"
+ }
+}
+```
+
+This section describes only the procedure for extracting operator attributes in the Model Optimizer. The rest of the
+operation enabling pipeline and the documentation on how to support other Caffe operations (written in C++) are
+described in the main document [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
+
+## Writing Extractor for Caffe Python Layer
+Custom Caffe Python layers have an attribute `type` (defining the type of the operation) equal to `Python` and two
+mandatory attributes, `module` and `layer`, in the `python_param` dictionary. The `module` defines the Python module
+name with the layer implementation, while the `layer` value is an operation type defined by a user. In order to extract
+attributes for such an operation, it is necessary to implement an extractor class inherited from the
+`CaffePythonFrontExtractorOp` class instead of the `FrontExtractorOp` class used for standard framework layers. The `op`
+class attribute value should be set to the `module + "." + layer` value so the extractor is triggered for this kind of
+operation.
+
+Here is a simplified example of the extractor for the custom operation Proposal from the Faster-R-CNN model mentioned
+above. The full code with additional checks is provided in
+`/deployment_tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses the
+operation `ProposalOp` which corresponds to the `Proposal` operation described in the [Available Operations Sets](../../../ops/opset.md)
+document. Refer to the source code below for a detailed explanation of the extractor.
+
+```py
+from extensions.ops.proposal import ProposalOp
+from mo.front.extractor import CaffePythonFrontExtractorOp
+
+
+class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
+ op = 'rpn.proposal_layer.ProposalLayer' # module + "." + layer
+ enabled = True # extractor is enabled
+
+ @staticmethod
+ def extract_proposal_params(node, defaults):
+ param = node.pb.python_param # get the protobuf message representation of the layer attributes
+ # parse attributes from the layer protobuf message to a Python dictionary
+ attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
+ update_attrs = defaults
+
+ # the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names
+ if 'ratios' in attrs:
+ attrs['ratio'] = attrs['ratios']
+ del attrs['ratios']
+ if 'scales' in attrs:
+ attrs['scale'] = attrs['scales']
+ del attrs['scales']
+
+ update_attrs.update(attrs)
+ ProposalOp.update_node_stat(node, update_attrs) # update the node attributes
+
+ @classmethod
+ def extract(cls, node):
+ # define default values for the Proposal layer attributes
+ defaults = {
+ 'feat_stride': 16,
+ 'base_size': 16,
+ 'min_size': 16,
+ 'ratio': [0.5, 1, 2],
+ 'scale': [8, 16, 32],
+ 'pre_nms_topn': 6000,
+ 'post_nms_topn': 300,
+ 'nms_thresh': 0.7
+ }
+ cls.extract_proposal_params(node, defaults)
+ return cls.enabled
+```
+
+## See Also
+* [Customize_Model_Optimizer](Customize_Model_Optimizer.md)
+* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
index b94ddb52885f80..9fb0e9b26f2db7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
@@ -1,476 +1,3 @@
-# Extending the Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives}
+# Extending Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives}
-This section explains how to register a custom layer in the Model Optimizer, including how to register Proposal as a custom layer. This section also demonstrates how `Proposal` works as a custom layer.
-
-Model Optimizer loads the model, goes through the topology, and tries to find each layer type in the list of known layers. If the Model Optimizer does not find a layer in that list, it looks for the layer in the list of custom layers. If the Model Optimizer fails to find the layer among the defined custom layers, it registers a Caffe\* fallback for for the output shape inference. If the Model Optimizer does not find Caffe and cannot infer shapes, the Model Optimizer fails with an appropriate message.
-
-You must know two things about custom layers with the Model Optimizer:
-
-* How to map a subgraph in a FW model to a subgraph consisting of Inference Engine layers. For Caffe, the subgraph is a 1-to-1 mapping of a Caffe layer to an Inference Engine layer.
-* How to infer shapes for unknown subgraphs. This can be either for a step in which the internal representation consists of framework-specific layers, or for a step in which the internal representation consists of Inference Engine layers.
-
-You also have the option of a framework fallback for unknown subgraphs, for when the original framework is used for inference of output shapes of operations. The example below demonstrates the case in which the framework is not available or should not be used.
-
-## Preparing an Example Topology
-
-> **NOTE**: Skip this section if you have a topology with a layer that is not known to the Model Optimizer.
-
-The information in this section prepares a Caffe\* model with the provided, deployment-ready `prototxt` for a
-well-known topology called
-[Faster-R-CNN protoxt](https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt)
-to demonstrate the workflow. To use this example, you must have
-[weights and biases](http://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0) for inference,
-because `prototxt` just describes the structure of the topology.
-
-1. Download the `.caffemodel` and `.prototxt` files
-2. Run the Model Optimizer on the `.caffemodel` and `.prototxt` files:
-```shell
-python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt
-```
-You will likely see the error message:
-```shell
-Error parsing text-format caffe.NetParameter: 196:16: Message type "caffe.DropoutParameter" has no field named "scale_train".
-```
-Whether you see the error depends on your Caffe version. For example, BVLC Caffe does not support the boolean parameter `scale_train` for the `dropout` layer. The error message does not matter, because the dropout layer is needed only for training, and the Model Optimizer removes it.
-3. To proceed, comment out these lines in `test.prototxt`:
-```sh
-...
-layer {
- name: "drop6"
- type: "Dropout"
- bottom: "fc6"
- top: "fc6"
- dropout_param {
- dropout_ratio: 0.5
- # scale_train: false # <-------------- comment out this line
- }
-}
-...
-layer {
- name: "drop7"
- type: "Dropout"
- bottom: "fc7"
- top: "fc7"
- dropout_param {
- dropout_ratio: 0.5
- # scale_train: false # <-------------- comment out this line
- }
-}
-...
-```
-4. Run the Model Optimizer on this model again:
-```shell
-python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt
-```
- You get the model successfully converted to Intermediate Representation, and you can infer it with the Inference Engine.
-
- However, the aim of this tutorial is to demonstrate the way of supporting custom layers not yet supported by the Model Optimizer.
- If you want to understand better how Model Optimizer works, remove the extension for layer `Proposal` and follow all steps of this tutorial.
-
-5. Remove the extension for layer `Proposal`:
-```sh
-mkdir extensions/old
-mv extensions/front/caffe/proposal_python_ext.py extensions/old/proposal_python_ext_old.py
-mv extensions/ops/proposal_python_example.py extensions/old/proposal_python__example_old.py
-```
-6. Now you can run the Model Optimizer on this model once again:
-```sh
-python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt
-```
-You will see the message:
-```shell
-[ ERROR ] Found custom layer proposal. Model Optimizer does not support this layer.
-Please, register it in CustomLayersMapping.xml or implement extension.
-For more information please refer to Model Optimizer FAQ, question #FAQ45.
-```
-This message means the Model Optimizer can load the model, but is unable to infer the shape and handle the custom layer properties.
-
-## Registering a Custom Layer as a Model Optimizer Extension
-
-In the following sections, you will learn how to make the Model Optimizer independent from Caffe\* when processing a
-model that has a custom layer. In this example, the custom layer is referred to as the Proposal layer.
-
-Use this section to implement the mapping rules for the `Proposal` layer attributes and the output shape calculation. As part of these steps, you must first create a class for the `Proposal` layer and inherit it from general-purpose Op that defines the interface of every new custom layer.
-
-In this section, it is important to understand the `Op` class and its function. The implementation of this class shows that it expects a graph and attributes to be passed when initializing. The graph and attributes are in `/deployment_tools/model_optimizer/mo/ops/op.py`
-
-`Op` keeps the attributes for each operation and contains logic for handling node creation for internal model representation. `Op` is responsible for dumping each particular operation to the `.xml` format for the Intermediate Representation. By inheriting from it, the technical items are complete and you concentrate on the specificity of this layer: the attributes it supports and the rules on computing its output shape.
-
-Follow these steps:
-
-1. Create the file `python_proposal.py` in the directory `/deployment_tools/model_optimizer/extensions/ops`:
-```python
-from mo.ops.op import Op
-class PythonProposalOp(Op):
- pass
-```
-2. Define the name of the operation and make a stub constructor:
-```python
-from mo.ops.op import Op
-class PythonProposalOp(Op):
- op = 'Proposal'
- def __init__(self, graph, attrs):
- super().__init__(graph)
-```
-3. Every `Op` must have three specific fields defined: `type`, `op`, and `infer`. In most cases, the `type` and `op` names are the same, and `infer` is defined as a function to compute the output shape. Reflect these fields in your constructor:
-```python
-from mo.ops.op import Op
-class PythonProposalOp(Op):
- op = 'Proposal'
- def __init__(self, graph, attrs):
- mandatory_props = {
- 'type': __class__.op,
- 'op': __class__.op,
- 'infer': None
- }
- super().__init__(graph, mandatory_props, attrs)
-```
- According to the Intermediate Representation catalog, Proposal layer has the following attributes:
-
- * `pre_nms_topn`
- * `post_nms_topn`
- * `nms_thresh`
- * `feat_stride`
- * `min_size`
- * `base_size`
- * `ratio`
- * `scale`
-4. In defining supported attribute names, it is best to use the same names as in the original models. The names are similar to parameters and have no connection with the model layer properties. For clarity, you can use the name `my_ratio` for `ratio`. Other than defining the list of supported parameters, you can define only the parameters that appear in the Intermediate Representation in the `backend_attrs` method.
- Define your attributes:
-```python
-class PythonProposalOp(Op):
- # ... constructor
- def supported_attrs(self):
- return [
- 'pre_nms_topn',
- 'post_nms_topn',
- 'nms_thresh',
- 'feat_stride',
- 'min_size',
- 'base_size',
- 'ratio',
- 'scale'
- ]
-```
-5. Model Optimizer now knows how to create the layer called Proposal when it is in the topology and what attributes this layer has. However, the Model Optimizer does not know how to calculate the output shape of this operation. Define a rule to calculate the output shape:
-```python
-import numpy as np
-from mo.graph.graph import Node
-from mo.ops.op import Op
-class PythonProposalOp(Op):
- def __init__(self, graph, attrs):
- mandatory_props = {
- 'type': __class__.op,
- 'op': __class__.op,
- 'infer': PythonProposalOp.calculate_output_shape
- }
- super().__init__(graph, mandatory_props, attrs)
- # ... supported attrs
- @staticmethod
- def calculate_output_shape(node: Node):
- node.out_node().shape = (1, 1, 1, 1) # any Proposal now has always the same output
-```
-6. According to the Intermediate Representation catalog, Proposal layer has the following output calculation formula, where shape dynamically depends on the `post_nms_topn` parameter.
- Implement the output calculation formula in Python\*:
-```python
-import numpy as np
-class PythonProposalOp(Op):
- # ... static fields
- # ... constructor
- # ... supported attrs
- @staticmethod
- def calculate_output_shape(node: Node):
- input_shape = node.in_node(0).shape
- out_shape = np.array([0, 0], dtype=np.int64)
- # rois blob: holds R regions of interest, each is a 5 - tuple
- # (n, x1, y1, x2, y2) specifying an image batch index n and a
- # rectangle(x1, y1, x2, y2)
- out_shape[0] = input_shape[0] * node.post_nms_topn
- out_shape[1] = 5
- node.out_node(0).shape = out_shape
-```
- The node does not contain this parameter because it should be initialized in the constructor and in other parameters. The Inference Engine contains the implementation of a Caffe\*-like Proposal layer and works well with the default values from `caffe.proto`:
-```
-// Message that stores parameters used by ProposalLayer message ProposalParameter { optional uint32 feat_stride = 1 [default = 16]; optional uint32 base_size = 2 [default = 16]; optional uint32 min_size = 3 [default = 16]; repeated float ratio = 4; repeated float scale = 5; optional uint32 pre_nms_topn = 6 [default = 6000]; optional uint32 post_nms_topn = 7 [default = 300]; optional float nms_thresh = 8 [default = 0.7]; }
-```
-7. Change the constructor as follows:
-```python
-class PythonProposalOp(Op):
- # ... static fields
- def __init__(self, graph, attrs):
- mandatory_props = {
- 'type': __class__.op,
- 'op': __class__.op,
- 'feat_stride': 16,
- 'base_size': 16,
- 'min_size': 16,
- 'ratio': [0.5, 1, 2],
- 'scale': [8, 16, 32],
- 'pre_nms_topn': 6000,
- 'post_nms_topn': 300,
- 'nms_thresh': 0.7,
- 'infer': PythonProposalOp.calculate_output_shape
- }
- super().__init__(graph, mandatory_props, attrs)
- # ... supported attrs
- # ... calculate output shape
-
-```
-
-It is mandatory to call two functions right after the implementation of that class:
-
-```
-class ProposalPythonOp(Op):
- ...
-
-register_caffe_python_extractor(ProposalPythonOp, 'rpn.proposal_layer.ProposalLayer')
-Op.excluded_classes.append(ProposalPythonOp)
-```
-
-Note that the first call register_caffe_python_extractor(ProposalPythonOp, 'rpn.proposal_layer.ProposalLayer') registers the extension of the layer in the Model Optimizer that will be found by a specific name (it is mandatory to join module name and layer name): 'rpn.proposal_layer.ProposalLayer'.
-
-The second call prevents the Model Optimizer from using this extension as if it is an extension for a layer with type `Proposal`. Otherwise, this layer can be chosen as an implementation of extension that can lead to potential issues.
-
-**Summary**
-
-In this section you implemented support for a custom layer with type `Python` that is `Proposal` layer in the topology. You learned how to calculate output shape of this layer.
-
-The values of attributes are hardcoded, and in the next section you will learn how to extract these values from original framework model (Caffe model in this case).
-
-## Registering Rules to Pass Extension Layer Properties from a Caffe\* Model to the Intermediate Representation
-
-Model Optimizer now knows how to set the shape of the `PythonProposalOp` operation, but it is incorrect to initialize attributes with same values for every operation. Instead, the values should be extracted from the original topology. Model Optimizer does not know how to map the custom layer properties to the `PythonProposalOp`. For this, you must register the `FrontExtractorOp` instance.
-
-> **NOTE**: This step is required only if the layer requires parameters from the original model.
-
-1. Remove call functions `register_caffe_python_extractor` and `Op.excluded_classes.append` from the file with `op`, because you will implement extracted attributes from prototxt by yourself.
-There are multiple types of layers in Caffe: for example, `Convolution` and `Pooling`. Also, there is a specific type for custom Python\* layers called `Python`. Therefore, it is necessary to distinguish between those 'usual' types of layers and custom ones. If you want to implement extensions for a layer with type different to `Python`, you need to inherit your class of operation (for example, `ProposalFrontExtractor`) from `FrontExtractorOp`. Otherwise, inherit your class of operation from `CaffePythonFrontExtractorOp`.
-2. Create a file `python_proposal_ext.py` in the folder `/deployment_tools/model_optimizer/extensions/front/caffe`
-```py
-from mo.front.extractor import CaffePythonFrontExtractorOp
-class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp):
- pass
-```
-For other layers types, inherit from `FrontExtractorOp`:
-```py
- from mo.front.extractor import FrontExtractorOp
- class ProposalFrontExtractor(FrontExtractorOp):
- pass
-```
-You will implement extractor for layer with type `Python`, however, the steps are generally the same for layers with other types.
-3. Specify the operation that the extractor refers to and a specific flag. The flag represents whether the operation should be used by the Model Optimizer or should be excluded from processing:
-```py
-from mo.front.extractor import CaffePythonFrontExtractorOp
-class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp):
- op = 'rpn.proposal_layer.ProposalLayer'
- enabled = True
-```
-4. Register a mapping rule between the original model and the `PythonProposalOp` attributes by overriding the following function:
-```py
-from mo.front.extractor import CaffePythonFrontExtractorOp
-from mo.ops.op import Op
-class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
- op = 'rpn.proposal_layer.ProposalLayer'
- enabled = True
- @staticmethod
- def extract(node):
- proto_layer = node.pb
- param = proto_layer.python_param # each layer has a specific parameter, take a look at caffe.proto
- python_params = str(param.param_str) # for Python layers, all params are in param_str
- attrs = {
- 'feat_stride': int(python_params.split(':')[-1])
- }
- # update the attributes of the node
- Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs) # <------ here goes the name ('Proposal') of the Operation that was implemented before
- return __class__.enabled
-```
-> **NOTE:** if you implement extension for layer with type different to `Python`, change the following line: Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs) to this line: Op.get_op_class_by_name(__class__.op).update_node_stat(node, mapping_rule).
-You have successfully extracted the parameter `feat_stride` from `prototxt`, assuming it is the only parameter in this layer.
-5. To increase the implementation flexibility:
-```py
- from mo.front.extractor import CaffePythonFrontExtractorOp
- from mo.ops.op import Op
- class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp):
- op = 'rpn.proposal_layer.ProposalLayer'
- enabled = True
- @staticmethod
- def extract(node):
- param = node.pb.python_param
- attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
- Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs)
- return ProposalPythonFrontExtractor.enabled
-```
-
-You can successfully convert the model. Open the `.xml` file and view your code:
-```xml
-...
-
-
-
-
- 1
- 18
- 15
- 15
-
-
- 1
- 36
- 15
- 15
-
-
- 1
- 3
-
-
-
-
-...
-```
-
-Look at the output shape of the custom layer you implemented. The shape was calculated according to the rules specified in `PythonProposalOp`. The `ratio` and `scale` properties have the value `[0.5, 1, 2]` and `[8, 16, 32]`. They have square brackets because they are originally a repeated parameter. You converted the parameter to a list in `PythonProposalOp`. Model Optimizer cast the value to a string. According to Python\* rules, a list has a string representation of opening and closing square brackets and values joined by commas.
-
-This is not a valid notation for the Intermediate Representation specification, because repeated parameters must be separated by a comma but without the brackets. Therefore, you must override the Model Optimizer default behavior regarding how it handles those parameters during the Intermediate Representation emitting stage, after the optimizations are complete. To do so, implement `backend_attrs()` in the `PythonProposalOp` class:
-```python
-class PythonProposalOp(Op):
- ... other methods
- def backend_attrs(self) -> list:
- """
- Gets list of attributes that should appear in resulting IR
- Returns:
- list of attributes names or list of tuples (name of attribute, pre-processing rule)
- """
- return [
- ( # a tuple per attribute
- 'ratio', # name of attribute
- # pre-processing rule in a form of lambda
- # lambda takes a PythonProposalOp node with all defined properties
- # it translates [1,2,3] -> "1,2,3"
- lambda node: ','.join(map(str, node['ratio']))
- ),
- (
- 'scale',
- lambda node: ','.join(map(str, node['scale']))
- ),
- 'feat_stride',
- 'base_size',
- 'min_size',
- 'pre_nms_topn',
- 'post_nms_topn',
- 'nms_thresh'
- ]
-```
-The model can now be successfully converted.
-
-Open the `.xml` file. `ratio` and `scale` have the expected correct values `0.5,1,2` and `8,16,32`:
-```xml
- ...
-
-
-
-
- ...
-
-
-
-
- ...
-```
-
-> **NOTE**: Model Optimizer supports the Faster-R-CNN topology. Run the following command for the same Intermediate Representation:
-
-```sh
-python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt --extensions /deployment_tools/inference-engine/samples/object_detection_sample/fasterrcnn_extensions
-```
-
-**Summary**
-
-In this section you learned how to:
-
-1. Create a framework-independent extension implementation of the Intermediate Representation custom layer with unified logic for calculating output shapes, specified set of attributes
-2. Use the Framework-Specific property extractor to map original model custom layer properties to the expected properties of the Framework-Independent extension
-3. Manipulate the custom layer properties representation in the resulting Intermediate Representation
-
-Files used in this section:
-
-* `/deployment_tools/model_optimizer/extensions/ops/python_proposal.py`:
-
-```py
-import networkx as nx
-import numpy as np
-from mo.front.extractor import attr_getter
-from mo.graph.graph import Node
-from mo.ops.op import Op
-
-class ProposalOp(Op):
- op = 'Proposal'
-
- def __init__(self, graph: nx.MultiDiGraph, attrs: dict):
- mandatory_props = {
- 'type': __class__.op,
- 'op': __class__.op,
- 'post_nms_topn': 300, # default in caffe-shared
- 'infer': ProposalOp.proposal_infer
- }
- super().__init__(graph, mandatory_props, attrs)
-
- def supported_attrs(self):
- return [
- 'feat_stride',
- 'base_size',
- 'min_size',
- 'ratio',
- 'scale',
- 'pre_nms_topn',
- 'post_nms_topn',
- 'nms_thresh'
- ]
-
- def backend_attrs(self):
- return [
- 'feat_stride',
- 'base_size',
- 'min_size',
- ('ratio', lambda node: attr_getter(node, 'ratio')),
- ('scale', lambda node: attr_getter(node, 'scale')),
- 'pre_nms_topn',
- 'post_nms_topn',
- 'nms_thresh',
- ]
-
- @staticmethod
- def proposal_infer(node: Node):
- input_shape = node.in_node(0).shape
- out_shape = np.array([0, 0], dtype=np.int64)
- # rois blob: holds R regions of interest, each is a 5 - tuple
- # (n, x1, y1, x2, y2) specifying an image batch index n and a
- # rectangle(x1, y1, x2, y2)
- out_shape[0] = input_shape[0] * node.post_nms_topn
- out_shape[1] = 5
- node.out_node(0).shape = out_shape
-```
-* `/deployment_tools/model_optimizer/extensions/front/caffe/python_proposal_ext.py`:
-
-```py
-from mo.front.extractor import CaffePythonFrontExtractorOp
-from mo.ops.op import Op
-
-class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
- op = 'rpn.proposal_layer.ProposalLayer'
- enabled = True
-
- @staticmethod
- def extract(node):
- param = node.pb.python_param
- attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
- Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs)
- return ProposalPythonFrontExtractor.enabled
-```
+This page is deprecated. Please, refer to [Model Optimizer Extensibility](Customize_Model_Optimizer.md) page for more information.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
index ba56ecfcaa147d..c106d489ea8af7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
@@ -1,10 +1,23 @@
# Legacy Mode for Caffe* Custom Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Legacy_Mode_for_Caffe_Custom_Layers}
-> **NOTE**: This functionality is deprecated and will be removed in future releases.
+> **NOTE**: This functionality is deprecated and will be removed in future releases.
-Model Optimizer can register custom layers in a way that the output shape is calculated by the Caffe\* framework installed on your system. This chapter covers this option.
+Model Optimizer can register custom layers in a way that the output shape is calculated by the Caffe\* framework
+installed on your system. This approach has several limitations:
-> **NOTE**: Caffe Python\* API has an issue when layer name does not correspond to the name of its top. The fix was implemented on [BVLC Caffe\*](https://github.com/BVLC/caffe/commit/35a7b87ad87457291dfc79bf8a7e7cf7ef278cbb). The Caffe framework on your computer must contain this fix. Otherwise, Caffe framework can unexpectedly fail during the fallback procedure.
+* If the output shape of your layer depends on dynamic parameters, input data, or parameters of previous layers, the
+output shape calculated via Caffe can be incorrect. For example, `SimplerNMS` filters out bounding boxes that do not
+satisfy a condition. Internally, the Caffe fallback forwards the whole net without any meaningful data - just some
+noise. It is natural to get only one bounding box (0,0,0,0) instead of the expected number (for example, 15). There is
+an option to patch Caffe accordingly, however, it makes the success of the Intermediate Representation generation
+dependent on the patched Caffe on the particular machine. To keep the solution independent from Caffe, we recommend
+using the extensions mechanism described in [Model Optimizer Extensibility](Customize_Model_Optimizer.md) for such
+layers.
+* It is not possible to produce an Intermediate Representation on a machine that does not have Caffe installed.
+
+> **NOTE**: Caffe Python\* API has an issue when layer name does not correspond to the name of its top. The fix was
+> implemented on [BVLC Caffe\*](https://github.com/BVLC/caffe/commit/35a7b87ad87457291dfc79bf8a7e7cf7ef278cbb). The
+> Caffe framework on your computer must contain this fix. Otherwise, Caffe framework can unexpectedly fail during the
+> fallback procedure.
> **NOTE**: The Caffe fallback feature was validated against [this GitHub revision](https://github.com/BVLC/caffe/tree/99466224dac86ddb86296b1e727794fb836bd80f). You may have issues with forks or later Caffe framework versions.
@@ -25,7 +38,8 @@ Where:
**Example**:
-1. `Proposal` layer has parameters, and they appear in the Intermediate Representation. The parameters are stored in the `proposal_param` property of the layer:
+1. `Proposal` layer has parameters, and they appear in the Intermediate Representation. The parameters are stored in
+the `proposal_param` property of the layer:
```shell
\
```
@@ -34,16 +48,6 @@ Where:
\
```
-For this feature, you need an appropriate version of Caffe installed on the computer on which you run the Model Optimizer.
-
-## Constraints of Using the Caffe Fallback
-
-Several layers in the Caffe\* framework can have shapes that dynamically depend on the input data, not only the layers that proceed the layer and its parameters. For example, `SimplerNMS` is filtering out bounding boxes that do not satisfy the condition. Internally, Caffe fallback forwards the whole net without any meaningful data - just some noise. It is natural to get only one bounding box (0,0,0,0) instead of expected number (for example, 15). There is an option to patch Caffe accordingly, however, it makes success of Intermediate Representation generation on the patched Caffe on the particular machine. To keep the solution independent from Caffe, we recommend to use extensions mechanism for such layers.
-
-Known cases like `Proposal`, `DetectionOutput`, `SimplerNMS` are implemented as extensions and can be used out of the box.
-
-A detailed description of supported layers is in the [Operations Specification](../../../ops/opset.md) document.
-
## Building Caffe\*
1. Build Caffe\* with Python\* 3.5:
@@ -68,4 +72,4 @@ python3
import caffe
```
-If Caffe was installed correctly, the `caffe` module is imported without errors.
\ No newline at end of file
+If Caffe was installed correctly, the `caffe` module is imported without errors.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
index d3ba399a87745d..a3e6eda7756ad7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
@@ -1,363 +1,4 @@
# Sub-Graph Replacement in the Model Optimizer {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Subgraph_Replacement_Model_Optimizer}
-Several reasons exist for why the Model Optimizer could not generate an Intermediate Representation for a model. However, in some cases, the Intermediate Representation could be generated after providing certain hints to the tool. The examples of hints below are mostly related to TensorFlow\*, but potentially could be actual for models created in any framework:
-
-* Topology contains an operation (or a sub-graph of operations) not known for Model Optimizer, but this operation (sub-graph) could be expressed as a combination of known operations. A hint would be a description of this combination to the tool).
-* Sub-graph of operations in the topology expresses a single layer known to Inference Engine.
-* TensorFlow and Inference Engine use different layouts of tensors, NHWC and NCHW respectively. If some tensor in NHWC layout is flattened (for example, all the dimensions are squashed into single dim), it is not possible to convert it to NCHW layout required for Inference Engine, so Model Optimizer cannot produce correct Intermediate Representation.
-
-The detailed solutions for the examples above are given later, the next subsection shows what is common in all three examples.
-
-## Sub-graph Replacement
-
-In these cases, the sub-graph (or a single node) of initial graph is replaced with a new sub-graph (single node). The sub-graph replacement consists of the following steps:
-
-1. Identify an existing sub-graph for replacement
-
-2. Generate a new sub-graph
-
-3. Connect a new sub-graph to the graph (create input/output edges to the new sub-graph)
-
-4. Create output edges out of a new sub-graph to the graph
-
-5. Do something with the original sub-graph (for example, remove it)
-
-Model Optimizer provides several ways to perform most of the sub-graph replacement steps. The next subsections describe these methods.
-
-## Replace a Single Operation with a Sub-graph of Operations
-
-For example, there is an operation `SquaredDifference` in TensorFlow which calculates \f$(a - b)^2\f$, where \f$a\f$ and \f$b\f$ are input tensors. Inference Engine does not support such operation. However, `SquaredDifference` could be expressed using two `Power` operations and one `Eltwise Add`. The `Power` operation calculates \f$scale * (a ^ {power}) + shift\f$, where \f$a\f$ is a tensor and \f$scale\f$, \f$power\f$ and \f$shift\f$ are float values. The first `Power` operation negates the value of tensor \f$b\f$. The second one is used to square the result of \f$a + (- b)\f$ which is calculated using the `Eltwise Add` operation applied to tensor \f$a\f$ and tensor \f$-b\f$.
-
-Given that, we can replace all `SquaredDifference` operations in the initial model with two `Power` and one `Eltwise` operations. The replacer is implemented in the following file `/deployment_tools/model_optimizer/extensions/front/SquaredDifference.py`.
-```python
-import networkx as nx
-from mo.front.common.replacement import FrontReplacementOp
-from mo.graph.graph import Node
-from mo.ops.eltwise import Eltwise
-from mo.ops.power import Power
-class SquaredDifference(FrontReplacementOp):
- """
- Example class illustrating how to implement replacement of a single op in the front-end of the MO pipeline.
- This class replaces a single op SquaredDifference by a sub-graph consisting of 3 lower-level ops.
- """
- op = "SquaredDifference"
- enabled = True
- def replace_op(self, graph: nx.MultiDiGraph, node: Node):
- negate = Power(graph, dict(scale=-1, name=node.name + '/negate_'))
- add = Eltwise(graph, dict(operation='sum', name=node.name + '/add_'))
- squared = Power(graph, dict(power=2, name=node.name + '/squared_'))
- out_node = squared.create_node([add.create_node([node.in_node(0), negate.create_node([node.in_node(1)])])])
- # Replace edge from out port 0 of the matched node with a edge from node out_node.id with port 0.
- # The "explicit" version of the return value is: [(out_node.id, 0)])
- return [out_node.id]
-```
-Model Optimizer internal representation of the graph uses the networkx module.
-
-**Key lines**:
-
-* Line 1: Imports this module.
-
-* Line 3: Imports class `FrontReplacementOp` that is used to replace operation of particular type with a new sub-graph. This class performs the first step of the sub-graph replacement (identifies an existing sub-graph for replacement). It is important to mention that the replacement happens before shape inference and creation of data nodes representing tensors with values. At this stage of model conversion pipeline, all nodes in the graph are operation nodes or nodes of type `Const` that produce tensor with fixed value embedded into the node.
-
-* Line 4: Imports class `Node` representing a single node in the computation graph.
-
-* Lines 5 - 6: Import classes representing operations `Power` and `Eltwise`. These classes are inherited from base class `mo.ops.Op` that represents operation and stores its attributes.
-
-* Line 9: Defines class `SquaredDifference` inherited from `FrontReplacementOp`. This is a replacer class that is automatically registered and executed by Model Optimizer. Since the class is located in the common (not framework) specific directory `/deployment_tools/model_optimizer/extensions/front`, it is used for replacement for all supported frameworks.
-
-* Line 15: Defines the class variable `op` that stores the name of the operation to be replaced. In this case, it is `SquaredDifference`.
-
-* Line 16: Defines class variable `enabled` that controls whether the replacer is enabled or not. The only function that should be implemented in the class is `replace_op`. It gets graph to operate on and an instance of node of desired operation (`SquaredDifference` in this case). This function performs step two and three of the sub-graph replacement (generates a new sub-graph to replace with and connects a new sub-graph to the graph).
-
-* Lines 19 - 21: Create instances of operations classes with required attributes.
-
-* Line 23: Creates a sub-graph from the operations defined above. The `create_node` method of the `Op` class generates `Node` from the `Op` and uses single mandatory argument - the list of input nodes (represented as instances of `Node` class) to create input edges to the node being generated. Inputs of the `SquaredDifference` node are retrieved using `node.in_node(0)` and `node.in_node(1)` method calls. The `Eltwise Add` node gets first input as initial first input of `SquaredDifference` node, the second input of `add` is the result of negation of the second input of `SquaredDifference` node: `[add.create_node([node.in_node(0), negate.create_node([node.in_node(1)])])]`. Then the result of `Add` node is squared. `out_node` node performs this calculation.
-
-The `replace_op` function returns a list of node names used to create output edges of the sub-graph to connect it with the rest of the graph. Each element of the list describes mapping between old output edge of the matched node and new sub-graph node and output edge index. The i-th element of the list corresponds to the i-th output tensor of the matched node. In this case, `SquaredDifference` produces single tensor through output port 0, so the returned list contains single element. In general, each element is a tuple, where the first element is the name of a new node producing required tensor and the second is the output port for that tensor. If the output port is 0, it is possible to use shortcut - just the name of the node instead of a tuple. Line 26 uses this shortcut. The returned value is used to create the new sub-graph output edges (step 4 of the sub-graph replacement).
-
-Default implementation of the `FrontReplacementOp` class removes matched node and all its input/output edges (step 5 of the sub-graph replacement).
-
-Another example of such kind of replacement is in the `/deployment_tools/model_optimizer/extensions/front/Sub.py` class where all instances of `Sub` operations are replaced with two operations: `Power` to negate the second argument and the `Eltwise` to perform elementwise add.
-
-## Replace Sub-graph of Operations with a New Sub-graph of Operations
-
-The previous example considered situation when one single node of a specific type is replaced. When it is necessary to replace a sub-graph of operations it is necessary to tell Model Optimizer how to identify this sub-graph. There are three ways to achieve that:
-
-* Use graph isomorphism pattern of the networkx module
-
-* Use nodes name pattern to identify `scope` (according to TensorFlow terminology) to be replaced
-
-* Use sets of `start` and `end` node names to match all nodes "between" them
-
-The next sections explain each option using real examples.
-
-### Replace Sub-graph of Operations Using Graph Isomorphism Pattern
-
-The networkx Python\* module provides methods to find a graph isomorphic to the given one using node and edge matching: for example, `networkx.algorithms.isomorphism.categorical_node_match` and `networkx.algorithms.isomorphism.categorical_multiedge_match`. Model Optimizer uses these methods and provides a simple API on top of this feature.
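-
-For reference, the following self-contained toy example (not Model Optimizer code) shows these networkx primitives in action: a two-node pattern with an `op` node attribute and an `in` edge attribute is searched for in a small graph.
-```python
-import networkx as nx
-from networkx.algorithms import isomorphism
-
-# a tiny "model" graph with an 'op' attribute on nodes and an 'in' attribute on the edge
-graph = nx.MultiDiGraph()
-graph.add_node('n1', op='Mean')
-graph.add_node('n2', op='StopGradient')
-graph.add_edge('n1', 'n2', **{'in': 0})
-
-# the pattern to search for, expressed the same way
-pattern = nx.MultiDiGraph()
-pattern.add_node('mean', op='Mean')
-pattern.add_node('stop_grad', op='StopGradient')
-pattern.add_edge('mean', 'stop_grad', **{'in': 0})
-
-matcher = isomorphism.MultiDiGraphMatcher(
-    graph, pattern,
-    node_match=isomorphism.categorical_node_match('op', None),
-    edge_match=isomorphism.categorical_multiedge_match('in', None))
-
-# prints the mapping from graph nodes to pattern aliases, e.g. [{'n1': 'mean', 'n2': 'stop_grad'}]
-print(list(matcher.subgraph_isomorphisms_iter()))
-```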
-
-For example, Caffe\* has a layer called [Mean-Variance Normalization (MVN)](http://caffe.berkeleyvision.org/tutorial/layers/mvn.html), which is also supported by the Inference Engine. In TensorFlow, this layer is implemented with low-level operations: `Mean`, `StopGradient`, `SquaredDifference`, `Squeeze` and `FusedBatchNorm`. Model Optimizer should replace the sub-graph of these operations with a single Inference Engine layer of type `MVN`.
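-
-As a reference for what this pattern computes, the following minimal NumPy sketch implements the same normalization; the reduction axes and epsilon values here are illustrative, not taken from a specific model:
-```python
-import numpy as np
-
-def mvn_reference(x: np.ndarray, axes=(1, 2), eps=1e-9) -> np.ndarray:
-    # mean-variance normalization over the given axes: (x - mean) / sqrt(variance + eps)
-    mean = x.mean(axis=axes, keepdims=True)
-    variance = x.var(axis=axes, keepdims=True)
-    return (x - mean) / np.sqrt(variance + eps)
-```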
-
-The file `/deployment_tools/model_optimizer/extensions/front/tf/mvn.py` performs such a replacement. The first part of the file is:
-```python
-class MVN(FrontReplacementSubgraph):
- enabled = True
- def pattern(self):
- log.debug('Enabled MVN replacement')
- return dict(
- nodes=[
- ('mean', dict(op='Mean')),
- ('stop_grad', dict(op='StopGradient')),
- ('sqdiff', dict(op='SquaredDifference')),
- ('variance', dict(op='Mean')),
- ('squeeze_mean', dict(op='Squeeze')),
- ('squeeze_variance', dict(op='Squeeze')),
- ('fbn', dict(op='FusedBatchNorm')),
- ],
- edges=[
- ('mean', 'stop_grad', {'in': 0}),
- ('stop_grad', 'sqdiff', {'in': 1}),
- ('sqdiff', 'variance', {'in': 0}),
- ('mean', 'squeeze_mean', {'in': 0}),
- ('variance', 'squeeze_variance', {'in': 0}),
- ('squeeze_mean', 'fbn', {'in': 3}),
- ('squeeze_variance', 'fbn', {'in': 4}),
- ],
- node_attrs=['op'],
- edge_attrs=['in'])
-```
-**Key lines**:
-
-* Line 1: Defines class `MVN` inherited from class `FrontReplacementSubgraph` that performs sub-graph replacement using sub-graph isomorphism pattern.
-
-* Line 3: Sets the class variable `enabled` to `True`, meaning that this replacer is enabled.
-
-* The function `pattern` defines the sub-graph constraints to be matched. It returns a dictionary with four keys:
-
- * the `nodes` defines a list of nodes to be matched. Each element in the list is a tuple. The first element is the alias name assigned for the matched node, the second element is a dictionary with desired attributes of the node.
-
- * the `edges` defines a list of edges to be matched. Each element in the list is a tuple. The first and the second elements are the start and end edge nodes alias names respectively. The third element is a dictionary with desired edge attributes.
-
- * the `node_attrs` contains the names of nodes attributes to use during sub-graph isomorphism search.
-
- * the `edge_attrs` contains the names of edges attributes to use during sub-graph isomorphism search.
-
- The sub-graph is matched if all provided constraints are satisfied. If at least one node with desired attributes is missing or at least one defined edge is absent, the sub-graph is not matched.
-* Line 9: Adds a constraint that the sub-graph should contain a node with the attribute `op` equal to `Mean`. The matched node gets the alias name `mean`. In the same way, line 10 adds a constraint for a `StopGradient` node; the matched node gets the alias name `stop_grad`.
-
-* Line 18: Defines an edge from the node with alias name `mean` to the node with alias name `stop_grad` having the attribute `in` equal to 0. This means that the output of node `mean` is connected to the node `stop_grad` as the first input (Model Optimizer uses zero-based indexing, which is why `in` is 0). Another example of defining edge constraints is in line 25, where the output of `squeeze_mean` is connected to the `fbn` node as the fourth input.
-
-* Lines 26 - 27: Specify the lists of attributes to be checked. In fact, these lists are just the lists of all keys in the dictionaries for node and edge attributes.
-
-Now that the Model Optimizer knows how to find the sub-graph (step 1 of the sub-graph replacement), it is necessary to implement a function that performs the actual sub-graph replacement (steps 2 and 3). The code for this function is:
-```python
-def replace_sub_graph(self, graph: nx.MultiDiGraph, match: dict):
- fbn = match['fbn']
- input = fbn.in_node(0)
- log.debug('Found potential MVN pattern after {} with name {}'.format(input.op, input.name))
- if input.id != match['mean'].in_node(0).id or input.id != match['sqdiff'].in_node(0).id:
- return
- log.debug('Confirmed MVN pattern after {} with name {}'.format(input.op, input.name))
- MVN = Op.get_op_class_by_name('MVN')
- mvn = MVN(graph, dict(
- name=fbn.name + '/MVN_',
- eps=fbn.eps,
- required_reduction_indices=[1,2] if fbn.data_format == b'NHWC' else [2,3]
- ))
- mvn.attrs['old_infer'] = mvn.attrs['infer']
- mvn.attrs['infer'] = __class__.infer
- mul = Eltwise(graph, dict(operation='mul', name=fbn.name + '/Mul_'))
- add = Eltwise(graph, dict(operation='sum', name=fbn.name + '/Add_'))
- input_gamma = fbn.in_node(1)
- input_beta = fbn.in_node(2)
- mean_reduction = match['mean'].in_node(1)
- variance_reduction = match['mean'].in_node(1)
- new_subgraph = add.create_node([
- mul.create_node([
- mvn.create_node([input, mean_reduction, variance_reduction]),
- input_gamma
- ]),
- input_beta
- ])
- replace_node(fbn, new_subgraph)
-```
-The function accepts two arguments - the graph and the dictionary `match`. The keys in the dictionary are the alias names of the matched nodes (defined in the `nodes` list in the function `pattern`) and the values are the matched nodes of the graph (instances of the Node class).
-
-The function generates a new sub-graph with a node of type `MVN` and two nodes of type `Eltwise` calculating sum and product. There is nothing special in how the sub-graph is generated or in the mathematics behind it, so attention is focused on two other aspects of this function.
-
-The first one is the call to function `replace_node` in line 36. `FusedBatchNorm` node is replaced with the output node of the generated sub-graph: all input edges of the `FusedBatchNorm` node are re-connected to the `new_subgraph` node, all consumers of the `FusedBatchNorm` node are updated to get inputs from the `new_subgraph` node. This action connects newly generated sub-graph with an existing graph (step 4 of the sub-graph replacement).
-
-The second one is that the default implementation of the inference function for the `MVN` operation is overwritten. In line 16, the default implementation of the inference function for `MVN` is saved to the attribute `old_infer`. In line 17, the new inference function is saved to the instance of the `MVN` operation class. The new inference function code is as follows:
-```python
-@staticmethod
-def infer(node: Node):
- if not(node.in_node(1).has_valid('value') and node.in_node(2).has_valid('value')):
- log.warning('Reduction indices for mean and variance for MVN node {} are not constants'.format(node.name))
- return
- if not(all(node.in_node(1).value == node.required_reduction_indices) and
- all(node.in_node(2).value == node.required_reduction_indices)):
- log.warning('Reduction indices for mean {} and variance {} do not match required ones {}'.format(
- node.in_node(1).value,
- node.in_node(2).value,
- node.required_reduction_indices
- ))
- return
- node.graph.remove_edge(node.in_node(1).id, node.id)
- node.graph.remove_edge(node.in_node(2).id, node.id)
- node.old_infer(node)
-```
-The `infer` function is needed to infer value of the node (if it is possible) and to infer shapes of the output tensors of the node (mandatory). The custom `infer` function performs additional checks that describe limitations of the `MVN` layer implementation in the Inference Engine. For example, reduction indices for mean and variance must be constants (line 10), while in TensorFlow they could be computed during model inference. In addition, the function removes two edges from the graph (lines 17 and 18) because all required information is already stored in the `MVN` node attributes. This is due to different `MVN` layer implementation in Inference Engine and TensorFlow\*: `mean` and `variance` are attributes of the node in Inference Engine while in TensorFlow they are input tensors. Edges are not removed in the `replace_sub_graph` function, because these edges are used in the `infer` function (lines 7-12).
-
-The last action in the `infer` method (line 19) is to call default infer function for the `MVN`, which is saved in the attribute `old_infer` of the node to infer output tensors shapes.
-
-At step 5 of the sub-graph replacement, the six matched nodes are automatically removed during the dead code elimination pass that is performed after applying the defined custom sub-graph replacements. The six matched nodes are no longer connected to the inputs of the network after the node `fbn` is replaced with a newly created sub-graph node. Since they are not marked as output nodes (using the `--output` command line parameter), they can be removed.
-
-The replacement works for all sub-graph isomorphism instances found in the network.
-
-### Replace Sub-graph of Operations Using Nodes Name Pattern
-
-TensorFlow uses a mechanism of scopes to group related operation nodes. It is a good practice to put nodes performing a particular task into a scope. This approach divides a graph into logical blocks that are easier to review in TensorBoard\*. The `scope`, in fact, just defines a common prefix for the node names in the scope.
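-
-For illustration, the following TensorFlow 1.x snippet shows that a scope is nothing more than a common prefix of the node names (the shapes and names here are arbitrary):
-```python
-import tensorflow as tf  # TensorFlow 1.x API
-
-x = tf.placeholder(tf.float32, [1, 35, 35, 384], name='input')
-with tf.name_scope('Mixed_5b'):
-    y = tf.nn.relu(x, name='relu')
-
-print(y.op.name)  # "Mixed_5b/relu" - the scope only prefixes the node name
-```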
-
-For example, Inception topologies contain several types of so-called "Inception blocks". Some of them are exactly equal to each other, but located in different places of the network. For example, Inception V4 from `tensorflow.contrib.slim` module has inception blocks `Mixed_5b`, `Mixed_5c` and `Mixed_5d` with exactly the same nodes with the same attributes.
-
-Now consider a situation when someone has implemented these Inception blocks extremely efficiently using a single Inference Engine custom layer called `InceptionBlock` and would like to replace these blocks with instances of this layer to decrease inference time. Model Optimizer provides a mechanism to replace a sub-graph of operations defined by regular expressions for the node name prefixes (scope). In this particular case, some of the patterns are: `.*InceptionV4/Mixed_5b`, `.*InceptionV4/Mixed_5c` and `.*InceptionV4/Mixed_5d`. Each pattern starts with `.*`, because the prefix `InceptionV4` is added to all node names during model freezing.
-
-The sub-graph replacement using a node names pattern is a bit trickier than the replacement of a single operation or the networkx isomorphism pattern described above. You should do the following additional steps in comparison with the previously described replacements:
-
-1. Prepare configuration file template defining node names patterns and information about custom layer attributes.
-
-2. Run Model Optimizer with command line parameter to add information about input and output nodes of the specified sub-graphs.
-
-Consider the following possible configuration file for the Inception Block replacer:
-```json
-[
- {
- "custom_attributes": {
- "attr1_key": "attr1_value",
- "attr2_key": 123456
- },
- "id": "InceptionBlockReplacer",
- "op": "InceptionBlock",
- "instances": [
- ".*InceptionV4/Mixed_5b",
- ".*InceptionV4/Mixed_5c",
- ".*InceptionV4/Mixed_5d"
- ],
- "match_kind": "scope"
- }
-]
-```
-The `.json` file contains a list of dictionaries. Each dictionary defines one replacement. Each replacement is defined with several keys:
-
-* `id` (mandatory) is a unique identifier of the replacer. It is used in the Python\* code that implements sub-graph replacement to link the class and the replacement description from the configuration file.
-
-* `match_kind` (mandatory) is a string that specifies which matching algorithm is used. Currently, `scope` and `points` are supported. In this example, the first one is considered. The `points` match kind is described below.
-
-* `instances` (mandatory) specifies instances of the sub-graph to be matched. It contains a list of node names prefixes patterns for the match kind `scope`.
-
-* `custom_attributes` (optional) is a dictionary with static attributes of the layer to be dumped to Inference Engine Intermediate Representation `.xml` file.
-
-* `op` (optional) is used only if sub-graph replacement Python code is not needed, because the sub-graph should be replaced with a single node of type `op`. If this attribute is not set, it is necessary to implement Python code that generates the new sub-graph. Both options are considered in this example.
-
-When the configuration file is ready, run the Model Optimizer with the regular command line parameters pointing to the model file and input shapes (if necessary) and the additional parameter `--tensorflow_custom_operations_config_update` pointing to the created configuration file. If the file is correct, Model Optimizer adds two keys to the `InceptionBlockReplacer` dictionary: `inputs` and `outputs` with the following content:
-```json
-[
- {
- "id": "InceptionBlockReplacer",
- ...
- "inputs": [
- [
- {
- "node": "Branch_2/Conv2d_0a_1x1/Conv2D$",
- "port": 0
- },
- {
- "node": "Branch_3/AvgPool_0a_3x3/AvgPool$",
- "port": 0
- },
- {
- "node": "Branch_1/Conv2d_0a_1x1/Conv2D$",
- "port": 0
- },
- {
- "node": "Branch_0/Conv2d_0a_1x1/Conv2D$",
- "port": 0
- }
- ]
- ],
- "outputs": [
- {
- "node": "concat$",
- "port": 0
- }
- ]
- }
-]
-```
-The value for key `inputs` is a list of lists describing input tensors of the sub-graph. Each element of the top-level list corresponds to one unique input tensor of the sub-graph. Each internal list describes a list of nodes consuming this tensor and port numbers where the tensor is consumed. Model Optimizer generates regular expressions for the input nodes names to uniquely identify them in each instance of the sub-graph defined by the `instances`. Denote these nodes as input nodes of the sub-graph.
-
-In the InceptionV4 topology, the `InceptionV4/Mixed_5b` block has four input tensors from outside of the sub-graph, but all of them are produced by the node `InceptionV4/Mixed_5a/concat`. Therefore, the top-level list of the `inputs` contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by `InceptionV4/Mixed_5a/concat` node. In this case, all four input nodes consume input tensor into port 0.
-
-The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level list is important. This order defines the order in which the Model Optimizer attaches input tensors to a newly generated node if the sub-graph is replaced with a single node. The i-th input node of the sub-graph is obtained using the call `match.single_input_node(i)` in the sub-graph replacer code. More information about the API is given below. If you need to change the order of input tensors, you can edit the configuration file in a text editor.
-
-The value for the key `outputs` is a list describing nodes of the sub-graph producing tensor that goes outside of the sub-graph or does not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in the list is important. The i-th element of the list describes the i-th output tensor of the sub-graph, which could be obtained using call `match.output_node(i)`. The order of elements can be manually changed in the configuration file. Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node.
-
-Now that the meaning of the `inputs` and `outputs` attributes is clear, return to the replacer implementation. The replacer `InceptionBlockReplacer` contains the attribute `op` with the value `InceptionBlock`, which means that the identified sub-graph should be replaced with a single layer of type `InceptionBlock`. This layer is not known to the Model Optimizer, so it is necessary to define it. See [Extending the Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md). You must create the file `extension/ops/InceptionBlock.py` with the following content:
-```python
-import numpy as np
-from mo.graph.graph import Node
-from mo.ops.op import Op
-class InceptionBlock(Op):
- op = "InceptionBlock"
- enabled = True
- def __init__(self, graph, attrs):
- super().__init__(graph, attrs, {
- 'type': __class__.op,
- 'op': __class__.op,
- })
-```
-The shape inference function is not defined. In this case, Model Optimizer uses the TensorFlow fallback to calculate the shapes of the sub-graph output tensors.
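-
-If you prefer not to rely on the TensorFlow fallback, a shape inference function can be supplied in the same way as for other operations in this document, through the `infer` attribute of the operation. The following is only an illustrative sketch; the assumption that the block keeps the batch and spatial dimensions and produces a fixed, hard-coded number of output channels must be adjusted for a real block:
-```python
-import numpy as np
-from mo.graph.graph import Node
-from mo.ops.op import Op
-
-class InceptionBlock(Op):
-    op = "InceptionBlock"
-    enabled = True
-    def __init__(self, graph, attrs):
-        super().__init__(graph, attrs, {
-            'type': __class__.op,
-            'op': __class__.op,
-            'infer': __class__.infer,
-        })
-
-    @staticmethod
-    def infer(node: Node):
-        # illustrative only: keep batch and spatial dimensions of the first input,
-        # set the channel dimension to a hypothetical hard-coded value
-        output_shape = np.copy(node.in_node(0).shape)
-        output_shape[-1] = 384  # assumed number of output channels of the block
-        node.out_node(0).shape = output_shape
-```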
-
-Run the Model Optimizer with the regular command line parameters, the path to the model file and input shape (if necessary), and the parameter `--tensorflow_use_custom_operations_config` pointing to the created configuration file. Model Optimizer generates an Intermediate Representation `.xml` file with three sequential layers of type `InceptionBlock`, like in the following example:
-```xml
-<layer id="..." name="..." precision="FP32" type="InceptionBlock">
-	<input>
-		<port id="0">
-			<dim>1</dim>
-			<dim>384</dim>
-			<dim>35</dim>
-			<dim>35</dim>
-		</port>
-	</input>
-	...
-</layer>
-```
-The implementation of the sub-graph replacement by scope with a single layer is complete. The next subsection explains
-how Model Optimizer replaces sub-graph identified by start/end nodes (`points`) with another sub-graph.
-
-### Replace Sub-graph of Operations Using Points
-In this scenario, the user defines the sub-graph for the matching algorithm via a set of "start" and "end" nodes.
-Given this set, the Model Optimizer performs the following steps:
-1. Starts graph traversal from every _start_ node following the direction of the graph edges.
-The search stops at _end_ nodes or at nodes without further children. All visited nodes are added to the matched sub-graph.
-2. Starts another graph traversal from each non-start node of the sub-graph, that is, every node except the nodes from the "start" set.
-In this step the edges are traversed in the opposite direction. All newly visited nodes are added to the
- matched sub-graph. This step is needed to add the nodes required for calculating values of internal nodes of the
- matched sub-graph.
-3. Checks that all "end" nodes were reached from the "start" nodes. If not, it exits with an error.
-4. Checks that there are no "Placeholder" operations among the added nodes. If there are, then some side branch of
- the sub-graph (added in step 2) depends on inputs of the network. Such a configuration is not correct, so it exits with an error.
-
-This algorithm finds all nodes "between" the start and end nodes. In addition, the nodes needed for calculating values of non-input nodes of the
-matched sub-graph produce _constant_ values, because they do not depend on the inputs of the network.
-**This sub-graph matching has a limitation: each start node must have only one input**. Therefore, it is not possible
-to specify, for example, a convolution node as an input, because it has two inputs: the data tensor and the tensor with weights.
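-
-The following toy sketch (not the actual Model Optimizer implementation) illustrates the two traversals described above using networkx; it returns the set of matched node names:
-```python
-import networkx as nx
-
-def match_between_points(graph: nx.MultiDiGraph, start_nodes: set, end_nodes: set) -> set:
-    # step 1: forward traversal from every start node, stopping at end nodes
-    matched = set()
-    stack = list(start_nodes)
-    while stack:
-        node = stack.pop()
-        if node in matched:
-            continue
-        matched.add(node)
-        if node not in end_nodes:
-            stack.extend(graph.successors(node))
-    # step 2: backward traversal from every non-start matched node to pick up
-    # constant side branches that feed the matched sub-graph
-    stack = [node for node in matched if node not in start_nodes]
-    while stack:
-        node = stack.pop()
-        for pred in graph.predecessors(node):
-            if pred not in matched:
-                matched.add(pred)
-                stack.append(pred)
-    return matched
-```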
-
-For an example of replacement with points, please refer to the case study of the
-[conversion for the SSD models, created with TensorFlow Object Detection API](TensorFlow_SSD_ObjectDetection_API.md).
+The document has been deprecated. Refer to the [Model Optimizer Extensibility](Subgraph_Replacement_Model_Optimizer.md)
+for the up-to-date documentation.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md b/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md
deleted file mode 100644
index 482cb1545abf97..00000000000000
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md
+++ /dev/null
@@ -1,449 +0,0 @@
-# Converting Faster R-CNN models, created with TensorFlow Object Detection API {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_TensorFlow_Faster_RCNN_ObjectDetection_API}
-
-This is a deprecated page. Consider reading [this](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md) page, which describes the new approach to converting Object Detection API models that gives results closer to TensorFlow inference.
-
-## Converting models created with TensorFlow Object Detection API version equal or higher than 1.6.0
-This chapter describes how to convert selected Faster R-CNN models from the TensorFlow Object Detection API zoo of version 1.6.0 or higher. The full list of supported models is provided in the table below. Note that only batch size 1 is currently supported. The only Inference Engine plugin supporting inference of these topologies is the CPU plugin.
-
-The Faster R-CNN models contain several building blocks similar to those in SSD models, so it is highly recommended to read the chapter about [enabling TensorFlow Object Detection API SSD models](TensorFlow_SSD_ObjectDetection_API.md) first. Detailed information about Faster R-CNN topologies is provided [here](https://arxiv.org/abs/1506.01497).
-
-The TensorFlow network consists of a number of big blocks grouped by scope:
-
-* `Preprocessor` performs scaling/resizing of the image and converts input data to [0, 1] interval. Has two outputs: the first one is modified input image and the second one is a constant tensor with shape (batch_size, 3) and values (resized_image_height, resized_image_width, 3).
-
-* `FirstStageFeatureExtractor` is a backbone feature extractor.
-
-* `FirstStageBoxPredictor` calculates boxes and classes predictions.
-
-* `GridAnchorGenerator` generates anchors coordinates.
-
-* `ClipToWindow` crops anchors to the resized image size.
-
-* `Decode` decodes coordinates of boxes using anchors and data from the `FirstStageBoxPredictor`.
-
-* `BatchMultiClassNonMaxSuppression` performs non maximum suppression.
-
-* `map` scales coordinates of boxes to [0, 1] interval by dividing coordinates by (resized_image_height, resized_image_width).
-
-* `map_1` scales coordinates from [0, 1] interval to resized image sizes.
-
-* `SecondStageFeatureExtractor` is a feature extractor for predicted Regions of interest (ROIs).
-
-* `SecondStageBoxPredictor` refines box coordinates according to the `SecondStageFeatureExtractor` output.
-
-* `SecondStagePostprocessor` is the DetectionOutput layer performing final box predictions.
-
-### Sub-graph replacements
-There are three sub-graph replacements defined in the `extensions/front/tf/legacy_faster_rcnn_support.json` used to convert these models:
-
-* the first one replaces the `Preprocessor` block. The implementation of this replacer is in the `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py`
-
-* the second one replaces a number of blocks in the graph, including `GridAnchorGenerator`, `ClipToWindow`, `Decode`, `BatchMultiClassNonMaxSuppression`, `Tile`, `Tile_1` and `map`, with Proposal and ROIPooling layers and some additional layers to pre-process input data
-
-* the third one replaces `SecondStagePostprocessor` with a DetectionOutput layer.
-
-The second replacer is defined using the following configuration that matches sub-graph by points:
-
-```json
- {
- "custom_attributes": {
- "nms_threshold": 0.7,
- "feat_stride": 16,
- "max_proposals": 100,
- "anchor_base_size": 256,
- "anchor_scales": [0.25, 0.5, 1.0, 2.0],
- "anchor_aspect_ratios": [0.5, 1.0, 2.0],
- "roi_spatial_scale": 0.0625
- },
- "id": "TFObjectDetectionAPIFasterRCNNProposalAndROIPooling",
- "include_inputs_to_sub_graph": true,
- "include_outputs_to_sub_graph": true,
- "instances": {
- "end_points": [
- "CropAndResize",
- "map_1/TensorArrayStack/TensorArrayGatherV3",
- "map_1/while/strided_slice/Enter",
- "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3"
- ],
- "start_points": [
- "FirstStageBoxPredictor/concat",
- "FirstStageBoxPredictor/concat_1",
- "GridAnchorGenerator/Identity",
- "Shape",
- "CropAndResize"
- ]
- },
- "match_kind": "points"
- }
-```
-
-The `start_points` list contains the following nodes:
-
-* `FirstStageBoxPredictor/concat` node produces box coordinates predictions.
-
-* `FirstStageBoxPredictor/concat_1` node produces classes predictions which will be used for the ROIs
-
-* `GridAnchorGenerator/Identity` node produces anchors coordinates.
-
-* `Shape` and `CropAndResize` nodes are specified as inputs to correctly isolate the required sub-graph. Refer to the [chapter](Subgraph_Replacement_Model_Optimizer.md) for more information about replacements by points.
-
-The `end_points` list contains the following nodes:
-
-* `CropAndResize` is the node that performs ROI pooling operation.
-
-* `map_1/TensorArrayStack/TensorArrayGatherV3`, `map_1/while/strided_slice/Enter` and `BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3` are specified to correctly isolate the sub-graph.
-
-The `custom_attributes` dictionary contains attributes where most values are taken from the topology-specific configuration file `samples/configs/faster_rcnn_*.config` of the [TensorFlow Object Detection API repository](https://github.com/tensorflow/models/tree/master/research/object_detection):
-
-* `nms_threshold` is the value of the `first_stage_nms_iou_threshold` parameter.
-
-* `feat_stride` is the value of the `height_stride` and `width_stride` parameters. Inference Engine supports the case when these two values are equal, which is why the replacement configuration file contains just one parameter.
-
-* `max_proposals` is the value of the `max_total_detections` parameter which is a maximum number of proposal boxes from the Proposal layer and detected boxes.
-
-* `anchor_base_size` is the base size of the generated anchor. 256 is the default value for this parameter, and it is not specified in the configuration file.
-
-* `anchor_scales` is the value of the `scales` attribute.
-
-* `anchor_aspect_ratios` is the value of the `aspect_ratios` attribute.
-
-* `roi_spatial_scale` is needed for the Inference Engine ROIPooling layer. It is the default value that is not actually used.
-
-The identifier for this replacer is `TFObjectDetectionAPIFasterRCNNProposalAndROIPooling`. The Python implementation of this replacer is in the file `/deployment_tools/model_optimizer/extensions/front/tf/FasterRCNNs.py`.
-
-The first four functions of the replacer class are the following:
-
-```python
-class TFObjectDetectionAPIFasterRCNNProposalAndROIPooling(FrontReplacementFromConfigFileSubGraph):
- """
- This class replaces sub-graph of operations with Proposal and ROIPooling layers and additional layers transforming
- tensors from layout of TensorFlow to layout required by Inference Engine.
- Refer to comments inside the function for more information about performed actions.
- """
- replacement_id = 'TFObjectDetectionAPIFasterRCNNProposalAndROIPooling'
-
- def run_after(self):
- return [PreprocessorReplacement]
-
- def run_before(self):
- return [SecondStagePostprocessorReplacement]
-
- def output_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict):
- return {match.output_node(0)[0].id: new_sub_graph['roi_pooling_node'].id}
-
- def nodes_to_remove(self, graph: nx.MultiDiGraph, match: SubgraphMatch):
- new_list = match.matched_nodes_names().copy()
- # do not remove nodes that produce box predictions and class predictions
- new_list.remove(match.single_input_node(0)[0].id)
- new_list.remove(match.single_input_node(1)[0].id)
- return new_list
-```
-
-The function `run_after` returns a list of Python classes inherited from one of the replacer classes (`FrontReplacementOp`, `FrontReplacementPattern`, `FrontReplacementFromConfigFileSubGraph` etc.) that the current sub-graph replacement class must be run after. In this case, the replacer must be run after the `Preprocessor` block is removed by the `PreprocessorReplacement` replacer. In a similar way, the `run_before` function is used to tell Model Optimizer that this replacer must be executed before `SecondStagePostprocessorReplacement`.
-
-The `output_edges_match` function describes matching between the output nodes of the sub-graph before replacement and after. In this case the only needed output node of the sub-graph is the `CropAndResize` node which is identified with `match.output_node(0)[0]`. The new output node which is created in the `generate_sub_graph` function is identified with `new_sub_graph['roi_pooling_node']`.
-
-The `nodes_to_remove` function takes the default list of nodes to be removed, which contains all matched nodes, and removes from it the two input nodes identified with `match.single_input_node(0)[0]` and `match.single_input_node(1)[0]`. These nodes will be connected as inputs to new nodes generated in the `generate_sub_graph` function, so they should not be removed.
-
-The code generating new sub-graph is the following:
-
-```python
- def generate_sub_graph(self, graph: nx.MultiDiGraph, match: SubgraphMatch):
- log.debug('TFObjectDetectionAPIFasterRCNNProposal: matched_nodes = {}'.format(match.matched_nodes_names()))
-
- config_attrs = match.custom_replacement_desc.custom_attributes
- nms_threshold = config_attrs['nms_threshold']
- feat_stride = config_attrs['feat_stride']
- max_proposals = config_attrs['max_proposals']
- anchor_base_size = config_attrs['anchor_base_size']
- roi_spatial_scale = config_attrs['roi_spatial_scale']
- proposal_ratios = config_attrs['anchor_aspect_ratios']
- proposal_scales = config_attrs['anchor_scales']
- anchors_count = len(proposal_ratios) * len(proposal_scales)
-```
-
-These lines get parameters defined in the sub-graph replacement configuration file and calculate initial anchors count.
-
-```python
- # get the ROIPool size from the CropAndResize which performs the same action
- if 'CropAndResize' not in graph.nodes():
- raise Error('Failed to find node with name "CropAndResize" in the topology. Probably this is not Faster'
- ' RCNN topology or it is not supported')
- roi_pool_size = Node(graph, 'CropAndResize').in_node(3).value[0]
-```
-
-The code above gets the ROI Pooling spatial output dimension size as a value from the fourth argument of the node with name `CropAndResize`.
-
-```python
- # Convolution/matmul node that produces classes predictions
- # Permute result of the tensor with classes permissions so it will be in a correct layout for Softmax
- predictions_node = match.single_input_node(1)[0].in_node(0).in_node(0)
- permute_predictions_op = Permute(graph, {'order': np.array([0, 2, 3, 1])})
- permute_predictions_node = permute_predictions_op.create_node([], dict(name=predictions_node.name + '/Permute_'))
- insert_node_after(predictions_node, permute_predictions_node, 0)
-
- reshape_classes_op = Reshape(graph, {'dim': np.array([0, -1, 2])})
- reshape_classes_node = reshape_classes_op.create_node([permute_predictions_node],
- dict(name='Reshape_FirstStageBoxPredictor_Class_'))
- update_attrs(reshape_classes_node, 'shape_attrs', 'dim')
-
- softmax_conf_op = Softmax(graph, {'axis': 1})
- softmax_conf_node = softmax_conf_op.create_node([reshape_classes_node],
- dict(name='FirstStageBoxPredictor_SoftMax_Class_'))
-```
-
-The output with class predictions from the `FirstStageBoxPredictor` is generated with a convolution operation. The convolution output data layout in TensorFlow is NHWC, while Inference Engine uses the NCHW layout. Model Optimizer by default converts the weights of TensorFlow convolutions to produce an output tensor in the NCHW layout required by Inference Engine. The issue arises because the class predictions tensor is passed through the Softmax operation to produce class probabilities. The Inference Engine Softmax is performed over the fastest-changing dimension, which is 'W' in Inference Engine. Thus, the softmax operation would be performed over the wrong dimension after conversion of the convolution node producing class predictions. The solution is to add Permute and Reshape operations to prepare the input data for Softmax. The Reshape operation is required to make the size of the fastest-changing dimension equal to 2, because there are 2 classes being predicted: background and foreground.
-
-Another issue is that the layout of elements in the predicted classes tensor differs between TensorFlow and the Inference Engine Proposal layer requirements. In TensorFlow the tensor has the virtual layout [N, H, W, num_anchors, num_classes], while the Inference Engine Proposal layer requires the virtual layout [N, num_classes, num_anchors, H, W]. Thus, it is necessary to reshape, permute and then reshape again the output from the Softmax to the shape required by the Proposal layer:
-
-```python
- reshape_softmax_op = Reshape(graph, {'dim': np.array([1, anchors_count, 2, -1])})
- reshape_softmax_node = reshape_softmax_op.create_node([softmax_conf_node], dict(name='Reshape_Softmax_Class_'))
- update_attrs(reshape_softmax_node, 'shape_attrs', 'dim')
-
- permute_reshape_softmax_op = Permute(graph, {'order': np.array([0, 1, 3, 2])})
- permute_reshape_softmax_node = permute_reshape_softmax_op.create_node([reshape_softmax_node],
- dict(name='Permute_'))
-
- # implement custom reshape infer function because we need to know the input convolution node output dimension
- # sizes but we can know it only after partial infer
- reshape_permute_op = Reshape(graph, {'dim': np.ones([4]), 'anchors_count': anchors_count,
- 'conv_node': predictions_node})
- reshape_permute_op.attrs['old_infer'] = reshape_permute_op.attrs['infer']
- reshape_permute_op.attrs['infer'] = __class__.classes_probabilities_reshape_shape_infer
- reshape_permute_node = reshape_permute_op.create_node([permute_reshape_softmax_node],
- dict(name='Reshape_Permute_Class_'))
- update_attrs(reshape_permute_node, 'shape_attrs', 'dim')
-```
-
-The Proposal layer has 3 inputs: class probabilities, box predictions and the input shape of the image. The first two tensors are ready, so it is only necessary to create a Const operation that produces the desired third input tensor.
-
-```python
- # create constant input with the image height, width and scale H and scale W (if present) required for Proposal
- const_value = np.array([[input_height, input_width, 1]], dtype=np.float32)
- const_op = Const(graph, dict(value=const_value, shape=const_value.shape))
- const_node = const_op.create_node([], dict(name='Proposal_const_image_size_'))
-```
-
-Now add the Proposal layer:
-
-```python
-
- proposal_op = ProposalOp(graph, dict(min_size=10, framework='tensorflow', box_coordinate_scale=10,
- box_size_scale=5, post_nms_topn=max_proposals, feat_stride=feat_stride,
- ratio=proposal_ratios, scale=proposal_scales, base_size=anchor_base_size,
- pre_nms_topn=2**31 - 1,
- nms_thresh=nms_threshold))
- proposal_node = proposal_op.create_node([reshape_permute_node,
- match.single_input_node(0)[0].in_node(0).in_node(0),
- const_node],
- dict(name=proposal_op.attrs['type'] + '_'))
-```
-
-The box coordinates in TensorFlow are in the "YXYX" layout, while Inference Engine uses the "XYXY" layout, so it is necessary to swap the coordinates produced by the Proposal layer. This is implemented with the help of a convolution node with a special filter of size [5, 5]:
-
-```python
- proposal_reshape_4d_op = Reshape(graph, {'dim': np.array([max_proposals, 1, 1, 5])})
- proposal_reshape_4d_node = proposal_reshape_4d_op.create_node([proposal_node], dict(name="reshape_4d_"))
- update_attrs(proposal_reshape_4d_node, 'shape_attrs', 'dim')
-
- # create convolution node to swap X and Y coordinates in the proposals
- conv_filter_const_data = np.array(np.array([[1, 0, 0, 0, 0],
- [0, 0, 1, 0, 0],
- [0, 1, 0, 0, 0],
- [0, 0, 0, 0, 1],
- [0, 0, 0, 1, 0]],
- dtype=np.float32).reshape([1, 1, 5, 5]), dtype=np.float32)
- conv_filter_const_op = Const(graph, dict(value=conv_filter_const_data, spatial_dims=np.array([2, 3])))
- conv_filter_const_node = conv_filter_const_op.create_node([], dict(name="conv_weights"))
-
- conv_op = Op(graph, {
- 'op': 'Conv2D',
- 'bias_addable': False,
- 'spatial_dims': np.array([1, 2]),
- 'channel_dims': np.array([3]),
- 'batch_dims': np.array([0]),
- 'pad': None,
- 'pad_spatial_shape': None,
- 'input_feature_channel': 2,
- 'output_feature_channel': 2,
- 'output_shape': [max_proposals, 1, 1, 5],
- 'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
- 'stride': np.array([1, 1, 1, 1]),
- 'type': 'Convolution',
- 'group': None,
- 'layout': 'NHWC',
- 'infer': __class__.fake_conv_shape_infer})
- predictions_node = conv_op.create_node([proposal_reshape_4d_node, conv_filter_const_node], dict(name="conv_"))
- update_ie_fields(graph.node[predictions_node.id])
-
- proposal_reshape_2d_op = Reshape(graph, {'dim': np.array([max_proposals, 5])})
- proposal_reshape_2d_node = proposal_reshape_2d_op.create_node([predictions_node], dict(name="reshape_2d_"))
- # set specific name for this Reshape operation so we can use it in the DetectionOutput replacer
- proposal_reshape_2d_node['name'] = 'swapped_proposals'
-```
-
-The ROIPooling layer in TensorFlow is implemented with an operation called `CropAndResize` with bi-linear filtration. The Inference Engine implementation of the ROIPooling layer with bi-linear filtration requires the input box coordinates to be scaled to the [0, 1] interval. Adding an elementwise multiplication of the box coordinates solves this issue:
-
-```python
- # the TF implementation of Proposal with bi-linear filtration need proposals scaled by image size
- proposal_scale_const = np.array([1.0, 1 / input_height, 1 / input_width, 1 / input_height, 1 / input_width],
- dtype=np.float32)
- proposal_scale_const_op = Const(graph, dict(value=proposal_scale_const, shape=proposal_scale_const.shape))
- proposal_scale_const_node = proposal_scale_const_op.create_node([], dict(name='Proposal_scale_const_'))
-
- scale_proposals_op = Eltwise(graph, {'operation': 'mul'})
- scale_proposals_node = scale_proposals_op.create_node([proposal_reshape_2d_node, proposal_scale_const_node],
- dict(name='scale_proposals_'))
-```
-
-The last step is to create the ROIPooling node with 2 inputs: the identified feature maps from the `FirstStageFeatureExtractor` and the scaled output of the Proposal layer:
-
-```python
- feature_extractor_output_nodes = scope_output_nodes(graph, 'FirstStageFeatureExtractor')
- if len(feature_extractor_output_nodes) != 1:
- raise Error("Failed to determine FirstStageFeatureExtractor output node to connect it to the ROIPooling."
- "Found the following nodes: {}".format([node.name for node in feature_extractor_output_nodes]))
-
- roi_pooling_op = ROIPooling(graph, dict(method="bilinear", framework="tensorflow",
- pooled_h=roi_pool_size, pooled_w=roi_pool_size,
- spatial_scale=roi_spatial_scale))
- roi_pooling_node = roi_pooling_op.create_node([feature_extractor_output_nodes[0], scale_proposals_node],
- dict(name='ROI_Pooling_'))
-
- return {'roi_pooling_node': roi_pooling_node}
-```
-
-There are two additional methods implemented in the replacer class:
-
-* The `fake_conv_shape_infer` is a simplistic infer function for the convolution that permutes X and Y coordinates of the Proposal output. It avoids setting a lot of internal attributes required for proper shape inference.
-
-* The `classes_probabilities_reshape_shape_infer` function is used to update the output dimensions of the reshape operation. The output spatial dimensions depend on the convolution output spatial dimensions, so they are not known until the shape inference pass, which is performed after this sub-graph replacement class. Therefore, this custom infer function is called instead of the default Reshape shape inference function: it updates the required attribute `dim` of the node with the convolution output spatial dimensions, which are known at the time of calling this inference function, and then calls the default Reshape inference function.
-
-```python
- @staticmethod
- def fake_conv_shape_infer(node: Node):
- node.out_node(0).shape = node.in_node(0).shape
- # call functions to update internal attributes required for correct IR generation
- mark_input_bins(node)
- assign_dims_to_weights(node.in_node(1), [0, 1], node.input_feature_channel, node.output_feature_channel, 4)
-
- @staticmethod
- def classes_probabilities_reshape_shape_infer(node: Node):
- # now we can determine the reshape dimensions from Convolution node
- conv_node = node.conv_node
- conv_output_shape = conv_node.out_node().shape
-
- # update desired shape of the Reshape node
- node.dim = np.array([0, conv_output_shape[1], conv_output_shape[2], node.anchors_count * 2])
- node.old_infer(node)
-```
-
-The second replacer defined in the sub-graph replacement configuration file replaces the `SecondStagePostprocessor` block and is defined using scope:
-
-```json
- {
- "custom_attributes": {
- "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
- "confidence_threshold": 0.01,
- "keep_top_k": 300,
- "nms_threshold": 0.6,
- "pad_mode": "caffe.ResizeParameter.CONSTANT",
- "resize_mode": "caffe.ResizeParameter.WARP",
- "max_detections_per_class": 100,
- "num_classes": 90
- },
- "id": "SecondStagePostprocessorReplacement",
- "inputs": [
- [
- {
- "node": "Reshape$",
- "port": 0
- }
- ],
- [
- {
- "node": "Reshape_1$",
- "port": 0
- }
- ],
- [
- {
- "node": "ExpandDims$",
- "port": 0
- }
- ]
- ],
- "instances": [
- ".*SecondStagePostprocessor/"
- ],
- "match_kind": "scope",
- "outputs": [
- {
- "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$",
- "port": 0
- }
- ]
- }
-```
-
-The replacement code is similar to the `SecondStagePostprocessor` replacement for the SSD topologies. There are two major differences:
-
-* The tensor with bounding boxes does not contain locations for class 0 (the background class), but the Inference Engine DetectionOutput layer requires them. A Const node with some dummy values is created and concatenated with the tensor.
-
-* The priors tensor is not constant like in SSDs so the bounding boxes tensor must be scaled with variances [0.1, 0.1, 0.2, 0.2].
-
-The differences described above are resolved with the following code:
-
-```python
- # TF produces locations tensor without boxes for background.
- # Inference Engine DetectionOutput layer requires background boxes so we generate them with some values
- # and concatenate with locations tensor
- fake_background_locs_blob = np.tile([[[1, 1, 2, 2]]], [max_detections_per_class, 1, 1])
- fake_background_locs_const_op = Const(graph, dict(value=fake_background_locs_blob,
- shape=fake_background_locs_blob.shape))
- fake_background_locs_const_node = fake_background_locs_const_op.create_node([])
-
- reshape_loc_op = Reshape(graph, {'dim': np.array([max_detections_per_class, num_classes, 4])})
- reshape_loc_node = reshape_loc_op.create_node([match.single_input_node(0)[0].in_node(0)],
- dict(name='Reshape_loc_'))
-
- concat_loc_op = Concat(graph, {'axis': 1})
- concat_loc_node = concat_loc_op.create_node([fake_background_locs_const_node, reshape_loc_node],
- dict(name='Concat_fake_loc_'))
-
- # blob with variances
- variances_blob = np.array([0.1, 0.1, 0.2, 0.2])
- variances_const_op = Const(graph, dict(value=variances_blob, shape=variances_blob.shape))
- variances_const_node = variances_const_op.create_node([])
-
- # reshape locations tensor to 2D so it could be passed to Eltwise which will be converted to ScaleShift
- reshape_loc_2d_op = Reshape(graph, {'dim': np.array([-1, 4])})
- reshape_loc_2d_node = reshape_loc_2d_op.create_node([concat_loc_node], dict(name='reshape_locs_2d_'))
-
- # element-wise multiply locations with variances
- eltwise_locs_op = Eltwise(graph, {'operation': 'mul'})
- eltwise_locs_node = eltwise_locs_op.create_node([reshape_loc_2d_node, variances_const_node],
- dict(name='scale_locs_'))
-```
-
-### Example of Model Optimizer Command-Line for TensorFlow's Faster R-CNNs
-The final command line to convert Faster R-CNNs from the TensorFlow* Object Detection Zoo is the following:
-
-```sh
-./mo.py --input_model= --output=detection_boxes,detection_scores,num_detections --tensorflow_use_custom_operations_config extensions/front/tf/legacy_faster_rcnn_support.json
-```
-
-Note that there are minor changes that should be made to the sub-graph replacement configuration file `/deployment_tools/model_optimizer/extensions/front/tf/legacy_faster_rcnn_support.json` before converting a particular Faster R-CNN topology. Refer to the table below.
-
-### Sub-Graph Replacement Configuration File Parameters to Convert Different Faster R-CNN Models
-|Model Name | Configuration File Changes|
-|:----|:----:|
-| faster_rcnn_inception_v2_coco | None
-| faster_rcnn_resnet50_coco | None
-| faster_rcnn_resnet50_lowproposals_coco | None
-| faster_rcnn_resnet101_coco | None
-| faster_rcnn_resnet101_lowproposals_coco | None
-| faster_rcnn_inception_resnet_v2_atrous_coco | "feat_stride: 8"
-| faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco| "feat_stride: 8"
-
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md b/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md
deleted file mode 100644
index b43d5de15e21aa..00000000000000
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md
+++ /dev/null
@@ -1,339 +0,0 @@
-# (Deprecated) Case Study: Converting SSD Models Created with TensorFlow* Object Detection API {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_TensorFlow_SSD_ObjectDetection_API}
-
-This is a deprecated page. Consider reading [this](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md) page, which describes the new approach to converting Object Detection API models that gives results closer to TensorFlow inference.
-
-## Converting Models Created with TensorFlow Object Detection API Version Prior to 1.6.0
-
-As explained in the [Sub-graph Replacement in Model Optimizer](Subgraph_Replacement_Model_Optimizer.md) section, there are multiple
-ways to set up the sub-graph matching. In this example, we are focusing on defining the sub-graph via a set of
-"start" and "end" nodes.
-The result of matching is two buckets of nodes:
-* Nodes "between" start and end nodes.
-* Nodes connected to the first list, but just on the constant path (e.g. these nodes are not connected to the inputs of the entire graph).
-
-Let's take a closer look at the SSD models from the TensorFlow* detection model
-zoo:
-[SSD MobileNet](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2017_11_17.tar.gz) and
-[SSD InceptionV2](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2017_11_17.tar.gz).
-
-* Nodes "between" start and end nodes
-* Nodes connected to the first list, but just on the constant path (for example, these nodes are not connected to the inputs of the entire graph). Let's look closer to the SSD models from the TensorFlow\* detection model zoo : [SSD MobileNet](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2017_11_17.tar.gz) and [SSD InceptionV2](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2017_11_17.tar.gz).
-
-A distinct layer of any SSD topology is the `DetectionOutput` layer. This layer is implemented with dozens of primitive operations in TensorFlow, while in Inference Engine, it is one [layer](../../../ops/opset.md). Thus, to convert an SSD model from TensorFlow, the Model Optimizer should replace the entire sub-graph of operations that implement the `DetectionOutput` layer with a single well-known `DetectionOutput` node.
-
-The Inference Engine `DetectionOutput` layer consumes three tensors in the following order:
-
-1. Tensor with locations of bounding boxes
-2. Tensor with confidences for each bounding box
-3. Tensor with prior boxes (anchors in TensorFlow terminology)
-
-`DetectionOutput` layer produces one tensor with seven numbers for each actual detection. There are more output tensors in the TensorFlow Object Detection API, but the values in them are consistent with the Inference Engine ones.
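-
-For reference, a common way to interpret this output (assuming the usual `[1, 1, N, 7]` shape with rows of `[image_id, label, confidence, x_min, y_min, x_max, y_max]` and coordinates normalized to [0, 1]) is sketched below; the threshold value is arbitrary:
-```python
-import numpy as np
-
-def parse_detections(output: np.ndarray, threshold: float = 0.5):
-    detections = []
-    for image_id, label, conf, x_min, y_min, x_max, y_max in output.reshape(-1, 7):
-        if image_id < 0:  # an image_id of -1 marks the end of valid detections
-            break
-        if conf >= threshold:
-            detections.append((int(label), float(conf), (x_min, y_min, x_max, y_max)))
-    return detections
-```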
-
-The difference with [other examples](Subgraph_Replacement_Model_Optimizer.md) is that here the `DetectionOutput` sub-graph is replaced with a new sub-graph (not a single layer).
-
-Look at sub-graph replacement configuration file `/deployment_tools/model_optimizer/extensions/front/tf/legacy_ssd_support.json` that is used to enable two models listed above:
-```json
-[
- {
- "custom_attributes": {
- "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
- "confidence_threshold": 0.01,
- "keep_top_k": 200,
- "nms_threshold": 0.45,
- "pad_mode": "caffe.ResizeParameter.CONSTANT",
- "resize_mode": "caffe.ResizeParameter.WARP"
- },
- "id": "TFObjectDetectionAPIDetectionOutput",
- "include_inputs_to_sub_graph": true,
- "include_outputs_to_sub_graph": true,
- "instances": {
- "end_points": [
- "detection_boxes",
- "detection_scores",
- "num_detections"
- ],
- "start_points": [
- "Postprocessor/Shape",
- "Postprocessor/Slice",
- "Postprocessor/ExpandDims",
- "Postprocessor/Reshape_1"
- ]
- },
- "match_kind": "points"
- },
- {
- "custom_attributes": {
- },
- "id": "PreprocessorReplacement",
- "inputs": [
- [
- {
- "node": "map/Shape$",
- "port": 0
- },
- {
- "node": "map/TensorArrayUnstack/Shape$",
- "port": 0
- },
- {
- "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$",
- "port": 2
- }
- ]
- ],
- "instances": [
- ".*Preprocessor/"
- ],
- "match_kind": "scope",
- "outputs": [
- {
- "node": "sub$",
- "port": 0
- },
- {
- "node": "map/TensorArrayStack_1/TensorArrayGatherV3$",
- "port": 0
- }
- ]
- }
-]
-```
-
-**Key lines**:
-
-* Lines 3-10 define static attributes that will be saved to the Intermediate Representation `.xml` file for `DetectionOutput` layer.
-
-* Lines 12 and 13 define values for attributes that should be always set to "true" for this release of the Model Optimizer. These two attributes are specific for sub-graph match by points only.
-
-* Lines 14-26 define one instance of the sub-graph to be matched. This is an important difference between sub-graph matching by scope and by points: several instances can be specified for matching by scope, but matching with points allows specifying just one instance. So the full node names (not regular expressions, as in the case of matching by scope) are specified in the `instances` dictionary.
-
-The second sub-graph replacer, with identifier `PreprocessorReplacement`, is used to remove the `Preprocessor` block from the graph. The replacer removes all nodes from this scope except the nodes performing mean value subtraction and scaling (if applicable). The implementation of the replacer is in the `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py` file.
-
-Now let's analyze the structure of the topologies generated with the Object Detection API. There are several blocks in the graph performing particular tasks:
-
-* `Preprocessor` block resizes, scales and subtracts mean values from the input image.
-
-* `FeatureExtractor` block is a [MobileNet](https://arxiv.org/abs/1704.04861) or other backbone to extract features.
-
-* `MultipleGridAnchorGenerator` block creates initial bounding boxes locations (anchors).
-
-* `Postprocessor` block acts as a `DetectionOutput` layer. So we need to replace `Postprocessor` block with `DetectionOutput` layer. It is necessary to add all input nodes of the `Postprocessor` scope to the list `start_points`. Consider inputs of each of these nodes:
-
- * `Postprocessor/Shape` consumes tensor with locations.
- * `Postprocessor/Slice` consumes tensor with confidences.
- * `Postprocessor/ExpandDims` consumes tensor with prior boxes.
- * `Postprocessor/Reshape_1` consumes tensor with locations similarly to the `Postprocessor/Shape` node. Despite the fact that the last node `Postprocessor/Reshape_1` gets the same tensor as node `Postprocessor/Shape`, it must be explicitly put to the list.
-
-Object Detection API `Postprocessor` block generates output nodes: `detection_boxes`, `detection_scores`, `num_detections`, `detection_classes`.
-
-Now consider the implementation of the sub-graph replacer, available in the `/deployment_tools/model_optimizer/extensions/front/tf/SSDs.py`. The file is rather big, so only some code snippets are used:
-```python
-class PostprocessorReplacement(FrontReplacementFromConfigFileSubGraph):
- replacement_id = 'TFObjectDetectionAPIDetectionOutput'
-```
-
-These lines define the new `PostprocessorReplacement` class inherited from `FrontReplacementFromConfigFileSubGraph`. `FrontReplacementFromConfigFileSubGraph` is designed to replace sub-graph of operations described in the configuration file. There are methods to override for implementing custom replacement logic that we need:
-
-* `generate_sub_graph` performs new sub-graph generation and returns a dictionary where keys are alias names for the nodes and values are Node objects. The dictionary has the same format as the parameter `match` in the `replace_sub_graph` method in the example with the networkx sub-graph isomorphism pattern. This dictionary is passed as an argument to the next three methods, so it should contain entries for the nodes that those functions need.
-
-* `input_edges_match` specifies mapping between input edges to sub-graph before replacement and after replacement. The key of the dictionary is a tuple specifying input tensor of the sub-graph before replacement: sub-graph input node name and input port number for this node. The value for this key is also a tuple specifying the node where this tensor should be attached during replacement: the node name (or alias name of the node) and the input port for this node. If the port number is zero, the parameter could be omitted so the key or value is just a node name (alias). Default implementation of the method returns an empty dictionary, so Model Optimizer does not create new edges.
-
-* `output_edges_match` returns mapping between old output edges of the matched nodes and new sub-graph node and output edge index. The format is similar to the dictionary returned in the `input_edges_match` method. The only difference is that instead of specifying input port numbers for the nodes it is necessary to specify output port number. Of course, this mapping is needed for the output nodes only. Default implementation of the method returns an empty dictionary, so the Model Optimizer does not create new edges.
-
-* `nodes_to_remove` specifies list of nodes that Model Optimizer should remove after sub-graph replacement. Default implementation of the method removes all sub-graph nodes.
-
-Now review the replacer code, considering the details of the `DetectionOutput` layer implementation in the Inference Engine. There are several constraints on the input tensors of the `DetectionOutput` layer:
-
-* The tensor with locations must be of shape `[#batch, #prior_boxes * 4]` or `[#batch, #prior_boxes * 5]` depending on shared locations between different batches or not.
-* The tensor with confidences must be of shape `[#batch, #prior_boxes * #classes]`, and the confidence values must be in the range [0, 1], that is, already passed through a `softmax` layer.
-* The tensor with prior boxes must be of shape `[#batch, 2, #prior_boxes * 4]`. Inference Engine expects that it contains variance values which TensorFlow Object Detection API does not add.
-
-To enable these models, add `Reshape` operations for locations and confidences tensors and update the values for the prior boxes to include the variance constants (they are not there in TensorFlow Object Detection API).
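-
-As a rough illustration of the prior boxes update (not the actual replacer code), variance values could be appended to a prior boxes tensor of shape `[1, 1, #prior_boxes * 4]` like this; the default variance values here are illustrative:
-```python
-import numpy as np
-
-def add_variances_to_priors(priors: np.ndarray, variances=(0.1, 0.1, 0.2, 0.2)) -> np.ndarray:
-    # priors has shape [1, 1, num_priors * 4]; the result has shape [1, 2, num_priors * 4]
-    num_priors = priors.shape[-1] // 4
-    variance_row = np.tile(np.array(variances, dtype=priors.dtype), num_priors).reshape(1, 1, -1)
-    return np.concatenate([priors, variance_row], axis=1)
-```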
-
-Look at the `generate_sub_graph` method:
-```python
-def generate_sub_graph(self, graph: nx.MultiDiGraph, match: SubgraphMatch):
- log.debug('PostprocessorReplacement.generate_sub_graph')
- log.debug('matched_nodes = {}'.format(match.matched_nodes_names()))
- # softmax to be applied to the confidence
- softmax_conf_op = Softmax(graph, {'axis': 2, 'nchw_layout': True})
- softmax_conf_node = softmax_conf_op.add_node(dict(name='DetectionOutput_SoftMax_conf_'))
- # Inference Engine DetectionOutput layer consumes flattened tensors
- # reshape operation to flatten locations tensor
- reshape_loc_op = Reshape(graph, {'dim': np.array([0, -1])})
- reshape_loc_node = reshape_loc_op.add_node(dict(name='DetectionOutput_Reshape_loc_'))
- # Inference Engine DetectionOutput layer consumes flattened tensors
- # reshape operation to flatten confidence tensor
- reshape_conf_op = Reshape(graph, {'dim': np.array([0, -1])})
- reshape_conf_node = reshape_conf_op.add_node(dict(name='DetectionOutput_Reshape_conf_'))
- # create Node object from Op class
- detection_output_op = DetectionOutput(graph, match.custom_replacement_desc.custom_attributes)
- detection_output_op.attrs['old_infer'] = detection_output_op.attrs['infer']
- detection_output_op.attrs['infer'] = __class__.do_infer
- detection_output_node = detection_output_op.add_node(dict(name=detection_output_op.attrs['type'] + '_'))
- # create internal edges of the sub-graph. In this case we add edges to connect input port 0 and 1 of the
- # detection output with output of reshape of locations and reshape of confidence
- create_edge(softmax_conf_node, reshape_conf_node, 0, 0)
- create_edge(reshape_loc_node, detection_output_node, 0, 0)
- create_edge(reshape_conf_node, detection_output_node, 0, 1)
- return {'detection_output_node': detection_output_node, 'reshape_conf_node': softmax_conf_node,
- 'reshape_loc_node': reshape_loc_node}
-```
-The method has two inputs: the graph to operate on and an instance of the `SubgraphMatch` object, which describes the matched sub-graph. The latter class has several useful methods to get a particular input/output node of the sub-graph by input/output index or by node name pattern. Examples of the usage of these methods are given below.
-
-**Key lines**:
-
-* Lines 6 and 7 create a new instance of the `Softmax` operation and the graph Node object corresponding to that operation.
-
-* Lines 11-12 and 16-17 create new instances of the `Reshape` operation to reshape the locations and confidences tensors, respectively.
-
-* Lines 20-23 create a new instance of the `DetectionOutput` operation and the graph Node object corresponding to that operation.
-
-* Lines 27-29 connect the `softmax` node with the `reshape` node and connect the two reshaped locations and confidences tensors with the `DetectionOutput` node.
-
-* Lines 30-31 define a dictionary with aliases for the detection output node and the reshape locations and confidences nodes. These aliases are used in the `input_edges_match` and `output_edges_match` methods.
-
-The `input_edges_match` method is the following:
-```python
-def input_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict):
- locs_consumer_node, locs_consumer_node_port = match.input_nodes(0)[0]
- conf_consumer_node, conf_consumer_node_port = match.input_nodes(1)[0]
- priors_consumer_node, priors_consumer_node_port = match.input_nodes(2)[0]
- # create matching nodes for locations and confidence tensors using simple scheme "old_node_name: new_node_name"
- # which in fact means "(old_node_name, 0): (new_node_name, 0)", while first '0' means old_port and the second
- # zero defines 'new_port'.
- return {locs_consumer_node.id: new_sub_graph['reshape_loc_node'].id,
- conf_consumer_node.id: new_sub_graph['reshape_conf_node'].id,
- priors_consumer_node.id: (new_sub_graph['detection_output_node'].id, 2),
- }
-```
-The method has three parameters: the input `graph`, the `match` object describing the matched sub-graph, and the `new_sub_graph` dictionary with alias names returned from the `generate_sub_graph` method.
-
-**Key lines**:
-
-* Lines 2-4 initialize the Node objects and input ports for the nodes where the input tensors of the sub-graph are consumed. The method `match.input_nodes(ind)` returns a list of tuples where the first element is a Node object and the second is the input port of this node which consumes the ind-th input tensor of the sub-graph. The `input_points` list in the configuration file defines the order of input tensors to the sub-graph. For example, the `locs_consumer_node` object of type Node is the node that consumes the tensor with locations in the port with number `locs_consumer_node_port`.
-
-* Lines 8-11 define a dictionary with the mapping of tensors as described above. Note that the `id` attribute of the Node object contains the name of the node in the graph.
-
-The `output_edges_match` method is the following:
-```python
-def output_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict):
- # the DetectionOutput in IE produces single tensor, but in TF it produces two tensors, so we need to create only
- # one output edge match
- return {match.output_node(0)[0].id: new_sub_graph['detection_output_node'].id}
-```
-
-The method has the same three parameters as the `input_edges_match` method. The returned dictionary contains the mapping of just one tensor, initially produced by the first output node of the sub-graph (which is `detection_boxes` according to the configuration file), to a single output tensor of the created `DetectionOutput` node. In fact, it is possible to use any output node of the initial sub-graph in the mapping, because the sub-graph output nodes are the output nodes of the whole graph (their output is not consumed by any other nodes).
-
-Now, the Model Optimizer knows how to replace the sub-graph. The last step to enable the model is to cut off some parts of the graph that are not needed during inference.
-
-It is necessary to remove the `Preprocessor` block where the image is resized. The Inference Engine does not support dynamic input shapes, so the Model Optimizer must freeze the input image size, and thus resizing of the image is not necessary. This is achieved by the replacer `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py`, which is executed automatically.
-
-There are several `Switch` operations in the `Postprocessor` block without output edges. For example:
-```sh
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond/cond/switch_t
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond/cond/switch_f
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond_1/cond/switch_t
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond_1/cond/switch_f
-```
-
-The Model Optimizer marks these nodes as output nodes of the topology. Because of that, some parts of the `Postprocessor` block are not removed during sub-graph replacement. To fix this issue, it is necessary to specify the output nodes of the graph manually using the `--output` command line parameter.
-
-### Example of Model Optimizer Command-Line for TensorFlow\* SSD
-
-The final command line to convert SSDs from the TensorFlow Object Detection API Zoo is:
-```shell
-./mo_tf.py --input_model= --tensorflow_use_custom_operations_config extensions/front/tf/legacy_ssd_support.json --output="detection_boxes,detection_scores,num_detections"
-```
-
-## Converting MobileNet V2 model created with TensorFlow Object Detection API
-The [MobileNet V2 model](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz) differs from the previous version, so converting the model requires a new sub-graph replacement configuration file and new command line parameters. The major differences are:
-
-* The `Preprocessor` block has two outputs: the pre-processed image and the pre-processed image size.
-* The `Postprocessor` block has one more input (in comparison with models created with TensorFlow Object Detection API
-version 1.6 or lower): the pre-processed image size.
-* Some node names have been changed in the `Postprocessor` block.
-
-The updated sub-graph replacement configuration file `extensions/front/tf/ssd_v2_support.json` reflecting these changes
-is the following:
-
-```json
-[
- {
- "custom_attributes": {
- "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
- "confidence_threshold": 0.01,
- "keep_top_k": 200,
- "nms_threshold": 0.6,
- "pad_mode": "caffe.ResizeParameter.CONSTANT",
- "resize_mode": "caffe.ResizeParameter.WARP"
- },
- "id": "TFObjectDetectionAPIDetectionOutput",
- "include_inputs_to_sub_graph": true,
- "include_outputs_to_sub_graph": true,
- "instances": {
- "end_points": [
- "detection_boxes",
- "detection_scores",
- "num_detections"
- ],
- "start_points": [
- "Postprocessor/Shape",
- "Postprocessor/scale_logits",
- "Postprocessor/ExpandDims",
- "Postprocessor/Reshape_1",
- "Postprocessor/ToFloat"
- ]
- },
- "match_kind": "points"
- },
- {
- "custom_attributes": {
- },
- "id": "PreprocessorReplacement",
- "inputs": [
- [
- {
- "node": "map/Shape$",
- "port": 0
- },
- {
- "node": "map/TensorArrayUnstack/Shape$",
- "port": 0
- },
- {
- "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$",
- "port": 2
- }
- ]
- ],
- "instances": [
- ".*Preprocessor/"
- ],
- "match_kind": "scope",
- "outputs": [
- {
- "node": "sub$",
- "port": 0
- },
- {
- "node": "map/TensorArrayStack_1/TensorArrayGatherV3$",
- "port": 0
- }
- ]
- }
-]
-```
-
-### Example of Model Optimizer Command-Line for TensorFlow SSD MobileNet V2
-The final command line to convert MobileNet SSD V2 from the TensorFlow Object Detection Zoo is the following:
-
-```sh
-./mo_tf.py --input_model= --tensorflow_use_custom_operations_config extensions/front/tf/ssd_v2_support.json --output="detection_boxes,detection_scores,num_detections"
-```
diff --git a/docs/doxygen/assets/bootstrap.bundle.min.js b/docs/doxygen/assets/bootstrap.bundle.min.js
new file mode 100644
index 00000000000000..6952361b1b2a0b
--- /dev/null
+++ b/docs/doxygen/assets/bootstrap.bundle.min.js
@@ -0,0 +1,8 @@
+/*!
+ * Bootstrap v4.4.1 (https://getbootstrap.com/)
+ * Copyright 2011-2019 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+ !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],t):t((e=e||self).bootstrap={},e.jQuery)}(this,function(e,p){"use strict";function i(e,t){for(var n=0;nthis._items.length-1||e<0))if(this._isSliding)p(this._element).one(V.SLID,function(){return t.to(e)});else{if(n===e)return this.pause(),void this.cycle();var i=n=i.clientWidth&&n>=i.clientHeight}),u=0l[e]&&!i.escapeWithReference&&(n=Math.min(h[t],l[e]-("right"===e?h.width:h.height))),Ye({},t,n)}};return c.forEach(function(e){var t=-1!==["left","top"].indexOf(e)?"primary":"secondary";h=ze({},h,u[t](e))}),e.offsets.popper=h,e},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,n=t.popper,i=t.reference,o=e.placement.split("-")[0],r=Math.floor,s=-1!==["top","bottom"].indexOf(o),a=s?"right":"bottom",l=s?"left":"top",c=s?"width":"height";return n[a]r(i[a])&&(e.offsets.popper[l]=r(i[a])),e}},arrow:{order:500,enabled:!0,fn:function(e,t){var n;if(!gt(e.instance.modifiers,"arrow","keepTogether"))return e;var i=t.element;if("string"==typeof i){if(!(i=e.instance.popper.querySelector(i)))return e}else if(!e.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),e;var o=e.placement.split("-")[0],r=e.offsets,s=r.popper,a=r.reference,l=-1!==["left","right"].indexOf(o),c=l?"height":"width",h=l?"Top":"Left",u=h.toLowerCase(),f=l?"left":"top",d=l?"bottom":"right",p=nt(i)[c];a[d]-ps[d]&&(e.offsets.popper[u]+=a[u]+p-s[d]),e.offsets.popper=Xe(e.offsets.popper);var m=a[u]+a[c]/2-p/2,g=ke(e.instance.popper),_=parseFloat(g["margin"+h],10),v=parseFloat(g["border"+h+"Width"],10),y=m-e.offsets.popper[u]-_-v;return y=Math.max(Math.min(s[c]-p,y),0),e.arrowElement=i,e.offsets.arrow=(Ye(n={},u,Math.round(y)),Ye(n,f,""),n),e},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(m,g){if(at(m.instance.modifiers,"inner"))return m;if(m.flipped&&m.placement===m.originalPlacement)return m;var _=Ze(m.instance.popper,m.instance.reference,g.padding,g.boundariesElement,m.positionFixed),v=m.placement.split("-")[0],y=it(v),E=m.placement.split("-")[1]||"",b=[];switch(g.behavior){case Et:b=[v,y];break;case bt:b=yt(v);break;case wt:b=yt(v,!0);break;default:b=g.behavior}return b.forEach(function(e,t){if(v!==e||b.length===t+1)return m;v=m.placement.split("-")[0],y=it(v);var n=m.offsets.popper,i=m.offsets.reference,o=Math.floor,r="left"===v&&o(n.right)>o(i.left)||"right"===v&&o(n.left)o(i.top)||"bottom"===v&&o(n.top)o(_.right),l=o(n.top)o(_.bottom),h="left"===v&&s||"right"===v&&a||"top"===v&&l||"bottom"===v&&c,u=-1!==["top","bottom"].indexOf(v),f=!!g.flipVariations&&(u&&"start"===E&&s||u&&"end"===E&&a||!u&&"start"===E&&l||!u&&"end"===E&&c),d=!!g.flipVariationsByContent&&(u&&"start"===E&&a||u&&"end"===E&&s||!u&&"start"===E&&c||!u&&"end"===E&&l),p=f||d;(r||h||p)&&(m.flipped=!0,(r||h)&&(v=b[t+1]),p&&(E=function(e){return"end"===e?"start":"start"===e?"end":e}(E)),m.placement=v+(E?"-"+E:""),m.offsets.popper=ze({},m.offsets.popper,ot(m.instance.popper,m.offsets.reference,m.placement)),m=st(m.instance.modifiers,m,"flip"))}),m},behavior:"flip",padding:5,boundariesElement:"viewport",flipVariations:!1,flipVariationsByContent:!1},inner:{order:700,enabled:!1,fn:function(e){var t=e.placement,n=t.split("-")[0],i=e.offsets,o=i.popper,r=i.reference,s=-1!==["left","right"].indexOf(n),a=-1===["top","left"].indexOf(n);return 
o[s?"left":"top"]=r[n]-(a?o[s?"width":"height"]:0),e.placement=it(t),e.offsets.popper=Xe(o),e}},hide:{order:800,enabled:!0,fn:function(e){if(!gt(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=rt(e.instance.modifiers,function(e){return"preventOverflow"===e.name}).boundaries;if(t.bottomn.right||t.top>n.bottom||t.rightdocument.documentElement.clientHeight;!this._isBodyOverflowing&&e&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!e&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var e=document.body.getBoundingClientRect();this._isBodyOverflowing=e.left+e.right',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",sanitize:!0,sanitizeFn:null,whiteList:Cn,popperConfig:null},Fn="show",Mn="out",Wn={HIDE:"hide"+Nn,HIDDEN:"hidden"+Nn,SHOW:"show"+Nn,SHOWN:"shown"+Nn,INSERTED:"inserted"+Nn,CLICK:"click"+Nn,FOCUSIN:"focusin"+Nn,FOCUSOUT:"focusout"+Nn,MOUSEENTER:"mouseenter"+Nn,MOUSELEAVE:"mouseleave"+Nn},Un="fade",Bn="show",qn=".tooltip-inner",Kn=".arrow",Qn="hover",Vn="focus",Yn="click",zn="manual",Xn=function(){function i(e,t){if("undefined"==typeof St)throw new TypeError("Bootstrap's tooltips require Popper.js (https://popper.js.org/)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=e,this.config=this._getConfig(t),this.tip=null,this._setListeners()}var e=i.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(e){if(this._isEnabled)if(e){var t=this.constructor.DATA_KEY,n=p(e.currentTarget).data(t);n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),p(e.currentTarget).data(t,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(p(this.getTipElement()).hasClass(Bn))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),p.removeData(this.element,this.constructor.DATA_KEY),p(this.element).off(this.constructor.EVENT_KEY),p(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&p(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===p(this.element).css("display"))throw new Error("Please use show on visible elements");var e=p.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){p(this.element).trigger(e);var n=m.findShadowRoot(this.element),i=p.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!i)return;var o=this.getTipElement(),r=m.getUID(this.constructor.NAME);o.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&p(o).addClass(Un);var s="function"==typeof this.config.placement?this.config.placement.call(this,o,this.element):this.config.placement,a=this._getAttachment(s);this.addAttachmentClass(a);var 
l=this._getContainer();p(o).data(this.constructor.DATA_KEY,this),p.contains(this.element.ownerDocument.documentElement,this.tip)||p(o).appendTo(l),p(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new St(this.element,o,this._getPopperConfig(a)),p(o).addClass(Bn),"ontouchstart"in document.documentElement&&p(document.body).children().on("mouseover",null,p.noop);var c=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,p(t.element).trigger(t.constructor.Event.SHOWN),e===Mn&&t._leave(null,t)};if(p(this.tip).hasClass(Un)){var h=m.getTransitionDurationFromElement(this.tip);p(this.tip).one(m.TRANSITION_END,c).emulateTransitionEnd(h)}else c()}},e.hide=function(e){function t(){n._hoverState!==Fn&&i.parentNode&&i.parentNode.removeChild(i),n._cleanTipClass(),n.element.removeAttribute("aria-describedby"),p(n.element).trigger(n.constructor.Event.HIDDEN),null!==n._popper&&n._popper.destroy(),e&&e()}var n=this,i=this.getTipElement(),o=p.Event(this.constructor.Event.HIDE);if(p(this.element).trigger(o),!o.isDefaultPrevented()){if(p(i).removeClass(Bn),"ontouchstart"in document.documentElement&&p(document.body).children().off("mouseover",null,p.noop),this._activeTrigger[Yn]=!1,this._activeTrigger[Vn]=!1,this._activeTrigger[Qn]=!1,p(this.tip).hasClass(Un)){var r=m.getTransitionDurationFromElement(i);p(i).one(m.TRANSITION_END,t).emulateTransitionEnd(r)}else t();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(e){p(this.getTipElement()).addClass(Ln+"-"+e)},e.getTipElement=function(){return this.tip=this.tip||p(this.config.template)[0],this.tip},e.setContent=function(){var e=this.getTipElement();this.setElementContent(p(e.querySelectorAll(qn)),this.getTitle()),p(e).removeClass(Un+" "+Bn)},e.setElementContent=function(e,t){"object"!=typeof t||!t.nodeType&&!t.jquery?this.config.html?(this.config.sanitize&&(t=In(t,this.config.whiteList,this.config.sanitizeFn)),e.html(t)):e.text(t):this.config.html?p(t).parent().is(e)||e.empty().append(t):e.text(p(t).text())},e.getTitle=function(){var e=this.element.getAttribute("data-original-title");return e=e||("function"==typeof this.config.title?this.config.title.call(this.element):this.config.title)},e._getPopperConfig=function(e){var t=this;return l({},{placement:e,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:Kn},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(e){e.originalPlacement!==e.placement&&t._handlePopperPlacementChange(e)},onUpdate:function(e){return t._handlePopperPlacementChange(e)}},{},this.config.popperConfig)},e._getOffset=function(){var t=this,e={};return"function"==typeof this.config.offset?e.fn=function(e){return e.offsets=l({},e.offsets,{},t.config.offset(e.offsets,t.element)||{}),e}:e.offset=this.config.offset,e},e._getContainer=function(){return!1===this.config.container?document.body:m.isElement(this.config.container)?p(this.config.container):p(document).find(this.config.container)},e._getAttachment=function(e){return Hn[e.toUpperCase()]},e._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(e){if("click"===e)p(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(e){return i.toggle(e)});else if(e!==zn){var 
t=e===Qn?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=e===Qn?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;p(i.element).on(t,i.config.selector,function(e){return i._enter(e)}).on(n,i.config.selector,function(e){return i._leave(e)})}}),this._hideModalHandler=function(){i.element&&i.hide()},p(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var e=typeof this.element.getAttribute("data-original-title");!this.element.getAttribute("title")&&"string"==e||(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(e,t){var n=this.constructor.DATA_KEY;(t=t||p(e.currentTarget).data(n))||(t=new this.constructor(e.currentTarget,this._getDelegateConfig()),p(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusin"===e.type?Vn:Qn]=!0),p(t.getTipElement()).hasClass(Bn)||t._hoverState===Fn?t._hoverState=Fn:(clearTimeout(t._timeout),t._hoverState=Fn,t.config.delay&&t.config.delay.show?t._timeout=setTimeout(function(){t._hoverState===Fn&&t.show()},t.config.delay.show):t.show())},e._leave=function(e,t){var n=this.constructor.DATA_KEY;(t=t||p(e.currentTarget).data(n))||(t=new this.constructor(e.currentTarget,this._getDelegateConfig()),p(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusout"===e.type?Vn:Qn]=!1),t._isWithActiveTrigger()||(clearTimeout(t._timeout),t._hoverState=Mn,t.config.delay&&t.config.delay.hide?t._timeout=setTimeout(function(){t._hoverState===Mn&&t.hide()},t.config.delay.hide):t.hide())},e._isWithActiveTrigger=function(){for(var e in this._activeTrigger)if(this._activeTrigger[e])return!0;return!1},e._getConfig=function(e){var t=p(this.element).data();return Object.keys(t).forEach(function(e){-1!==xn.indexOf(e)&&delete t[e]}),"number"==typeof(e=l({},this.constructor.Default,{},t,{},"object"==typeof e&&e?e:{})).delay&&(e.delay={show:e.delay,hide:e.delay}),"number"==typeof e.title&&(e.title=e.title.toString()),"number"==typeof e.content&&(e.content=e.content.toString()),m.typeCheckConfig(An,e,this.constructor.DefaultType),e.sanitize&&(e.template=In(e.template,e.whiteList,e.sanitizeFn)),e},e._getDelegateConfig=function(){var e={};if(this.config)for(var t in this.config)this.constructor.Default[t]!==this.config[t]&&(e[t]=this.config[t]);return e},e._cleanTipClass=function(){var e=p(this.getTipElement()),t=e.attr("class").match(Pn);null!==t&&t.length&&e.removeClass(t.join(""))},e._handlePopperPlacementChange=function(e){var t=e.instance;this.tip=t.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(e.placement))},e._fixTransition=function(){var e=this.getTipElement(),t=this.config.animation;null===e.getAttribute("x-placement")&&(p(e).removeClass(Un),this.config.animation=!1,this.hide(),this.show(),this.config.animation=t)},i._jQueryInterface=function(n){return this.each(function(){var e=p(this).data(On),t="object"==typeof n&&n;if((e||!/dispose|hide/.test(n))&&(e||(e=new i(this,t),p(this).data(On,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.4.1"}},{key:"Default",get:function(){return Rn}},{key:"NAME",get:function(){return An}},{key:"DATA_KEY",get:function(){return On}},{key:"Event",get:function(){return Wn}},{key:"EVENT_KEY",get:function(){return Nn}},{key:"DefaultType",get:function(){return 
jn}}]),i}();p.fn[An]=Xn._jQueryInterface,p.fn[An].Constructor=Xn,p.fn[An].noConflict=function(){return p.fn[An]=kn,Xn._jQueryInterface};var Gn="popover",$n="bs.popover",Jn="."+$n,Zn=p.fn[Gn],ei="bs-popover",ti=new RegExp("(^|\\s)"+ei+"\\S+","g"),ni=l({},Xn.Default,{placement:"right",trigger:"click",content:"",template:'
');
- }
- }
- // Do not create smartmenus
- // $('#main-menu').smartmenus();
- }
- /* @license-end */
-
\ No newline at end of file
diff --git a/docs/doxygen/assets/openvino-layout.js b/docs/doxygen/assets/openvino-layout.js
index 907e7f47d346dd..db73c971843a57 100644
--- a/docs/doxygen/assets/openvino-layout.js
+++ b/docs/doxygen/assets/openvino-layout.js
@@ -1,3 +1,21 @@
+/*
+******************************************************************************
+Copyright 2017-2021 Intel Corporation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+******************************************************************************
+*/
+
"use strict";
/**
@@ -487,6 +505,7 @@ function openVinoContent() {
searchSlider.on('click', function() {
$(this).toggleClass('closed open');
$("#MSearchField").animate({width:'toggle'},200);
+ $('#MSearchField').focus();
});
if (['http:', 'https:'].indexOf(window.location.protocol) !== -1) {
$('#MSearchField').replaceWith('');
@@ -522,6 +541,51 @@ function openVinoContent() {
$(".contents").prepend($(".header"));
}
+ // assign clipboard button for each .fragment element
+ $('.fragment').wrap('');
+ $('.code-container').prepend($(''));
+ var $copyButton = $('content_copy');
+ $copyButton.click(function() {
+ var self = this;
+ $(self).text('check_circle_outline')
+ .css('color', '#003C71')
+ .css("pointer-events", 'none');;
+ $(self).next('.copy-tooltip')
+ .attr('data-original-title', 'Copied!')
+ .tooltip('show')
+ .addClass('active');
+ var fragment = $(self.parentElement.parentElement).children('div.fragment')[0];
+ var text = [];
+ $(fragment).children('div.line').each(function(key, val) {
+ text.push(val.innerText);
+ });
+ text = text.join('\n');
+ var $placeholder = $('