Merge pull request slyalin#76 from nosovmik/rebase13
Rebase13
nosovmik authored Jun 2, 2021
2 parents 5a9767d + eb62e72 commit 7ec81fd
Showing 205 changed files with 2,776 additions and 1,560 deletions.
15 changes: 14 additions & 1 deletion .ci/azure/linux.yml
@@ -29,6 +29,7 @@ jobs:
MODELS_PATH: $(REPO_DIR)/../testdata
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
+ BUILD_SAMPLES_DIR: $(WORK_DIR)/build_samples
BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
INSTALL_DIR: $(WORK_DIR)/install_pkg
SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
@@ -56,6 +57,7 @@ jobs:
- script: |
rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR)
rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR)
+ rm -rf $(BUILD_SAMPLES_DIR) ; mkdir $(BUILD_SAMPLES_DIR)
echo TargetBranch: $(System.PullRequest.TargetBranch)
echo SourceBranch: $(Build.SourceBranch)
displayName: 'Make dir'
@@ -119,12 +121,23 @@
displayName: 'Build Lin'

- script: ls -alR $(REPO_DIR)/bin/
- displayName: 'List files'
+ displayName: 'List bin files'

- script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
workingDirectory: $(BUILD_DIR)
displayName: 'Install'

+ - script: ls -alR $(INSTALL_DIR)
+ displayName: 'List install files'

+ - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh
+ workingDirectory: $(BUILD_SAMPLES_DIR)
+ displayName: 'Build cpp samples'

+ - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/c/build_samples.sh
+ workingDirectory: $(BUILD_SAMPLES_DIR)
+ displayName: 'Build c samples'

- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
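In the `--gtest_filter` value above, the leading `-` switches Google Test into exclusion mode and `:` separates patterns, so the run executes every test except `backend_api.config_unsupported` and any test whose name matches `*IE_GPU*`. A minimal sketch of the same invocation, assuming the `unit-test` binary is in the current directory:

```sh
# '-' makes the filter an exclusion list; ':' separates patterns.
# This skips backend_api.config_unsupported and all tests matching *IE_GPU*.
./unit-test --gtest_print_time=1 \
  --gtest_filter=-backend_api.config_unsupported:*IE_GPU*
```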
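Taken together, the new Linux steps install the build and then run the sample build scripts shipped inside the installed package. A minimal local sketch of the same sequence, assuming an install prefix like the pipeline's `INSTALL_DIR` (directory names here are illustrative):

```sh
# Rough local equivalent of the 'Install' and 'Build cpp/c samples' steps.
INSTALL_DIR=$PWD/install_pkg
mkdir -p build_samples && cd build_samples
# Each script configures and builds the samples against the installed package.
"$INSTALL_DIR"/deployment_tools/inference_engine/samples/cpp/build_samples.sh
"$INSTALL_DIR"/deployment_tools/inference_engine/samples/c/build_samples.sh
```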
15 changes: 14 additions & 1 deletion .ci/azure/windows.yml
@@ -29,6 +29,7 @@ jobs:
MODELS_PATH: $(REPO_DIR)\..\testdata
WORK_DIR: $(Pipeline.Workspace)\_w
BUILD_DIR: D:\build
+ BUILD_SAMPLES_DIR: D:\build_samples
BIN_DIR: $(REPO_DIR)\bin\intel64
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
@@ -56,6 +57,7 @@ jobs:
- script: |
rd /Q /S $(WORK_DIR) & mkdir $(WORK_DIR)
rd /Q /S $(BUILD_DIR) & mkdir $(BUILD_DIR)
+ rd /Q /S $(BUILD_SAMPLES_DIR) & mkdir $(BUILD_SAMPLES_DIR)
displayName: 'Make dir'
- script: |
@@ -101,12 +103,23 @@
displayName: 'Build Win'
- script: dir $(REPO_DIR)\bin\ /s
- displayName: 'List files'
+ displayName: 'List bin files'

- script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
workingDirectory: $(BUILD_DIR)
displayName: 'Install'

+ - script: dir $(INSTALL_DIR) /s
+ displayName: 'List install files'

+ - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat
+ workingDirectory: $(BUILD_SAMPLES_DIR)
+ displayName: 'Build cpp samples'

+ - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\c\build_samples_msvc.bat
+ workingDirectory: $(BUILD_SAMPLES_DIR)
+ displayName: 'Build c samples'

- script: |
set PATH=$(TEST_ENV_PATH)
$(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
4 changes: 3 additions & 1 deletion docs/IE_DG/Samples_Overview.md
@@ -9,7 +9,9 @@ After installation of Intel® Distribution of OpenVINO™ toolkit, C, C++ and P

Inference Engine sample applications include the following:

- - **[Automatic Speech Recognition C++ Sample](../../inference-engine/samples/speech_sample/README.md)** – Acoustic model inference based on Kaldi neural networks and speech feature vectors.
+ - **Speech Sample** - Acoustic model inference based on Kaldi neural networks and speech feature vectors.
+   - [Automatic Speech Recognition C++ Sample](../../inference-engine/samples/speech_sample/README.md)
+   - [Automatic Speech Recognition Python Sample](../../inference-engine/ie_bridges/python/sample/speech_sample/README.md)
- **Benchmark Application** – Estimates deep learning inference performance on supported devices for synchronous and asynchronous modes.
- [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md)
- [Benchmark Python Tool](../../inference-engine/tools/benchmark_tool/README.md)
2 changes: 1 addition & 1 deletion docs/IE_DG/supported_plugins/MULTI.md
@@ -85,7 +85,7 @@ Notice that until R2 you had to calculate number of requests in your application
@snippet snippets/MULTI5.cpp part5

## Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance
- Notice that every OpenVINO sample that supports "-d" (which stays for "device") command-line option transparently accepts the multi-device.
+ Notice that every OpenVINO sample that supports the "-d" (which stands for "device") command-line option transparently accepts the multi-device.
The [Benchmark Application](../../../inference-engine/samples/benchmark_app/README.md) is the best reference for optimal usage of the multi-device. As discussed multiple times earlier, you don't need to set up the number of requests, CPU streams, or threads, as the application provides optimal out-of-the-box performance.
Below is an example command line to evaluate HDDL+GPU performance with that:

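A representative invocation (a sketch only: the model path is a placeholder, and `MULTI:HDDL,GPU` is the multi-device priority list) might look like:

```sh
# 'model.xml' is a placeholder IR path; MULTI:HDDL,GPU lets the multi-device
# plugin schedule inference requests across the HDDL and GPU devices.
./benchmark_app -m model.xml -d MULTI:HDDL,GPU
```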
@@ -1,10 +1,13 @@
- # Convert ONNX* DLRM to the Intermediate Representation {#openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_DLRM}
+ [DEPRECATED] Convert ONNX* DLRM to the Intermediate Representation {#openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_DLRM}
+ ===============================

> **NOTE**: These instructions are currently deprecated. Since the OpenVINO™ 2020.4 release, no specific steps are needed to convert ONNX\* DLRM models. For general instructions on converting ONNX models, please refer to the [Converting an ONNX* Model](../Convert_Model_From_ONNX.md) topic.
These instructions are applicable only to DLRM models converted to the ONNX* file format from the [facebookresearch/dlrm model](https://github.com/facebookresearch/dlrm).

- **Step 1**. Save trained Pytorch* model to ONNX* format. If you train the model using the [script provided in model repository](https://github.com/facebookresearch/dlrm/blob/master/dlrm_s_pytorch.py), just add the `--save-onnx` flag to the command line parameters and you'll get the `dlrm_s_pytorch.onnx` file containing the model serialized in ONNX* format.
+ **Step 1**. Save the trained Pytorch* model to ONNX* format, or download a pretrained ONNX* model from the
+ [MLCommons/inference/recommendation/dlrm](https://github.com/mlcommons/inference/tree/r1.0/recommendation/dlrm/pytorch#supported-models) repository.
+ If you train the model using the [script provided in the model repository](https://github.com/facebookresearch/dlrm/blob/master/dlrm_s_pytorch.py), just add the `--save-onnx` flag to the command-line parameters and you'll get the `dlrm_s_pytorch.onnx` file containing the model serialized in ONNX* format.

**Step 2**. To generate the Intermediate Representation (IR) of the model, change your current working directory to the Model Optimizer installation directory and run the Model Optimizer with the following parameters:
```sh
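# Illustrative sketch only: a generic ONNX* conversion of the Step 1 output.
# See Convert_Model_From_ONNX.md for the full set of Model Optimizer options.
python3 mo.py --input_model dlrm_s_pytorch.onnx
```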
@@ -2,7 +2,7 @@

## Download the Pre-trained Language Model on One Billion Word Benchmark

- TensorFlow* provides [a pre-trained Language Model on One Billion Word Benchmark](https://github.com/tensorflow/models/tree/master/research/lm_1b).
+ TensorFlow* provides [a pre-trained Language Model on One Billion Word Benchmark](https://github.com/tensorflow/models/tree/r2.3.0/research/lm_1b).

To download the model for IR conversion, please follow these instructions:
1. Create a new directory to store the model:
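This first step amounts to creating a directory; a sketch, with an assumed directory name:

```sh
# The directory name is an assumption; any writable location works.
mkdir lm_1b
```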
10 changes: 5 additions & 5 deletions docs/doxygen/ie_docs.xml
@@ -53,12 +53,12 @@ limitations under the License.
<tab type="user" title="Convert ONNX* Faster R-CNN Model to the Intermediate Representation" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_Faster_RCNN"/>
<tab type="user" title="Convert ONNX* Mask R-CNN Model to the Intermediate Representation" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_Mask_RCNN"/>
<tab type="user" title="Convert ONNX* GPT-2 Model to the Intermediate Representation" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_GPT2"/>
<tab type="user" title="Convert DLRM ONNX* Model to the Intermediate Representation" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_DLRM"/>
<tab type="user" title="[DEPRECATED] Convert DLRM ONNX* Model to the Intermediate Representation" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_DLRM"/>
<tab type="usergroup" title="Converting Your PyTorch* Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch">
<tab type="user" title="Convert PyTorch* QuartzNet Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_QuartzNet"/>
<tab type="user" title="Convert PyTorch* RNN-T Model " url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_RNNT"/>
<tab type="user" title="Convert PyTorch* YOLACT Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_YOLACT"/>
<tab type="user" title="Convert PyTorch* F3Net Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_F3Net"/>
<tab type="user" title="Convert PyTorch* QuartzNet Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_QuartzNet"/>
<tab type="user" title="Convert PyTorch* RNN-T Model " url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_RNNT"/>
<tab type="user" title="Convert PyTorch* YOLACT Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_YOLACT"/>
<tab type="user" title="Convert PyTorch* F3Net Model" url="@ref openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_F3Net"/>
</tab>
</tab>
<tab type="user" title="Model Optimizations Techniques" url="@ref openvino_docs_MO_DG_prepare_model_Model_Optimization_Techniques"/>
1 change: 1 addition & 0 deletions docs/doxygen/openvino_docs.xml
@@ -180,6 +180,7 @@ limitations under the License.
<tab type="user" title="Object Detection SSD Python* Sample" url="@ref openvino_inference_engine_ie_bridges_python_sample_object_detection_sample_ssd_README"/>
<tab type="user" title="Object Detection SSD C Sample" url="@ref openvino_inference_engine_ie_bridges_c_samples_object_detection_sample_ssd_README"/>
<tab type="user" title="Automatic Speech Recognition C++ Sample" url="@ref openvino_inference_engine_samples_speech_sample_README"/>
<tab type="user" title="Automatic Speech Recognition Python Sample" url="@ref openvino_inference_engine_ie_bridges_python_sample_speech_sample_README"/>
<tab type="user" title="Style Transfer C++ Sample" url="@ref openvino_inference_engine_samples_style_transfer_sample_README"/>
<tab type="user" title="Style Transfer Python* Sample" url="@ref openvino_inference_engine_ie_bridges_python_sample_style_transfer_sample_README"/>
<tab type="user" title="Benchmark C++ Tool" url="@ref openvino_inference_engine_samples_benchmark_app_README"/>
14 changes: 10 additions & 4 deletions docs/ops/image/Interpolate_4.md
@@ -89,18 +89,24 @@

**Inputs**

- * **1**: `data` - Input tensor with data for interpolation. Type of elements is any supported floating point type or `int8` type. Required.
+ * **1**: `data` - tensor of type `T` with data for interpolation. **Required.**

- * **2**: `sizes` - 1D tensor describing output shape for spatial axes. Number of elements matches the number of indices in `axes` input, the order matches as well. Required.
+ * **2**: `sizes` - 1D tensor of type `T_SIZE` describing output shape for spatial axes. Number of elements matches the number of indices in `axes` input, the order matches as well. **Required.**

- * **3**: `scales` - 1D tensor describing scales for spatial axes. Type of elements is any supported floating point type. Number and order of elements match the number and order of indices in `axes` input. Required.
+ * **3**: `scales` - 1D tensor of type `T_SCALES` describing scales for spatial axes. Number and order of elements match the number and order of indices in `axes` input. **Required.**

- * **4**: `axes` - 1D tensor specifying dimension indices where interpolation is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, e.g. `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`. These indices should be non-negative integers from `0` to `rank(data) - 1` inclusively. Other dimensions do not change. The order of elements in `axes` attribute matters, and mapped directly to elements in the 2nd input `sizes`. Optional with default value `[0,...,rank(data) - 1]`.
+ * **4**: `axes` - 1D tensor of type `T_AXES` specifying dimension indices where interpolation is applied; `axes` can be any unordered list of indices of different dimensions of the input tensor, e.g. `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`. These indices should be non-negative integers from `0` to `rank(data) - 1` inclusive. Other dimensions do not change. The order of elements in the `axes` input matters and is mapped directly to elements in the 2nd input `sizes`. **Optional** with default value `[0,...,rank(data) - 1]`.

**Outputs**

* **1**: Resulting interpolated tensor with elements of the same type as input `data` tensor. The shape of the output matches input `data` shape except spatial dimensions mentioned in `axes` attribute. For other dimensions shape matches sizes from `sizes` in order specified in `axes`.

+ **Types**
+ * *T*: any supported numeric type.
+ * *T_SIZE*: any supported integer type.
+ * *T_SCALES*: any supported floating point type.
+ * *T_AXES*: any supported integer type.


**Detailed description**
Calculations are performed according to the following rules.
62 changes: 33 additions & 29 deletions docs/ops/normalization/LRN_1.md
@@ -6,70 +6,74 @@

**Short description**: Local response normalization.

- **Detailed description**:
- Local Response Normalization performs a normalization over local input regions.
- Each input value is divided by
- \f[ (bias + \frac{alpha}{{size}^{len(axes)}} \cdot \sum_{i} data_{i})^{beta} \f]
- The sum is taken over a region of a side length `size` and number of dimensions equal to number of axes.
- The region is centered at the input value that's being normalized (with zero padding added if needed).
-
- Here is an example for 4D `data` input tensor and `axes = [1]`:
- ```
- sqr_sum[a, b, c, d] =
- sum(data[a, max(0, b - size / 2) : min(data.shape[1], b + size / 2 + 1), c, d] ** 2)
- output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
- ```
-
- Example for 4D `data` input tensor and `axes = [2, 3]`:
- ```
- sqr_sum[a, b, c, d] =
- sum(data[a, b, max(0, c - size / 2) : min(data.shape[2], c + size / 2 + 1), max(0, d - size / 2) : min(data.shape[3], d + size / 2 + 1)] ** 2)
- output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
- ```

**Attributes**:

* *alpha*

- * **Description**: *alpha* represents the scaling attribute for the normalizing sum. For example, *alpha* equal 0.0001 means that the normalizing sum is multiplied by 0.0001.
+ * **Description**: *alpha* represents the scaling attribute for the normalizing sum. For example, *alpha* equal `0.0001` means that the normalizing sum is multiplied by `0.0001`.
* **Range of values**: no restrictions
- * **Type**: float
+ * **Type**: `float`
* **Default value**: None
* **Required**: *yes*

* *beta*

- * **Description**: *beta* represents the exponent for the normalizing sum. For example, *beta* equal 0.75 means that the normalizing sum is raised to the power of 0.75.
+ * **Description**: *beta* represents the exponent for the normalizing sum. For example, *beta* equal `0.75` means that the normalizing sum is raised to the power of `0.75`.
* **Range of values**: positive number
- * **Type**: float
+ * **Type**: `float`
* **Default value**: None
* **Required**: *yes*

* *bias*

* **Description**: *bias* represents the offset. It is usually a positive number, used to avoid dividing by zero.
* **Range of values**: no restrictions
- * **Type**: float
+ * **Type**: `float`
* **Default value**: None
* **Required**: *yes*

* *size*

* **Description**: *size* represents the side length of the region to be used for the normalization sum. The region can have one or more dimensions depending on the second input axes indices.
* **Range of values**: positive integer
- * **Type**: int
+ * **Type**: `int`
* **Default value**: None
* **Required**: *yes*

**Inputs**

- * **1**: `data` - input tensor of any floating point type and arbitrary shape. Required.
+ * **1**: `data` - tensor of type `T` and arbitrary shape. **Required.**

- * **2**: `axes` - specifies indices of dimensions in `data` that define normalization slices. Required.
+ * **2**: `axes` - 1D tensor of type `T_IND` which specifies indices of dimensions in `data` that define normalization slices. **Required.**

**Outputs**

- * **1**: Output tensor of the same shape and type as the `data` input tensor.
+ * **1**: Output tensor of type `T` and the same shape as the `data` input tensor.

+ **Detailed description**:
+ Local Response Normalization performs a normalization over local input regions.
+ Each input value is divided by
+ \f[ (bias + \frac{alpha}{{size}^{len(axes)}} \cdot \sum_{i} data_{i}^{2})^{beta} \f]
+ The sum is taken over a region of a side length `size` and number of dimensions equal to the number of axes.
+ The region is centered at the input value that's being normalized (with zero padding added if needed).
+
+ Here is an example for 4D `data` input tensor and `axes = [1]`:
+ ```
+ sqr_sum[a, b, c, d] =
+ sum(data[a, max(0, b - size / 2) : min(data.shape[1], b + size / 2 + 1), c, d] ** 2)
+ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
+ ```
+
+ Example for 4D `data` input tensor and `axes = [2, 3]`:
+ ```
+ sqr_sum[a, b, c, d] =
+ sum(data[a, b, max(0, c - size / 2) : min(data.shape[2], c + size / 2 + 1), max(0, d - size / 2) : min(data.shape[3], d + size / 2 + 1)] ** 2)
+ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
+ ```
+ **Types**
+ * *T*: any supported floating point type.
+ * *T_IND*: any supported integer type.

**Example**
