From 509867fd409467c50b5bc26ea06676667dc663aa Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 19 Jun 2021 20:58:46 +0300 Subject: [PATCH 01/57] OV new package structure --- .ci/azure/linux.yml | 4 +- .ci/azure/windows.yml | 4 +- .ci/openvino-onnx/Dockerfile | 4 +- CMakeLists.txt | 4 +- cmake/developer_package/packaging.cmake | 14 ++-- docs/IE_DG/Cross_Check_Tool.md | 2 +- docs/IE_DG/Extensibility_DG/GPU_Kernel.md | 2 +- docs/IE_DG/Extensibility_DG/VPU_Kernel.md | 12 ++-- docs/IE_DG/Samples_Overview.md | 2 +- docs/IE_DG/Tools_Overview.md | 8 +-- .../prepare_model/Config_Model_Optimizer.md | 10 +-- .../prepare_model/Model_Optimizer_FAQ.md | 6 +- .../convert_model/Convert_Model_From_Caffe.md | 2 +- .../convert_model/Convert_Model_From_Kaldi.md | 2 +- .../convert_model/Convert_Model_From_MxNet.md | 2 +- .../convert_model/Convert_Model_From_ONNX.md | 2 +- .../Convert_Model_From_TensorFlow.md | 10 +-- .../convert_model/Converting_Model.md | 2 +- .../convert_model/Converting_Model_General.md | 2 +- .../convert_model/Cutting_Model.md | 2 +- .../Convert_EfficientDet_Models.md | 4 +- .../Convert_Object_Detection_API_Models.md | 10 +-- .../Convert_YOLO_From_Tensorflow.md | 6 +- ...odel_Optimizer_with_Caffe_Python_Layers.md | 2 +- docs/get_started/get_started_linux.md | 10 +-- docs/get_started/get_started_macos.md | 12 ++-- docs/get_started/get_started_raspbian.md | 6 +- docs/get_started/get_started_windows.md | 10 +-- docs/how_tos/POT_how_to_example.md | 20 +++--- docs/install_guides/PAC_Configure_2018R5.md | 2 +- docs/install_guides/PAC_Configure_2019RX.md | 6 +- .../VisionAcceleratorFPGA_Configure_2018R5.md | 2 +- .../VisionAcceleratorFPGA_Configure_2019R1.md | 2 +- .../VisionAcceleratorFPGA_Configure_2019R3.md | 2 +- .../install_guides/deployment-manager-tool.md | 6 +- .../installing-openvino-docker-linux.md | 4 +- .../installing-openvino-linux-ivad-vpu.md | 2 +- .../installing-openvino-linux.md | 4 +- .../installing-openvino-macos.md | 4 +- .../installing-openvino-raspbian.md | 2 +- .../installing-openvino-windows.md | 8 +-- .../dldt_optimization_guide.md | 2 +- docs/template_plugin/src/CMakeLists.txt | 1 - inference-engine/CMakeLists.txt | 18 +++--- inference-engine/cmake/dependencies.cmake | 2 +- .../ie_bridges/c/src/CMakeLists.txt | 4 +- .../ie_bridges/python/CMakeLists.txt | 2 +- .../ie_bridges/python/wheel/.env.in | 4 +- .../ie_bridges/python/wheel/CMakeLists.txt | 7 +- .../ie_bridges/python/wheel/setup.py | 20 +++--- .../samples/benchmark_app/README.md | 4 +- .../src/gna_plugin/CMakeLists.txt | 2 +- .../src/inference_engine/CMakeLists.txt | 21 +++--- inference-engine/src/vpu/CMakeLists.txt | 2 +- .../src/vpu/myriad_plugin/CMakeLists.txt | 2 +- .../tools/benchmark_tool/README.md | 4 +- .../tools/compile_tool/CMakeLists.txt | 4 +- inference-engine/tools/compile_tool/README.md | 2 +- model-optimizer/CMakeLists.txt | 2 +- model-optimizer/README.md | 2 +- .../extensions/analysis/tf_retinanet.py | 2 +- .../extensions/analysis/tf_yolo.py | 4 +- ngraph/CMakeLists.txt | 16 +---- ngraph/cmake/external_onnx.cmake | 6 +- ngraph/cmake/external_protobuf.cmake | 6 +- ngraph/core/CMakeLists.txt | 12 ++-- .../frontend/frontend_manager/CMakeLists.txt | 4 +- ngraph/frontend/onnx_common/CMakeLists.txt | 2 +- ngraph/frontend/onnx_editor/CMakeLists.txt | 2 +- ngraph/frontend/onnx_import/CMakeLists.txt | 8 +-- scripts/demo/demo_benchmark_app.bat | 10 +-- scripts/demo/demo_benchmark_app.sh | 8 +-- scripts/demo/demo_security_barrier_camera.bat | 4 +- scripts/demo/demo_security_barrier_camera.sh | 4 +- 
.../demo_squeezenet_download_convert_run.bat | 10 +-- .../demo_squeezenet_download_convert_run.sh | 8 +-- .../install_NCS_udev_rules.sh | 4 +- scripts/setupvars/setupvars.bat | 42 ++++-------- scripts/setupvars/setupvars.sh | 64 ++++++++----------- tests/lib/path_utils.py | 12 ++-- tools/CMakeLists.txt | 4 +- 81 files changed, 258 insertions(+), 299 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 146775f6189f02..4e12c5cbf1d942 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -131,11 +131,11 @@ jobs: - script: ls -alR $(INSTALL_DIR) displayName: 'List install files' - - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh + - script: $(INSTALL_DIR)/samples/cpp/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build cpp samples' - - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/c/build_samples.sh + - script: $(INSTALL_DIR)/samples/c/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index 21a36392e33812..a8f683da7272bf 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -112,11 +112,11 @@ jobs: - script: dir $(INSTALL_DIR) /s displayName: 'List install files' - - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat + - script: $(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build cpp samples' - - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\c\build_samples_msvc.bat + - script: $(INSTALL_DIR)\samples\c\build_samples_msvc.bat workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index ec78869b6d6585..10f7ab484e33ba 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -79,7 +79,7 @@ RUN make -j $(nproc) install # Run tests via tox WORKDIR /openvino/ngraph/python -ENV ngraph_DIR=/openvino/dist/deployment_tools/ngraph -ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib +ENV ngraph_DIR=/openvino/dist/runtime +ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/lib/python_api/python3.8:${PYTHONPATH} CMD tox diff --git a/CMakeLists.txt b/CMakeLists.txt index 3602750435c550..39e69bff521156 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -208,13 +208,13 @@ ie_cpack_add_component(demo_scripts DEPENDS core) if(UNIX) install(DIRECTORY scripts/demo/ - DESTINATION deployment_tools/demo + DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN *.bat EXCLUDE) elseif(WIN32) install(DIRECTORY scripts/demo/ - DESTINATION deployment_tools/demo + DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN *.sh EXCLUDE) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index 2b5e945b17af54..03d14ad1810b7b 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -5,8 +5,6 @@ include(CPackComponent) unset(IE_CPACK_COMPONENTS_ALL CACHE) -set(IE_CPACK_IE_DIR deployment_tools/inference_engine) - # # ie_cpack_set_library_dir() # @@ -14,13 +12,13 @@ set(IE_CPACK_IE_DIR deployment_tools/inference_engine) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - 
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
-        set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
+        set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
+        set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER} PARENT_SCOPE)
+        set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
     else()
-        set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
-        set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
-        set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
+        set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
+        set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
+        set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
     endif()
 endfunction()
diff --git a/docs/IE_DG/Cross_Check_Tool.md b/docs/IE_DG/Cross_Check_Tool.md
index 495afa790fcccc..fee2923ff93886 100644
--- a/docs/IE_DG/Cross_Check_Tool.md
+++ b/docs/IE_DG/Cross_Check_Tool.md
@@ -8,7 +8,7 @@ The Cross Check Tool can compare metrics per layer or all over the model.
 On Linux* OS, before running the Cross Check Tool binary, make sure your application can find the
 Deep Learning Inference Engine libraries.
-Navigate to the `<INSTALL_DIR>/deployment_tools/inference_engine/bin` folder and run the `setupvars.sh` script to
+Navigate to the `<INSTALL_DIR>/runtime/bin` folder and run the `setupvars.sh` script to
 set all necessary environment variables:
 
 ```sh
diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
index d9fd809f8e4227..ea8cb89ae56bbb 100644
--- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
@@ -4,7 +4,7 @@ The GPU codepath abstracts many details about OpenCL\*. You need to provide the
 
 There are two options of using the custom operation configuration file:
 
-* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `<INSTALL_DIR>/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder
+* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `<INSTALL_DIR>/runtime/bin` folder
 * Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin:
 
 @snippet snippets/GPU_Kernel.cpp part0
diff --git a/docs/IE_DG/Extensibility_DG/VPU_Kernel.md b/docs/IE_DG/Extensibility_DG/VPU_Kernel.md
index 033097598317bf..4dca14ce50233e 100644
--- a/docs/IE_DG/Extensibility_DG/VPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/VPU_Kernel.md
@@ -15,18 +15,18 @@ To customize your topology with an OpenCL layer, follow the steps below:
 
 > **NOTE:** OpenCL compiler, targeting Intel® Neural Compute Stick 2 for the SHAVE* processor only, is redistributed with OpenVINO.
 OpenCL support is provided by ComputeAorta*, and is distributed under a license agreement between
 Intel® and Codeplay* Software Ltd.
 
-The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `<INSTALL_DIR>/deployment_tools/tools/cl_compiler`.
+The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `<INSTALL_DIR>/tools/cl_compiler`.
 
 > **NOTE:** By design, custom OpenCL layers support any OpenCL kernels written with 1.2 version assumed. It also supports half float
 extension and is optimized for this type, because it is a native type for Intel® Movidius™ VPUs.
 
 1. Prior to running a compilation, make sure that the following variables are set:
-   * `SHAVE_MA2X8XLIBS_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/lib/`
-   * `SHAVE_LDSCRIPT_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/ldscripts/`
-   * `SHAVE_MYRIAD_LD_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin/`
-   * `SHAVE_MOVIASM_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin/`
+   * `SHAVE_MA2X8XLIBS_DIR=<INSTALL_DIR>/tools/cl_compiler/lib/`
+   * `SHAVE_LDSCRIPT_DIR=<INSTALL_DIR>/tools/cl_compiler/ldscripts/`
+   * `SHAVE_MYRIAD_LD_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
+   * `SHAVE_MOVIASM_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
 2. Run the compilation with the command below. You should use `--strip-binary-header` to make an OpenCL runtime-agnostic binary runnable with the Inference Engine.
 ```bash
-cd <INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin
+cd <INSTALL_DIR>/tools/cl_compiler/bin
 ./clc --strip-binary-header custom_layer.cl -o custom_layer.bin
 ```
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index db39cbfc5b4cf8..b5de5a83656531 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -240,7 +240,7 @@ For example, for the **Debug** configuration, go to the project's
 variable in the **Environment** field to the following:
 
 ```sh
-PATH=<INSTALL_DIR>\deployment_tools\inference_engine\bin\intel64\Debug;<INSTALL_DIR>\opencv\bin;%PATH%
+PATH=<INSTALL_DIR>\runtime\bin;<INSTALL_DIR>\opencv\bin;%PATH%
 ```
where `<INSTALL_DIR>` is the directory in which the OpenVINO toolkit is installed.
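For the same step on Linux, the restructured layout would be exercised as in the sketch below. This is illustrative only: `<INSTALL_DIR>` and the `intel64` arch folder are assumptions following the `runtime/lib/${ARCH_FOLDER}` rule in the packaging.cmake hunk above, not lines from this patch.

```sh
# Illustrative Linux counterpart of the Windows PATH change above (not part of this patch).
# Point the loader at the relocated runtime libraries, then build the samples from their new location.
export LD_LIBRARY_PATH=<INSTALL_DIR>/runtime/lib/intel64:$LD_LIBRARY_PATH
<INSTALL_DIR>/samples/cpp/build_samples.sh   # same script the CI hunks above invoke
```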
diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index f0741105387617..0b72494ea88f1c 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -6,11 +6,11 @@ The OpenVINO™ toolkit installation includes the following tools: |Tool | Location in the Installation Directory| |-----------------------------------------------------------------------------|---------------------------------------| -|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/deployment_tools/tools/open_model_zoo/tools/accuracy_checker`| -|[Post-Training Optimization Tool](@ref pot_README) | `/deployment_tools/tools/post_training_optimization_toolkit`| +|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/tools/accuracy_checker`| +|[Post-Training Optimization Tool](@ref pot_README) | `/tools/post_training_optimization_toolkit`| |[Model Downloader](@ref omz_tools_downloader) | `/deployment_tools/tools/model_downloader`| -|[Cross Check Tool](../../inference-engine/tools/cross_check_tool/README.md) | `/deployment_tools/tools/cross_check_tool`| -|[Compile Tool](../../inference-engine/tools/compile_tool/README.md) | `/deployment_tools/inference_engine/lib/intel64/`| +|[Cross Check Tool](../../inference-engine/tools/cross_check_tool/README.md) | `/tools/cross_check_tool`| +|[Compile Tool](../../inference-engine/tools/compile_tool/README.md) | `/tools/compile_tool`| ## See Also diff --git a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md index 9b978d750aa586..4d61e1a81d4d09 100644 --- a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md +++ b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md @@ -12,7 +12,7 @@ dependencies and provide the fastest and easiest way to configure the Model Optimizer. To configure all three frameworks, go to the -`/deployment_tools/model_optimizer/install_prerequisites` +`/tools/model_optimizer/install_prerequisites` directory and run: * For Linux\* OS: @@ -37,7 +37,7 @@ install_prerequisites.bat ``` To configure a specific framework, go to the -`/deployment_tools/model_optimizer/install_prerequisites` +`/tools/model_optimizer/install_prerequisites` directory and run: * For Caffe\* on Linux: @@ -103,7 +103,7 @@ framework at a time. 1. Go to the Model Optimizer directory: ```shell -cd /deployment_tools/model_optimizer/ +cd /tools/model_optimizer/ ``` 2. **Strongly recommended for all global Model Optimizer dependency installations**: Create and activate a virtual environment. While not required, this step is @@ -179,7 +179,7 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp On Windows, pre-built protobuf packages for Python versions 3.4, 3.5, 3.6, and 3.7 are provided with the installation package and can be found in the -`\deployment_tools\model_optimizer\install_prerequisites` +`\tools\model_optimizer\install_prerequisites` folder. Please note that they are not installed with the `install_prerequisites.bat` installation script due to possible issues with `pip`, and you can install them at your own discretion. Make sure @@ -196,7 +196,7 @@ To install the protobuf package: 1. Open the command prompt as administrator. 2. Go to the `install_prerequisites` folder of the OpenVINO toolkit installation directory: ```sh -cd \deployment_tools\model_optimizer\install_prerequisites +cd \tools\model_optimizer\install_prerequisites ``` 3. Run the following command to install the protobuf for Python 3.6. 
If diff --git a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md index bb599cf93b5632..cd41e9da21d0a8 100644 --- a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md +++ b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md @@ -28,7 +28,7 @@ For example, to add the description of the `CustomReshape` layer, which is an ar 2. Generate a new parser: ```shell -cd /deployment_tools/model_optimizer/mo/front/caffe/proto +cd /tools/model_optimizer/mo/front/caffe/proto python3 generate_caffe_pb2.py --input_proto /src/caffe/proto/caffe.proto ``` where `PATH_TO_CUSTOM_CAFFE` is the path to the root directory of custom Caffe\*. @@ -66,7 +66,7 @@ The mean file that you provide for the Model Optimizer must be in a `.binaryprot #### 7. What does the message "Invalid proto file: there is neither 'layer' nor 'layers' top-level messages" mean? -The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: +The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: ``` message NetParameter { // ... some other parameters @@ -81,7 +81,7 @@ This means that any topology should contain layers as top-level structures in `p #### 8. What does the message "Old-style inputs (via 'input_dims') are not supported. Please specify inputs via 'input_shape'" mean? -The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: +The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: ```sh message NetParameter { diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md index 4c257d1689ea23..229205f7b68166 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md @@ -38,7 +38,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert a Caffe\* model: -1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory. +1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory. 2. 
Use the `mo.py` script to simply convert a model, specifying the path to the input model `.caffemodel` file and the path to an output directory with write permissions:
```sh
python3 mo.py --input_model <INPUT_MODEL>.caffemodel --output_dir <OUTPUT_MODEL_DIR>
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md
index 23fbad2ee08e13..3c0a36aab14fc3 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md
@@ -33,7 +33,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit
 To convert a Kaldi\* model:
 
-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` or `.mdl` file and to an output directory where you have write permissions:
```sh
python3 mo.py --input_model <INPUT_MODEL>.nnet --output_dir <OUTPUT_MODEL_DIR>
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md
index 4b8c1816e8b318..14f17864fc935f 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md
@@ -45,7 +45,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit
 To convert an MXNet\* model:
 
-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. To convert an MXNet\* model contained in a `model-file-symbol.json` and `model-file-0000.params`, run the Model Optimizer launch script `mo.py`, specifying a path to the input model file and a path to an output directory with write permissions:
```sh
python3 mo_mxnet.py --input_model model-file-0000.params --output_dir <OUTPUT_MODEL_DIR>
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md
index 79f740b55ecdd4..6ab9ef30e43782 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md
@@ -59,7 +59,7 @@ The Model Optimizer process assumes you have an ONNX model that was directly dow
 To convert an ONNX\* model:
 
-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model with the path to the input model `.onnx` file and an output directory where you have write permissions:
```sh
python3 mo.py --input_model <INPUT_MODEL>.onnx --output_dir <OUTPUT_MODEL_DIR>
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md
index c4721cdead07ee..e162b817aa2dc3 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md
@@ -178,7 +178,7 @@ There are three ways to store non-frozen TensorFlow models and load them to the
 To convert such a TensorFlow model:
 
- 1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory
+ 1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
 2. Run the `mo_tf.py` script with the path to the checkpoint file to convert a model and an output directory where you have write permissions:
 
 * If input model is in `.pb` format:
@@ -200,7 +200,7 @@ python3 mo_tf.py --input_model .pbtxt --input_checkpoint /deployment_tools/model_optimizer` directory + 1. Go to the `/tools/model_optimizer` directory 2. Run the `mo_tf.py` script with a path to the MetaGraph `.meta` file and a writable output directory to convert a model:
```sh python3 mo_tf.py --input_meta_graph .meta --output_dir @@ -212,7 +212,7 @@ python3 mo_tf.py --input_meta_graph .meta --output_dir /deployment_tools/model_optimizer` directory + 1. Go to the `/tools/model_optimizer` directory 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory to convert a model:
```sh python3 mo_tf.py --saved_model_dir --output_dir @@ -251,7 +251,7 @@ Where: To convert a TensorFlow model: -1. Go to the `/deployment_tools/model_optimizer` directory +1. Go to the `/tools/model_optimizer` directory 2. Use the `mo_tf.py` script to simply convert a model with the path to the input model `.pb` file and a writable output directory: ```sh python3 mo_tf.py --input_model .pb --output_dir @@ -342,7 +342,7 @@ Below are the instructions on how to convert each of them. A model in the SavedModel format consists of a directory with a `saved_model.pb` file and two subfolders: `variables` and `assets`. To convert such a model: -1. Go to the `/deployment_tools/model_optimizer` directory. +1. Go to the `/tools/model_optimizer` directory. 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory: ```sh python3 mo_tf.py --saved_model_dir --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md index ed6451a76322d6..5c5c797b5d5836 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md @@ -1,6 +1,6 @@ # Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model} -Use the mo.py script from the `/deployment_tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR). +Use the mo.py script from the `/tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR). The simplest way to convert a model is to run mo.py with a path to the input model file and an output directory where you have write permissions: ```sh python3 mo.py --input_model INPUT_MODEL --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md index 2d267cda3e7172..68deae11bcb109 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md @@ -5,7 +5,7 @@ To simply convert a model trained by any supported framework, run the Model Opti python3 mo.py --input_model INPUT_MODEL --output_dir ``` -The script is in `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/`. The output directory must have write permissions, so you can run mo.py from the output directory or specify an output path with the `--output_dir` option. +The script is in `$INTEL_OPENVINO_DIR/tools/model_optimizer/`. The output directory must have write permissions, so you can run mo.py from the output directory or specify an output path with the `--output_dir` option. > **NOTE:** The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion specifying the command-line parameter: `--reverse_input_channels`. Otherwise, inference results may be incorrect. For details, refer to [When to Reverse Input Channels](#when_to_reverse_input_channels). 
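After this relocation only the `cd` step of a typical conversion changes; the sketch below makes that concrete. The model file and output directory are illustrative placeholders, not values from this patch.

```sh
# Illustrative only: the Model Optimizer entry point moves, its interface does not.
cd <INSTALL_DIR>/tools/model_optimizer    # was <INSTALL_DIR>/deployment_tools/model_optimizer
python3 mo.py --input_model <INPUT_MODEL>.pb --output_dir <OUTPUT_MODEL_DIR>
```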
diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md index d86368a9f708f5..049d66e0d81e06 100644 --- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md @@ -39,7 +39,7 @@ In the TensorBoard, it looks the following way together with some predecessors: Convert this model and put the results in a writable output directory: ```sh -${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer +${INTEL_OPENVINO_DIR}/tools/model_optimizer python3 mo.py --input_model inception_v1.pb -b 1 --output_dir ``` (The other examples on this page assume that you first cd to the `model_optimizer` directory and add the `--output_dir` argument with a directory where you have write permissions.) diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md index b78ec640cba19c..fe829c1c21cbd3 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md @@ -47,9 +47,9 @@ As a result the frozen model file `savedmodeldir/efficientdet-d4_frozen.pb` will To generate the IR of the EfficientDet TensorFlow model, run:
```sh -python3 $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/mo.py \ +python3 $INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py \ --input_model savedmodeldir/efficientdet-d4_frozen.pb \ ---transformations_config $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \ +--transformations_config $INTEL_OPENVINO_DIR/tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \ --input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \ --reverse_input_channels ``` diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md index 6feec5f627a82e..41be67099dac56 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md @@ -12,10 +12,10 @@ You can download TensorFlow\* Object Detection API models from the @@ -120,7 +120,7 @@ There are a number of important notes about feeding input images to the samples: This section is intended for users who want to understand how the Model Optimizer performs Object Detection API models conversion in details. The knowledge given in this section is also useful for users having complex models that are not converted with the Model Optimizer out of the box. It is highly recommended to read [Sub-Graph Replacement in Model Optimizer](../../customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md) chapter first to understand sub-graph replacement concepts which are used here. -Implementation of the sub-graph replacers for Object Detection API models is located in the file `/deployment_tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`. +Implementation of the sub-graph replacers for Object Detection API models is located in the file `/tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`. It is also important to open the model in the [TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard) to see the topology structure. Model Optimizer can create an event file that can be then fed to the TensorBoard* tool. Run the Model Optimizer with providing two command line parameters: * `--input_model ` --- Path to the frozen model @@ -141,7 +141,7 @@ Model Optimizer cannot convert the part of the `Preprocessor` block performing s The `Preprocessor` block has two outputs: the tensor with pre-processed image(s) data and a tensor with pre-processed image(s) size(s). While converting the model, Model Optimizer keeps only the nodes producing the first tensor. The second tensor is a constant which can be obtained from the `pipeline.config` file to be used in other replacers. 
-The implementation of the `Preprocessor` block sub-graph replacer is the following (file `/deployment_tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`): +The implementation of the `Preprocessor` block sub-graph replacer is the following (file `/tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`): ```python class ObjectDetectionAPIPreprocessorReplacement(FrontReplacementFromConfigFileSubGraph): diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md index 653165576ce125..fb31fba111a078 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md @@ -57,7 +57,7 @@ python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weig ### Convert YOLOv3 TensorFlow Model to the IR -To solve the problems explained in the YOLOv3 architecture overview section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on a model) configuration file with custom operations located in the `/deployment_tools/model_optimizer/extensions/front/tf` repository. +To solve the problems explained in the YOLOv3 architecture overview section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on a model) configuration file with custom operations located in the `/tools/model_optimizer/extensions/front/tf` repository. It consists of several attributes:
```sh
@@ -158,7 +158,7 @@ Converted TensorFlow YOLO model is missing `Region` layer and its parameters. Or
 file under the `[region]` title.
 
 To recreate the original model structure, use the corresponding yolo `.json` configuration file with custom operations and `Region` layer
-parameters when converting the model to the IR. This file is located in the `<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf` directory.
+parameters when converting the model to the IR. This file is located in the `<INSTALL_DIR>/tools/model_optimizer/extensions/front/tf` directory.
 
 If the chosen model has specific values of these parameters, create another configuration file with custom operations and use it for conversion.
@@ -169,7 +169,7 @@
 python3 ./mo_tf.py --input_model <path_to_model>/<model_name>.pb \
 --batch 1 \
 --scale 255 \
---transformations_config <INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/<config_file>.json
+--transformations_config <INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/<config_file>.json
 ```
 
 where:
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
index e4a71a8fdc9298..579437aeb5a98a 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
@@ -35,7 +35,7 @@ operation.
 
 Here is a simplified example of the extractor for the custom operation Proposal from Faster-R-CNN model mentioned above. The full code with additional checks is provided in the
-`<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses
+`<INSTALL_DIR>/tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses
 operation `ProposalOp` which corresponds to `Proposal` operation described in the [Available Operations
 Sets](../../../ops/opset.md) document. Refer to the source code below for a detailed explanation of the extractor.
diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md
index d64d63ed2fccf9..26522956305b96 100644
--- a/docs/get_started/get_started_linux.md
+++ b/docs/get_started/get_started_linux.md
@@ -31,7 +31,7 @@ For simplicity, a symbolic link to the latest installation is also created: `/ho
 If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<user>/` with the directory in which you installed the software.
 
-The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/deployment_tools` directory.
+The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/tools` directory.
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -235,7 +235,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2021/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -339,7 +339,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -352,7 +352,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -530,7 +530,7 @@ To build all the demos and samples: cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp # to compile C samples, go here also: cd /inference_engine/samples/c build_samples.sh -cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos +cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos build_demos.sh ``` diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index a15240a1c9b9c4..36b5ef71b3da6c 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -31,7 +31,7 @@ For simplicity, a symbolic link to the latest installation is also created: `/ho If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. -The primary tools for deploying your models and applications are installed to the `/deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `/tools` directory.
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2021/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -326,7 +326,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -339,7 +339,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -484,7 +484,7 @@ To build all the demos and samples: cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp # to compile C samples, go here also: cd /inference_engine/samples/c build_samples.sh -cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos +cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos build_demos.sh ``` @@ -503,7 +503,7 @@ Template to call sample code or a demo application: With the sample information specified, the command might look like this: ```sh -cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos/object_detection_demo +cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos/object_detection_demo ./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU ``` diff --git a/docs/get_started/get_started_raspbian.md b/docs/get_started/get_started_raspbian.md index 5f3baf87d2f638..b19ab5a0930d5d 100644 --- a/docs/get_started/get_started_raspbian.md +++ b/docs/get_started/get_started_raspbian.md @@ -22,9 +22,9 @@ This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit The OpenVINO toolkit for Raspbian* OS is distributed without installer. This document refers to the directory to which you unpacked the toolkit package as ``. -The primary tools for deploying your models and applications are installed to the `/deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `/tools` directory.
- Click for the deployment_tools directory structure + Click for the tools directory structure | Directory         | Description | @@ -62,7 +62,7 @@ Follow the steps below to run pre-trained Face Detection network using Inference ``` 2. Build the Object Detection Sample with the following command: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/samples/cpp make -j2 object_detection_sample_ssd ``` 3. Download the pre-trained Face Detection model with the [Model Downloader tool](@ref omz_tools_downloader): diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index 253af476efb186..ffe86367a2501b 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -26,9 +26,9 @@ This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_`, referred to as ``. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. -The primary tools for deploying your models and applications are installed to the `\deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `\tools` directory.
- Click for the deployment_tools directory structure + Click for the tools directory structure | Directory         | Description | @@ -323,7 +323,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```bat - cd \deployment_tools\model_optimizer + cd \tools\model_optimizer ``` ```bat python .\mo.py --input_model \ --data_type --output_dir @@ -336,7 +336,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `C:\Users\\Documents\models\public\squeezenet1.1\ir` output directory: ```bat - cd \deployment_tools\model_optimizer + cd \tools\model_optimizer ``` ```bat python .\mo.py --input_model C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir @@ -492,7 +492,7 @@ To build all the demos and samples: cd $INTEL_OPENVINO_DIR\inference_engine_samples\cpp # to compile C samples, go here also: cd \inference_engine\samples\c build_samples_msvc.bat -cd $INTEL_OPENVINO_DIR\deployment_tools\open_model_zoo\demos +cd $INTEL_OPENVINO_DIR\extras\open_model_zoo\demos build_demos_msvc.bat ``` diff --git a/docs/how_tos/POT_how_to_example.md b/docs/how_tos/POT_how_to_example.md index 28adc19062b4a6..2c80b4f91359b8 100644 --- a/docs/how_tos/POT_how_to_example.md +++ b/docs/how_tos/POT_how_to_example.md @@ -20,17 +20,17 @@ export OV=/opt/intel/openvino_2021/ ``` 2. Install the Model Optimizer prerequisites: ``` -cd $OV/deployment_tools/model_optimizer/install_prerequisites +cd $OV/tools/model_optimizer/install_prerequisites sudo ./install_prerequisites.sh ``` 3. Install the Accuracy Checker requirements: ``` -cd $OV/deployment_tools/open_model_zoo/tools/accuracy_checker +cd $OV/extras/open_model_zoo/tools/accuracy_checker sudo python3 setup.py install ``` 4. Install the Post-training Optimization Tool: ``` -cd $OV/deployment_tools/tools/post_training_optimization_toolkit +cd $OV/tools/post_training_optimization_toolkit sudo python3 setup.py install ``` @@ -53,7 +53,7 @@ python3 $OV/deployment_tools/tools/model_downloader/downloader.py --name mobilen Install requirements for PyTorch using the commands below: ``` -cd $OV/deployment_tools/open_model_zoo/tools/downloader +cd $OV/extras/open_model_zoo/tools/downloader ``` ``` python3 -mpip install --user -r ./requirements-pytorch.in @@ -61,13 +61,13 @@ python3 -mpip install --user -r ./requirements-pytorch.in You can find the parameters for Mobilnet v2 conversion here: ``` -vi /opt/intel/openvino_2021/deployment_tools/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml +vi /opt/intel/openvino_2021/extras/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml ``` Convert the model from PyTorch to ONNX*: ``` cd ~/POT/public/mobilenet-v2-pytorch -python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ +python3 /opt/intel/openvino_2021/extras/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ --model-name=MobileNetV2 \ --model-path=. 
\ --weights=mobilenet-v2.pth \ @@ -100,17 +100,17 @@ mv mobilenet-v2.bin ~/POT/model.bin Edit the configuration files: ``` -sudo vi $OV/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +sudo vi $OV/extras/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml (edit imagenet_1000_classes) ``` ``` -export DEFINITIONS_FILE=/opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +export DEFINITIONS_FILE=/opt/intel/openvino_2021/extras/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml ``` Copy the JSON file to my directory and edit: ``` -cp $OV/deployment_tools/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT +cp $OV/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT ``` ``` vi mobilenetV2_pytorch_int8.json @@ -119,7 +119,7 @@ vi mobilenetV2_pytorch_int8.json Copy the YML file to my directory and edit: ``` -cp /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT +cp /opt/intel/openvino_2021/extras/open_model_zoo/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT ``` ``` vi mobilenet-v2.yml diff --git a/docs/install_guides/PAC_Configure_2018R5.md b/docs/install_guides/PAC_Configure_2018R5.md index 1378c0c6f2cb09..bfbb1be1638625 100644 --- a/docs/install_guides/PAC_Configure_2018R5.md +++ b/docs/install_guides/PAC_Configure_2018R5.md @@ -175,7 +175,7 @@ export ALTERAOCLSDKROOT="\$INTELFPGAOCLSDKROOT" export AOCL_BOARD_PACKAGE_ROOT="\$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" \$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh source $INTELFPGAOCLSDKROOT/init_opencl.sh -export IE_INSTALL="/opt/intel/openvino/deployment_tools" +export IE_INSTALL="/opt/intel/openvino" source \$IE_INSTALL/../bin/setupvars.sh export PATH="\$PATH:\$HOME/inference_engine_samples/intel64/Release" alias mo="python3.6 \$IE_INSTALL/model_optimizer/mo.py" diff --git a/docs/install_guides/PAC_Configure_2019RX.md b/docs/install_guides/PAC_Configure_2019RX.md index 150ca475d65a8e..1cb2c40cba393a 100644 --- a/docs/install_guides/PAC_Configure_2019RX.md +++ b/docs/install_guides/PAC_Configure_2019RX.md @@ -167,10 +167,10 @@ export ALTERAOCLSDKROOT="$INTELFPGAOCLSDKROOT" export AOCL_BOARD_PACKAGE_ROOT="$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" $AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh source $INTELFPGAOCLSDKROOT/init_opencl.sh -export IE_INSTALL="/opt/intel/openvino/deployment_tools" -source $IE_INSTALL/../bin/setupvars.sh +export IE_INSTALL="/opt/intel/openvino" +source $IE_INSTALL/setupvars.sh export PATH="$PATH:$HOME/inference_engine_samples_build/intel64/Release" -alias mo="python3.6 $IE_INSTALL/model_optimizer/mo.py" +alias mo="python3.6 $IE_INSTALL/tools/model_optimizer/mo.py" ``` For Ubuntu systems, it is recommended to use python3.5 above instead of python3.6. diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md index 328c824fa35967..24d3c159bf3802 100644 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md +++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md @@ -282,7 +282,7 @@ cd /home//squeezenet1.1_FP16 3. 
Use the Model Optimizer to convert an FP16 SqueezeNet Caffe* model into an optimized Intermediate Representation (IR):
```sh
-python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /home/<user>/openvino_models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
+python3 /opt/intel/openvino/tools/model_optimizer/mo.py --input_model /home/<user>/openvino_models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
```
4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to your optimized model location:
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md
index 8de131e8c45161..97f09f779bf8f4 100644
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md
+++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md
@@ -233,7 +233,7 @@ cd /home/<user>/squeezenet1.1_FP16
 3. Use the Model Optimizer to convert the FP32 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). To convert, run the Model Optimizer script with the following arguments:
```sh
-python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /home/<user>/openvino_models/models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
+python3 /opt/intel/openvino/tools/model_optimizer/mo.py --input_model /home/<user>/openvino_models/models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
```
4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to your optimized model location:
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
index 06d8ebbc86939a..cd7a750b984b71 100644
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
+++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
@@ -238,7 +238,7 @@ cd ~/squeezenet1.1_FP16
 3. Use the Model Optimizer to convert the FP32 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). To convert, run the Model Optimizer script with the following arguments:
```sh
-python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model ~/openvino_models/models/FP16/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
+python3 /opt/intel/openvino/tools/model_optimizer/mo.py --input_model ~/openvino_models/models/FP16/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
```
4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers.
Copy `squeezenet1.1.labels` to your optimized model location:
diff --git a/docs/install_guides/deployment-manager-tool.md b/docs/install_guides/deployment-manager-tool.md
index 0989a3d5929c57..00fb5e24c27520 100644
--- a/docs/install_guides/deployment-manager-tool.md
+++ b/docs/install_guides/deployment-manager-tool.md
@@ -2,7 +2,7 @@
 The Deployment Manager of Intel® Distribution of OpenVINO™ creates a deployment package by assembling the model, IR files, your application, and associated dependencies into a runtime package for your target device.
 
-The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `<INSTALL_DIR>/deployment_tools/tools/deployment_manager` directory.
+The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `<INSTALL_DIR>/tools/deployment_manager` directory.
 
 ## Pre-Requisites
@@ -32,7 +32,7 @@ Interactive mode provides a user-friendly command-line interface that will guide
 1. To launch the Deployment Manager in the interactive mode, open a new terminal window, go to the Deployment Manager tool directory and run the tool script without parameters:
 ```sh
-<INSTALL_DIR>/deployment_tools/tools/deployment_manager
+<INSTALL_DIR>/tools/deployment_manager
 ```
 ```sh
 ./deployment_manager.py
 ```
@@ -94,7 +94,7 @@ To deploy the Inference Engine components from the development machine to the ta
 The package is unpacked to the destination directory and the following subdirectories are created:
 * `bin` — Snapshot of the `bin` directory from the OpenVINO installation directory.
-  * `deployment_tools/inference_engine` — Contains the Inference Engine binary files.
+  * `runtime` — Contains the OpenVINO runtime binary files.
 * `install_dependencies` — Snapshot of the `install_dependencies` directory from the OpenVINO installation directory.
 * `<user_data>` — The directory with the user data (IRs, datasets, etc.) you specified while configuring the package.
 3. For Linux, to run inference on a target Intel® GPU, Intel® Movidius™ VPU, or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you need to install additional dependencies by running the `install_openvino_dependencies.sh` script:
diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md
index df87cd3d442c1d..66f01eb0299872 100644
--- a/docs/install_guides/installing-openvino-docker-linux.md
+++ b/docs/install_guides/installing-openvino-docker-linux.md
@@ -150,7 +150,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
 WORKDIR /opt/libusb-1.0.22/
 RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
-    cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
+    cp /opt/intel/openvino_2021/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
     ldconfig
 ```
 - **CentOS 7**:
@@ -184,7 +184,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
 WORKDIR /opt/libusb-1.0.22/
 RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
-    cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
+    cp /opt/intel/openvino_2021/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
     ldconfig
 ```
2.
Run the Docker* image: diff --git a/docs/install_guides/installing-openvino-linux-ivad-vpu.md b/docs/install_guides/installing-openvino-linux-ivad-vpu.md index cd86804307c7fe..a6bc6032273b18 100644 --- a/docs/install_guides/installing-openvino-linux-ivad-vpu.md +++ b/docs/install_guides/installing-openvino-linux-ivad-vpu.md @@ -13,7 +13,7 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi ```sh source /opt/intel/openvino_2021/bin/setupvars.sh ``` -> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/deployment_tools/inference_engine/external/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//deployment_tools/inference_engine/external/hddl`. +> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//3rdparty/hddl`. 2. Install dependencies: ```sh diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index a78fa8fc43d7a1..8d2d5acbfbd12d 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -218,7 +218,7 @@ You can choose to either configure all supported frameworks at once **OR** confi 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -232,7 +232,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index d878eac5c3a84a..94eb42b2c1a5da 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -196,7 +196,7 @@ You can choose to either configure the Model Optimizer for all supported framewo 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -210,7 +210,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. 
You can run more than one script: diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 61cff12e424760..0cabee7b1f76ba 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -140,7 +140,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc ``` 2. Build the Object Detection Sample: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/samples/cpp ``` ```sh make -j2 object_detection_sample_ssd diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 054950292b6337..e74ac276c97ec8 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -153,7 +153,7 @@ The Model Optimizer is a key component of the Intel® Distribution of OpenVINO The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware. -The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use. +The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2021\tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use. This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page. @@ -182,7 +182,7 @@ Type commands in the opened window: 2. Go to the Model Optimizer prerequisites directory.
```sh -cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2021\tools\model_optimizer\install_prerequisites ``` 3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:
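The batch file referenced in the next hunk, `install_prerequisites.bat`, is itself untouched by this patch; only the directory it is launched from changes. A minimal sketch of the step against the relocated tree, reusing the default install path shown above:

```sh
cd C:\Program Files (x86)\Intel\openvino_2021\tools\model_optimizer\install_prerequisites
install_prerequisites.bat
```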
@@ -194,7 +194,7 @@ install_prerequisites.bat 1. Go to the Model Optimizer prerequisites directory:
```sh -cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2021\tools\model_optimizer\install_prerequisites ``` 2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one: @@ -275,7 +275,7 @@ To perform inference on Intel® Vision Accelerator Design with Intel® Movidius 1. Download and install Visual C++ Redistributable for Visual Studio 2017 2. Check with a support engineer if your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires SMBUS connection to PCIe slot (most unlikely). Install the SMBUS driver only if confirmed (by default, it's not required): - 1. Go to the `\deployment_tools\inference-engine\external\hddl\drivers\SMBusDriver` directory, where `` is the directory in which the Intel Distribution of OpenVINO toolkit is installed. + 1. Go to the `\runtime\3rdparty\hddl\drivers\SMBusDriver` directory, where `` is the directory in which the Intel Distribution of OpenVINO toolkit is installed. 2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop up menu. You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. diff --git a/docs/optimization_guide/dldt_optimization_guide.md b/docs/optimization_guide/dldt_optimization_guide.md index 9ece7fec93a628..a67a29a9058fd8 100644 --- a/docs/optimization_guide/dldt_optimization_guide.md +++ b/docs/optimization_guide/dldt_optimization_guide.md @@ -325,7 +325,7 @@ Other than that, when implementing the kernels, you can try the methods from the ### Few Device-Specific Tips - As already outlined in the CPU Checklist, align the threading model that you use in your CPU kernels with the model that the rest of the Inference Engine compiled with. -- For CPU extensions, consider kernel flavor that supports blocked layout, if your kernel is in the hotspots (see Internal Inference Performance Counters). Since Intel MKL-DNN internally operates on the blocked layouts, this would save you a data packing (Reorder) on tensor inputs/outputs of your kernel. For example of the blocked layout support, please, refer to the extensions in the `/deployment_tools/samples/extension/`. +- For CPU extensions, consider kernel flavor that supports blocked layout, if your kernel is in the hotspots (see Internal Inference Performance Counters). Since Intel MKL-DNN internally operates on the blocked layouts, this would save you a data packing (Reorder) on tensor inputs/outputs of your kernel. 
## Plugging Inference Engine to Applications diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt index 799a2c76c48ed3..4d1144e6a00921 100644 --- a/docs/template_plugin/src/CMakeLists.txt +++ b/docs/template_plugin/src/CMakeLists.txt @@ -50,6 +50,5 @@ endif() # install(TARGETS ${TARGET_NAME} # RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} -# ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} # LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} # COMPONENT ${component_name}) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 1ac7fd8bf62b4a..77df1b77a39ba9 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -73,20 +73,20 @@ endif() ie_cpack_add_component(cpp_samples DEPENDS core) install(DIRECTORY ../thirdparty/zlib - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp/thirdparty + DESTINATION samples/cpp/thirdparty COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN .clang-format EXCLUDE) install(DIRECTORY ../thirdparty/cnpy - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp/thirdparty + DESTINATION samples/cpp/thirdparty COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN .clang-format EXCLUDE) if(UNIX) install(DIRECTORY samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp + DESTINATION samples/cpp COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN *.bat EXCLUDE @@ -94,7 +94,7 @@ if(UNIX) PATTERN .clang-format EXCLUDE) elseif(WIN32) install(DIRECTORY samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp + DESTINATION samples/cpp COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN *.sh EXCLUDE @@ -108,22 +108,22 @@ ie_cpack_add_component(c_samples DEPENDS core_c) if(UNIX) install(PROGRAMS samples/build_samples.sh - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) elseif(WIN32) install(PROGRAMS samples/build_samples_msvc.bat - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) endif() install(DIRECTORY ie_bridges/c/samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples PATTERN ie_bridges/c/samples/CMakeLists.txt EXCLUDE PATTERN ie_bridges/c/samples/.clang-format EXCLUDE) install(FILES samples/CMakeLists.txt - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) # install speech demo files @@ -133,7 +133,7 @@ if(SPEECH_LIBS_AND_DEMOS) install(DIRECTORY ${TEMP}/deployment_tools ${TEMP}/data_processing - DESTINATION . 
+ DESTINATION extras/open_model_zoo USE_SOURCE_PERMISSIONS COMPONENT speech_demo_files) endif() diff --git a/inference-engine/cmake/dependencies.cmake b/inference-engine/cmake/dependencies.cmake index b270c46f2da7cc..11f8e54219fb39 100644 --- a/inference-engine/cmake/dependencies.cmake +++ b/inference-engine/cmake/dependencies.cmake @@ -72,7 +72,7 @@ if (THREADING STREQUAL "OMP") ie_cpack_add_component(omp REQUIRED) file(GLOB_RECURSE source_list "${OMP}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") install(FILES ${source_list} - DESTINATION "deployment_tools/inference_engine/external/omp/lib" + DESTINATION "3rdparty/omp/lib" COMPONENT omp) endif () diff --git a/inference-engine/ie_bridges/c/src/CMakeLists.txt b/inference-engine/ie_bridges/c/src/CMakeLists.txt index 69760a52de96a6..3fcf707b63bfc3 100644 --- a/inference-engine/ie_bridges/c/src/CMakeLists.txt +++ b/inference-engine/ie_bridges/c/src/CMakeLists.txt @@ -14,7 +14,7 @@ add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES}) target_link_libraries(${TARGET_NAME} PRIVATE inference_engine) target_include_directories(${TARGET_NAME} PUBLIC - $ + $ $) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) @@ -40,5 +40,5 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c) install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/ - DESTINATION ${IE_CPACK_IE_DIR}/include + DESTINATION runtime/include COMPONENT core_c_dev) diff --git a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt index b8216b0cb3435a..5ba1c8c43e076b 100644 --- a/inference-engine/ie_bridges/python/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/CMakeLists.txt @@ -93,7 +93,7 @@ install(PROGRAMS src/openvino/__init__.py ie_cpack_add_component(python_samples) install(DIRECTORY sample/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/python + DESTINATION samples/python COMPONENT python_samples) ie_cpack(${PYTHON_VERSION} python_samples) diff --git a/inference-engine/ie_bridges/python/wheel/.env.in b/inference-engine/ie_bridges/python/wheel/.env.in index 9ba0660d5d2557..760f8bcb358f29 100644 --- a/inference-engine/ie_bridges/python/wheel/.env.in +++ b/inference-engine/ie_bridges/python/wheel/.env.in @@ -9,8 +9,6 @@ WHEEL_REQUIREMENTS=@WHEEL_REQUIREMENTS@ WHEEL_OVERVIEW=@WHEEL_OVERVIEW@ CMAKE_BUILD_DIR=@CMAKE_BINARY_DIR@ -CORE_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@ -PLUGINS_LIBS_DIR=@PLUGINS_LIBS_DIR@ -NGRAPH_LIBS_DIR=@NGRAPH_LIBS_DIR@ +OV_RUNTIME_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@ TBB_LIBS_DIR=@TBB_LIBS_DIR@ PY_PACKAGES_DIR=@PY_PACKAGES_DIR@ diff --git a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt index 681954f2766d77..3045ee65c44390 100644 --- a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt @@ -18,11 +18,8 @@ set(WHEEL_OVERVIEW "${CMAKE_CURRENT_SOURCE_DIR}/meta/pypi_overview.md" CACHE STR set(SETUP_PY "${CMAKE_CURRENT_SOURCE_DIR}/setup.py") set(SETUP_ENV "${CMAKE_CURRENT_SOURCE_DIR}/.env.in") -set(CORE_LIBS_DIR ${IE_CPACK_RUNTIME_PATH}) -set(PLUGINS_LIBS_DIR ${IE_CPACK_RUNTIME_PATH}) -set(NGRAPH_LIBS_DIR deployment_tools/ngraph/lib) set(PY_PACKAGES_DIR ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}) -set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/lib) +set(TBB_LIBS_DIR runtime/3rdparty/tbb/lib) if(APPLE) set(WHEEL_PLATFORM macosx_10_15_x86_64) @@ -30,7 +27,7 @@ elseif(UNIX) set(WHEEL_PLATFORM 
manylinux2014_x86_64) elseif(WIN32) set(WHEEL_PLATFORM win_amd64) - set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/bin) + set(TBB_LIBS_DIR runtime/3rdparty/tbb/bin) else() message(FATAL_ERROR "This platform is not supported") endif() diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index dc177be7d31375..49d1b1eecd7a40 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -26,9 +26,7 @@ # The following variables can be defined in environment or .env file CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.') -CORE_LIBS_DIR = config('CORE_LIBS_DIR', '') -PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', '') -NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', '') +OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', '') TBB_LIBS_DIR = config('TBB_LIBS_DIR', '') PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', '') LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path' @@ -37,49 +35,49 @@ 'ie_libs': { 'name': 'core', 'prefix': 'libs.core', - 'install_dir': CORE_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'hetero_plugin': { 'name': 'hetero', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'gpu_plugin': { 'name': 'gpu', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'cpu_plugin': { 'name': 'cpu', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'multi_plugin': { 'name': 'multi', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'auto_plugin': { 'name': 'auto', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'myriad_plugin': { 'name': 'myriad', 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'ngraph_libs': { 'name': 'ngraph', 'prefix': 'libs.ngraph', - 'install_dir': NGRAPH_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'tbb_libs': { diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index 7c61bc570d518e..dfea2262474a4d 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -146,14 +146,14 @@ This section provides step-by-step instructions on how to run the Benchmark Tool 1. Download the model. Go to the Model Downloader directory and run the `downloader.py` script, specifying the model name and the directory to download the model to: ```sh - cd /deployment_tools/open_model_zoo/tools/downloader + cd /extras/open_model_zoo/tools/downloader ``` ```sh python3 downloader.py --name googlenet-v1 -o ``` 2. Convert the model to the Inference Engine IR format.
Go to the Model Optimizer directory and run the `mo.py` script, specifying the path to the model, the model format (which must be FP32 for CPU and FPGA), and the output directory to generate the IR files: ```sh - cd /deployment_tools/model_optimizer + cd /tools/model_optimizer ``` ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir diff --git a/inference-engine/src/gna_plugin/CMakeLists.txt b/inference-engine/src/gna_plugin/CMakeLists.txt index 36b9d6d5cc0b8e..f90cfce5c8a229 100644 --- a/inference-engine/src/gna_plugin/CMakeLists.txt +++ b/inference-engine/src/gna_plugin/CMakeLists.txt @@ -81,5 +81,5 @@ set_target_properties(${TARGET_NAME} ${TARGET_NAME}_test_static file(GLOB_RECURSE source_list "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") install(FILES ${source_list} - DESTINATION ${IE_CPACK_IE_DIR}/external/gna/lib + DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT gna) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index e8ed1a5c4c38f4..7d218e6772371e 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -164,7 +164,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE pugixml openvino::itt ${CMAKE_DL_LI target_include_directories(${TARGET_NAME} INTERFACE $ - $ + $ PRIVATE $ $) @@ -232,27 +232,27 @@ if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND TBBROOT MATCH list(APPEND core_components tbb) install(DIRECTORY "${TBB}/include" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) install(DIRECTORY "${TBB}/lib" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) if(EXISTS "${TBB}/bin") install(DIRECTORY "${TBB}/bin" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) endif() install(FILES "${TBB}/LICENSE" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) - set(IE_TBB_DIR_INSTALL "external/tbb/cmake") + set(IE_TBB_DIR_INSTALL "runtime/3rdparty/tbb/cmake") set(IE_TBB_DIR "${TBB_DIR}") list(APPEND PATH_VARS "IE_TBB_DIR") install(FILES "${TBB}/cmake/TBBConfig.cmake" "${TBB}/cmake/TBBConfigVersion.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/${IE_TBB_DIR_INSTALL} + DESTINATION ${IE_TBB_DIR_INSTALL} COMPONENT tbb) endif() @@ -261,7 +261,8 @@ endif() ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) ie_cpack_add_component(core_dev REQUIRED core ngraph_dev) -install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include" DESTINATION ${IE_CPACK_IE_DIR} +install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include" + DESTINATION runtime/include/ie COMPONENT core_dev) install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets @@ -278,7 +279,7 @@ install(FILES $/plugins.xml install(EXPORT InferenceEngineTargets FILE InferenceEngineTargets.cmake NAMESPACE IE:: - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) set(IE_NGRAPH_DIR "${CMAKE_BINARY_DIR}/ngraph") @@ -307,5 +308,5 @@ configure_file("${IE_MAIN_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-vers install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" "${InferenceEngine_SOURCE_DIR}/cmake/ie_parallel.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) diff --git a/inference-engine/src/vpu/CMakeLists.txt
b/inference-engine/src/vpu/CMakeLists.txt index 00069d2a262c43..b634d8d7b3bd9c 100644 --- a/inference-engine/src/vpu/CMakeLists.txt +++ b/inference-engine/src/vpu/CMakeLists.txt @@ -29,7 +29,7 @@ if(ENABLE_MYRIAD) DESTINATION ${IE_CPACK_LIBRARY_PATH}/vpu_custom_kernels COMPONENT myriad) install(DIRECTORY ${VPU_CLC_MA2X8X_ROOT}/ - DESTINATION deployment_tools/tools/cl_compiler + DESTINATION tools/cl_compiler COMPONENT myriad PATTERN ie_dependency.info EXCLUDE) endif() diff --git a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt index a3e0182ed9a65a..7b5ec9d1ecfb54 100644 --- a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt +++ b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt @@ -55,6 +55,6 @@ set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_REL # install if (LINUX) install(FILES ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/src/97-myriad-usbboot.rules - DESTINATION ${IE_CPACK_IE_DIR}/external + DESTINATION runtime/3rdparty COMPONENT myriad) endif() diff --git a/inference-engine/tools/benchmark_tool/README.md b/inference-engine/tools/benchmark_tool/README.md index 1eacb8f56adc5b..57e0640cd1e00c 100644 --- a/inference-engine/tools/benchmark_tool/README.md +++ b/inference-engine/tools/benchmark_tool/README.md @@ -157,14 +157,14 @@ This section provides step-by-step instructions on how to run the Benchmark Tool 1. Download the model. Go to the Model Downloader directory and run the `downloader.py` script, specifying the model name and the directory to download the model to: ```sh - cd /deployment_tools/open_model_zoo/tools/downloader + cd /extras/open_model_zoo/tools/downloader ``` ```sh python3 downloader.py --name googlenet-v1 -o ``` 2. Convert the model to the Inference Engine IR format. Go to the Model Optimizer directory and run the `mo.py` script, specifying the path to the model, the model format (which must be FP32 for CPU and FPGA), and the output directory to generate the IR files: ```sh - cd /deployment_tools/model_optimizer + cd /tools/model_optimizer ``` ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir diff --git a/inference-engine/tools/compile_tool/CMakeLists.txt b/inference-engine/tools/compile_tool/CMakeLists.txt index 6e6f384289ee22..c76754979b20ca 100644 --- a/inference-engine/tools/compile_tool/CMakeLists.txt +++ b/inference-engine/tools/compile_tool/CMakeLists.txt @@ -41,9 +41,9 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) ie_cpack_add_component(core_tools DEPENDS core) install(TARGETS compile_tool - RUNTIME DESTINATION deployment_tools/tools/compile_tool + RUNTIME DESTINATION tools/compile_tool COMPONENT core_tools) install(FILES README.md - DESTINATION deployment_tools/tools/compile_tool + DESTINATION tools/compile_tool COMPONENT core_tools) diff --git a/inference-engine/tools/compile_tool/README.md b/inference-engine/tools/compile_tool/README.md index 0b083e15dc1e1a..99b282f89844fb 100644 --- a/inference-engine/tools/compile_tool/README.md +++ b/inference-engine/tools/compile_tool/README.md @@ -11,7 +11,7 @@ The tool compiles networks for the following target devices using corresponding The tool is delivered as an executable file that can be run on both Linux* and Windows*. -The tool is located in the `/deployment_tools/tools/compile_tool` directory. +The tool is located in the `/tools/compile_tool` directory.
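As a quick sanity check after installation, the relocated tool can be run from its new directory. A minimal sketch, assuming a Linux package whose root is written here as `INSTALL_DIR` and assuming the binary prints its usage when given `-h` (an assumption, not taken from this patch):

```sh
# Invoke the compile tool from the new flat layout
cd INSTALL_DIR/tools/compile_tool
./compile_tool -h
```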
The workflow of the Compile tool is as follows: diff --git a/model-optimizer/CMakeLists.txt b/model-optimizer/CMakeLists.txt index b0e75505cc2cd3..cccf75ff00cd48 100644 --- a/model-optimizer/CMakeLists.txt +++ b/model-optimizer/CMakeLists.txt @@ -18,7 +18,7 @@ configure_file( @ONLY) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ - DESTINATION deployment_tools/model_optimizer + DESTINATION tools/model_optimizer USE_SOURCE_PERMISSIONS COMPONENT model_optimizer PATTERN ".*" EXCLUDE diff --git a/model-optimizer/README.md b/model-optimizer/README.md index bedd7dea26b8bf..7731565ad01e61 100644 --- a/model-optimizer/README.md +++ b/model-optimizer/README.md @@ -10,7 +10,7 @@ Model Optimizer requires: 1. Go to the Model Optimizer folder:
-    cd PATH_TO_INSTALL_DIR/deployment_tools/model_optimizer
+    cd PATH_TO_INSTALL_DIR/tools/model_optimizer
 
2. Create virtual environment and activate it. This option is strongly recommended as it creates a Python sandbox and diff --git a/model-optimizer/extensions/analysis/tf_retinanet.py b/model-optimizer/extensions/analysis/tf_retinanet.py index 35fdcf9d49c04a..6d78c81f5905bd 100644 --- a/model-optimizer/extensions/analysis/tf_retinanet.py +++ b/model-optimizer/extensions/analysis/tf_retinanet.py @@ -59,7 +59,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /.pb\n" \ "\t--input_shape [1,600,600,3]\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/retinanet.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/retinanet.json\n" \ "\t--reverse_input_channels" return {'model_type': {'TF_RetinaNet': result}}, message diff --git a/model-optimizer/extensions/analysis/tf_yolo.py b/model-optimizer/extensions/analysis/tf_yolo.py index 626187b25f508b..f409cf283135b0 100644 --- a/model-optimizer/extensions/analysis/tf_yolo.py +++ b/model-optimizer/extensions/analysis/tf_yolo.py @@ -72,7 +72,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide TensorFlow YOLOv1 or YOLOv2 Model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /.pb\n" \ "\t--batch 1\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/.json\n" \ "All detailed information about conversion of this model can be found at\n" \ "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message @@ -99,7 +99,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide TensorFlow YOLOv3 Model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /yolo_v3.pb\n" \ "\t--batch 1\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/yolo_v3.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/yolo_v3.json\n" \ "Detailed information about conversion of this model can be found at\n" \ "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message diff --git a/ngraph/CMakeLists.txt b/ngraph/CMakeLists.txt index 55e218daa2c924..99d24c366a13b1 100644 --- a/ngraph/CMakeLists.txt +++ b/ngraph/CMakeLists.txt @@ -184,12 +184,6 @@ if (DEFINED NGRAPH_INSTALL_PREFIX) endif() message(STATUS "Installation directory: ${CMAKE_INSTALL_PREFIX}") -# Destinations -set(NGRAPH_INSTALL_LIB "deployment_tools/ngraph/${CMAKE_INSTALL_LIBDIR}") -set(NGRAPH_INSTALL_INCLUDE "deployment_tools/ngraph/${CMAKE_INSTALL_INCLUDEDIR}") -set(NGRAPH_INSTALL_DOC "deployment_tools/ngraph/${CMAKE_INSTALL_DOCDIR}") -set(NGRAPH_INSTALL_BIN "deployment_tools/ngraph/${CMAKE_INSTALL_BINDIR}") - #----------------------------------------------------------------------------------------------- # Compile Flags for nGraph... 
#----------------------------------------------------------------------------------------------- @@ -274,7 +268,7 @@ if (NGRAPH_EXPORT_TARGETS_ENABLE) install(EXPORT ngraphTargets FILE ngraphTargets.cmake NAMESPACE ngraph:: - DESTINATION "deployment_tools/ngraph/cmake" + DESTINATION "runtime/cmake" COMPONENT ngraph_dev) endif() @@ -288,7 +282,7 @@ if (NGRAPH_EXPORT_TARGETS_ENABLE) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ngraphConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/ngraphConfigVersion.cmake - DESTINATION "deployment_tools/ngraph/cmake" + DESTINATION "runtime/cmake" COMPONENT ngraph_dev) endif() @@ -336,10 +330,6 @@ endif() add_subdirectory(test) -if (NGRAPH_PYTHON_BUILD_ENABLE) +if(NGRAPH_PYTHON_BUILD_ENABLE) add_subdirectory(python) endif() - -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/VERSION - DESTINATION "deployment_tools/ngraph" - COMPONENT ngraph) diff --git a/ngraph/cmake/external_onnx.cmake b/ngraph/cmake/external_onnx.cmake index 2629d489fc5fd4..8e35e849fcfb24 100644 --- a/ngraph/cmake/external_onnx.cmake +++ b/ngraph/cmake/external_onnx.cmake @@ -48,9 +48,9 @@ macro(onnx_set_target_properties) target_compile_definitions(onnx PUBLIC ONNX_BUILD_SHARED_LIBS) install(TARGETS onnx_proto - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) if (NGRAPH_EXPORT_TARGETS_ENABLE) export(TARGETS onnx onnx_proto NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") diff --git a/ngraph/cmake/external_protobuf.cmake b/ngraph/cmake/external_protobuf.cmake index 502a23d572f42f..9e72340a86987e 100644 --- a/ngraph/cmake/external_protobuf.cmake +++ b/ngraph/cmake/external_protobuf.cmake @@ -118,9 +118,9 @@ endif() set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE "${PUSH_CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE}") install(TARGETS ${Protobuf_LIBRARIES} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) if (NGRAPH_EXPORT_TARGETS_ENABLE) export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") endif() diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index 72272046b01e9d..69140dad6c8b72 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -79,7 +79,7 @@ set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_precis # Defines macro in C++ to load backend plugin target_include_directories(ngraph PUBLIC $ - $ + $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_BINARY_DIR}/include) @@ -96,11 +96,11 @@ endif() # nGraph install(TARGETS ngraph EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY 
${CMAKE_CURRENT_SOURCE_DIR}/include/ - DESTINATION ${NGRAPH_INSTALL_INCLUDE} + DESTINATION runtime/include COMPONENT ngraph_dev FILES_MATCHING PATTERN "*.hpp" @@ -108,7 +108,7 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ PATTERN "*version.in.hpp" EXCLUDE ) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/ngraph/version.hpp - DESTINATION ${NGRAPH_INSTALL_INCLUDE}/ngraph + DESTINATION runtime/include/ngraph COMPONENT ngraph_dev) set(CPACK_GENERATOR "DEB") diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index 5efd560357b357..30e8f9de6d3f9a 100644 --- a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -27,9 +27,9 @@ if(COMMAND ie_add_vs_version_file) FILEDESCRIPTION "Manager of OpenVINO nGraph Frontends") endif() -set(FRONTEND_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend/frontend_manager") +set(FRONTEND_INSTALL_INCLUDE "runtime/include/ngraph/frontend/frontend_manager") target_include_directories(${TARGET_NAME} PUBLIC $ - $) + $) target_include_directories(${TARGET_NAME} PRIVATE ${NGRAPH_INCLUDE_PATH} ${FRONTEND_INCLUDE_DIR}) target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) diff --git a/ngraph/frontend/onnx_common/CMakeLists.txt b/ngraph/frontend/onnx_common/CMakeLists.txt index cd40569d6b8e05..b09854ad283576 100644 --- a/ngraph/frontend/onnx_common/CMakeLists.txt +++ b/ngraph/frontend/onnx_common/CMakeLists.txt @@ -22,7 +22,7 @@ add_library(ngraph::onnx_common ALIAS ${TARGET_NAME}) set(ONNX_COMMON_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) set(ONNX_COMMON_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src) -set(ONNX_COMMON_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend") +set(ONNX_COMMON_INSTALL_INCLUDE "runtime/include/ngraph/frontend") target_include_directories(${TARGET_NAME} PUBLIC $ $) diff --git a/ngraph/frontend/onnx_editor/CMakeLists.txt b/ngraph/frontend/onnx_editor/CMakeLists.txt index d893f40a4faac2..9b9a2f5d1323ff 100644 --- a/ngraph/frontend/onnx_editor/CMakeLists.txt +++ b/ngraph/frontend/onnx_editor/CMakeLists.txt @@ -27,7 +27,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE onnx_common onnx_importer set(ONNX_EDITOR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) set(ONNX_EDITOR_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src) -set(ONNX_EDITOR_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend") +set(ONNX_EDITOR_INSTALL_INCLUDE "runtime/include/ngraph/frontend") target_include_directories(${TARGET_NAME} PUBLIC $ $) diff --git a/ngraph/frontend/onnx_import/CMakeLists.txt b/ngraph/frontend/onnx_import/CMakeLists.txt index f3ef7b10fe799f..bbd30844e710f8 100644 --- a/ngraph/frontend/onnx_import/CMakeLists.txt +++ b/ngraph/frontend/onnx_import/CMakeLists.txt @@ -48,7 +48,7 @@ endif() target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder PUBLIC ngraph) -set(ONNX_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend") +set(ONNX_INSTALL_INCLUDE "runtime/include/ngraph/frontend") target_include_directories(onnx_importer SYSTEM PUBLIC $ $) @@ -68,9 +68,9 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") endif() install(TARGETS onnx_importer EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + 
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${ONNX_IMPORT_INCLUDE_DIR}/onnx_import DESTINATION ${ONNX_INSTALL_INCLUDE} diff --git a/scripts/demo/demo_benchmark_app.bat b/scripts/demo/demo_benchmark_app.bat index 3ca0d6c7bdc6e0..97ebb1fc4e02b2 100644 --- a/scripts/demo/demo_benchmark_app.bat +++ b/scripts/demo/demo_benchmark_app.bat @@ -91,7 +91,7 @@ if not "%python_ver%"=="okay" ( pip3 install --user -r "%ROOT_DIR%..\open_model_zoo\tools\downloader\requirements.in" if ERRORLEVEL 1 GOTO errorHandling -set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader +set downloader_dir=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader for /F "tokens=* usebackq" %%d in ( `python "%downloader_dir%\info_dumper.py" --name "%model_name%" ^| @@ -121,7 +121,7 @@ echo. echo ###############^|^| Install Model Optimizer prerequisites ^|^|############### echo. CALL :delay 3 -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\install_prerequisites" +cd /d "%INTEL_OPENVINO_DIR%\tools\model_optimizer\install_prerequisites" call install_prerequisites_caffe.bat if ERRORLEVEL 1 GOTO errorHandling @@ -132,8 +132,8 @@ echo. CALL :delay 3 ::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp -echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" -python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 @@ -213,7 +213,7 @@ set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_samples_build" echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... && ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" 
-A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/demo_benchmark_app.sh b/scripts/demo/demo_benchmark_app.sh index 25d1f94ae8eca7..003e2195584d1b 100755 --- a/scripts/demo/demo_benchmark_app.sh +++ b/scripts/demo/demo_benchmark_app.sh @@ -153,7 +153,7 @@ else sudo -E "$pip_binary" install -r "$ROOT_DIR/../open_model_zoo/tools/downloader/requirements.in" fi -downloader_dir="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/downloader" +downloader_dir="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/tools/downloader" model_dir=$("$python_binary" "$downloader_dir/info_dumper.py" --name "$model_name" | "$python_binary" -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])') @@ -168,7 +168,7 @@ if [ ! -e "$ir_dir" ]; then # Step 2. Configure Model Optimizer printf "%s" "${dashes}" printf "Install Model Optimizer dependencies\n\n" - cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites" + cd "${INTEL_OPENVINO_DIR}/tools/model_optimizer/install_prerequisites" . ./install_prerequisites.sh caffe cd "$cur_path" @@ -176,7 +176,7 @@ if [ ! -e "$ir_dir" ]; then printf "%s" "${dashes}" printf "Convert a model with Model Optimizer\n\n" - mo_path="${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py" + mo_path="${INTEL_OPENVINO_DIR}/tools/model_optimizer/mo.py" export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp print_and_run "$python_binary" "$downloader_dir/converter.py" --mo "$mo_path" --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision" @@ -197,7 +197,7 @@ if [ "$OS_PATH" == "x86_64" ]; then NUM_THREADS="-j8" fi -samples_path="${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/samples/cpp" +samples_path="${INTEL_OPENVINO_DIR}/samples/cpp" build_dir="$HOME/inference_engine_samples_build" binaries_dir="${build_dir}/${OS_PATH}/Release" diff --git a/scripts/demo/demo_security_barrier_camera.bat b/scripts/demo/demo_security_barrier_camera.bat index 6e2e1b99bc6400..60e44752118af1 100644 --- a/scripts/demo/demo_security_barrier_camera.bat +++ b/scripts/demo/demo_security_barrier_camera.bat @@ -89,7 +89,7 @@ if not exist "%models_cache%" ( mkdir "%models_cache%" ) -set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader +set downloader_dir=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader for /F "tokens=1,2 usebackq" %%a in ("%ROOT_DIR%demo_security_barrier_camera.conf") do ( echo python "%downloader_dir%\downloader.py" --name "%%b" --output_dir "%models_path%" --cache_dir "%models_cache%" @@ -177,7 +177,7 @@ set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_demos_build" echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... && ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\demos" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\demos" +cd /d "%INTEL_OPENVINO_DIR%\extras\open_model_zoo\demos" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" 
-A %PLATFORM% "%INTEL_OPENVINO_DIR%\extras\open_model_zoo\demos" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh index eaf6cd6784e094..40eace42247a00 100755 --- a/scripts/demo/demo_security_barrier_camera.sh +++ b/scripts/demo/demo_security_barrier_camera.sh @@ -141,7 +141,7 @@ target_precision="FP16" printf "target_precision = %s\n" "${target_precision}" -downloader_dir="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/downloader" +downloader_dir="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/tools/downloader" downloader_path="$downloader_dir/downloader.py" models_path="$HOME/openvino_models/ir" @@ -164,7 +164,7 @@ done < "$ROOT_DIR/demo_security_barrier_camera.conf" printf "%s" "${dashes}" printf "Build Inference Engine demos\n\n" -demos_path="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/demos" +demos_path="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/demos" if ! command -v cmake &>/dev/null; then printf "\n\nCMAKE is not installed. It is required to build Inference Engine demos. Please install it. %s" "${run_again}" diff --git a/scripts/demo/demo_squeezenet_download_convert_run.bat b/scripts/demo/demo_squeezenet_download_convert_run.bat index ad317ab4fd9f3e..9ad1ab9983d59e 100644 --- a/scripts/demo/demo_squeezenet_download_convert_run.bat +++ b/scripts/demo/demo_squeezenet_download_convert_run.bat @@ -87,7 +87,7 @@ if not "%python_ver%"=="okay" ( pip3 install --user -r "%ROOT_DIR%..\open_model_zoo\tools\downloader\requirements.in" if ERRORLEVEL 1 GOTO errorHandling -set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader +set downloader_dir=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader for /F "tokens=* usebackq" %%d in ( `python "%downloader_dir%\info_dumper.py" --name "%model_name%" ^| @@ -117,7 +117,7 @@ echo. echo ###############^|^| Install Model Optimizer prerequisites ^|^|############### echo. CALL :delay 3 -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\install_prerequisites" +cd /d "%INTEL_OPENVINO_DIR%\tools\model_optimizer\install_prerequisites" call install_prerequisites_caffe.bat if ERRORLEVEL 1 GOTO errorHandling @@ -128,8 +128,8 @@ echo. CALL :delay 3 ::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp -echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" -python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 @@ -209,7 +209,7 @@ set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_samples_build" echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... 
&& ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/demo_squeezenet_download_convert_run.sh b/scripts/demo/demo_squeezenet_download_convert_run.sh index 91a9055375b7ef..ab6219d004423b 100755 --- a/scripts/demo/demo_squeezenet_download_convert_run.sh +++ b/scripts/demo/demo_squeezenet_download_convert_run.sh @@ -149,7 +149,7 @@ else sudo -E "$pip_binary" install -r "$ROOT_DIR/../open_model_zoo/tools/downloader/requirements.in" fi -downloader_dir="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/downloader" +downloader_dir="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/tools/downloader" model_dir=$("$python_binary" "$downloader_dir/info_dumper.py" --name "$model_name" | "$python_binary" -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])') @@ -164,7 +164,7 @@ if [ ! -e "$ir_dir" ]; then # Step 2. Configure Model Optimizer printf "%s" "${dashes}" printf "Install Model Optimizer dependencies\n\n" - cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites" + cd "${INTEL_OPENVINO_DIR}/tools/model_optimizer/install_prerequisites" . ./install_prerequisites.sh caffe cd "$cur_path" @@ -172,7 +172,7 @@ if [ ! -e "$ir_dir" ]; then printf "%s" "${dashes}" printf "Convert a model with Model Optimizer\n\n" - mo_path="${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py" + mo_path="${INTEL_OPENVINO_DIR}/tools/model_optimizer/mo.py" export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp print_and_run "$python_binary" "$downloader_dir/converter.py" --mo "$mo_path" --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision" @@ -193,7 +193,7 @@ if [ "$OS_PATH" == "x86_64" ]; then NUM_THREADS="-j8" fi -samples_path="${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/samples/cpp" +samples_path="${INTEL_OPENVINO_DIR}/samples/cpp" build_dir="$HOME/inference_engine_samples_build" binaries_dir="${build_dir}/${OS_PATH}/Release" diff --git a/scripts/install_dependencies/install_NCS_udev_rules.sh b/scripts/install_dependencies/install_NCS_udev_rules.sh index 7062e9d753db92..43c45c5c50b334 100755 --- a/scripts/install_dependencies/install_NCS_udev_rules.sh +++ b/scripts/install_dependencies/install_NCS_udev_rules.sh @@ -10,10 +10,10 @@ if [ -z "$INTEL_OPENVINO_DIR" ]; then exit -1 fi -if [ -f "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/97-myriad-usbboot.rules" ]; then +if [ -f "$INTEL_OPENVINO_DIR/3rdparty/97-myriad-usbboot.rules" ]; then sudo usermod -a -G users "$(whoami)" - sudo cp "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/97-myriad-usbboot.rules" /etc/udev/rules.d/ + sudo cp "$INTEL_OPENVINO_DIR/3rdparty/97-myriad-usbboot.rules" /etc/udev/rules.d/ sudo udevadm control --reload-rules sudo udevadm trigger sudo ldconfig diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 83c8d7520c21bf..00579fffc3aff9 100644 --- a/scripts/setupvars/setupvars.bat +++ 
b/scripts/setupvars/setupvars.bat @@ -32,35 +32,21 @@ set "PATH=%INTEL_OPENVINO_DIR%\opencv\x64\vc14\bin;%PATH%" ) :: Model Optimizer -if exist %INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer ( -set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer;%PYTHONPATH% -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer;%PATH%" +if exist %INTEL_OPENVINO_DIR%\tools\model_optimizer ( +set PYTHONPATH=%INTEL_OPENVINO_DIR%\tools\model_optimizer;%PYTHONPATH% +set "PATH=%INTEL_OPENVINO_DIR%\tools\model_optimizer;%PATH%" ) :: Inference Engine -set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share" -set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl" -set "OPENMP_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\omp\lib" -set "GNA_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\gna\lib" - -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENMP_DIR%;%GNA_DIR%;%OPENVINO_LIB_PATHS%" -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( -set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ) -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( -set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ) +set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" +set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" +set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" :: TBB -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb ( -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin;%OPENVINO_LIB_PATHS%" -set "TBB_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\cmake" -) - -:: nGraph -if exist %INTEL_OPENVINO_DIR%\deployment_tools\ngraph ( -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib;%OPENVINO_LIB_PATHS%" -set "ngraph_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\cmake +if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb\bin;%OPENVINO_LIB_PATHS%" +set "TBB_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb\cmake" ) :: Add libs dirs to the PATH @@ -115,12 +101,12 @@ if not "%bitness%"=="64" ( set PYTHONPATH=%INTEL_OPENVINO_DIR%\python\python%pyversion_major%.%pyversion_minor%;%INTEL_OPENVINO_DIR%\python\python3;%PYTHONPATH% -if exist %INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker ( - set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker;%PYTHONPATH% +if exist %INTEL_OPENVINO_DIR%\tools\accuracy_checker ( + set PYTHONPATH=%INTEL_OPENVINO_DIR%\tools\accuracy_checker;%PYTHONPATH% ) -if exist %INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit ( - set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit;%PYTHONPATH% +if exist %INTEL_OPENVINO_DIR%\tools\post_training_optimization_toolkit ( + set PYTHONPATH=%INTEL_OPENVINO_DIR%\tools\post_training_optimization_toolkit;%PYTHONPATH% ) echo [setupvars.bat] OpenVINO environment initialized
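As a smoke test of the restructured script, one can initialize the environment and confirm that both CMake hints now resolve into the flat `runtime` tree. A minimal sketch, assuming a default Windows install and the script's current location under `bin` (a later patch in this series moves it to the package root):

```sh
call "C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat"
rem Per the block above, both variables should now print a path ending in \runtime\cmake
echo %InferenceEngine_DIR%
echo %ngraph_DIR%
```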
diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 62f9b5f8ec8337..d0847a67bbd52b 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -29,68 +29,60 @@ esac shift done -if [ -e "$INSTALLDIR/deployment_tools/inference_engine" ]; then - export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share - system_type=$(ls "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/") - IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/$system_type +if [ -e "$INSTALLDIR/runtime" ]; then + export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/runtime/cmake + export ngraph_DIR=$INTEL_OPENVINO_DIR/runtime/cmake + system_type=$(ls "$INTEL_OPENVINO_DIR/runtime/lib/") + IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib - if [[ -e ${IE_PLUGINS_PATH}/arch_descriptions ]]; then - export ARCH_ROOT_DIR=${IE_PLUGINS_PATH}/arch_descriptions - fi - - export HDDL_INSTALL_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl + export HDDL_INSTALL_DIR=$INSTALLDIR/runtime/3rdparty/hddl if [[ "$OSTYPE" == "darwin"* ]]; then - export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export DYLD_LIBRARY_PATH=${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:$DYLD_LIBRARY_PATH} + export LD_LIBRARY_PATH=${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} else - export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/gna/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_lnx/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi - HDDL_UNITE_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl_unite + HDDL_UNITE_DIR=$INSTALLDIR/runtime/3rdparty/hddl_unite if [ -e "$HDDL_UNITE_DIR" ]; then export LD_LIBRARY_PATH=$HDDL_UNITE_DIR/lib:$HDDL_UNITE_DIR/thirdparty/XLink/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi fi -if [ -e "$INSTALLDIR/deployment_tools/inference_engine/external/tbb" ]; then +if [ -e "$INSTALLDIR/runtime/3rdparty/tbb" ]; then if [[ "$OSTYPE" == "darwin"* ]]; then - export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} + export DYLD_LIBRARY_PATH=$INSTALLDIR/runtime/3rdparty/tbb/lib${DYLD_LIBRARY_PATH:+:$DYLD_LIBRARY_PATH} fi - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} - export TBB_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/cmake -fi -if [ -e "$INSTALLDIR/deployment_tools/ngraph" ]; then - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/ngraph/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} - export ngraph_DIR=$INSTALLDIR/deployment_tools/ngraph/cmake + export LD_LIBRARY_PATH=$INSTALLDIR/runtime/3rdparty/tbb/lib:${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export TBB_DIR=$INSTALLDIR/runtime/3rdparty/tbb/cmake fi -if [ -e "$INSTALLDIR/opencv" ]; then - if [ -f "$INSTALLDIR/opencv/setupvars.sh" ]; then - source
"$INSTALLDIR/opencv/setupvars.sh" +if [ -e "$INSTALLDIR/extras/opencv" ]; then + if [ -f "$INSTALLDIR/extras/opencv/setupvars.sh" ]; then + source "$INSTALLDIR/extras/opencv/setupvars.sh" else - export OpenCV_DIR="$INSTALLDIR/opencv/share/OpenCV" - export LD_LIBRARY_PATH="$INSTALLDIR/opencv/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" - export LD_LIBRARY_PATH="$INSTALLDIR/opencv/share/OpenCV/3rdparty/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" + export OpenCV_DIR="$INSTALLDIR/extras/opencv/share/OpenCV" + export LD_LIBRARY_PATH="$INSTALLDIR/extras/opencv/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" + export LD_LIBRARY_PATH="$INSTALLDIR/extras/opencv/share/OpenCV/3rdparty/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" fi fi -if [ -f "$INTEL_OPENVINO_DIR/data_processing/dl_streamer/bin/setupvars.sh" ]; then - source "$INTEL_OPENVINO_DIR/data_processing/dl_streamer/bin/setupvars.sh" +if [ -f "$INTEL_OPENVINO_DIR/extras/dl_streamer/bin/setupvars.sh" ]; then + source "$INTEL_OPENVINO_DIR/extras/dl_streamer/bin/setupvars.sh" fi -export PATH="$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer${PATH:+:$PATH}" -export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer${PYTHONPATH:+:$PYTHONPATH}" +export PATH="$INTEL_OPENVINO_DIR/tools/model_optimizer${PATH:+:$PATH}" +export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/model_optimizer${PYTHONPATH:+:$PYTHONPATH}" -if [ -e "$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/accuracy_checker" ]; then - export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/accuracy_checker:$PYTHONPATH" +if [ -e "$INTEL_OPENVINO_DIR/tools/accuracy_checker" ]; then + export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/accuracy_checker:$PYTHONPATH" fi -if [ -e "$INTEL_OPENVINO_DIR/deployment_tools/tools/post_training_optimization_toolkit" ]; then - export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/tools/post_training_optimization_toolkit:$PYTHONPATH" +if [ -e "$INTEL_OPENVINO_DIR/tools/post_training_optimization_toolkit" ]; then + export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/post_training_optimization_toolkit:$PYTHONPATH" fi if [ -z "$python_version" ]; then diff --git a/tests/lib/path_utils.py b/tests/lib/path_utils.py index f97da037c5facd..ed2a2ce88936d6 100644 --- a/tests/lib/path_utils.py +++ b/tests/lib/path_utils.py @@ -39,13 +39,13 @@ def get_lib_path(lib_name): os_name = get_os_name() all_libs = { 'inference_engine_transformations': { - 'Windows': Path('deployment_tools/inference_engine/bin/intel64/Release/inference_engine_transformations.dll'), - 'Linux': Path('deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.so')}, + 'Windows': Path('runtime/bin/inference_engine_transformations.dll'), + 'Linux': Path('runtime/lib/libinference_engine_transformations.so')}, 'MKLDNNPlugin': { - 'Windows': Path('deployment_tools/inference_engine/bin/intel64/Release/MKLDNNPlugin.dll'), - 'Linux': Path('deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so')}, + 'Windows': Path('runtime/bin/MKLDNNPlugin.dll'), + 'Linux': Path('runtime/lib/libMKLDNNPlugin.so')}, 'ngraph': { - 'Windows': Path('deployment_tools/ngraph/lib/ngraph.dll'), - 'Linux': Path('deployment_tools/ngraph/lib/libngraph.so')} + 'Windows': Path('runtime/lib/ngraph.dll'), + 'Linux': Path('runtime/lib/libngraph.so')} } return all_libs[lib_name][os_name] diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index fdc6d9ef11a8f4..dd969b8d774aa9 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -30,11 +30,11 @@ if(ENABLE_PYTHON) 
ie_cpack_add_component(python_tools) install(DIRECTORY ../inference-engine/tools/benchmark_tool - DESTINATION deployment_tools/tools + DESTINATION tools COMPONENT python_tools) install(DIRECTORY ../inference-engine/tools/cross_check_tool - DESTINATION deployment_tools/tools + DESTINATION tools COMPONENT python_tools) install(FILES README.md From 2e01b374278186fd5a8c8fc6c9d38b93d9609f2a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 19 Jun 2021 21:06:55 +0300 Subject: [PATCH 02/57] Fixes --- .ci/azure/linux.yml | 2 +- .ci/azure/linux_conditional_compilation.yml | 2 +- .ci/azure/linux_onnxruntime.yml | 14 +++++++------- .ci/azure/mac.yml | 2 +- .ci/azure/windows.yml | 2 +- .ci/azure/windows_conditional_compilation.yml | 2 +- CMakeLists.txt | 4 ++-- docs/HOWTO/Custom_Layers_Guide.md | 2 +- .../Deep_Learning_Inference_Engine_DevGuide.md | 4 ++-- .../Integrate_with_customer_application_new_API.md | 2 +- docs/IE_DG/Samples_Overview.md | 6 +++--- docs/IE_DG/inference_engine_intro.md | 4 ++-- docs/get_started/get_started_linux.md | 6 +++--- docs/get_started/get_started_macos.md | 8 ++++---- docs/get_started/get_started_raspbian.md | 2 +- docs/get_started/get_started_windows.md | 6 +++--- docs/how_tos/MonoDepth_how_to.md | 2 +- docs/install_guides/PAC_Configure_2018R5.md | 4 ++-- .../VisionAcceleratorFPGA_Configure_2019R3.md | 2 +- docs/install_guides/deployment-manager-tool.md | 4 ++-- .../installing-openvino-docker-linux.md | 2 +- .../installing-openvino-docker-windows.md | 2 +- .../installing-openvino-linux-ivad-vpu.md | 4 ++-- docs/install_guides/installing-openvino-linux.md | 4 ++-- docs/install_guides/installing-openvino-macos.md | 4 ++-- .../install_guides/installing-openvino-raspbian.md | 6 +++--- docs/install_guides/installing-openvino-windows.md | 2 +- inference-engine/ie_bridges/c/docs/api_overview.md | 2 +- .../ie_bridges/python/docs/api_overview.md | 10 +++++----- inference-engine/samples/build_samples.sh | 10 ++++------ inference-engine/samples/build_samples_msvc.bat | 11 ++++------- ngraph/python/BUILDING.md | 6 +++--- scripts/demo/demo_benchmark_app.bat | 4 ++-- scripts/demo/demo_benchmark_app.sh | 4 ++-- scripts/demo/demo_security_barrier_camera.bat | 4 ++-- scripts/demo/demo_security_barrier_camera.sh | 4 ++-- .../demo/demo_squeezenet_download_convert_run.bat | 4 ++-- .../demo/demo_squeezenet_download_convert_run.sh | 4 ++-- .../install_dependencies/install_NCS_udev_rules.sh | 6 +++--- scripts/setupvars/setupvars.sh | 4 ++-- tests/lib/install_pkg.py | 4 ++-- 41 files changed, 88 insertions(+), 93 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 4e12c5cbf1d942..ee86e4003cb60e 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -32,7 +32,7 @@ jobs: BUILD_SAMPLES_DIR: $(WORK_DIR)/build_samples BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | diff --git a/.ci/azure/linux_conditional_compilation.yml b/.ci/azure/linux_conditional_compilation.yml index 6d2d33574b7295..b672816a479748 100644 --- a/.ci/azure/linux_conditional_compilation.yml +++ b/.ci/azure/linux_conditional_compilation.yml @@ -19,7 +19,7 @@ jobs: BUILD_DIR: $(WORK_DIR)/build BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml 
index 686d250d61145e..6dfaf5de949590 100644 --- a/.ci/azure/linux_onnxruntime.yml +++ b/.ci/azure/linux_onnxruntime.yml @@ -114,45 +114,45 @@ jobs: displayName: 'Install' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh echo "2021.2" > $(INSTALL_DIR)/deployment_tools/inference_engine/version.txt CXXFLAGS="-Wno-error=deprecated-declarations" ./build.sh --config RelWithDebInfo --use_openvino CPU_FP32 --build_shared_lib --parallel --skip_tests --build_dir $(ONNXRUNTIME_BUILD_DIR) workingDirectory: $(ONNXRUNTIME_REPO_DIR) displayName: 'Build ONNX Runtime' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh skip_tests=`tr -s '\n ' ':' < $(ONNXRUNTIME_UTILS)/skip_tests` ./onnxruntime_test_all --gtest_filter=-$skip_tests workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_test_all' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_shared_lib_test workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_shared_lib_test' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_global_thread_pools_test workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_global_thread_pools_test' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_api_tests_without_env workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_api_tests_without_env' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run pytorch-converted tests' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run pytorch-operator tests' diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index 04d4c16ea23344..2c191868d8c414 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -31,7 +31,7 @@ jobs: BUILD_DIR: $(WORK_DIR)/build BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index a8f683da7272bf..6fb7cd0aed4a61 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -34,7 +34,7 @@ jobs: MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe INSTALL_DIR: $(WORK_DIR)\install_pkg - SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + SETUPVARS: $(INSTALL_DIR)\setupvars.bat IB_DIR: C:\Program Files (x86)\IncrediBuild IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\bin;$(IB_DIR);%PATH% diff --git a/.ci/azure/windows_conditional_compilation.yml b/.ci/azure/windows_conditional_compilation.yml index 719e02d757448b..4ccac120679cbe 100644 --- 
a/.ci/azure/windows_conditional_compilation.yml +++ b/.ci/azure/windows_conditional_compilation.yml @@ -21,7 +21,7 @@ jobs: MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe INSTALL_DIR: $(WORK_DIR)\install_pkg - SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + SETUPVARS: $(INSTALL_DIR)\setupvars.bat IB_DIR: C:\Program Files (x86)\IncrediBuild IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\bin;$(IB_DIR);%PATH% diff --git a/CMakeLists.txt b/CMakeLists.txt index 39e69bff521156..be78fca70f5151 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -185,11 +185,11 @@ ie_cpack_add_component(setupvars REQUIRED) if(UNIX) install(PROGRAMS scripts/setupvars/setupvars.sh - DESTINATION bin + DESTINATION . COMPONENT setupvars) elseif(WIN32) install(PROGRAMS scripts/setupvars/setupvars.bat - DESTINATION bin + DESTINATION . COMPONENT setupvars) endif() diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md index cda4ed1c968f47..73d178c74373bf 100644 --- a/docs/HOWTO/Custom_Layers_Guide.md +++ b/docs/HOWTO/Custom_Layers_Guide.md @@ -313,7 +313,7 @@ operation for the CPU plugin. The code of the library is described in the [Exte To build the extension, run the following:
```bash
mkdir build && cd build
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2021/setupvars.sh
cmake .. -DCMAKE_BUILD_TYPE=Release
make --jobs=$(nproc)
```
diff --git a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md
index 5fc2b3f910255f..17fb2e71482514 100644
--- a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md
+++ b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md
@@ -81,9 +81,9 @@ Make sure those libraries are in your computer's path or in the place you pointe
 * Windows: `PATH`
 * macOS: `DYLD_LIBRARY_PATH`
-On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables.
+On Linux and macOS, use the script `setupvars.sh` to set the environment variables.
-On Windows, run the `bin\setupvars.bat` batch file to set the environment variables.
+On Windows, run the `setupvars.bat` batch file to set the environment variables.
 To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter.
diff --git a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md
index 9e35f483717433..3e03cd8a94430c 100644
--- a/docs/IE_DG/Integrate_with_customer_application_new_API.md
+++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md
@@ -200,7 +200,7 @@ add_executable(${PROJECT_NAME} src/main.cpp)
 target_link_libraries(${PROJECT_NAME} PRIVATE ${InferenceEngine_LIBRARIES} ${OpenCV_LIBS} ${NGRAPH_LIBRARIES})
 ```
 3. **To build your project** using CMake with the default build tools currently available on your machine, execute the following commands:
-> **NOTE**: Make sure you set environment variables first by running `/bin/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls.
+> **NOTE**: Make sure you set environment variables first by running `/setupvars.sh` (or `setupvars.bat` for Windows). Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls.
 ```sh
 cd build/
 cmake ../project
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index b5de5a83656531..ae1d7483ba8261 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -199,7 +199,7 @@ Before running compiled binary files, make sure your application can find the
 Inference Engine and OpenCV libraries.
 Run the `setupvars` script to set all necessary environment variables:
 ```sh
-source /bin/setupvars.sh
+source /setupvars.sh
 ```
 **(Optional)**: The OpenVINO environment variables are removed when you close the
@@ -212,7 +212,7 @@ vi /.bashrc
 2. Add this line to the end of the file:
 ```sh
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2021/setupvars.sh
 ```
 3. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key.
@@ -228,7 +228,7 @@ Before running compiled binary files, make sure your application can find the
 Inference Engine and OpenCV libraries.
Use the `setupvars` script, which sets all necessary environment variables: ```sh -\bin\setupvars.bat +\setupvars.bat ``` To debug or run the samples on Windows in Microsoft Visual Studio, make sure you diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index 847c0a59e354d9..08a8ad805e5255 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -84,9 +84,9 @@ Make sure those libraries are in your computer's path or in the place you pointe * Windows: `PATH` * macOS: `DYLD_LIBRARY_PATH` -On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables. +On Linux and macOS, use the script `setupvars.sh` to set the environment variables. -On Windows, run the `bin\setupvars.bat` batch file to set the environment variables. +On Windows, run the `setupvars.bat` batch file to set the environment variables. To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter. diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 26522956305b96..e191402d8bcb8c 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -384,7 +384,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -463,7 +463,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` 2. Go to the demo application build directory: ```sh @@ -512,7 +512,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 36b5ef71b3da6c..19b5da7666af11 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/tools/model_downloader/ +cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -371,7 +371,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -431,7 +431,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` 2. Go to the demo application build directory: ```sh @@ -466,7 +466,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. 
Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_raspbian.md b/docs/get_started/get_started_raspbian.md index b19ab5a0930d5d..1f7af067f978fb 100644 --- a/docs/get_started/get_started_raspbian.md +++ b/docs/get_started/get_started_raspbian.md @@ -84,7 +84,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /bin/setupvars.sh +source /setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index ffe86367a2501b..de2091bbc62678 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -368,7 +368,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```bat - \openvino\bin\setupvars.sh + \openvino\setupvars.bat ``` 2. Go to the code samples build directory: ```bat @@ -433,7 +433,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```bat - \bin\setupvars.bat + \setupvars.bat ``` 2. Go to the demo application build directory: ```bat @@ -474,7 +474,7 @@ Below you can find basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```bat -\bin\setupvars.bat +\setupvars.bat ``` 2. Make sure to have the directory path for the following: - Code Sample binaries located in `C:\Users\\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release` diff --git a/docs/how_tos/MonoDepth_how_to.md b/docs/how_tos/MonoDepth_how_to.md index 329eac9e063b49..49a9f4c12096f1 100644 --- a/docs/how_tos/MonoDepth_how_to.md +++ b/docs/how_tos/MonoDepth_how_to.md @@ -22,7 +22,7 @@ export WD=~/MonoDepth_Python/ Initialize OpenVINO™: ``` -source $OV/bin/setupvars.sh +source $OV/setupvars.sh ``` Install the Model Optimizer prerequisites: diff --git a/docs/install_guides/PAC_Configure_2018R5.md b/docs/install_guides/PAC_Configure_2018R5.md index bfbb1be1638625..54f6cdcd58bfb9 100644 --- a/docs/install_guides/PAC_Configure_2018R5.md +++ b/docs/install_guides/PAC_Configure_2018R5.md @@ -176,9 +176,9 @@ export AOCL_BOARD_PACKAGE_ROOT="\$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" \$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh source $INTELFPGAOCLSDKROOT/init_opencl.sh export IE_INSTALL="/opt/intel/openvino" -source \$IE_INSTALL/../bin/setupvars.sh +source \$IE_INSTALL/setupvars.sh export PATH="\$PATH:\$HOME/inference_engine_samples/intel64/Release" -alias mo="python3.6 \$IE_INSTALL/model_optimizer/mo.py" +alias mo="python3.6 \$IE_INSTALL/tools/model_optimizer/mo.py" ``` 2. 
Source the script
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
index cd7a750b984b71..bb2201d3754f47 100644
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
+++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
@@ -59,7 +59,7 @@ export INTELFPGAOCLSDKROOT=/opt/altera/aocl-pro-rte/aclrte-linux64
 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$AOCL_BOARD_PACKAGE_ROOT/linux64/lib
 export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3
 source /opt/altera/aocl-pro-rte/aclrte-linux64/init_opencl.sh
-source /opt/intel/openvino/bin/setupvars.sh
+source /opt/intel/openvino/setupvars.sh
 ```
 12. Source the script.
diff --git a/docs/install_guides/deployment-manager-tool.md b/docs/install_guides/deployment-manager-tool.md
index 00fb5e24c27520..cbbe8f15361815 100644
--- a/docs/install_guides/deployment-manager-tool.md
+++ b/docs/install_guides/deployment-manager-tool.md
@@ -110,14 +110,14 @@ To deploy the Inference Engine components from the development machine to the ta
    cd /openvino/
    ```
    ```sh
-   source ./bin/setupvars.sh
+   source ./setupvars.sh
    ```
    * For Windows:
    ```
    cd \openvino\
    ```
    ```
-   .\bin\setupvars.bat
+   .\setupvars.bat
    ```
    Congratulations, you have finished the deployment of the Inference Engine components to the target host.
\ No newline at end of file
diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md
index 66f01eb0299872..faf96b0c458678 100644
--- a/docs/install_guides/installing-openvino-docker-linux.md
+++ b/docs/install_guides/installing-openvino-docker-linux.md
@@ -180,7 +180,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
     /bin/mkdir -p '/usr/local/include/libusb-1.0' && \
     /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
     /bin/mkdir -p '/usr/local/lib/pkgconfig' && \
-    printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021/bin/setupvars.sh
+    printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021/setupvars.sh
 WORKDIR /opt/libusb-1.0.22/
 RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
diff --git a/docs/install_guides/installing-openvino-docker-windows.md b/docs/install_guides/installing-openvino-docker-windows.md
index 09435ec7a2a721..121756eb280369 100644
--- a/docs/install_guides/installing-openvino-docker-windows.md
+++ b/docs/install_guides/installing-openvino-docker-windows.md
@@ -135,7 +135,7 @@ GPU Acceleration in Windows containers feature requires to meet Windows host, Op
    ~~~
 3. For example, run the `demo_security_barrier_camera` demo with the command below:
    ~~~
-   cd bin && setupvars.bat && cd ../ && cd deployment_tools\demo && demo_security_barrier_camera.bat -d GPU -sample-options -no_show
+   setupvars.bat && cd ../ && cd deployment_tools\demo && demo_security_barrier_camera.bat -d GPU -sample-options -no_show
    ~~~
 > **NOTE**: Additional third-party dependencies will be installed.
diff --git a/docs/install_guides/installing-openvino-linux-ivad-vpu.md b/docs/install_guides/installing-openvino-linux-ivad-vpu.md
index a6bc6032273b18..3e3ffd79786816 100644
--- a/docs/install_guides/installing-openvino-linux-ivad-vpu.md
+++ b/docs/install_guides/installing-openvino-linux-ivad-vpu.md
@@ -11,7 +11,7 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi
1.
Set the environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` > **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//3rdparty/hddl`. @@ -52,7 +52,7 @@ E: [ncAPI] [ 965618] [MainThread] ncDeviceOpen:677 Failed to find a device, ```sh kill -9 $(pidof hddldaemon autoboot) pidof hddldaemon autoboot # Make sure none of them is alive -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ${HDDL_INSTALL_DIR}/bin/bsl_reset ``` diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index 8d2d5acbfbd12d..c252a0da62e87b 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -170,7 +170,7 @@ sudo -E ./install_openvino_dependencies.sh You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` **Optional:** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: @@ -182,7 +182,7 @@ vi /.bashrc 2. Add this line to the end of the file: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` 3. Save and close the file: press the **Esc** key and type `:wq`. diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 94eb42b2c1a5da..502c9c777b7a05 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -145,7 +145,7 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. @@ -160,7 +160,7 @@ If you didn't choose the default installation option, replace `/opt/intel/openvi 3. Add this line to the end of the file: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 0cabee7b1f76ba..180c4b3802cd12 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -97,12 +97,12 @@ CMake is installed. Continue to the next section to set the environment variable You must update several environment variables before you can compile and run OpenVINO toolkit applications. 
Run the following script to temporarily set the environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2021/setupvars.sh ``` **(Optional)** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: ```sh -echo "source /opt/intel/openvino_2021/bin/setupvars.sh" >> ~/.bashrc +echo "source /opt/intel/openvino_2021/setupvars.sh" >> ~/.bashrc ``` To test your change, open a new terminal. You will see the following: @@ -120,7 +120,7 @@ This task applies only if you have an Intel® Neural Compute Stick 2 device. Log out and log in for it to take effect. 2. If you didn't modify `.bashrc` to permanently set the environment variables, run `setupvars.sh` again after logging in: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2021/setupvars.sh ``` 3. To perform inference on the Intel® Neural Compute Stick 2, install the USB rules running the `install_NCS_udev_rules.sh` script: ```sh diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index e74ac276c97ec8..3a4fb92feac825 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -132,7 +132,7 @@ The screen example below indicates you are missing two dependencies: You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables: ```sh -"C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat" +"C:\Program Files (x86)\Intel\openvino_2021\setupvars.bat" ``` > **IMPORTANT**: Windows PowerShell* is not recommended to run the configuration commands, please use the Command Prompt instead. diff --git a/inference-engine/ie_bridges/c/docs/api_overview.md b/inference-engine/ie_bridges/c/docs/api_overview.md index f2d9dd92b32df7..298ff4f3e650c1 100644 --- a/inference-engine/ie_bridges/c/docs/api_overview.md +++ b/inference-engine/ie_bridges/c/docs/api_overview.md @@ -22,7 +22,7 @@ Supported Python* versions: To configure the environment for the Inference Engine C* API, run: -- On Ubuntu 16.04: `source /bin/setupvars.sh .` +- On Ubuntu 16.04: `source /setupvars.sh .` - On Windows 10: XXXX The script automatically detects latest installed C* version and configures required environment if the version is supported. 
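All of the hunks above make the same move: `setupvars.sh` and `setupvars.bat` leave the `bin/` subdirectory for the package root. For scripts that have to keep working against both the old and the new layout during the transition, a small fallback wrapper can hide the difference. A minimal sketch, assuming `OV_INSTALL_DIR` points at the installation directory (the variable name is illustrative, not one the toolkit defines):

```sh
#!/usr/bin/env bash
# Source setupvars.sh from whichever location this package uses.
OV_INSTALL_DIR="${OV_INSTALL_DIR:-/opt/intel/openvino_2021}"

if [ -f "$OV_INSTALL_DIR/setupvars.sh" ]; then
    # New layout: the script sits at the top of the package.
    source "$OV_INSTALL_DIR/setupvars.sh"
elif [ -f "$OV_INSTALL_DIR/bin/setupvars.sh" ]; then
    # Old layout: the script lived under bin/.
    source "$OV_INSTALL_DIR/bin/setupvars.sh"
else
    echo "setupvars.sh not found under $OV_INSTALL_DIR" >&2
    exit 1
fi
```

The same two-location probe works for `setupvars.bat` on Windows.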
diff --git a/inference-engine/ie_bridges/python/docs/api_overview.md b/inference-engine/ie_bridges/python/docs/api_overview.md
index 577edcc080c181..3938c71b1480f2 100644
--- a/inference-engine/ie_bridges/python/docs/api_overview.md
+++ b/inference-engine/ie_bridges/python/docs/api_overview.md
@@ -26,11 +26,11 @@ Supported Python* versions:
 ## Set Up the Environment
 To configure the environment for the Inference Engine Python\* API, run:
- * On Ubuntu\* 18.04 or 20.04: `source /bin/setupvars.sh .`
- * On CentOS\* 7.4: `source /bin/setupvars.sh .`
- * On macOS\* 10.x: `source /bin/setupvars.sh .`
- * On Raspbian\* 9,: `source /bin/setupvars.sh .`
- * On Windows\* 10: `call \bin\setupvars.bat`
+ * On Ubuntu\* 18.04 or 20.04: `source /setupvars.sh .`
+ * On CentOS\* 7.4: `source /setupvars.sh .`
+ * On macOS\* 10.x: `source /setupvars.sh .`
+ * On Raspbian\* 9: `source /setupvars.sh .`
+ * On Windows\* 10: `call \setupvars.bat`
 The script automatically detects latest installed Python\* version and configures required environment if the version is supported.
 If you want to use certain version of Python\*, set the environment variable `PYTHONPATH=/python/`
diff --git a/inference-engine/samples/build_samples.sh b/inference-engine/samples/build_samples.sh
index d584a11011985d..8e3f54afd80ff2 100755
--- a/inference-engine/samples/build_samples.sh
+++ b/inference-engine/samples/build_samples.sh
@@ -19,12 +19,10 @@ SAMPLES_PATH="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )"
 printf "\nSetting environment variables for building samples...\n"
 if [ -z "$INTEL_OPENVINO_DIR" ]; then
-    if [ -e "$SAMPLES_PATH/../../../bin/setupvars.sh" ]; then
-        setvars_path="$SAMPLES_PATH/../../../bin/setupvars.sh"
-    elif [ -e "$SAMPLES_PATH/../../../../bin/setupvars.sh" ]; then
-        setvars_path="$SAMPLES_PATH/../../../../bin/setupvars.sh"
+    if [ -e "$SAMPLES_PATH/../setupvars.sh" ]; then
+        setvars_path="$SAMPLES_PATH/../setupvars.sh"
     else
-        printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source /bin/setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n"
+        printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source /setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n"
         exit 1
     fi
     if ! source "$setvars_path" ; then
@@ -33,7 +31,7 @@ if [ -z "$INTEL_OPENVINO_DIR" ]; then
     fi
 else
     # case for run with `sudo -E`
-    source "$INTEL_OPENVINO_DIR/bin/setupvars.sh"
+    source "$INTEL_OPENVINO_DIR/setupvars.sh"
 fi
 if ! command -v cmake &>/dev/null; then
diff --git a/inference-engine/samples/build_samples_msvc.bat b/inference-engine/samples/build_samples_msvc.bat
index d7f0bce1dd4c1d..fd915b7ce4b8e7 100644
--- a/inference-engine/samples/build_samples_msvc.bat
+++ b/inference-engine/samples/build_samples_msvc.bat
@@ -30,19 +30,16 @@ if not "%1" == "" (
)

if "%INTEL_OPENVINO_DIR%"=="" (
-    if exist "%ROOT_DIR%\..\..\..\bin\setupvars.bat" (
-        call "%ROOT_DIR%\..\..\..\bin\setupvars.bat"
+    if exist "%ROOT_DIR%\..\setupvars.bat" (
+        call "%ROOT_DIR%\..\setupvars.bat"
     ) else (
-        if exist "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat" (
-            call "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat"
-        ) else (
        echo Failed to set the environment variables automatically
-        echo To fix, run the following command: ^\bin\setupvars.bat
+        echo To fix, run the following command: ^\setupvars.bat
        echo where INSTALL_DIR is the OpenVINO installation directory.
GOTO errorHandling ) ) -) +) if "%PROCESSOR_ARCHITECTURE%" == "AMD64" ( set "PLATFORM=x64" diff --git a/ngraph/python/BUILDING.md b/ngraph/python/BUILDING.md index 5d4d9c72892b3c..708529f4f234af 100644 --- a/ngraph/python/BUILDING.md +++ b/ngraph/python/BUILDING.md @@ -58,7 +58,7 @@ set the mentioned flags to `ON`. Note the `CMAKE_INSTALL_PREFIX`, which defaults The Python module is installed in the `${OPENVINO_BASEDIR}/openvino_dist/python/python/` folder. Set up the OpenVINO™ environment in order to add the module path to `PYTHONPATH`: - source ${OPENVINO_BASEDIR}/openvino_dist/bin/setupvars.sh + source ${OPENVINO_BASEDIR}/openvino_dist/setupvars.sh If you would like to use a specific version of Python, or use a virtual environment, you can set the `PYTHON_EXECUTABLE` variable. For example: @@ -138,7 +138,7 @@ adjust the number of threads used in the building process to your machine's capa Set up the OpenVINO™ environment in order to add a module path to `PYTHONPATH`: - %OPENVINO_BASEDIR%\openvino_dist\bin\setupvars.bat + %OPENVINO_BASEDIR%\openvino_dist\setupvars.bat ### Build an nGraph Python Wheel on Windows @@ -175,7 +175,7 @@ You should now be able to run tests. You may need to run the `setupvars` script from the OpenVINO™ Toolkit to set paths to OpenVINO™ components. - source ${OPENVINO_BASEDIR}/openvino_dist/bin/setupvars.sh + source ${OPENVINO_BASEDIR}/openvino_dist/setupvars.sh Now you can run tests using `pytest`: diff --git a/scripts/demo/demo_benchmark_app.bat b/scripts/demo/demo_benchmark_app.bat index 97ebb1fc4e02b2..e388721f361b58 100644 --- a/scripts/demo/demo_benchmark_app.bat +++ b/scripts/demo/demo_benchmark_app.bat @@ -49,8 +49,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%..\..\bin\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_benchmark_app.sh b/scripts/demo/demo_benchmark_app.sh index 003e2195584d1b..bce7c406c3c5fe 100755 --- a/scripts/demo/demo_benchmark_app.sh +++ b/scripts/demo/demo_benchmark_app.sh @@ -64,8 +64,8 @@ run_again="Then run the script again\n\n" dashes="\n\n###################################################\n\n" -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../setupvars.sh" else printf "Error: setupvars.sh is not found\n" fi diff --git a/scripts/demo/demo_security_barrier_camera.bat b/scripts/demo/demo_security_barrier_camera.bat index 60e44752118af1..ff496639dec041 100644 --- a/scripts/demo/demo_security_barrier_camera.bat +++ b/scripts/demo/demo_security_barrier_camera.bat @@ -40,8 +40,8 @@ set TARGET_PRECISION=FP16 echo target_precision = !TARGET_PRECISION! 
-if exist "%ROOT_DIR%..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%..\..\bin\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh index 40eace42247a00..c82e3242862625 100755 --- a/scripts/demo/demo_security_barrier_camera.sh +++ b/scripts/demo/demo_security_barrier_camera.sh @@ -122,8 +122,8 @@ else sudo -E "$pip_binary" install -r "$ROOT_DIR/../open_model_zoo/tools/downloader/requirements.in" fi -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../setupvars.sh" else printf "Error: setupvars.sh is not found\n" fi diff --git a/scripts/demo/demo_squeezenet_download_convert_run.bat b/scripts/demo/demo_squeezenet_download_convert_run.bat index 9ad1ab9983d59e..df58cc8533fef4 100644 --- a/scripts/demo/demo_squeezenet_download_convert_run.bat +++ b/scripts/demo/demo_squeezenet_download_convert_run.bat @@ -45,8 +45,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%..\..\bin\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_squeezenet_download_convert_run.sh b/scripts/demo/demo_squeezenet_download_convert_run.sh index ab6219d004423b..fd737b09e9e63b 100755 --- a/scripts/demo/demo_squeezenet_download_convert_run.sh +++ b/scripts/demo/demo_squeezenet_download_convert_run.sh @@ -60,8 +60,8 @@ run_again="Then run the script again\n\n" dashes="\n\n###################################################\n\n" -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../setupvars.sh" else printf "Error: setupvars.sh is not found\n" fi diff --git a/scripts/install_dependencies/install_NCS_udev_rules.sh b/scripts/install_dependencies/install_NCS_udev_rules.sh index 43c45c5c50b334..b05342d6cf4b6d 100755 --- a/scripts/install_dependencies/install_NCS_udev_rules.sh +++ b/scripts/install_dependencies/install_NCS_udev_rules.sh @@ -6,14 +6,14 @@ echo "Updating udev rules..." if [ -z "$INTEL_OPENVINO_DIR" ]; then - echo "Please set up your environment. Run 'source /bin/setupvars.sh'." + echo "Please set up your environment. Run 'source /setupvars.sh'." 
exit -1 fi -if [ -f "$INTEL_OPENVINO_DIR/3rdparty/97-myriad-usbboot.rules" ]; then +if [ -f "$INTEL_OPENVINO_DIR/runtime/3rdparty/97-myriad-usbboot.rules" ]; then sudo usermod -a -G users "$(whoami)" - sudo cp "$INTEL_OPENVINO_DIR/3rdparty/97-myriad-usbboot.rules" /etc/udev/rules.d/ + sudo cp "$INTEL_OPENVINO_DIR/runtime/3rdparty/97-myriad-usbboot.rules" /etc/udev/rules.d/ sudo udevadm control --reload-rules sudo udevadm trigger sudo ldconfig diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index d0847a67bbd52b..75dd6b218b68a4 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -69,8 +69,8 @@ if [ -e "$INSTALLDIR/extras/opencv" ]; then fi -if [ -f "$INTEL_OPENVINO_DIR/extras/dl_streamer/bin/setupvars.sh" ]; then - source "$INTEL_OPENVINO_DIR/extras/dl_streamer/bin/setupvars.sh" +if [ -f "$INTEL_OPENVINO_DIR/extras/dl_streamer/setupvars.sh" ]; then + source "$INTEL_OPENVINO_DIR/extras/dl_streamer/setupvars.sh" fi export PATH="$INTEL_OPENVINO_DIR/tools/model_optimizer${PATH:+:$PATH}" diff --git a/tests/lib/install_pkg.py b/tests/lib/install_pkg.py index c45d985c10a111..aca427c5db3726 100644 --- a/tests/lib/install_pkg.py +++ b/tests/lib/install_pkg.py @@ -16,10 +16,10 @@ def get_openvino_environment(install_prefix: Path): """ Get OpenVINO environment variables """ if sys.platform == "win32": - script = install_prefix / "bin" / "setupvars.bat" + script = install_prefix / "setupvars.bat" cmd = f"{script} && set" else: - script = install_prefix / "bin" / "setupvars.sh" + script = install_prefix / "setupvars.sh" # setupvars.sh is not compatible with /bin/sh. Using bash. cmd = f'bash -c ". {script} && env"' From a0e4a8367a4feefd9edcbfd81e80637b1999963f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 19 Jun 2021 21:45:43 +0300 Subject: [PATCH 03/57] More fixes --- CMakeLists.txt | 8 ++++++ cmake/developer_package/packaging.cmake | 12 ++++---- docs/IE_DG/Cross_Check_Tool.md | 4 +-- docs/IE_DG/Extensibility_DG/GPU_Kernel.md | 2 +- docs/get_started/get_started_linux.md | 26 ++++++++--------- docs/get_started/get_started_macos.md | 24 ++++++++-------- docs/get_started/get_started_windows.md | 28 +++++++++---------- docs/how_tos/MonoDepth_how_to.md | 8 +++--- docs/how_tos/POT_how_to_example.md | 2 +- .../VisionAcceleratorFPGA_Configure_2018R5.md | 2 +- .../VisionAcceleratorFPGA_Configure_2019R1.md | 2 +- .../VisionAcceleratorFPGA_Configure_2019R3.md | 2 +- .../installing-openvino-docker-linux.md | 12 ++++---- .../installing-openvino-docker-windows.md | 4 +-- .../installing-openvino-linux-ivad-vpu.md | 2 +- .../installing-openvino-linux.md | 2 +- .../installing-openvino-macos.md | 2 +- inference-engine/CMakeLists.txt | 4 +-- inference-engine/cmake/dependencies.cmake | 5 ++-- .../c/samples/hello_classification/README.md | 2 +- .../samples/benchmark_app/README.md | 8 +++--- .../samples/hello_classification/README.md | 2 +- .../speech_libs_and_demos/Speech_library.md | 2 +- .../Speech_libs_and_demos.md | 4 +-- .../src/inference_engine/CMakeLists.txt | 20 ++++++++----- .../tools/benchmark_tool/README.md | 8 +++--- .../tools/vpu/vpu_compile/CMakeLists.txt | 8 ------ .../tools/vpu/vpu_perfcheck/CMakeLists.txt | 6 ---- scripts/setupvars/setupvars.bat | 1 - scripts/setupvars/setupvars.sh | 9 ++---- 30 files changed, 107 insertions(+), 114 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index be78fca70f5151..70561ec08e7f7e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -220,4 +220,12 @@ elseif(WIN32) PATTERN *.sh EXCLUDE) endif() 
+# install licensing + +ie_cpack_add_component(licensing) + +install(DIRECTORY licensing + DESTINATION doc/licensing + COMPONENT licensing) + ie_cpack(${IE_CPACK_COMPONENTS_ALL}) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index 03d14ad1810b7b..717ce1b46fa5cd 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,13 +12,13 @@ unset(IE_CPACK_COMPONENTS_ALL CACHE) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) endif() endfunction() diff --git a/docs/IE_DG/Cross_Check_Tool.md b/docs/IE_DG/Cross_Check_Tool.md index fee2923ff93886..d53d3dddfe47de 100644 --- a/docs/IE_DG/Cross_Check_Tool.md +++ b/docs/IE_DG/Cross_Check_Tool.md @@ -8,11 +8,11 @@ The Cross Check Tool can compare metrics per layer or all over the model. On Linux* OS, before running the Cross Check Tool binary, make sure your application can find the Deep Learning Inference Engine libraries. -Navigate to the `/runtime/bin` folder and run the `setvars.sh` script to +Navigate to the `` folder and run the `setupvars.sh` script to set all necessary environment variables: ```sh -source setvars.sh +source setupvars.sh ``` ## Running the Cross Check Tool diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md index ea8cb89ae56bbb..f206c2c0bcb41e 100644 --- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md @@ -4,7 +4,7 @@ The GPU codepath abstracts many details about OpenCL\*. You need to provide the There are two options of using the custom operation configuration file: -* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/runtime//bin` folder +* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/runtime/bin` folder * Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin: @snippet snippets/GPU_Kernel.cpp part0 diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index e191402d8bcb8c..27cd4fa59baa3f 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `/opt/intel/openvino_2021/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. 
These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: +The demo scripts in `/opt/intel/openvino_2021/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit. * Download trained models. * Perform pipeline steps and see the output on the console. @@ -97,7 +97,7 @@ The script: To preview the image that the script will classify: ```sh -cd ${INTEL_OPENVINO_DIR}/deployment_tools/demo +cd ${INTEL_OPENVINO_DIR}/extras/open_model_zoo/demo eog car.png ``` @@ -360,9 +360,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2021/extras/open_model_zoo/demo/squeezenet1.1.labels ```
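Because the renames above touch many guides at once, it helps to confirm that an installed package really has the layout the updated text describes. A minimal sketch, assuming the top-level directories introduced by this series; the directory list below is drawn from the diffs, not from an official manifest:

```sh
INSTALL_DIR="${INSTALL_DIR:-/opt/intel/openvino_2021}"

# Spot-check the restructured layout: runtime, tools, and extras trees.
for d in runtime/lib tools/model_optimizer extras/open_model_zoo/demo; do
    if [ -d "$INSTALL_DIR/$d" ]; then
        echo "ok:      $d"
    else
        echo "missing: $d" >&2
    fi
done
```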
@@ -373,8 +373,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` -* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png` +* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -397,32 +397,32 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU ``` **MYRIAD:** > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` **HDDL:** > **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. 
Below is a sample output with inference results on CPU: @@ -480,14 +480,14 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU ``` **MYRIAD:** diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 19b5da7666af11..a51b4f3c8e0901 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -66,7 +66,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: +The demo scripts in `/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit * Download trained models * Perform pipeline steps and see the output on the console @@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image /opt/intel/openvino_2021/deployment_tools/demo/car.png +Image /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png classid probability label ------- ----------- ----- @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. 
You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2021/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -347,9 +347,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2021/extras/open_model_zoo/demo/squeezenet1.1.labels ```
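With the downloader and converter now under `tools/model_downloader`, the model-preparation steps these guides describe chain together as below. This is a sketch of the documented flow only; the converter output layout (`public/squeezenet1.1/FP16`) is an assumption based on Open Model Zoo defaults:

```sh
source /opt/intel/openvino_2021/setupvars.sh

# Download the public SqueezeNet model, then convert it to IR through the
# Open Model Zoo converter wrapper around the Model Optimizer.
python3 /opt/intel/openvino_2021/tools/model_downloader/downloader.py \
    --name squeezenet1.1 -o ~/models
python3 /opt/intel/openvino_2021/tools/model_downloader/converter.py \
    --name squeezenet1.1 -d ~/models -o ~/models

# Keep the human-readable class labels next to the generated IR files.
cp /opt/intel/openvino_2021/extras/open_model_zoo/demo/squeezenet1.1.labels \
   ~/models/public/squeezenet1.1/FP16/
```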
@@ -360,8 +360,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` -* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png` +* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -384,11 +384,11 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` @@ -396,14 +396,14 @@ The following commands run the Image Classification Code Sample using the `car.p > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```sh Top 10 results: -Image /opt/intel/openvino_2021/deployment_tools/demo/car.png +Image /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png classid probability label ------- ----------- ----- @@ -448,7 +448,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **MYRIAD:** diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index de2091bbc62678..7d5cca3ecbd206 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `\deployment_tools\demo` give you a starting point to learn the OpenVINO workflow. 
These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to: +The demo scripts in `\extras\open_model_zoo\demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit * Download trained models * Perform pipeline steps and see the output on the console @@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2021\extras\open_model_zoo\demo\car.png classid probability label ------- ----------- ----- @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```bat -cd \deployment_tools\tools\model_downloader\ +cd \tools\model_downloader\ ``` ```bat python info_dumper.py --print_all @@ -344,9 +344,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\\Documents\models\public\squeezenet1.1\ir` directory. -Copy the `squeezenet1.1.labels` file from the `\deployment_tools\demo\` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `\extras\open_model_zoo\demo\` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```batch - cp \deployment_tools\demo\squeezenet1.1.labels + cp \extras\open_model_zoo\demo\squeezenet1.1.labels ```
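A restructuring of this size is easy to leave half-finished in the documentation, so a path audit is worth running before review. One possible check, sketched for this repository (the pathspec list is an assumption, and some hits may be intentional leftovers scheduled for later patches):

```sh
# List remaining references to the old deployment_tools prefix; each hit is
# a candidate for the same path update applied in the hunks above.
if git grep -n "deployment_tools" -- docs scripts inference-engine/samples; then
    echo "stale deployment_tools references remain" >&2
else
    echo "no stale deployment_tools references found"
fi
```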
@@ -357,8 +357,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `\deployment_tools\demo\car.png` -* `\deployment_tools\demo\car_1.bmp` +* `\extras\open_model_zoo\demo\car.png` +* `\extras\open_model_zoo\demo\car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -381,31 +381,31 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `\deployment_tools\demo` directory as an input image, the IR of your model from `C:\Users\\Documents\models\public\squeezenet1.1\ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `\extras\open_model_zoo\demo` directory as an input image, the IR of your model from `C:\Users\\Documents\models\public\squeezenet1.1\ir` and on different hardware devices: **CPU:** ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU + .\classification_sample_async -i \extras\open_model_zoo\demo\car.png -m C:\Users\\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md). ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU + .\classification_sample_async -i \extras\open_model_zoo\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU ``` **MYRIAD:** ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD + .\classification_sample_async -i \extras\open_model_zoo\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```bat Top 10 results: -Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2021\extras\open_model_zoo\demo\car.png classid probability label ------- ----------- ----- @@ -450,14 +450,14 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```bat -.\security_barrier_camera_demo -i \deployment_tools\demo\car_1.bmp -m C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml -m_va C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml -m_lpr C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml -d CPU +.\security_barrier_camera_demo -i \extras\open_model_zoo\demo\car_1.bmp -m C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml -m_va C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml -m_lpr C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. 
For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md). ```bat -.\security_barrier_camera_demo -i \deployment_tools\demo\car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU +.\security_barrier_camera_demo -i \extras\open_model_zoo\demo\car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU ``` **MYRIAD:** diff --git a/docs/how_tos/MonoDepth_how_to.md b/docs/how_tos/MonoDepth_how_to.md index 49a9f4c12096f1..631d48b9ed31ea 100644 --- a/docs/how_tos/MonoDepth_how_to.md +++ b/docs/how_tos/MonoDepth_how_to.md @@ -27,14 +27,14 @@ source $OV/setupvars.sh Install the Model Optimizer prerequisites: ``` -cd $OV/deployment_tools/model_optimizer/install_prerequisites/ +cd $OV/model_optimizer/install_prerequisites/ sudo ./install_prerequisites.sh ``` Install the Model Downloader prerequisites: ``` -cd $OV/deployment_tools/tools/model_downloader/ +cd $OV/tools/model_downloader/ python3 -mpip install --user -r ./requirements.in sudo python3 -mpip install --user -r ./requirements-pytorch.in sudo python3 -mpip install --user -r ./requirements-caffe2.in @@ -44,7 +44,7 @@ sudo python3 -mpip install --user -r ./requirements-caffe2.in Download all models from the Demo Models list: ``` -python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD +python3 $OV/tools/model_downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD ``` ## 4. Convert Models to Intermediate Representation (IR) @@ -52,7 +52,7 @@ python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/dep Use the convert script to convert the models to ONNX*, and then to IR format: ``` cd $WD -python3 $OV/deployment_tools/tools/model_downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst +python3 $OV/tools/model_downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst ``` ## 5. Run Demo diff --git a/docs/how_tos/POT_how_to_example.md b/docs/how_tos/POT_how_to_example.md index 2c80b4f91359b8..abc9fedd544c45 100644 --- a/docs/how_tos/POT_how_to_example.md +++ b/docs/how_tos/POT_how_to_example.md @@ -46,7 +46,7 @@ mkdir ~/POT cd ~/POT ``` ``` -python3 $OV/deployment_tools/tools/model_downloader/downloader.py --name mobilenet-v2-pytorch -o . +python3 $OV/tools/model_downloader/downloader.py --name mobilenet-v2-pytorch -o . ``` ## 3. Prepare Model for Inference diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md index 24d3c159bf3802..59a68f26fc1f65 100644 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md +++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md @@ -292,7 +292,7 @@ cp /home//openvino_models/ir/squeezenet1.1/FP32/squeezenet1.1.labels . 5. Copy a sample image to the release directory. 
You will use this with your optimized model: ```sh -sudo cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples/intel64/Release +sudo cp /opt/intel/openvino/extras/open_model_zoo/demo/car.png ~/inference_engine_samples/intel64/Release ``` ## 5. Run a Sample Application diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md index 97f09f779bf8f4..4cbdd91d178142 100644 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md +++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md @@ -243,7 +243,7 @@ cp /home//openvino_models/ir/FP32/classification/squeezenet/1.1/caffe/sque 5. Copy a sample image to the release directory. You will use this with your optimized model: ```sh -sudo cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples_build/intel64/Release +sudo cp /opt/intel/openvino/extras/open_model_zoo/demo/car.png ~/inference_engine_samples_build/intel64/Release ``` ## 4. Run a Sample Application diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md index bb2201d3754f47..138b776ea321a5 100644 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md +++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md @@ -248,7 +248,7 @@ cp ~/openvino_models/ir/FP16/public/squeezenet1.1/squeezenet1.1.labels . 5. Copy a sample image to the release directory. You will use this with your optimized model: ```sh -cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples_build/intel64/Release +cp /opt/intel/openvino/extras/open_model_zoo/demo/car.png ~/inference_engine_samples_build/intel64/Release ``` ## 4. Run a Sample Application diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md index faf96b0c458678..8ad2a58415a40b 100644 --- a/docs/install_guides/installing-openvino-docker-linux.md +++ b/docs/install_guides/installing-openvino-docker-linux.md @@ -150,7 +150,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2021/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ ldconfig ``` - **CentOS 7**: @@ -184,7 +184,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2021/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ ldconfig ``` 2. 
Run the Docker* image: @@ -281,25 +281,25 @@ To run the Security Barrier Camera Demo on a specific inference device, run the **CPU**: ```sh docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d CPU -sample-options -no_show" +/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d CPU -sample-options -no_show" ``` **GPU**: ```sh docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d GPU -sample-options -no_show" +/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d GPU -sample-options -no_show" ``` **MYRIAD**: ```sh docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show" +/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show" ``` **HDDL**: ```sh docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show" +/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show" ``` ## Use a Docker* Image for FPGA diff --git a/docs/install_guides/installing-openvino-docker-windows.md b/docs/install_guides/installing-openvino-docker-windows.md index 121756eb280369..58a0643484a6ee 100644 --- a/docs/install_guides/installing-openvino-docker-windows.md +++ b/docs/install_guides/installing-openvino-docker-windows.md @@ -77,7 +77,7 @@ docker run -it --rm If you want to try some demos then run image with the root privileges (some additional 3-rd party dependencies will be installed): ~~~ -docker run -itu ContainerAdministrator --rm cmd /S /C "cd deployment_tools\demo && demo_security_barrier_camera.bat -d CPU -sample-options -no_show" +docker run -itu ContainerAdministrator --rm cmd /S /C "cd extras\open_model_zoo\demo && demo_security_barrier_camera.bat -d CPU -sample-options -no_show" ~~~ ## Build and Run the Docker* Image for GPU @@ -135,7 +135,7 @@ GPU Acceleration in Windows containers feature requires to meet Windows host, Op ~~~ 3. For example, run the `demo_security_barrier_camera` demo with the command below: ~~~ - setupvars.bat && cd ../ && cd deployment_tools\demo && demo_security_barrier_camera.bat -d GPU -sample-options -no_show + setupvars.bat && cd ../ && cd extras\open_model_zoo\demo && demo_security_barrier_camera.bat -d GPU -sample-options -no_show ~~~ > **NOTE**: Addittional third-party dependencies will be installed. 
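Before relying on these in-container demo paths, it can be worth checking that a freshly built image actually carries the relocated files. A hedged sanity check, assuming the image's working directory is the OpenVINO install root (the `docker run` commands above make the same assumption) and with `OPENVINO_IMAGE` standing in for your image tag:

```sh
# Both paths come from the hunks above; a non-zero exit status means the
# layout inside the image does not match the updated documentation.
docker run --rm "$OPENVINO_IMAGE" /bin/bash -c \
    "ls extras/open_model_zoo/demo/demo_security_barrier_camera.sh \
        runtime/3rdparty/97-myriad-usbboot.rules"
```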
diff --git a/docs/install_guides/installing-openvino-linux-ivad-vpu.md b/docs/install_guides/installing-openvino-linux-ivad-vpu.md index 3e3ffd79786816..1bb02c3531b162 100644 --- a/docs/install_guides/installing-openvino-linux-ivad-vpu.md +++ b/docs/install_guides/installing-openvino-linux-ivad-vpu.md @@ -13,7 +13,7 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi ```sh source /opt/intel/openvino_2021/setupvars.sh ``` -> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//3rdparty/hddl`. +> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/runtime/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021/runtime/3rdparty/hddl`. 2. Install dependencies: ```sh diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index c252a0da62e87b..1af837d05cce6e 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -340,7 +340,7 @@ After configuration is done, you are ready to run the verification scripts with 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/demo +cd /opt/intel/openvino_2021/extras/open_model_zoo/demo ``` 2. Run the **Image Classification verification script**. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 502c9c777b7a05..79f69398ae5513 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -286,7 +286,7 @@ Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Tool ## Additional Resources -- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/deployment_tools/demo/`. +- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/extras/open_model_zoo/demo/`. - For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page. 
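A quick way to confirm the new layout on a Linux install is to source the top-level script and inspect the variables these guides rely on. A sketch, assuming the default install prefix used throughout the documentation:

```sh
source /opt/intel/openvino_2021/setupvars.sh
# Expected values under the restructured package, per the note above:
echo "$INTEL_OPENVINO_DIR"   # /opt/intel/openvino_2021
echo "$HDDL_INSTALL_DIR"     # /opt/intel/openvino_2021/runtime/3rdparty/hddl
```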
diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 77df1b77a39ba9..a657150fca4c3e 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -131,8 +131,8 @@ install(FILES samples/CMakeLists.txt if(SPEECH_LIBS_AND_DEMOS) ie_cpack_add_component(speech_demo_files) - install(DIRECTORY ${TEMP}/deployment_tools - ${TEMP}/data_processing + install(DIRECTORY ${TEMP}/data_processing + ${TEMP}/deployment_tools/demo DESTINATION extras/open_model_zoo USE_SOURCE_PERMISSIONS COMPONENT speech_demo_files) diff --git a/inference-engine/cmake/dependencies.cmake b/inference-engine/cmake/dependencies.cmake index 11f8e54219fb39..d1dad7ad115e54 100644 --- a/inference-engine/cmake/dependencies.cmake +++ b/inference-engine/cmake/dependencies.cmake @@ -68,13 +68,12 @@ if (THREADING STREQUAL "OMP") update_deps_cache(OMP "${OMP}" "Path to OMP root folder") log_rpath_from_dir(OMP "${OMP}/lib") debug_message(STATUS "intel_omp=" ${OMP}) - + ie_cpack_add_component(omp REQUIRED) file(GLOB_RECURSE source_list "${OMP}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") install(FILES ${source_list} - DESTINATION "3rdparty/omp/lib" + DESTINATION "runtime/3rdparty/omp/lib" COMPONENT omp) - endif () ## TBB package diff --git a/inference-engine/ie_bridges/c/samples/hello_classification/README.md b/inference-engine/ie_bridges/c/samples/hello_classification/README.md index b0cf1b5d21dc9a..318e942acf8cc2 100644 --- a/inference-engine/ie_bridges/c/samples/hello_classification/README.md +++ b/inference-engine/ie_bridges/c/samples/hello_classification/README.md @@ -60,7 +60,7 @@ The application outputs top-10 inference results. ```sh Top 10 results: -Image /opt/intel/openvino/deployment_tools/demo/car.png +Image /opt/intel/openvino/extras/open_model_zoo/demo/car.png classid probability ------- ----------- diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index dfea2262474a4d..2763fdbb434be6 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -140,7 +140,7 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's] ## Examples of Running the Tool -This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or FPGA devices. As an input, the `car.png` file from the `/deployment_tools/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or FPGA devices. As an input, the `car.png` file from the `/extras/open_model_zoo/demo/` directory is used. > **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. @@ -158,15 +158,15 @@ This section provides step-by-step instructions on how to run the Benchmark Tool ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` -3. Run the tool with specifying the `/deployment_tools/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and FPGA devices: +3. 
Run the tool with specifying the `/extras/open_model_zoo/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and FPGA devices: * On CPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d CPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /extras/open_model_zoo/demo/car.png -d CPU -api async --progress true ``` * On FPGA: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d HETERO:FPGA,CPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /extras/open_model_zoo/demo/car.png -d HETERO:FPGA,CPU -api async --progress true ``` The application outputs the number of executed iterations, total duration of execution, latency, and throughput. diff --git a/inference-engine/samples/hello_classification/README.md b/inference-engine/samples/hello_classification/README.md index ef79f7b1ad3e7e..64ef12314bb5d1 100644 --- a/inference-engine/samples/hello_classification/README.md +++ b/inference-engine/samples/hello_classification/README.md @@ -60,7 +60,7 @@ The application outputs top-10 inference results. ```sh Top 10 results: -Image /opt/intel/openvino/deployment_tools/demo/car.png +Image /opt/intel/openvino/extras/open_model_zoo/demo/car.png classid probability ------- ----------- diff --git a/inference-engine/samples/speech_libs_and_demos/Speech_library.md b/inference-engine/samples/speech_libs_and_demos/Speech_library.md index b407447156787a..aed57b0053d65f 100644 --- a/inference-engine/samples/speech_libs_and_demos/Speech_library.md +++ b/inference-engine/samples/speech_libs_and_demos/Speech_library.md @@ -12,7 +12,7 @@ Speech Library contains: - Speech library source code in the `src` folder - Speech library header files in the `include` folder. The library API is in the file `speech_library.h`. -To compile the libraries, please run a `.bat/.sh` file in the root folder of speech libraries and demos, or run the demonstration script `/deployment_tools/demo/speech_recogintion.bat/sh`. +To compile the libraries, please run a `.bat/.sh` file in the root folder of speech libraries and demos, or run the demonstration script `/extras/open_model_zoo/demo/speech_recogintion.bat/sh`. 
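On Linux, invoking that demonstration script from the relocated demo directory can look like the sketch below, assuming the default install prefix; the Linux file name `demo_speech_recognition.sh` is taken from the companion guide that follows, not from this hunk:

```sh
# Source the environment, then launch the relocated speech demo script.
source /opt/intel/openvino_2021/setupvars.sh
"$INTEL_OPENVINO_DIR/extras/open_model_zoo/demo/demo_speech_recognition.sh"
```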
## Architecture diff --git a/inference-engine/samples/speech_libs_and_demos/Speech_libs_and_demos.md b/inference-engine/samples/speech_libs_and_demos/Speech_libs_and_demos.md index 5bd8b99d82a6e6..287e3414f1867b 100644 --- a/inference-engine/samples/speech_libs_and_demos/Speech_libs_and_demos.md +++ b/inference-engine/samples/speech_libs_and_demos/Speech_libs_and_demos.md @@ -38,9 +38,9 @@ Additionally, new acoustic and language models are available in the OpenVINO&tra To download pre-trained models and build all dependencies: -* On Linux* OS, use the shell script `/deployment_tools/demo/demo_speech_recognition.sh` +* On Linux* OS, use the shell script `/extras/open_model_zoo/demo/demo_speech_recognition.sh` -* On Windows* OS, use the batch file `\deployment_tools\demo\demo_speech_recognition.bat` +* On Windows* OS, use the batch file `\extras\open_model_zoo\demo\demo_speech_recognition.bat` The script follows the steps below: diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 7d218e6772371e..872b104aacd1cf 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -164,7 +164,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE pugixml openvino::itt ${CMAKE_DL_LI target_include_directories(${TARGET_NAME} INTERFACE $ - $ + $ PRIVATE $ $) @@ -237,22 +237,28 @@ if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND TBBROOT MATCH install(DIRECTORY "${TBB}/lib" DESTINATION runtime/3rdparty/tbb COMPONENT tbb) + # Windows only if(EXISTS "${TBB}/bin") install(DIRECTORY "${TBB}/bin" DESTINATION runtime/3rdparty/tbb COMPONENT tbb) endif() + # if(EXISTS "${TBB}/doc") + # install(DIRECTORY "${TBB}/doc" + # DESTINATION runtime/3rdparty/tbb + # COMPONENT tbb) + # endif() install(FILES "${TBB}/LICENSE" DESTINATION runtime/3rdparty/tbb COMPONENT tbb) - set(IE_TBB_DIR_INSTALL "runtime/3rdarty/tbb/cmake") + set(IE_TBB_DIR_INSTALL "3rdparty/tbb/cmake") set(IE_TBB_DIR "${TBB_DIR}") list(APPEND PATH_VARS "IE_TBB_DIR") install(FILES "${TBB}/cmake/TBBConfig.cmake" "${TBB}/cmake/TBBConfigVersion.cmake" - DESTINATION ${IE_TBB_DIR_INSTALL} + DESTINATION runtime/${IE_TBB_DIR_INSTALL} COMPONENT tbb) endif() @@ -261,7 +267,7 @@ endif() ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) ie_cpack_add_component(core_dev REQUIRED core ngraph_dev) -install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include" +install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include/" DESTINATION runtime/include/ie COMPONENT core_dev) @@ -292,13 +298,13 @@ configure_package_config_file("${InferenceEngine_SOURCE_DIR}/cmake/templates/Inf PATH_VARS ${PATH_VARS}) set(IE_INCLUDE_DIR "include") -set(IE_NGRAPH_DIR "../ngraph/cmake") +set(IE_NGRAPH_DIR ".") set(IE_TBB_DIR "${IE_TBB_DIR_INSTALL}") -set(IE_PARALLEL_CMAKE "share/ie_parallel.cmake") +set(IE_PARALLEL_CMAKE "cmake/ie_parallel.cmake") configure_package_config_file("${InferenceEngine_SOURCE_DIR}/cmake/templates/InferenceEngineConfig.cmake.in" "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" - INSTALL_DESTINATION share + INSTALL_DESTINATION cmake PATH_VARS ${PATH_VARS}) configure_file("${IE_MAIN_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" diff --git a/inference-engine/tools/benchmark_tool/README.md b/inference-engine/tools/benchmark_tool/README.md index 57e0640cd1e00c..e293bc771bf75d 100644 --- a/inference-engine/tools/benchmark_tool/README.md +++ b/inference-engine/tools/benchmark_tool/README.md @@ -151,7 +151,7 
@@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's] ## Examples of Running the Tool -This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or FPGA devices. As an input, the `car.png` file from the `/deployment_tools/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or FPGA devices. As an input, the `car.png` file from the `/extras/open_model_zoo/demo/` directory is used. > **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. @@ -169,15 +169,15 @@ This section provides step-by-step instructions on how to run the Benchmark Tool ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` -3. Run the tool with specifying the `/deployment_tools/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and FPGA devices: +3. Run the tool with specifying the `/extras/open_model_zoo/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and FPGA devices: * On CPU: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /deployment_tools/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /extras/open_model_zoo/demo/car.png --progress true -b 1 ``` * On FPGA: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d HETERO:FPGA,CPU -api async -i /deployment_tools/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d HETERO:FPGA,CPU -api async -i /extras/open_model_zoo/demo/car.png --progress true -b 1 ``` The application outputs number of executed iterations, total duration of execution, latency and throughput. 
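As with the C++ variant earlier, the hard-coded unpacked-archive root in these examples can be expressed through `INTEL_OPENVINO_DIR` once `setupvars.sh` has been sourced. A sketch using only the flags shown above, with the IR path standing in for wherever the Model Optimizer wrote your model:

```sh
python3 benchmark_app.py -m "$HOME/ir/googlenet-v1.xml" -d CPU -api async \
    -i "$INTEL_OPENVINO_DIR/extras/open_model_zoo/demo/car.png" \
    --progress true -b 1
```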
diff --git a/inference-engine/tools/vpu/vpu_compile/CMakeLists.txt b/inference-engine/tools/vpu/vpu_compile/CMakeLists.txt index d18eb04adc116a..383060bc9f42c8 100644 --- a/inference-engine/tools/vpu/vpu_compile/CMakeLists.txt +++ b/inference-engine/tools/vpu/vpu_compile/CMakeLists.txt @@ -36,11 +36,3 @@ set_target_properties(${TARGET_NAME} PROPERTIES ) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) - -# install - -ie_cpack_add_component(myriad_dev DEPENDS myriad) - -install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} - COMPONENT myriad_dev) diff --git a/inference-engine/tools/vpu/vpu_perfcheck/CMakeLists.txt b/inference-engine/tools/vpu/vpu_perfcheck/CMakeLists.txt index eaa65dc188072e..8d56a2e171567a 100644 --- a/inference-engine/tools/vpu/vpu_perfcheck/CMakeLists.txt +++ b/inference-engine/tools/vpu/vpu_perfcheck/CMakeLists.txt @@ -38,10 +38,4 @@ endfunction() if(ENABLE_MYRIAD) add_perfcheck_target(myriad_perfcheck myriadPlugin) - - ie_cpack_add_component(myriad_tools DEPENDS myriad) - - install(TARGETS myriad_perfcheck - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} - COMPONENT myriad_tools) endif() diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 00579fffc3aff9..fc3594ebaeafc3 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -4,7 +4,6 @@ :: SPDX-License-Identifier: Apache-2.0 set ROOT=%~dp0 -call :GetFullPath "%ROOT%\.." ROOT set SCRIPT_NAME=%~nx0 set "INTEL_OPENVINO_DIR=%ROOT%" diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 75dd6b218b68a4..6e8e8c8abab11a 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -4,13 +4,8 @@ # SPDX-License-Identifier: Apache-2.0 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" >/dev/null 2>&1 && pwd )" -BASE_DIR="$( dirname "$SCRIPT_DIR" )" - -INSTALLDIR="${BASE_DIR}" - - +INSTALLDIR="${SCRIPT_DIR}" export INTEL_OPENVINO_DIR="$INSTALLDIR" -export INTEL_CVSDK_DIR="$INTEL_OPENVINO_DIR" # parse command line options while [[ $# -gt 0 ]] @@ -32,7 +27,7 @@ done if [ -e "$INSTALLDIR/runtime" ]; then export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/runtime/cmake export ngraph_DIR=$INTEL_OPENVINO_DIR/runtime/cmake - system_type=$(ls "$INTEL_OPENVINO_DIR/runtime/lib/") + IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib export HDDL_INSTALL_DIR=$INSTALLDIR/runtime/3rdparty/hddl From 08c32464c8e740a3403d28554de55681d0770a29 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 5 Jul 2021 19:02:12 +0300 Subject: [PATCH 04/57] Fixed code style in ngraph tests --- .../test/frontend/paddlepaddle/basic_api.cpp | 10 +++++----- .../paddlepaddle/cut_specific_model.cpp | 6 +++--- .../test/frontend/paddlepaddle/load_from.cpp | 6 +++--- .../frontend/paddlepaddle/partial_shape.cpp | 20 +++++++++---------- .../paddlepaddle/set_element_type.cpp | 6 +++--- 5 files changed, 24 insertions(+), 24 deletions(-) diff --git a/ngraph/test/frontend/paddlepaddle/basic_api.cpp b/ngraph/test/frontend/paddlepaddle/basic_api.cpp index 633e8edbcf4ef4..d191e4fccda3af 100644 --- a/ngraph/test/frontend/paddlepaddle/basic_api.cpp +++ b/ngraph/test/frontend/paddlepaddle/basic_api.cpp @@ -21,8 +21,8 @@ static const std::vector models{ }; INSTANTIATE_TEST_SUITE_P(PDPDBasicTest, - FrontEndBasicTest, - ::testing::Combine(::testing::Values(PDPD), - ::testing::Values(std::string(TEST_PDPD_MODELS)), - ::testing::ValuesIn(models)), - FrontEndBasicTest::getTestCaseName); + FrontEndBasicTest, + 
::testing::Combine(::testing::Values(PDPD), + ::testing::Values(std::string(TEST_PDPD_MODELS)), + ::testing::ValuesIn(models)), + FrontEndBasicTest::getTestCaseName); diff --git a/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp b/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp index c4f00198b26d0d..3251762b6f9421 100644 --- a/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp +++ b/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp @@ -28,6 +28,6 @@ static CutModelParam getTestData_2in_2out() } INSTANTIATE_TEST_SUITE_P(PDPDCutTest, - FrontEndCutModelTest, - ::testing::Values(getTestData_2in_2out()), - FrontEndCutModelTest::getTestCaseName); \ No newline at end of file + FrontEndCutModelTest, + ::testing::Values(getTestData_2in_2out()), + FrontEndCutModelTest::getTestCaseName); \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/load_from.cpp b/ngraph/test/frontend/paddlepaddle/load_from.cpp index 0f3256fc2bcac6..2950c3d271f4f7 100644 --- a/ngraph/test/frontend/paddlepaddle/load_from.cpp +++ b/ngraph/test/frontend/paddlepaddle/load_from.cpp @@ -24,6 +24,6 @@ static LoadFromFEParam getTestData() } INSTANTIATE_TEST_SUITE_P(PDPDCutTest, - FrontEndLoadFromTest, - ::testing::Values(getTestData()), - FrontEndLoadFromTest::getTestCaseName); \ No newline at end of file + FrontEndLoadFromTest, + ::testing::Values(getTestData()), + FrontEndLoadFromTest::getTestCaseName); \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/partial_shape.cpp b/ngraph/test/frontend/paddlepaddle/partial_shape.cpp index 0cef8886760e2f..ddb7213f9ec75f 100644 --- a/ngraph/test/frontend/paddlepaddle/partial_shape.cpp +++ b/ngraph/test/frontend/paddlepaddle/partial_shape.cpp @@ -62,13 +62,13 @@ static PartShape getTestShape_conv2d_relu() } INSTANTIATE_TEST_SUITE_P(PDPDPartialShapeTest, - FrontEndPartialShapeTest, - ::testing::Combine(::testing::Values(BaseFEParam{ - PDPD, std::string(TEST_PDPD_MODELS)}), - ::testing::ValuesIn(std::vector{ - getTestShape_2in_2out(), - getTestShape_conv2d_relu(), - getTestShape_conv2d(), - getTestShape_conv2d_setDynamicBatch(), - getTestShape_2in_2out_dynbatch()})), - FrontEndPartialShapeTest::getTestCaseName); \ No newline at end of file + FrontEndPartialShapeTest, + ::testing::Combine(::testing::Values(BaseFEParam{ + PDPD, std::string(TEST_PDPD_MODELS)}), + ::testing::ValuesIn(std::vector{ + getTestShape_2in_2out(), + getTestShape_conv2d_relu(), + getTestShape_conv2d(), + getTestShape_conv2d_setDynamicBatch(), + getTestShape_2in_2out_dynbatch()})), + FrontEndPartialShapeTest::getTestCaseName); \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/set_element_type.cpp b/ngraph/test/frontend/paddlepaddle/set_element_type.cpp index c14ce0c8b6f9c3..e53ea790ac869f 100644 --- a/ngraph/test/frontend/paddlepaddle/set_element_type.cpp +++ b/ngraph/test/frontend/paddlepaddle/set_element_type.cpp @@ -21,6 +21,6 @@ static SetTypeFEParam getTestData_relu() } INSTANTIATE_TEST_SUITE_P(PDPDCutTest, - FrontEndElementTypeTest, - ::testing::Values(getTestData_relu()), - FrontEndElementTypeTest::getTestCaseName); \ No newline at end of file + FrontEndElementTypeTest, + ::testing::Values(getTestData_relu()), + FrontEndElementTypeTest::getTestCaseName); \ No newline at end of file From 851e2189caee7157663013363e94fe9d9afb75ca Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 5 Jul 2021 19:14:52 +0300 Subject: [PATCH 05/57] Fixes --- scripts/CMakeLists.txt | 8 ++++---- scripts/setupvars/setupvars.bat | 6 +++--- 2 
files changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 45395f910fc423..1fb0735d4aab96 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -29,11 +29,11 @@ ie_cpack_add_component(setupvars REQUIRED) if(UNIX) install(PROGRAMS setupvars/setupvars.sh - DESTINATION bin + DESTINATION . COMPONENT setupvars) elseif(WIN32) install(PROGRAMS setupvars/setupvars.bat - DESTINATION bin + DESTINATION . COMPONENT setupvars) endif() @@ -52,13 +52,13 @@ ie_cpack_add_component(demo_scripts DEPENDS core) if(UNIX) install(DIRECTORY demo/ - DESTINATION deployment_tools/demo + DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN *.bat EXCLUDE) elseif(WIN32) install(DIRECTORY demo/ - DESTINATION deployment_tools/demo + DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN *.sh EXCLUDE) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index b019062f7b1bb2..e3438ef7dc052f 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -38,9 +38,9 @@ set "PATH=%INTEL_OPENVINO_DIR%\tools\model_optimizer;%PATH%" :: Model Downloader -if exist %INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader ( -set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader;%PYTHONPATH% -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader;%PATH%" +if exist %INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader ( +set PYTHONPATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PYTHONPATH% +set "PATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PATH%" ) :: Inference Engine From 65aaa01be98e4bb33dc5c94254a25b0d2aeb4fd0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 5 Jul 2021 19:21:08 +0300 Subject: [PATCH 06/57] Paths to setupvars inside demo scripts --- scripts/demo/demo_benchmark_app.sh | 4 ++-- scripts/demo/demo_security_barrier_camera.sh | 4 ++-- scripts/demo/demo_squeezenet_download_convert_run.sh | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/demo/demo_benchmark_app.sh b/scripts/demo/demo_benchmark_app.sh index ec9d97f3c778fc..916859087e6a6e 100755 --- a/scripts/demo/demo_benchmark_app.sh +++ b/scripts/demo/demo_benchmark_app.sh @@ -69,8 +69,8 @@ target_image_path="$ROOT_DIR/car.png" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../setupvars.sh" +if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../../setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh index 2646249cdb9927..39540244d0e047 100755 --- a/scripts/demo/demo_security_barrier_camera.sh +++ b/scripts/demo/demo_security_barrier_camera.sh @@ -55,8 +55,8 @@ target_image_path="$ROOT_DIR/car_1.bmp" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../../bin/setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../../bin/setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi diff --git a/scripts/demo/demo_squeezenet_download_convert_run.sh b/scripts/demo/demo_squeezenet_download_convert_run.sh index 8181c360ebd27a..9047a011dcbceb 100755 --- 
a/scripts/demo/demo_squeezenet_download_convert_run.sh +++ b/scripts/demo/demo_squeezenet_download_convert_run.sh @@ -65,8 +65,8 @@ target_image_path="$ROOT_DIR/car.png" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../setupvars.sh" +if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../../setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi From cce1261202457c9874c27bb85644f016822605dd Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Jul 2021 11:58:53 +0300 Subject: [PATCH 07/57] Fixed demo_security_barrier_camera.sh --- inference-engine/src/inference_engine/CMakeLists.txt | 4 ++-- scripts/demo/demo_benchmark_app.sh | 2 +- scripts/demo/demo_security_barrier_camera.sh | 2 +- scripts/demo/demo_squeezenet_download_convert_run.sh | 2 +- thirdparty/protobuf/CMakeLists.txt | 7 +++---- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 3d78fa42f375c6..149410ae52202c 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -267,8 +267,8 @@ endif() ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) ie_cpack_add_component(core_dev REQUIRED core ngraph_dev) -install(DIRECTORY "${PUBLIC_HEADERS_DIR}" - DESTINATION "runtime/include" +install(DIRECTORY "${PUBLIC_HEADERS_DIR}/" + DESTINATION "runtime/include/ie" COMPONENT core_dev) install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets diff --git a/scripts/demo/demo_benchmark_app.sh b/scripts/demo/demo_benchmark_app.sh index 916859087e6a6e..ae222ada043541 100755 --- a/scripts/demo/demo_benchmark_app.sh +++ b/scripts/demo/demo_benchmark_app.sh @@ -125,7 +125,7 @@ fi . "$VENV_DIR/bin/activate" python -m pip install -U pip -python -m pip install -r "$ROOT_DIR/extras/open_model_zoo/tools/downloader/requirements.in" +python -m pip install -r "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/requirements.in" # Step 1. Download the Caffe model and the prototxt of the model echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n" diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh index 39540244d0e047..4c3e54140e7fe9 100755 --- a/scripts/demo/demo_security_barrier_camera.sh +++ b/scripts/demo/demo_security_barrier_camera.sh @@ -112,7 +112,7 @@ fi . "$VENV_DIR/bin/activate" python -m pip install -U pip -python -m pip install -r "$ROOT_DIR/extras/open_model_zoo/tools/downloader/requirements.in" +python -m pip install -r "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/requirements.in" # Step 1. Downloading Intel models echo -ne "\n###############|| Downloading Intel models ||###############\n\n" diff --git a/scripts/demo/demo_squeezenet_download_convert_run.sh b/scripts/demo/demo_squeezenet_download_convert_run.sh index 9047a011dcbceb..d3ef76f11cd944 100755 --- a/scripts/demo/demo_squeezenet_download_convert_run.sh +++ b/scripts/demo/demo_squeezenet_download_convert_run.sh @@ -121,7 +121,7 @@ fi . "$VENV_DIR/bin/activate" python -m pip install -U pip -python -m pip install -r "$ROOT_DIR/extras/open_model_zoo/tools/downloader/requirements.in" +python -m pip install -r "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/requirements.in" # Step 1. 
Download the Caffe model and the prototxt of the model echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n" diff --git a/thirdparty/protobuf/CMakeLists.txt b/thirdparty/protobuf/CMakeLists.txt index 611e26d200bed0..ad31a440e5002c 100644 --- a/thirdparty/protobuf/CMakeLists.txt +++ b/thirdparty/protobuf/CMakeLists.txt @@ -111,11 +111,10 @@ if(CMAKE_CROSSCOMPILING AND NOT PROTOC_VERSION VERSION_EQUAL protobuf_VERSION) endif() if (NOT BUILD_STANDALONE_STATIC) - message("NGRAPH_INSTALL_LIB = ${NGRAPH_INSTALL_LIB}") install(TARGETS ${Protobuf_LIBRARIES} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") endif() From e55a46103885135e71e253850a2ee8b024cd5cab Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Jul 2021 13:22:00 +0300 Subject: [PATCH 08/57] Added setupvars.sh to old location as well --- scripts/CMakeLists.txt | 11 +++++++++++ scripts/setupvars/setupvars.bat | 2 ++ scripts/setupvars/setupvars.sh | 4 +++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 6eba8018a922e4..211a9bc717b821 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -38,6 +38,17 @@ elseif(WIN32) COMPONENT setupvars) endif() +# TODO: remove before merge +if(UNIX) + install(PROGRAMS setupvars/setupvars.sh + DESTINATION setupvars + COMPONENT setupvars) +elseif(WIN32) + install(PROGRAMS setupvars/setupvars.bat + DESTINATION setupvars + COMPONENT setupvars) +endif() + # install install_dependencies if(UNIX) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index e3438ef7dc052f..359d607f535d6d 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -4,6 +4,8 @@ :: SPDX-License-Identifier: Apache-2.0 set ROOT=%~dp0 +:: TODO: remove before merge +call :GetFullPath "%ROOT%\.." 
ROOT set SCRIPT_NAME=%~nx0 set "INTEL_OPENVINO_DIR=%ROOT%" diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 50e86791bbfde3..76f77fa202c211 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -4,7 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" >/dev/null 2>&1 && pwd )" -INSTALLDIR="${SCRIPT_DIR}" +# TODO: remove before merge +BASE_DIR="$( dirname "$SCRIPT_DIR" )" +INSTALLDIR="${BASE_DIR}" export INTEL_OPENVINO_DIR="$INSTALLDIR" # parse command line options From e3d612dc6467f912cd5fdb405b090d73b9ce911a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 6 Jul 2021 15:06:42 +0300 Subject: [PATCH 09/57] Fixed path --- scripts/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 211a9bc717b821..c7f4759e6c221b 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -41,11 +41,11 @@ endif() # TODO: remove before merge if(UNIX) install(PROGRAMS setupvars/setupvars.sh - DESTINATION setupvars + DESTINATION bin COMPONENT setupvars) elseif(WIN32) install(PROGRAMS setupvars/setupvars.bat - DESTINATION setupvars + DESTINATION bin COMPONENT setupvars) endif() From 970871e857b02355f64d4b855ec2ea6d785638e0 Mon Sep 17 00:00:00 2001 From: y Date: Tue, 10 Aug 2021 10:03:19 +0300 Subject: [PATCH 10/57] Fixed MO install path in .co --- .ci/azure/linux.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 7e878bca3c6e76..c2adf2b02ace2f 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -172,8 +172,8 @@ jobs: continueOnError: false - script: | - export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer - . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml + export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer + . 
$(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml displayName: 'Model Optimizer UT' continueOnError: false From 331a512161b8ce41e26f10f92cf59e1e30c5820e Mon Sep 17 00:00:00 2001 From: y Date: Tue, 10 Aug 2021 11:59:59 +0300 Subject: [PATCH 11/57] Fixed install of public headers --- inference-engine/src/inference_engine/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 4a2dd556f85f43..42e29c80214746 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -281,7 +281,7 @@ endif() ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) ie_cpack_add_component(core_dev REQUIRED core ngraph_dev) -install(DIRECTORY "${PUBLIC_HEADERS_DIR}" DESTINATION runtime/include +install(DIRECTORY "${PUBLIC_HEADERS_DIR}/" DESTINATION runtime/include COMPONENT core_dev) install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets From a0434995b7a512ea009890d89da7fd54168efe61 Mon Sep 17 00:00:00 2001 From: y Date: Mon, 30 Aug 2021 15:09:46 +0300 Subject: [PATCH 12/57] Fixed frontends installation --- .../mock_mo_ngraph_frontend/CMakeLists.txt | 6 ++---- ngraph/frontend/ir/CMakeLists.txt | 6 +++--- ngraph/frontend/onnx/frontend/CMakeLists.txt | 6 +++--- .../tests/mock/mock_py_ngraph_frontend/CMakeLists.txt | 4 ++-- scripts/CMakeLists.txt | 11 ----------- scripts/setupvars/setupvars.bat | 2 -- 6 files changed, 10 insertions(+), 25 deletions(-) diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt index 232ca18727e69c..d0a51148af7f32 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt @@ -20,8 +20,6 @@ target_link_libraries(${TARGET_FE_NAME} PUBLIC ngraph PRIVATE ngraph::builder) add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) -set(NGRAPH_INSTALL_LIB "deployment_tools/ngraph/lib") - install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/ngraph/frontend/ir/CMakeLists.txt b/ngraph/frontend/ir/CMakeLists.txt index 181bfda9d62e07..cf2c778d0e873c 100644 --- a/ngraph/frontend/ir/CMakeLists.txt +++ b/ngraph/frontend/ir/CMakeLists.txt @@ -48,9 +48,9 @@ add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS}) install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/ir_frontend DESTINATION ${FRONTEND_INSTALL_INCLUDE} diff --git 
a/ngraph/frontend/onnx/frontend/CMakeLists.txt b/ngraph/frontend/onnx/frontend/CMakeLists.txt index ad830d0ddbc6cb..cfb5414618d609 100644 --- a/ngraph/frontend/onnx/frontend/CMakeLists.txt +++ b/ngraph/frontend/onnx/frontend/CMakeLists.txt @@ -56,9 +56,9 @@ if(NGRAPH_USE_PROTOBUF_LITE) endif() install(TARGETS onnx_ngraph_frontend EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_frontend ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_import diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt b/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt index b218da4751fdbb..3b11a12de915c4 100644 --- a/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt @@ -20,5 +20,5 @@ target_link_libraries(${TARGET_FE_NAME} PRIVATE ngraph::frontend_manager::static add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 10ecc94130eb63..2fc005481c8565 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -38,17 +38,6 @@ elseif(WIN32) COMPONENT setupvars) endif() -# TODO: remove before merge -if(UNIX) - install(PROGRAMS setupvars/setupvars.sh - DESTINATION bin - COMPONENT setupvars) -elseif(WIN32) - install(PROGRAMS setupvars/setupvars.bat - DESTINATION bin - COMPONENT setupvars) -endif() - # install install_dependencies if(UNIX) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 359d607f535d6d..e3438ef7dc052f 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -4,8 +4,6 @@ :: SPDX-License-Identifier: Apache-2.0 set ROOT=%~dp0 -:: TODO: remove before merge -call :GetFullPath "%ROOT%\.." 
ROOT set SCRIPT_NAME=%~nx0 set "INTEL_OPENVINO_DIR=%ROOT%" From 441d18cacb4ea52bbe8dbf8e5a9e70e616edb387 Mon Sep 17 00:00:00 2001 From: y Date: Mon, 30 Aug 2021 15:33:47 +0300 Subject: [PATCH 13/57] Updated DM config files --- .../deployment_manager/configs/darwin.json | 41 ++++++------ scripts/deployment_manager/configs/linux.json | 65 +++++++++--------- .../deployment_manager/configs/windows.json | 66 +++++++++---------- 3 files changed, 90 insertions(+), 82 deletions(-) diff --git a/scripts/deployment_manager/configs/darwin.json b/scripts/deployment_manager/configs/darwin.json index 09be8f75ed9913..fcc99ff12d41e1 100644 --- a/scripts/deployment_manager/configs/darwin.json +++ b/scripts/deployment_manager/configs/darwin.json @@ -4,7 +4,7 @@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.sh" ] }, "openvino_license": { @@ -16,18 +16,21 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/lib/intel64/libinference_engine.dylib", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.dylib", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.dylib", - "deployment_tools/inference_engine/lib/intel64/libAutoPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", - "deployment_tools/inference_engine/lib/intel64/plugins.xml", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/lib/libinference_engine.dylib", + "runtime/lib/libinference_engine_transformations.dylib", + "runtime/lib/libinference_engine_preproc.so", + "runtime/lib/libinference_engine_ir_reader.so", + "runtime/lib/libinference_engine_c_api.dylib", + "runtime/lib/libAutoPlugin.so", + "runtime/lib/libHeteroPlugin.so", + "runtime/lib/libMultiDevicePlugin.so", + "runtime/lib/libngraph.dylib", + "runtime/lib/libfrontend_manager.dylib", + "runtime/lib/libir_ngraph_frontend.dylib", + "runtime/lib/libonnx_ngraph_frontend.dylib", + "runtime/lib/libpaddlepaddle_ngraph_frontend.dylib", + "runtime/lib/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -35,8 +38,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.dylib", - "deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so" + "runtime/lib/libinference_engine_lp_transformations.dylib", + "runtime/lib/libMKLDNNPlugin.so" ] }, "vpu": { @@ -44,10 +47,10 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libmyriadPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.dylib", - "deployment_tools/inference_engine/lib/intel64/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/pcie-ma2x8x.mvcmd" + "runtime/lib/libmyriadPlugin.so", + "runtime/lib/libinference_engine_legacy.dylib", + "runtime/lib/usb-ma2x8x.mvcmd", + "runtime/lib/pcie-ma2x8x.mvcmd" ] }, "opencv": { diff --git a/scripts/deployment_manager/configs/linux.json b/scripts/deployment_manager/configs/linux.json index 0c39eeb82a4963..7393f0087a79d8 100644 --- a/scripts/deployment_manager/configs/linux.json +++ b/scripts/deployment_manager/configs/linux.json @@ -4,7 +4,7 
@@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.sh" ] }, "openvino_dependencies": { @@ -22,18 +22,21 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/lib/intel64/libinference_engine.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.so", - "deployment_tools/inference_engine/lib/intel64/libAutoPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", - "deployment_tools/inference_engine/lib/intel64/plugins.xml", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/lib/libinference_engine.so", + "runtime/lib/libinference_engine_transformations.so", + "runtime/lib/libinference_engine_preproc.so", + "runtime/lib/libinference_engine_ir_reader.so", + "runtime/lib/libinference_engine_c_api.so", + "runtime/lib/libAutoPlugin.so", + "runtime/lib/libHeteroPlugin.so", + "runtime/lib/libMultiDevicePlugin.so", + "runtime/lib/libngraph.so", + "runtime/lib/libfrontend_manager.so", + "runtime/lib/libir_ngraph_frontend.so", + "runtime/lib/libonnx_ngraph_frontend.so", + "runtime/lib/libpaddlepaddle_ngraph_frontend.so", + "runtime/lib/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -41,8 +44,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.so", - "deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so" + "runtime/lib/libinference_engine_lp_transformations.so", + "runtime/lib/libMKLDNNPlugin.so" ] }, "gpu": { @@ -50,9 +53,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/cache.json", - "deployment_tools/inference_engine/lib/intel64/libclDNNPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.so", + "runtime/lib/cache.json", + "runtime/lib/libclDNNPlugin.so", + "runtime/lib/libinference_engine_lp_transformations.so", "install_dependencies/install_NEO_OCL_driver.sh" ] }, @@ -61,12 +64,12 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/external/97-myriad-usbboot.rules", - "deployment_tools/inference_engine/lib/intel64/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/pcie-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/libmyriadPlugin.so", - "deployment_tools/inference_engine/lib/intel64/vpu_custom_kernels", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", + "runtime/3rdparty/97-myriad-usbboot.rules", + "runtime/lib/usb-ma2x8x.mvcmd", + "runtime/lib/pcie-ma2x8x.mvcmd", + "runtime/lib/libmyriadPlugin.so", + "runtime/lib/vpu_custom_kernels", + "runtime/lib/libinference_engine_legacy.so", "install_dependencies/install_NCS_udev_rules.sh" ] }, @@ -75,9 +78,11 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/external/gna", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", - "deployment_tools/inference_engine/lib/intel64/libGNAPlugin.so" + "runtime/lib/libgna.so", + 
"runtime/lib/libgna.so.2", + "runtime/lib/libgna.so.2.0.0.1226", + "runtime/lib/libinference_engine_legacy.so", + "runtime/lib/libGNAPlugin.so" ] }, "hddl": { @@ -85,9 +90,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libHDDLPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", - "deployment_tools/inference_engine/external/hddl" + "runtime/lib/libHDDLPlugin.so", + "runtime/lib/libinference_engine_legacy.so", + "runtime/3rdparty/hddl" ] }, "opencv": { diff --git a/scripts/deployment_manager/configs/windows.json b/scripts/deployment_manager/configs/windows.json index 14ceedbff8a3fb..bd3a1269dcde58 100644 --- a/scripts/deployment_manager/configs/windows.json +++ b/scripts/deployment_manager/configs/windows.json @@ -4,7 +4,7 @@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.bat" ] }, "openvino_license": { @@ -16,21 +16,21 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_preproc.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_ir_reader.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_c_api.dll", - "deployment_tools/inference_engine/bin/intel64/Release/AutoPlugin.dll", - "deployment_tools/inference_engine/lib/intel64/Release/HeteroPlugin.dll", - "deployment_tools/inference_engine/lib/intel64/Release/MultiDevicePlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/plugins.xml", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine.lib", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine_transformations.lib", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine_c_api.lib", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/bin/Release/inference_engine.dll", + "runtime/bin/Release/inference_engine_transformations.dll", + "runtime/bin/Release/inference_engine_preproc.dll", + "runtime/bin/Release/inference_engine_ir_reader.dll", + "runtime/bin/Release/inference_engine_c_api.dll", + "runtime/bin/Release/AutoPlugin.dll", + "runtime/bin/Release/HeteroPlugin.dll", + "runtime/bin/Release/MultiDevicePlugin.dll", + "runtime/bin/Release/ngraph.dll", + "runtime/bin/Release/frontend_manager.dll", + "runtime/bin/Release/ir_ngraph_frontend.dll", + "runtime/bin/Release/onnx_ngraph_frontend.dll", + "runtime/bin/Release/paddlepaddle_ngraph_frontend.dll", + "runtime/bin/Release/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -38,8 +38,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_lp_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/MKLDNNPlugin.dll" + "runtime/bin/Release/inference_engine_lp_transformations.dll", + "runtime/bin/Release/MKLDNNPlugin.dll" ] }, "gpu": { @@ -47,9 +47,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/cache.json", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_lp_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/clDNNPlugin.dll" + 
"runtime/bin/Release/cache.json", + "runtime/bin/Release/inference_engine_lp_transformations.dll", + "runtime/bin/Release/clDNNPlugin.dll" ] }, "vpu": { @@ -57,10 +57,10 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/bin/intel64/Release/pcie-ma2x8x.elf", - "deployment_tools/inference_engine/bin/intel64/Release/myriadPlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll" + "runtime/bin/Release/usb-ma2x8x.mvcmd", + "runtime/bin/Release/pcie-ma2x8x.elf", + "runtime/bin/Release/myriadPlugin.dll", + "runtime/bin/Release/inference_engine_legacy.dll" ] }, "gna": { @@ -68,9 +68,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/gna.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll", - "deployment_tools/inference_engine/bin/intel64/Release/GNAPlugin.dll" + "runtime/bin/Release/gna.dll", + "runtime/bin/Release/inference_engine_legacy.dll", + "runtime/bin/Release/GNAPlugin.dll" ] }, "hddl": { @@ -78,11 +78,11 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/HDDLPlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll", - "deployment_tools/inference_engine/bin/intel64/Release/hddl_perfcheck.exe", - "deployment_tools/inference_engine/external/MovidiusDriver", - "deployment_tools/inference_engine/external/hddl" + "runtime/bin/Release/HDDLPlugin.dll", + "runtime/bin/Release/inference_engine_legacy.dll", + "runtime/bin/Release/hddl_perfcheck.exe", + "runtime/3rdparty/MovidiusDriver", + "runtime/3rdparty/hddl" ] }, "opencv": { From 9bd5708e744a5804c90ca381a450ed6be6b6e97e Mon Sep 17 00:00:00 2001 From: y Date: Wed, 1 Sep 2021 13:03:04 +0300 Subject: [PATCH 14/57] Keep opencv in the root --- scripts/setupvars/setupvars.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 50e86791bbfde3..67f822f8acabbb 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -57,13 +57,13 @@ if [ -e "$INSTALLDIR/tools/compile_tool" ]; then export LD_LIBRARY_PATH=$INSTALLDIR/tools/compile_tool${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi -if [ -e "$INSTALLDIR/extras/opencv" ]; then - if [ -f "$INSTALLDIR/extras/opencv/setupvars.sh" ]; then - source "$INSTALLDIR/extras/opencv/setupvars.sh" +if [ -e "$INSTALLDIR/opencv" ]; then + if [ -f "$INSTALLDIR/opencv/setupvars.sh" ]; then + source "$INSTALLDIR/opencv/setupvars.sh" else - export OpenCV_DIR="$INSTALLDIR/extras/opencv/share/OpenCV" - export LD_LIBRARY_PATH="$INSTALLDIR/extras/opencv/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" - export LD_LIBRARY_PATH="$INSTALLDIR/extras/opencv/share/OpenCV/3rdparty/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" + export OpenCV_DIR="$INSTALLDIR/opencv/share/OpenCV" + export LD_LIBRARY_PATH="$INSTALLDIR/opencv/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" + export LD_LIBRARY_PATH="$INSTALLDIR/opencv/share/OpenCV/3rdparty/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" fi fi From 69d6ecc076bb107623047612236d174ff098bb75 Mon Sep 17 00:00:00 2001 From: y Date: Thu, 2 Sep 2021 18:16:17 +0300 Subject: [PATCH 15/57] Improvements --- .ci/azure/linux.yml | 2 +- docs/HOWTO/Custom_Layers_Guide.md | 2 +- docs/IE_DG/Samples_Overview.md | 2 +- 
docs/IE_DG/Tools_Overview.md | 2 +- .../Convert_Model_From_Paddle.md | 2 +- docs/get_started/get_started_linux.md | 40 +++++++++---------- docs/get_started/get_started_macos.md | 34 ++++++++-------- docs/get_started/get_started_raspbian.md | 2 +- docs/how_tos/MonoDepth_how_to.md | 10 ++--- docs/how_tos/POT_how_to_example.md | 16 ++++---- .../installing-openvino-docker-linux.md | 6 +-- .../installing-openvino-linux-ivad-vpu.md | 6 +-- .../installing-openvino-linux.md | 18 ++++----- .../installing-openvino-macos.md | 18 ++++----- .../installing-openvino-raspbian.md | 14 +++---- docs/install_guides/movidius-setup-guide.md | 2 +- scripts/demo/demo_security_barrier_camera.sh | 4 +- 17 files changed, 90 insertions(+), 90 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index d07f30839fcd59..693e25371c4d5f 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -257,7 +257,7 @@ jobs: - script: | . $(SETUPVARS) python3 -m pip install -r requirements.txt - export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer + export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH python3 -m pytest tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=TEST-tf_Roll.xmlTEST workingDirectory: $(LAYER_TESTS_DIR) diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md index 73d178c74373bf..4bea76f5902baa 100644 --- a/docs/HOWTO/Custom_Layers_Guide.md +++ b/docs/HOWTO/Custom_Layers_Guide.md @@ -313,7 +313,7 @@ operation for the CPU plugin. The code of the library is described in the [Exte To build the extension, run the following:
```bash mkdir build && cd build -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh cmake .. -DCMAKE_BUILD_TYPE=Release make --jobs=$(nproc) ``` diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 0888fe56604165..cf303f992da5c1 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -212,7 +212,7 @@ vi /.bashrc 2. Add this line to the end of the file: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 3. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key. diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index f25d2501adf49e..2df228bb3e932d 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -8,7 +8,7 @@ The OpenVINO™ toolkit installation includes the following tools: |-----------------------------------------------------------------------------|---------------------------------------| |[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/tools/accuracy_checker`| |[Post-Training Optimization Tool](@ref pot_README) | `/tools/post_training_optimization_toolkit`| -|[Model Downloader](@ref omz_tools_downloader) | `/extras/open_model_zoo/tools/model_downloader`| +|[Model Downloader](@ref omz_tools_downloader) | `/extras/open_model_zoo/extras/open_model_zoo/tools/downloader`| |[Cross Check Tool](../../inference-engine/tools/cross_check_tool/README.md) | `/tools/cross_check_tool`| |[Compile Tool](../../inference-engine/tools/compile_tool/README.md) | `/tools/compile_tool`| diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md index 65f5c8fbbab1ba..d2d75aefb08541 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md @@ -29,7 +29,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert a Paddle\* model: -1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory. +1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory. 2. Use the `mo.py` script to simply convert a model, specifying the framework, the path to the input model `.pdmodel` file and the path to an output directory with write permissions: ```sh python3 mo.py --input_model .pdmodel --output_dir --framework=paddle diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 717a7b5bcfb2e1..7381b778cc810c 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -27,11 +27,11 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` -For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/` +For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/` If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. -The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/tools` directory. 
+The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2022/tools` directory.
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `/opt/intel/openvino_2021/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: +The demo scripts in `/opt/intel/openvino_2022/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit. * Download trained models. * Perform pipeline steps and see the output on the console. @@ -235,7 +235,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/tools/model_downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -339,7 +339,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -352,7 +352,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -360,9 +360,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/extras/open_model_zoo/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/extras/open_model_zoo/demo/squeezenet1.1.labels ```
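The download-and-convert steps above lend themselves to scripting. Below is a minimal sketch that chains them together; it assumes the default `/opt/intel/openvino_2022` install path, the `squeezenet1.1` public model, and a writable `~/models` directory, so adjust those names to your setup:

```sh
#!/bin/bash
# Sketch: download SqueezeNet 1.1 and convert it to FP16 IR in one pass.
set -e

OV=/opt/intel/openvino_2022
MODELS=~/models

# Make the OpenVINO tools and Python modules available in this shell.
source "$OV/setupvars.sh"

# Download the public Caffe model with the Model Downloader.
python3 "$OV/extras/open_model_zoo/tools/downloader/downloader.py" \
    --name squeezenet1.1 -o "$MODELS"

# Convert it to FP16 IR with the Model Optimizer.
python3 "$OV/tools/model_optimizer/mo.py" \
    --input_model "$MODELS/public/squeezenet1.1/squeezenet1.1.caffemodel" \
    --data_type FP16 \
    --output_dir "$MODELS/public/squeezenet1.1/ir"

# Keep the ImageNet labels next to the IR so samples print class names.
cp "$OV/extras/open_model_zoo/demo/squeezenet1.1.labels" \
   "$MODELS/public/squeezenet1.1/ir/"
```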
@@ -373,8 +373,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png` -* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp` +* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png` +* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -384,7 +384,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -397,32 +397,32 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU ``` **MYRIAD:** > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` **HDDL:** > **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: @@ -463,7 +463,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. 
Go to the demo application build directory: ```sh @@ -480,14 +480,14 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU +./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU ``` **MYRIAD:** @@ -512,7 +512,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 740c5d447b5e3c..ca794ac98d41e5 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -27,7 +27,7 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` -For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/`. +For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/`. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. 
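Before running any samples, it can save time to confirm that the environment script actually took effect. A small sketch, assuming the default symlinked install path (the variables checked here are the ones `setupvars.sh` exports):

```sh
# Sketch: verify that the OpenVINO environment is initialized.
source /opt/intel/openvino_2022/setupvars.sh

if [ -z "$INTEL_OPENVINO_DIR" ]; then
    echo "OpenVINO environment is not set; check your install path" >&2
    exit 1
fi

echo "INTEL_OPENVINO_DIR  = $INTEL_OPENVINO_DIR"
echo "InferenceEngine_DIR = $InferenceEngine_DIR"
echo "ngraph_DIR          = $ngraph_DIR"
```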
@@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png +Image /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png classid probability label ------- ----------- ----- @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/tools/model_downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -326,7 +326,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -339,7 +339,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -347,9 +347,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/extras/open_model_zoo/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/extras/open_model_zoo/demo/squeezenet1.1.labels ```
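After conversion, it is worth checking that the IR pair was actually produced before moving on to inference. A minimal sketch, assuming the `~/models/public/squeezenet1.1/ir` output directory used above:

```sh
# Sketch: confirm the Model Optimizer produced both halves of the IR.
IR_DIR=~/models/public/squeezenet1.1/ir

for f in squeezenet1.1.xml squeezenet1.1.bin; do
    if [ ! -f "$IR_DIR/$f" ]; then
        echo "Missing $f in $IR_DIR; rerun the Model Optimizer" >&2
        exit 1
    fi
done
echo "IR is ready: $(ls "$IR_DIR")"
```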
@@ -360,8 +360,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png` -* `/opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp` +* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png` +* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -371,7 +371,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -384,11 +384,11 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` @@ -396,14 +396,14 @@ The following commands run the Image Classification Code Sample using the `car.p > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```sh Top 10 results: -Image /opt/intel/openvino_2021/extras/open_model_zoo/demo/car.png +Image /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png classid probability label ------- ----------- ----- @@ -431,7 +431,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. Go to the demo application build directory: ```sh @@ -448,7 +448,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino_2021/extras/open_model_zoo/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **MYRIAD:** @@ -466,7 +466,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. 
Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_raspbian.md b/docs/get_started/get_started_raspbian.md index 07e51ce7bba4a7..2990d5582bc3b9 100644 --- a/docs/get_started/get_started_raspbian.md +++ b/docs/get_started/get_started_raspbian.md @@ -62,7 +62,7 @@ Follow the steps below to run pre-trained Face Detection network using Inference ``` 2. Build the Object Detection Sample with the following command: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp make -j2 object_detection_sample_ssd ``` 3. Download the pre-trained Face Detection model with the [Model Downloader tool](@ref omz_tools_downloader): diff --git a/docs/how_tos/MonoDepth_how_to.md b/docs/how_tos/MonoDepth_how_to.md index 631d48b9ed31ea..69f2feba9d7c43 100644 --- a/docs/how_tos/MonoDepth_how_to.md +++ b/docs/how_tos/MonoDepth_how_to.md @@ -11,7 +11,7 @@ Tested on OpenVINO™ 2021, Ubuntu 18.04. Define the OpenVINO™ install directory: ``` -export OV=/opt/intel/openvino_2021/ +export OV=/opt/intel/openvino_2022/ ``` Define the working directory. Make sure the directory exist: ``` @@ -27,14 +27,14 @@ source $OV/setupvars.sh Install the Model Optimizer prerequisites: ``` -cd $OV/model_optimizer/install_prerequisites/ +cd $OV/tools/model_optimizer/install_prerequisites/ sudo ./install_prerequisites.sh ``` Install the Model Downloader prerequisites: ``` -cd $OV/tools/model_downloader/ +cd $OV/extras/open_model_zoo/tools/downloader/ python3 -mpip install --user -r ./requirements.in sudo python3 -mpip install --user -r ./requirements-pytorch.in sudo python3 -mpip install --user -r ./requirements-caffe2.in @@ -44,7 +44,7 @@ sudo python3 -mpip install --user -r ./requirements-caffe2.in Download all models from the Demo Models list: ``` -python3 $OV/tools/model_downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD +python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD ``` ## 4. Convert Models to Intermediate Representation (IR) @@ -52,7 +52,7 @@ python3 $OV/tools/model_downloader/downloader.py --list $OV/deployment_tools/inf Use the convert script to convert the models to ONNX*, and then to IR format: ``` cd $WD -python3 $OV/tools/model_downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst +python3 $OV/extras/open_model_zoo/tools/downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst ``` ## 5. Run Demo diff --git a/docs/how_tos/POT_how_to_example.md b/docs/how_tos/POT_how_to_example.md index abc9fedd544c45..ea06a7a322f533 100644 --- a/docs/how_tos/POT_how_to_example.md +++ b/docs/how_tos/POT_how_to_example.md @@ -16,7 +16,7 @@ Install OpenVINO™ toolkit and Model Optimizer, Accuracy Checker, and Post-trai 1. Define the OpenVINO™ install directory: ``` -export OV=/opt/intel/openvino_2021/ +export OV=/opt/intel/openvino_2022/ ``` 2. 
Install the Model Optimizer prerequisites: ``` @@ -25,7 +25,7 @@ sudo ./install_prerequisites.sh ``` 3. Install the Accuracy Checker requirements: ``` -cd $OV/extras/open_model_zoo/tools/accuracy_checker +cd $OV/tools/accuracy_checker sudo python3 setup.py install ``` 4. Install the Post-training Optimization Tool: @@ -46,7 +46,7 @@ mkdir ~/POT cd ~/POT ``` ``` -python3 $OV/tools/model_downloader/downloader.py --name mobilenet-v2-pytorch -o . +python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --name mobilenet-v2-pytorch -o . ``` ## 3. Prepare Model for Inference @@ -61,13 +61,13 @@ python3 -mpip install --user -r ./requirements-pytorch.in You can find the parameters for Mobilnet v2 conversion here: ``` -vi /opt/intel/openvino_2021/extras/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml +vi /opt/intel/openvino_2022/extras/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml ``` Convert the model from PyTorch to ONNX*: ``` cd ~/POT/public/mobilenet-v2-pytorch -python3 /opt/intel/openvino_2021/extras/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ +python3 /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ --model-name=MobileNetV2 \ --model-path=. \ --weights=mobilenet-v2.pth \ @@ -100,11 +100,11 @@ mv mobilenet-v2.bin ~/POT/model.bin Edit the configuration files: ``` -sudo vi $OV/extras/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +sudo vi $OV/tools/accuracy_checker/dataset_definitions.yml (edit imagenet_1000_classes) ``` ``` -export DEFINITIONS_FILE=/opt/intel/openvino_2021/extras/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +export DEFINITIONS_FILE=/opt/intel/openvino_2022/tools/accuracy_checker/dataset_definitions.yml ``` Copy the JSON file to my directory and edit: @@ -119,7 +119,7 @@ vi mobilenetV2_pytorch_int8.json Copy the YML file to my directory and edit: ``` -cp /opt/intel/openvino_2021/extras/open_model_zoo/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT +cp /opt/intel/openvino_2022/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT ``` ``` vi mobilenet-v2.yml diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md index 0966a6283f5435..e1e174f1e1931e 100644 --- a/docs/install_guides/installing-openvino-docker-linux.md +++ b/docs/install_guides/installing-openvino-docker-linux.md @@ -193,7 +193,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ ldconfig ``` - **CentOS 7**: @@ -223,11 +223,11 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ /bin/mkdir -p '/usr/local/include/libusb-1.0' && \ /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \ /bin/mkdir -p '/usr/local/lib/pkgconfig' && \ - printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021/setupvars.sh + printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2022/setupvars.sh WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ 
ldconfig ``` 2. Run the Docker* image: diff --git a/docs/install_guides/installing-openvino-linux-ivad-vpu.md b/docs/install_guides/installing-openvino-linux-ivad-vpu.md index 1bb02c3531b162..9e7135bdfc3593 100644 --- a/docs/install_guides/installing-openvino-linux-ivad-vpu.md +++ b/docs/install_guides/installing-openvino-linux-ivad-vpu.md @@ -11,9 +11,9 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi 1. Set the environment variables: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` -> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/runtime/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021/runtime/3rdparty/hddl`. +> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/runtime/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2022/runtime/3rdparty/hddl`. 2. Install dependencies: ```sh @@ -52,7 +52,7 @@ E: [ncAPI] [ 965618] [MainThread] ncDeviceOpen:677 Failed to find a device, ```sh kill -9 $(pidof hddldaemon autoboot) pidof hddldaemon autoboot # Make sure none of them is alive -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ${HDDL_INSTALL_DIR}/bin/bsl_reset ``` diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index ee89fa22891793..7cdb662839b5e2 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -134,7 +134,7 @@ sudo ./install.sh -s silent.cfg By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as ``: * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2021/`. + For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2022/`. 8. **Optional**: You can choose **Customize** to change the installation directory or the components you want to install: > **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for next installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions. @@ -157,7 +157,7 @@ These dependencies are required for: 1. Change to the `install_dependencies` directory: ```sh -cd /opt/intel/openvino_2021/install_dependencies +cd /opt/intel/openvino_2022/install_dependencies ``` 2. Run a script to download and install the external software dependencies: ```sh @@ -170,7 +170,7 @@ sudo -E ./install_openvino_dependencies.sh You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` **Optional:** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: @@ -182,7 +182,7 @@ vi /.bashrc 2. 
Add this line to the end of the file: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 3. Save and close the file: press the **Esc** key and type `:wq`. @@ -218,7 +218,7 @@ You can choose to either configure all supported frameworks at once **OR** confi 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -232,7 +232,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: @@ -282,7 +282,7 @@ The steps in this section are required only if you want to enable the toolkit co 1. Go to the install_dependencies directory: ```sh -cd /opt/intel/openvino_2021/install_dependencies/ +cd /opt/intel/openvino_2022/install_dependencies/ ``` 2. Install the **Intel® Graphics Compute Runtime for OpenCL™** driver components required to use the GPU plugin and write custom layers for Intel® Integrated Graphics. The drivers are not included in the package, to install it, make sure you have the internet connection and run the installation script: @@ -314,7 +314,7 @@ sudo usermod -a -G users "$(whoami)" Log out and log in for it to take effect. 2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows: ```sh - sudo cp /opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ + sudo cp /opt/intel/openvino_2022/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ ``` ```sh sudo udevadm control --reload-rules @@ -340,7 +340,7 @@ After configuration is done, you are ready to run the verification scripts with 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino_2021/extras/open_model_zoo/demo +cd /opt/intel/openvino_2022/extras/open_model_zoo/demo ``` 2. Run the **Image Classification verification script**. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 220db60df64b85..528d87ad2aa29b 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -126,7 +126,7 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/`. + For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/`. 9. If needed, click **Customize** to change the installation directory or the components you want to install: ![](../img/openvino-install-macos-04.png) > **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for next installations. 
If you want to install a newer version to a different directory, you need to uninstall the previously installed versions. @@ -145,10 +145,10 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` -If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. +If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory. Optional: The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows: @@ -160,10 +160,10 @@ If you didn't choose the default installation option, replace `/opt/intel/openvi 3. Add this line to the end of the file: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` -If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. +If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory. 4. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key. @@ -196,7 +196,7 @@ You can choose to either configure the Model Optimizer for all supported framewo 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -210,7 +210,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: @@ -279,14 +279,14 @@ Now you are ready to get started. To continue, see the following pages: Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Toolkit from your system: -1. From the the installation directory (by default, `/opt/intel/openvino_2021`), locate and open `openvino_toolkit_uninstaller.app`. +1. From the the installation directory (by default, `/opt/intel/openvino_2022`), locate and open `openvino_toolkit_uninstaller.app`. 2. Follow the uninstallation wizard instructions. 3. When uninstallation is complete, click **Finish**. ## Additional Resources -- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/extras/open_model_zoo/demo/`. +- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2022/extras/open_model_zoo/demo/`. - For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page. 
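The permanent environment setup described in this guide can also be done non-interactively. The following sketch appends the `source` line only if it is not already present; it assumes a bash shell and the default install path (macOS reads `~/.bash_profile`, Linux reads `~/.bashrc`):

```sh
# Sketch: permanently enable the OpenVINO environment, without duplicates.
SETUPVARS="source /opt/intel/openvino_2022/setupvars.sh"
PROFILE=~/.bash_profile   # use ~/.bashrc on Linux

# Append the line only if an identical one is not already in the profile.
grep -qxF "$SETUPVARS" "$PROFILE" 2>/dev/null || echo "$SETUPVARS" >> "$PROFILE"

# Apply to the current shell as well (only affects the calling shell if sourced).
eval "$SETUPVARS"
```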
diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 6848944d3320f6..af6ee21c7cc44b 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_.tgz`. 3. Create an installation folder. ```sh - sudo mkdir -p /opt/intel/openvino_2021 + sudo mkdir -p /opt/intel/openvino_2022 ``` 4. Unpack the archive: ```sh - sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino_2021 + sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino_2022 ``` Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules. @@ -97,12 +97,12 @@ CMake is installed. Continue to the next section to set the environment variable You must update several environment variables before you can compile and run OpenVINO toolkit applications. Run the following script to temporarily set the environment variables: ```sh -source /opt/intel/openvino_2021/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` **(Optional)** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: ```sh -echo "source /opt/intel/openvino_2021/setupvars.sh" >> ~/.bashrc +echo "source /opt/intel/openvino_2022/setupvars.sh" >> ~/.bashrc ``` To test your change, open a new terminal. You will see the following: @@ -120,11 +120,11 @@ This task applies only if you have an Intel® Neural Compute Stick 2 device. Log out and log in for it to take effect. 2. If you didn't modify `.bashrc` to permanently set the environment variables, run `setupvars.sh` again after logging in: ```sh - source /opt/intel/openvino_2021/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 3. To perform inference on the Intel® Neural Compute Stick 2, install the USB rules running the `install_NCS_udev_rules.sh` script: ```sh - sh /opt/intel/openvino_2021/install_dependencies/install_NCS_udev_rules.sh + sh /opt/intel/openvino_2022/install_dependencies/install_NCS_udev_rules.sh ``` 4. Plug in your Intel® Neural Compute Stick 2. @@ -140,7 +140,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc ``` 2. 
Build the Object Detection Sample: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp ``` ```sh make -j2 object_detection_sample_ssd diff --git a/docs/install_guides/movidius-setup-guide.md b/docs/install_guides/movidius-setup-guide.md index c26ebbda38d9de..0bb5de0fe69024 100644 --- a/docs/install_guides/movidius-setup-guide.md +++ b/docs/install_guides/movidius-setup-guide.md @@ -46,7 +46,7 @@ The `hddldaemon` is a system service, a binary executable that is run to manage `` refers to the following default OpenVINO™ Inference Engine directories: - **Linux:** ``` - /opt/intel/openvino_2021/inference_engine + /opt/intel/openvino_2022/inference_engine ``` - **Windows:** ``` diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh index 4c3e54140e7fe9..48c6ecb245815d 100755 --- a/scripts/demo/demo_security_barrier_camera.sh +++ b/scripts/demo/demo_security_barrier_camera.sh @@ -55,8 +55,8 @@ target_image_path="$ROOT_DIR/car_1.bmp" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../../setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi From 2c47275b7b683ae415f628e468917be5faed502e Mon Sep 17 00:00:00 2001 From: y Date: Thu, 2 Sep 2021 18:32:00 +0300 Subject: [PATCH 16/57] Fixes for demo scripts --- docs/install_guides/deployment-manager-tool.md | 4 ++-- inference-engine/src/inference_engine/CMakeLists.txt | 5 ----- scripts/demo/demo_benchmark_app.bat | 4 ++-- scripts/demo/demo_security_barrier_camera.bat | 4 ++-- scripts/demo/demo_squeezenet_download_convert_run.bat | 4 ++-- 5 files changed, 8 insertions(+), 13 deletions(-) diff --git a/docs/install_guides/deployment-manager-tool.md b/docs/install_guides/deployment-manager-tool.md index cbbe8f15361815..a7a7783767b0fd 100644 --- a/docs/install_guides/deployment-manager-tool.md +++ b/docs/install_guides/deployment-manager-tool.md @@ -92,8 +92,8 @@ To deploy the Inference Engine components from the development machine to the ta ``` * For Windows, use an archiver your prefer. - The package is unpacked to the destination directory and the following subdirectories are created: - * `bin` — Snapshot of the `bin` directory from the OpenVINO installation directory. + The package is unpacked to the destination directory and the following files and subdirectories are created: + * `setupvars.sh` — copy of `setupvars.sh` * `runtime` — Contains the OpenVINO runtime binary files. * `install_dependencies` — Snapshot of the `install_dependencies` directory from the OpenVINO installation directory. * `` — The directory with the user data (IRs, datasets, etc.) you specified while configuring the package. 
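On the target machine, the generated package is unpacked and activated in a few commands. A minimal sketch, where the archive name `openvino_deploy_package.tar.gz` and the destination directory are assumptions; substitute whatever you chose when configuring the package:

```sh
# Sketch: unpack a Deployment Manager package on the target and activate it.
PKG=openvino_deploy_package.tar.gz   # assumed name; use your actual archive
DEST="$HOME/openvino_deploy"

mkdir -p "$DEST"
tar -xzf "$PKG" -C "$DEST"

# Install OS-level dependencies shipped with the package, then set the env.
sudo -E "$DEST/install_dependencies/install_openvino_dependencies.sh"
source "$DEST/setupvars.sh"
```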
diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index ca266fa09b92aa..03d5b2c58fa4d7 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -259,11 +259,6 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND TBBROOT MATCHES ${TEMP}) DESTINATION runtime/3rdparty/tbb COMPONENT tbb) endif() - # if(EXISTS "${TBB}/doc") - # install(DIRECTORY "${TBB}/doc" - # DESTINATION runtime/3rdparty/tbb - # COMPONENT tbb) - # endif() install(FILES "${TBB}/LICENSE" DESTINATION runtime/3rdparty/tbb COMPONENT tbb) diff --git a/scripts/demo/demo_benchmark_app.bat b/scripts/demo/demo_benchmark_app.bat index 82d3f053566039..b466f04af0a9ac 100644 --- a/scripts/demo/demo_benchmark_app.bat +++ b/scripts/demo/demo_benchmark_app.bat @@ -52,8 +52,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\setupvars.bat" ( - call "%ROOT_DIR%..\..\setupvars.bat" +if exist "%ROOT_DIR%..\..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_security_barrier_camera.bat b/scripts/demo/demo_security_barrier_camera.bat index 283db8d02f1d2e..67b1846e93567b 100644 --- a/scripts/demo/demo_security_barrier_camera.bat +++ b/scripts/demo/demo_security_barrier_camera.bat @@ -43,8 +43,8 @@ set TARGET_PRECISION=FP16 echo target_precision = !TARGET_PRECISION! -if exist "%ROOT_DIR%..\..\setupvars.bat" ( - call "%ROOT_DIR%..\..\setupvars.bat" +if exist "%ROOT_DIR%..\..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_squeezenet_download_convert_run.bat b/scripts/demo/demo_squeezenet_download_convert_run.bat index bfe5e8e84d998b..71de9057a8fa3d 100644 --- a/scripts/demo/demo_squeezenet_download_convert_run.bat +++ b/scripts/demo/demo_squeezenet_download_convert_run.bat @@ -48,8 +48,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\setupvars.bat" ( - call "%ROOT_DIR%..\..\setupvars.bat" +if exist "%ROOT_DIR%..\..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error From 18d30f5d4bb255cb7d466ff1d4213e6e87c2fe07 Mon Sep 17 00:00:00 2001 From: y Date: Thu, 2 Sep 2021 18:54:51 +0300 Subject: [PATCH 17/57] Added path to TBB --- .ci/openvino-onnx/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index fa4b1177d07b3d..ca87fcdc887bf0 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -76,6 +76,6 @@ RUN make -j $(nproc) install # Run tests via tox WORKDIR /openvino/ngraph/python ENV ngraph_DIR=/openvino/dist/runtime -ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib +ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib:/openvino/dist/runtime/3rdparty/tbb/lib ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/lib/python_api/python3.8:${PYTHONPATH} CMD tox From dc70c56284fd8af04ada9ebeef662b8b0967ea85 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 3 Sep 2021 10:59:49 +0300 Subject: [PATCH 18/57] Fix for MO unit-tests --- model-optimizer/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model-optimizer/CMakeLists.txt b/model-optimizer/CMakeLists.txt 
index 7713fd51901e4b..d320fac60ed43d 100644 --- a/model-optimizer/CMakeLists.txt +++ b/model-optimizer/CMakeLists.txt @@ -51,6 +51,6 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/unit_tests EXCLUDE_FROM_ALL) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/automation - DESTINATION deployment_tools/model_optimizer + DESTINATION tools/model_optimizer COMPONENT tests EXCLUDE_FROM_ALL) From 884eba2a7be302ccb87124e77f245cd69013af16 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 3 Sep 2021 11:00:50 +0300 Subject: [PATCH 19/57] Fixed tests on Windows --- .ci/azure/windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index c8d63bf69689c5..81c1ba956017d8 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -33,7 +33,7 @@ jobs: MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe INSTALL_DIR: $(WORK_DIR)\install_pkg INSTALL_TEST_DIR: $(INSTALL_DIR)\tests - SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + SETUPVARS: $(INSTALL_DIR)\setupvars.bat steps: - script: | From d308eca092b887b36d99052eb4b97e4aa3a20f2f Mon Sep 17 00:00:00 2001 From: y Date: Fri, 3 Sep 2021 13:08:42 +0300 Subject: [PATCH 20/57] Reverted arch --- cmake/developer_package/packaging.cmake | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index b35a25c11f05da..7708de5c77b921 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,13 +12,13 @@ include(CPackComponent) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/bin PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) endif() endfunction() From 82a0454cee2ca8f5ea211fec96aab9d7a67cdd00 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 3 Sep 2021 16:09:21 +0300 Subject: [PATCH 21/57] Removed arch --- cmake/developer_package/packaging.cmake | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index 7708de5c77b921..b35a25c11f05da 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,13 +12,13 @@ include(CPackComponent) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$ PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH 
runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) endif() endfunction() From 82a0454cee2ca8f5ea211fec96aab9d7a67cdd00 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 3 Sep 2021 16:09:21 +0300 Subject: [PATCH 21/57] Removed arch --- cmake/developer_package/packaging.cmake | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index 7708de5c77b921..b35a25c11f05da 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,13 +12,13 @@ include(CPackComponent) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) endif() endfunction() From 7bedf53ef03e440f38400d735982f2404889b53a Mon Sep 17 00:00:00 2001 From: y Date: Mon, 6 Sep 2021 17:30:40 +0300 Subject: [PATCH 22/57] Reverted arch back: second attemp --- cmake/developer_package/packaging.cmake | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index b35a25c11f05da..e947edeb881ed4 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,13 +12,13 @@ include(CPackComponent) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/bin PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/${ARCH_FOLDER}/lib/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/${ARCH_FOLDER}/bin/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/${ARCH_FOLDER}/lib/$<CONFIG> PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/lib PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/lib PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) endif() endfunction() From f72f53f323a97229a441c7e92a8902f99b39cf27 Mon Sep 17 00:00:00 2001 From: y Date: Mon, 6 Sep 2021 18:34:21 +0300 Subject: [PATCH 23/57] System type --- scripts/setupvars/setupvars.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 67f822f8acabbb..2dae9cc7cf8b83 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -28,7 +28,8 @@ if [ -e "$INSTALLDIR/runtime" ]; then export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/runtime/cmake export ngraph_DIR=$INTEL_OPENVINO_DIR/runtime/cmake - IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib + system_type=$(ls "$INTEL_OPENVINO_DIR/runtime/lib/") + IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib/$system_type export HDDL_INSTALL_DIR=$INSTALLDIR/runtime/3rdparty/hddl if [[ "$OSTYPE" == "darwin"* ]]; then From 455152e44a9d09d8da8d6e77b45466279d24cfec Mon Sep 17 00:00:00 2001 From: y Date: Tue, 7 Sep 2021 12:37:07 +0300 Subject: [PATCH 24/57] Fix for Windows --- scripts/setupvars/setupvars.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index e3438ef7dc052f..548ffb4897693c 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -47,7 +47,7 @@ set "PATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PATH%" set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\Release;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" :: TBB if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( From
362b6a55afd074df686546988980a183f8abbeb7 Mon Sep 17 00:00:00 2001 From: y Date: Tue, 7 Sep 2021 19:06:32 +0300 Subject: [PATCH 25/57] Resolve merge conflicts --- docs/IE_DG/Tools_Overview.md | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index b1ccedb5b66953..159808956f410e 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -6,19 +6,11 @@ The OpenVINO™ toolkit installation includes the following tools: |Tool | Location in the Installation Directory| |-----------------------------------------------------------------------------|---------------------------------------| -<<<<<<< HEAD |[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/tools/accuracy_checker`| |[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/tools/post_training_optimization_toolkit`| |[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/extras/open_model_zoo/extras/open_model_zoo/tools/downloader`| |[Cross Check Tool](../../inference-engine/tools/cross_check_tool/README.md) | `<INSTALL_DIR>/tools/cross_check_tool`| |[Compile Tool](../../inference-engine/tools/compile_tool/README.md) | `<INSTALL_DIR>/tools/compile_tool`| -======= -|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/deployment_tools/tools/open_model_zoo/tools/accuracy_checker`| -|[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/deployment_tools/tools/post_training_optimization_toolkit`| -|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/deployment_tools/tools/model_downloader`| -|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/deployment_tools/tools/cross_check_tool`| -|[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/deployment_tools/inference_engine/lib/intel64/`| ->>>>>>> upstream/master +|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/tools/cross_check_tool`| +|[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/tools/compile_tool`| ## See Also From ce78b3f31ae13b482609516129245a9d1a860c9e Mon Sep 17 00:00:00 2001 From: y Date: Tue, 7 Sep 2021 19:08:19 +0300 Subject: [PATCH 26/57] Fixed path --- docs/IE_DG/Tools_Overview.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index 159808956f410e..e3acfa7fb483b7 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -8,7 +8,7 @@ The OpenVINO™ toolkit installation includes the following tools: |-----------------------------------------------------------------------------|---------------------------------------| |[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/tools/accuracy_checker`| |[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/tools/post_training_optimization_toolkit`| -|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/extras/open_model_zoo/extras/open_model_zoo/tools/downloader`| +|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/extras/open_model_zoo/tools/downloader`| |[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/tools/cross_check_tool`| |[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/tools/compile_tool`| From 54300d9b55b814e377cf70e89997f8d0911045df Mon Sep 17 00:00:00 2001 From: y Date: Tue, 7 Sep 2021 20:23:59 +0300 Subject: [PATCH 27/57] Path for Windows --- scripts/setupvars/setupvars.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 548ffb4897693c..6bcfb9bc30a7d9 100644 ---
a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -47,7 +47,7 @@ set "PATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PATH%" set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\Release;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" :: TBB if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( From 75b297add2ee0068f2e260e85178ee8c2b7011bd Mon Sep 17 00:00:00 2001 From: y Date: Thu, 9 Sep 2021 19:12:17 +0300 Subject: [PATCH 28/57] Added debug for Windows --- scripts/setupvars/setupvars.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 6bcfb9bc30a7d9..fb92fe8df98ad2 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -47,7 +47,7 @@ set "PATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PATH%" set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" :: TBB if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( From 76cdd576b5b53f671a731f0253b9aae2abfe7aa4 Mon Sep 17 00:00:00 2001 From: y Date: Thu, 9 Sep 2021 19:16:24 +0300 Subject: [PATCH 29/57] Added requirements_dev.txt to install --- model-optimizer/CMakeLists.txt | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/model-optimizer/CMakeLists.txt b/model-optimizer/CMakeLists.txt index d320fac60ed43d..220388cc87186e 100644 --- a/model-optimizer/CMakeLists.txt +++ b/model-optimizer/CMakeLists.txt @@ -31,13 +31,13 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ PATTERN "requirements_dev.txt" EXCLUDE PATTERN "README.md" EXCLUDE PATTERN "CMakeLists.txt" EXCLUDE - + PATTERN "extensions/front/caffe/CustomLayersMapping.xml" EXCLUDE PATTERN "mo/utils/convert.py" EXCLUDE PATTERN "unit_tests" EXCLUDE PATTERN "openvino_mo.egg-info" EXCLUDE PATTERN "build" EXCLUDE - + REGEX ".*__pycache__.*" EXCLUDE REGEX ".*\\.pyc$" EXCLUDE REGEX ".*\\.swp" EXCLUDE @@ -45,12 +45,17 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ REGEX ".*_test\.py$" EXCLUDE ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/unit_tests +install(FILES requirements_dev.txt + DESTINATION tools/model_optimizer + COMPONENT tests + EXCLUDE_FROM_ALL) + +install(DIRECTORY unit_tests DESTINATION tools/model_optimizer COMPONENT tests EXCLUDE_FROM_ALL) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/automation +install(DIRECTORY automation DESTINATION tools/model_optimizer COMPONENT tests EXCLUDE_FROM_ALL) From 1b83a69599ff40d66735bbce9f42521ac6ac9c2c Mon Sep 17 00:00:00 2001 From: y Date: Thu, 9 Sep 2021 19:47:37 +0300 Subject: [PATCH 30/57] Fixed wheel's setup.py --- inference-engine/ie_bridges/python/wheel/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index 
eaa2417678f676..715043236f6f40 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -42,8 +42,8 @@ # The following variables can be defined in environment or .env file CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.') -OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', '') -TBB_LIBS_DIR = config('TBB_LIBS_DIR', '') +OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', f'runtime/{LIBS_DIR}/{ARCH}/{CONFIG}') +TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'runtime/3rdparty/tbb/{LIBS_DIR}') PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}') LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path' From 7e251b72d3d028edcbbc85f9f2ae25ea02745e68 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 12:58:08 +0300 Subject: [PATCH 31/57] Fixed lin build --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 40a888800dac98..53d17f1f909f68 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -83,13 +83,13 @@ include(cmake/test_model_zoo.cmake) add_subdirectory(thirdparty) add_subdirectory(openvino) add_subdirectory(ngraph) +add_subdirectory(runtime) add_subdirectory(inference-engine) # for Template plugin openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph_backend interpreter_backend) include(cmake/extra_modules.cmake) -add_subdirectory(runtime) add_subdirectory(model-optimizer) add_subdirectory(docs) add_subdirectory(tools) From 2092312b58af25f5f8e8e4484e6486357e78f7cc Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 16:28:04 +0300 Subject: [PATCH 32/57] Fixes after merge --- inference-engine/ie_bridges/c/src/CMakeLists.txt | 4 ++-- inference-engine/src/inference_engine/CMakeLists.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/inference-engine/ie_bridges/c/src/CMakeLists.txt b/inference-engine/ie_bridges/c/src/CMakeLists.txt index b880ee3bdb8d84..cec561d39ee6e3 100644 --- a/inference-engine/ie_bridges/c/src/CMakeLists.txt +++ b/inference-engine/ie_bridges/c/src/CMakeLists.txt @@ -42,13 +42,13 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + INCLUDES DESTINATION runtime/include/ie) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + INCLUDES DESTINATION runtime/include/ie) install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/ DESTINATION runtime/include/ie diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index cf5fff60278f80..c0003923a34b73 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -286,16 +286,16 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + INCLUDES DESTINATION runtime/include/ie) 
set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME runtime) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include + INCLUDES DESTINATION runtime/include # TODO: remove later once samples are updated - ${IE_CPACK_IE_DIR}/include/ie) + runtime/include/ie) install(FILES $<TARGET_FILE_DIR:${TARGET_NAME}>/plugins.xml DESTINATION ${IE_CPACK_RUNTIME_PATH} From 24030854a406cf704ebba9e29e78e87fe428369a Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 17:04:32 +0300 Subject: [PATCH 33/57] Fix 2 --- inference-engine/src/inference_engine/CMakeLists.txt | 4 ++-- ngraph/core/CMakeLists.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index c0003923a34b73..267f79bfa5232c 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -321,7 +321,7 @@ install(EXPORT InferenceEngineTargets install(EXPORT OpenVINOTargets FILE OpenVINOTargets.cmake NAMESPACE openvino:: - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) set(IE_NGRAPH_DIR "${CMAKE_BINARY_DIR}/ngraph") @@ -366,5 +366,5 @@ install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" install(FILES "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index 4e2a75122b2314..43ea6bb58892ac 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -102,7 +102,7 @@ install(TARGETS ngraph EXPORT ngraphTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph - INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + INCLUDES DESTINATION runtime/include) # because ngraph is exported in multiple export list # it needs to be exported in each list it's used @@ -110,13 +110,13 @@ install(TARGETS ngraph EXPORT InferenceEngineTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph - INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + INCLUDES DESTINATION runtime/include) install(TARGETS ngraph EXPORT OpenVINOTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph - INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + INCLUDES DESTINATION runtime/include) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION "runtime/include" From c2284d5f51501c4da84a2a5b31341b5a9ec00e78 Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 17:32:52 +0300 Subject: [PATCH 34/57] Fixes --- ngraph/frontend/frontend_manager/CMakeLists.txt | 6 +++--- ngraph/frontend/paddlepaddle/CMakeLists.txt | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index d9d88b811586c2..89c8a9c6d53b1c 100644 ---
a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -57,9 +57,9 @@ install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::manager) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/frontend_manager DESTINATION ${FRONTEND_INSTALL_INCLUDE} diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index e92cf630911f31..8ee6c0037fe23c 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -89,9 +89,9 @@ install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::paddlepaddle) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/paddlepaddle_frontend DESTINATION ${FRONTEND_INSTALL_INCLUDE} From c1c2c7eff644c6dfacc1923dfa04c160f06ea0fe Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 19:19:38 +0300 Subject: [PATCH 35/57] Frontends path --- scripts/setupvars/setupvars.bat | 1 + scripts/setupvars/setupvars.sh | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index e97eba1966520d..e19444384e60de 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -49,6 +49,7 @@ set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "OpenVINO_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" +set "OV_FRONTEND_PATH=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release'%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%OV_FRONTEND_PATH%" :: TBB if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index fb9b49e67cceeb..c021e2f93e70e7 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -25,12 +25,13 @@ shift done if [ -e "$INSTALLDIR/runtime" ]; then - export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/runtime/cmake - export ngraph_DIR=$INTEL_OPENVINO_DIR/runtime/cmake - export OpenVINO_DIR=$INTEL_OPENVINO_DIR/runtime/cmake + export InferenceEngine_DIR=$INSTALLDIR/runtime/cmake + export ngraph_DIR=$INSTALLDIR/runtime/cmake + export OpenVINO_DIR=$INSTALLDIR/runtime/cmake - system_type=$(ls "$INTEL_OPENVINO_DIR/runtime/lib/") - IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib/$system_type + system_type=$(ls "$INSTALLDIR/runtime/lib/") + 
IE_PLUGINS_PATH=$INSTALLDIR/runtime/lib/$system_type + export OV_FRONTEND_PATH=$IE_PLUGINS_PATH{OV_FRONTEND_PATH:+:$OV_FRONTEND_PATH} export HDDL_INSTALL_DIR=$INSTALLDIR/runtime/3rdparty/hddl if [[ "$OSTYPE" == "darwin"* ]]; then From 454a0a886a26ed6de86552c9f7044de52b666e6f Mon Sep 17 00:00:00 2001 From: y Date: Fri, 10 Sep 2021 19:45:27 +0300 Subject: [PATCH 36/57] Fixed deployment manager --- tools/deployment_manager/configs/darwin.json | 38 ++++++------ tools/deployment_manager/configs/linux.json | 60 +++++++++---------- tools/deployment_manager/configs/windows.json | 56 ++++++++--------- 3 files changed, 77 insertions(+), 77 deletions(-) diff --git a/tools/deployment_manager/configs/darwin.json b/tools/deployment_manager/configs/darwin.json index 72fa36fbb37e12..452b14c0ef4c1a 100644 --- a/tools/deployment_manager/configs/darwin.json +++ b/tools/deployment_manager/configs/darwin.json @@ -16,19 +16,19 @@ "ie_core": { "group": ["ie"], "files": [ - "runtime/lib/libinference_engine.dylib", - "runtime/lib/libinference_engine_transformations.dylib", - "runtime/lib/libinference_engine_preproc.so", - "runtime/lib/libinference_engine_ir_reader.so", - "runtime/lib/libinference_engine_c_api.dylib", - "runtime/lib/libHeteroPlugin.so", - "runtime/lib/libMultiDevicePlugin.so", - "runtime/lib/libngraph.dylib", - "runtime/lib/libfrontend_manager.dylib", - "runtime/lib/libir_ngraph_frontend.dylib", - "runtime/lib/libonnx_ngraph_frontend.dylib", - "runtime/lib/libpaddlepaddle_ngraph_frontend.dylib", - "runtime/lib/plugins.xml", + "runtime/lib/intel64/libinference_engine.dylib", + "runtime/lib/intel64/libinference_engine_transformations.dylib", + "runtime/lib/intel64/libinference_engine_preproc.so", + "runtime/lib/intel64/libinference_engine_ir_reader.so", + "runtime/lib/intel64/libinference_engine_c_api.dylib", + "runtime/lib/intel64/libHeteroPlugin.so", + "runtime/lib/intel64/libMultiDevicePlugin.so", + "runtime/lib/intel64/libngraph.dylib", + "runtime/lib/intel64/libfrontend_manager.dylib", + "runtime/lib/intel64/libir_ngraph_frontend.dylib", + "runtime/lib/intel64/libonnx_ngraph_frontend.dylib", + "runtime/lib/intel64/libpaddlepaddle_ngraph_frontend.dylib", + "runtime/lib/intel64/plugins.xml", "runtime/3rdparty/tbb" ] }, @@ -37,8 +37,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/libinference_engine_lp_transformations.dylib", - "runtime/lib/libMKLDNNPlugin.so" + "runtime/lib/intel64/libinference_engine_lp_transformations.dylib", + "runtime/lib/intel64/libMKLDNNPlugin.so" ] }, "vpu": { @@ -46,10 +46,10 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/libmyriadPlugin.so", - "runtime/lib/libinference_engine_legacy.dylib", - "runtime/lib/usb-ma2x8x.mvcmd", - "runtime/lib/pcie-ma2x8x.mvcmd" + "runtime/lib/intel64/libmyriadPlugin.so", + "runtime/lib/intel64/libinference_engine_legacy.dylib", + "runtime/lib/intel64/usb-ma2x8x.mvcmd", + "runtime/lib/intel64/pcie-ma2x8x.mvcmd" ] }, "opencv": { diff --git a/tools/deployment_manager/configs/linux.json b/tools/deployment_manager/configs/linux.json index f0865d068c274b..29912e3ff3e389 100644 --- a/tools/deployment_manager/configs/linux.json +++ b/tools/deployment_manager/configs/linux.json @@ -22,19 +22,19 @@ "ie_core": { "group": ["ie"], "files": [ - "runtime/lib/libinference_engine.so", - "runtime/lib/libinference_engine_transformations.so", - "runtime/lib/libinference_engine_preproc.so", - "runtime/lib/libinference_engine_ir_reader.so", - "runtime/lib/libinference_engine_c_api.so", - 
"runtime/lib/libHeteroPlugin.so", - "runtime/lib/libMultiDevicePlugin.so", - "runtime/lib/libngraph.so", - "runtime/lib/libfrontend_manager.so", - "runtime/lib/libir_ngraph_frontend.so", - "runtime/lib/libonnx_ngraph_frontend.so", - "runtime/lib/libpaddlepaddle_ngraph_frontend.so", - "runtime/lib/plugins.xml", + "runtime/lib/intel64/libinference_engine.so", + "runtime/lib/intel64/libinference_engine_transformations.so", + "runtime/lib/intel64/libinference_engine_preproc.so", + "runtime/lib/intel64/libinference_engine_ir_reader.so", + "runtime/lib/intel64/libinference_engine_c_api.so", + "runtime/lib/intel64/libHeteroPlugin.so", + "runtime/lib/intel64/libMultiDevicePlugin.so", + "runtime/lib/intel64/libngraph.so", + "runtime/lib/intel64/libfrontend_manager.so", + "runtime/lib/intel64/libir_ngraph_frontend.so", + "runtime/lib/intel64/libonnx_ngraph_frontend.so", + "runtime/lib/intel64/libpaddlepaddle_ngraph_frontend.so", + "runtime/lib/intel64/plugins.xml", "runtime/3rdparty/tbb" ] }, @@ -43,8 +43,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/libinference_engine_lp_transformations.so", - "runtime/lib/libMKLDNNPlugin.so" + "runtime/lib/intel64/libinference_engine_lp_transformations.so", + "runtime/lib/intel64/libMKLDNNPlugin.so" ] }, "gpu": { @@ -52,9 +52,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/cache.json", - "runtime/lib/libclDNNPlugin.so", - "runtime/lib/libinference_engine_lp_transformations.so", + "runtime/lib/intel64/cache.json", + "runtime/lib/intel64/libclDNNPlugin.so", + "runtime/lib/intel64/libinference_engine_lp_transformations.so", "install_dependencies/install_NEO_OCL_driver.sh" ] }, @@ -64,11 +64,11 @@ "dependencies" : ["ie_core"], "files": [ "runtime/3rdparty/97-myriad-usbboot.rules", - "runtime/lib/usb-ma2x8x.mvcmd", - "runtime/lib/pcie-ma2x8x.mvcmd", - "runtime/lib/libmyriadPlugin.so", - "runtime/lib/vpu_custom_kernels", - "runtime/lib/libinference_engine_legacy.so", + "runtime/lib/intel64/usb-ma2x8x.mvcmd", + "runtime/lib/intel64/pcie-ma2x8x.mvcmd", + "runtime/lib/intel64/libmyriadPlugin.so", + "runtime/lib/intel64/vpu_custom_kernels", + "runtime/lib/intel64/libinference_engine_legacy.so", "install_dependencies/install_NCS_udev_rules.sh" ] }, @@ -77,11 +77,11 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/libgna.so", - "runtime/lib/libgna.so.2", - "runtime/lib/libgna.so.2.0.0.1226", - "runtime/lib/libinference_engine_legacy.so", - "runtime/lib/libGNAPlugin.so" + "runtime/lib/intel64/libgna.so", + "runtime/lib/intel64/libgna.so.2", + "runtime/lib/intel64/libgna.so.2.0.0.1226", + "runtime/lib/intel64/libinference_engine_legacy.so", + "runtime/lib/intel64/libGNAPlugin.so" ] }, "hddl": { @@ -89,8 +89,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/lib/libHDDLPlugin.so", - "runtime/lib/libinference_engine_legacy.so", + "runtime/lib/intel64/libHDDLPlugin.so", + "runtime/lib/intel64/libinference_engine_legacy.so", "runtime/3rdparty/hddl" ] }, diff --git a/tools/deployment_manager/configs/windows.json b/tools/deployment_manager/configs/windows.json index 1ff334d380bcbd..f49d9d3b16a81a 100644 --- a/tools/deployment_manager/configs/windows.json +++ b/tools/deployment_manager/configs/windows.json @@ -16,19 +16,19 @@ "ie_core": { "group": ["ie"], "files": [ - "runtime/bin/Release/inference_engine.dll", - "runtime/bin/Release/inference_engine_transformations.dll", - "runtime/bin/Release/inference_engine_preproc.dll", - 
"runtime/bin/Release/inference_engine_ir_reader.dll", - "runtime/bin/Release/inference_engine_c_api.dll", - "runtime/bin/Release/HeteroPlugin.dll", - "runtime/bin/Release/MultiDevicePlugin.dll", - "runtime/bin/Release/ngraph.dll", - "runtime/bin/Release/frontend_manager.dll", - "runtime/bin/Release/ir_ngraph_frontend.dll", - "runtime/bin/Release/onnx_ngraph_frontend.dll", - "runtime/bin/Release/paddlepaddle_ngraph_frontend.dll", - "runtime/bin/Release/plugins.xml", + "runtime/bin/intel64/Release/inference_engine.dll", + "runtime/bin/intel64/Release/inference_engine_transformations.dll", + "runtime/bin/intel64/Release/inference_engine_preproc.dll", + "runtime/bin/intel64/Release/inference_engine_ir_reader.dll", + "runtime/bin/intel64/Release/inference_engine_c_api.dll", + "runtime/bin/intel64/Release/HeteroPlugin.dll", + "runtime/bin/intel64/Release/MultiDevicePlugin.dll", + "runtime/bin/intel64/Release/ngraph.dll", + "runtime/bin/intel64/Release/frontend_manager.dll", + "runtime/bin/intel64/Release/ir_ngraph_frontend.dll", + "runtime/bin/intel64/Release/onnx_ngraph_frontend.dll", + "runtime/bin/intel64/Release/paddlepaddle_ngraph_frontend.dll", + "runtime/bin/intel64/Release/plugins.xml", "runtime/3rdparty/tbb" ] }, @@ -37,8 +37,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/bin/Release/inference_engine_lp_transformations.dll", - "runtime/bin/Release/MKLDNNPlugin.dll" + "runtime/bin/intel64/Release/inference_engine_lp_transformations.dll", + "runtime/bin/intel64/Release/MKLDNNPlugin.dll" ] }, "gpu": { @@ -46,9 +46,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/bin/Release/cache.json", - "runtime/bin/Release/inference_engine_lp_transformations.dll", - "runtime/bin/Release/clDNNPlugin.dll" + "runtime/bin/intel64/Release/cache.json", + "runtime/bin/intel64/Release/inference_engine_lp_transformations.dll", + "runtime/bin/intel64/Release/clDNNPlugin.dll" ] }, "vpu": { @@ -56,10 +56,10 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/bin/Release/usb-ma2x8x.mvcmd", - "runtime/bin/Release/pcie-ma2x8x.elf", - "runtime/bin/Release/myriadPlugin.dll", - "runtime/bin/Release/inference_engine_legacy.dll" + "runtime/bin/intel64/Release/usb-ma2x8x.mvcmd", + "runtime/bin/intel64/Release/pcie-ma2x8x.elf", + "runtime/bin/intel64/Release/myriadPlugin.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll" ] }, "gna": { @@ -67,9 +67,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/bin/Release/gna.dll", - "runtime/bin/Release/inference_engine_legacy.dll", - "runtime/bin/Release/GNAPlugin.dll" + "runtime/bin/intel64/Release/gna.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll", + "runtime/bin/intel64/Release/GNAPlugin.dll" ] }, "hddl": { @@ -77,9 +77,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "runtime/bin/Release/HDDLPlugin.dll", - "runtime/bin/Release/inference_engine_legacy.dll", - "runtime/bin/Release/hddl_perfcheck.exe", + "runtime/bin/intel64/Release/HDDLPlugin.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll", + "runtime/bin/intel64/Release/hddl_perfcheck.exe", "runtime/3rdparty/MovidiusDriver", "runtime/3rdparty/hddl" ] From 557fd8f0a77db8b275051b1ad7cd5cd323146e70 Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 15:46:26 +0300 Subject: [PATCH 37/57] Fixed Windows --- cmake/developer_package/packaging.cmake | 6 +++--- scripts/setupvars/setupvars.bat | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index e947edeb881ed4..7708de5c77b921 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -12,9 +12,9 @@ include(CPackComponent) # function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH runtime/${ARCH_FOLDER}/lib/$<CONFIG> PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH runtime/${ARCH_FOLDER}/bin/$<CONFIG> PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH runtime/${ARCH_FOLDER}/lib/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE) else() set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index e19444384e60de..364c651be48816 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -49,7 +49,7 @@ set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "OpenVINO_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" -set "OV_FRONTEND_PATH=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release'%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%OV_FRONTEND_PATH%" +set "OV_FRONTEND_PATH=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%OV_FRONTEND_PATH%" :: TBB if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( From 6a7cc2e3c633d9b8c26fe6635c6fd1a7e80eff45 Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 17:44:21 +0300 Subject: [PATCH 38/57] Added cldnn unit tests installation --- inference-engine/thirdparty/clDNN/tests/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt index 9d865973a0442d..02189c28ffc01c 100644 --- a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt @@ -116,3 +116,8 @@ endif() ie_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(${__CLDNN_AllSources} PROPERTIES COMPILE_FLAGS "${sse4_2_flags}") + +install(TARGETS "${CLDNN_BUILD__PROJ}" + RUNTIME DESTINATION tests + COMPONENT tests + EXCLUDE_FROM_ALL) From 9341cca94674b0766706f2fc8f851ea4b98ac0cb Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 18:07:13 +0300 Subject: [PATCH 39/57] Install samples --- inference-engine/CMakeLists.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 7cc6ffa3744f42..bf8451c9ab94f8 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -14,6 +14,18 @@ endif() add_subdirectory(samples) +# TODO: remove this +if(ENABLE_SAMPLES) + install(TARGETS benchmark_app classification_sample_async hello_classification + hello_nv12_input_classification hello_query_device hello_reshape_ssd + ngraph_function_creation_sample object_detection_sample_ssd + speech_sample style_transfer_sample hello_classification_c + object_detection_sample_ssd_c hello_nv12_input_classification_c + RUNTIME DESTINATION tests + COMPONENT tests + EXCLUDE_FROM_ALL)
+endif() + openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) if(ENABLE_TESTS) From 0de6c6dca54e4beadbcddd80abcb11fabc0606a0 Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 20:56:13 +0300 Subject: [PATCH 40/57] Fix samples --- inference-engine/CMakeLists.txt | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index bf8451c9ab94f8..e67a5e0cc98fdb 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -16,14 +16,16 @@ add_subdirectory(samples) # TODO: remove this if(ENABLE_SAMPLES) - install(TARGETS benchmark_app classification_sample_async hello_classification - hello_nv12_input_classification hello_query_device hello_reshape_ssd - ngraph_function_creation_sample object_detection_sample_ssd - speech_sample style_transfer_sample hello_classification_c - object_detection_sample_ssd_c hello_nv12_input_classification_c - RUNTIME DESTINATION tests - COMPONENT tests - EXCLUDE_FROM_ALL) + foreach(sample benchmark_app classification_sample_async hello_classification + hello_nv12_input_classification hello_query_device hello_reshape_ssd + ngraph_function_creation_sample object_detection_sample_ssd + speech_sample style_transfer_sample hello_classification_c + object_detection_sample_ssd_c hello_nv12_input_classification_c) + if(TARGET ${sample}) + install(TARGETS ${sample} + RUNTIME DESTINATION tests + COMPONENT tests + EXCLUDE_FROM_ALL) endif() openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) From 914fcb1fe2255bf241b110c462f7b2bbebb83a7b Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 21:00:37 +0300 Subject: [PATCH 41/57] Fix path for samples --- inference-engine/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index e67a5e0cc98fdb..67da6765ccf61f 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -23,7 +23,7 @@ if(ENABLE_SAMPLES) object_detection_sample_ssd_c hello_nv12_input_classification_c) if(TARGET ${sample}) install(TARGETS ${sample} - RUNTIME DESTINATION tests + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL) endif() From 21ab8f1e3f8d60ccc4e415dbde47d83048d92895 Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 21:38:51 +0300 Subject: [PATCH 42/57] Proper path --- .../python/src/openvino/inference_engine/CMakeLists.txt | 6 ++++-- .../src/openvino/offline_transformations/CMakeLists.txt | 8 +++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt index 5fcdd37c790c77..c55ae9cd1e84c6 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt @@ -72,8 +72,10 @@ add_custom_command(TARGET ${TARGET_NAME} # install install(TARGETS ${INSTALLED_TARGETS} - RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT} - LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT}) + RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine + COMPONENT
${PYTHON_COMPONENT} + LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine + COMPONENT ${PYTHON_COMPONENT}) install(PROGRAMS __init__.py DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt index 5aeb5224aa14eb..0aa8280bc5c5d1 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt @@ -59,9 +59,11 @@ add_custom_command(TARGET ${TARGET_NAME} # ie_cpack_add_component(${PYTHON_VERSION}_dev DEPENDS ${PYTHON_COMPONENT}) install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT} - LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}) + RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations + COMPONENT ${PYTHON_COMPONENT} + LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations + COMPONENT ${PYTHON_COMPONENT}) install(PROGRAMS __init__.py - DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations + DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}) From d7cce14208857b6cdd3c7e547fab88586bba24c6 Mon Sep 17 00:00:00 2001 From: y Date: Sat, 11 Sep 2021 21:48:15 +0300 Subject: [PATCH 43/57] Try to fix MO hardcodes --- model-optimizer/mo/utils/find_ie_version.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/model-optimizer/mo/utils/find_ie_version.py b/model-optimizer/mo/utils/find_ie_version.py index 59fada431fe58a..3c9e698ae91702 100644 --- a/model-optimizer/mo/utils/find_ie_version.py +++ b/model-optimizer/mo/utils/find_ie_version.py @@ -88,11 +88,10 @@ def find_ie_version(silent=False): bindings_paths_windows = [ # Package { - "module": os.path.join(script_path, '../../../../python/', python_version), + "module": os.path.join(script_path, '../../../python/', python_version), "libs": [ - os.path.join(script_path, '../../../inference_engine/bin/intel64/Release'), - os.path.join(script_path, '../../../inference_engine/external/tbb/bin'), - os.path.join(script_path, '../../../ngraph/lib'), + os.path.join(script_path, '../../../runtime/bin/intel64/Release'), + os.path.join(script_path, '../../../runtime/3rdparty/tbb/bin'), ], }, # Local builds @@ -118,11 +117,10 @@ def find_ie_version(silent=False): bindings_paths_linux = [ # Package { - "module": os.path.join(script_path, '../../../../python/', python_version), + "module": os.path.join(script_path, '../../../python/', python_version), "libs": [ - os.path.join(script_path, '../../../inference_engine/lib/intel64'), - os.path.join(script_path, '../../../inference_engine/external/tbb/lib'), - os.path.join(script_path, '../../../ngraph/lib'), + os.path.join(script_path, '../../../runtime/lib/intel64'), + os.path.join(script_path, '../../../runtime/3rdparty/tbb/lib'), ], }, # Local builds From 30dd84843679b81b36eb93f29bda9265d532b17a Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 10:14:19 +0300 Subject: [PATCH 44/57] samples binary location --- inference-engine/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 1ca75fcf3aea57..42c2cd842e9474 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -22,7 +22,7 @@ foreach(sample benchmark_app classification_sample_async hello_classification object_detection_sample_ssd_c hello_nv12_input_classification_c) if(TARGET ${sample}) install(TARGETS ${sample} - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) endif() From 6ec71b10bd3f4fc6317fbe4951e9cea5d5007965 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 10:35:08 +0300 Subject: [PATCH 45/57] MO print --- model-optimizer/mo/utils/find_ie_version.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/model-optimizer/mo/utils/find_ie_version.py b/model-optimizer/mo/utils/find_ie_version.py index 3c9e698ae91702..525b68677e7586 100644 --- a/model-optimizer/mo/utils/find_ie_version.py +++ b/model-optimizer/mo/utils/find_ie_version.py @@ -60,6 +60,8 @@ def try_to_import_ie(module="", libs=[], silent=False): cmd_args.append("--silent") status = subprocess.run(cmd_args, env=os.environ) + print(status.stderr) + print(status.stdout) if status.returncode == 0: return True else: From 955eb9c8d85a592c0b98096266fc87e7d7b0cf29 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 20:26:11 +0300 Subject: [PATCH 46/57] Added install for libopencv_c_wrapper.so --- inference-engine/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 42c2cd842e9474..2a41426f3fdd2c 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -19,7 +19,8 @@ foreach(sample benchmark_app classification_sample_async hello_classification hello_nv12_input_classification hello_query_device hello_reshape_ssd ngraph_function_creation_sample object_detection_sample_ssd speech_sample style_transfer_sample hello_classification_c - object_detection_sample_ssd_c hello_nv12_input_classification_c) + object_detection_sample_ssd_c hello_nv12_input_classification_c + opencv_c_wrapper) if(TARGET ${sample}) install(TARGETS ${sample} RUNTIME DESTINATION tests From 4bfc9923a5aa23e37b2ebbd57b7ed9261abd8964 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 20:34:27 +0300 Subject: [PATCH 47/57] Added library destination --- inference-engine/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 2a41426f3fdd2c..f4d149cc45937b 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -24,6 +24,7 @@ foreach(sample benchmark_app classification_sample_async hello_classification if(TARGET ${sample}) install(TARGETS ${sample} RUNTIME DESTINATION tests + LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) endif() From 36a5bb6f9e93d17f93a5ce3997f286721b937329 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 20:48:58 +0300 Subject: [PATCH 48/57] Fixed install rule for samples --- inference-engine/CMakeLists.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index f4d149cc45937b..8b00973bceb2b3 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -23,10 +23,8 @@ foreach(sample benchmark_app classification_sample_async hello_classification opencv_c_wrapper) if(TARGET ${sample}) install(TARGETS ${sample} - 
RUNTIME DESTINATION tests - LIBRARY DESTINATION tests - COMPONENT tests - EXCLUDE_FROM_ALL) + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) endif() endforeach() From ad4be6b0ae153af127028e2769279b03941ed5a3 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 21:08:15 +0300 Subject: [PATCH 49/57] Updated demo scripts readme.md --- inference-engine/src/vpu/CMakeLists.txt | 1 + scripts/CMakeLists.txt | 2 + scripts/demo/README.txt | 53 +++++-------------------- tools/CMakeLists.txt | 8 +++- 4 files changed, 18 insertions(+), 46 deletions(-) diff --git a/inference-engine/src/vpu/CMakeLists.txt b/inference-engine/src/vpu/CMakeLists.txt index de3ff34325bb3d..b50739c72615f1 100644 --- a/inference-engine/src/vpu/CMakeLists.txt +++ b/inference-engine/src/vpu/CMakeLists.txt @@ -26,6 +26,7 @@ if(ENABLE_MYRIAD) install(DIRECTORY ${VPU_CLC_MA2X8X_ROOT}/ DESTINATION tools/cl_compiler COMPONENT myriad + USE_SOURCE_PERMISSIONS PATTERN ie_dependency.info EXCLUDE) endif() endif() diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 188edcc3df46f0..a58babcbf37c44 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -56,11 +56,13 @@ if(UNIX) DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS + PATTERN demo_security_barrier_camera.* EXCLUDE PATTERN *.bat EXCLUDE) elseif(WIN32) install(DIRECTORY demo/ DESTINATION extras/open_model_zoo/demo COMPONENT demo_scripts USE_SOURCE_PERMISSIONS + PATTERN demo_security_barrier_camera.* EXCLUDE PATTERN *.sh EXCLUDE) endif() diff --git a/scripts/demo/README.txt b/scripts/demo/README.txt index 3f9c8713c6b355..9ebbe957fca93e 100644 --- a/scripts/demo/README.txt +++ b/scripts/demo/README.txt @@ -23,27 +23,23 @@ The "demo" folder contains three scripts: 1. Classification demo using public SqueezeNet topology (demo_squeezenet_download_convert_run.sh|bat) -2. Security barrier camera demo that showcases three models coming with the product (demo_squeezenet_download_convert_run.sh|bat) +2. Benchmark demo using public SqueezeNet topology (demo_benchmark_app.sh|bat) -3. Benchmark demo using public SqueezeNet topology (demo_benchmark_app.sh|bat) - -4. Speech recognition demo utilizing models trained on open LibriSpeech dataset - -To run the demos, run demo_squeezenet_download_convert_run.sh or demo_security_barrier_camera.sh or demo_benchmark_app.sh or demo_speech_recognition.sh (*.bat on Windows) scripts from the console without parameters, for example: +To run the demos, run demo_squeezenet_download_convert_run.sh or demo_benchmark_app.sh (*.bat on Windows) scripts from the console without parameters, for example: ./demo_squeezenet_download_convert_run.sh -The script allows to specify the target device to infer on using -d option. +The script allows to specify the target device to infer on using -d option. 
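For instance, with the package installed to its default location, both remaining demos can be exercised back to back (a minimal sketch; the /opt/intel/openvino_2022 prefix and the GPU target are assumptions, substitute your own install directory and device; note that at this point in the series the scripts still install under extras/open_model_zoo/demo, a later patch moves them to samples/scripts):

```sh
# setupvars.sh sits at the top of the package in the new layout;
# source it first, then run both demo scripts against one device.
source /opt/intel/openvino_2022/setupvars.sh
cd /opt/intel/openvino_2022/extras/open_model_zoo/demo
./demo_squeezenet_download_convert_run.sh -d GPU
./demo_benchmark_app.sh -d GPU
```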
Classification Demo Using SqueezeNet ==================================== The demo illustrates the general workflow of using the Intel(R) Deep Learning Deployment Toolkit and performs the following: - - Downloads a public SqueezeNet model using the Model Downloader (open_model_zoo\tools\downloader\downloader.py) + - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) - Installs all prerequisites required for running the Model Optimizer using the scripts from the "model_optimizer\install_prerequisites" folder - - Converts SqueezeNet to an IR using the Model Optimizer (model_optimizer\mo.py) via the Model Converter (open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine classification_sample (inference_engine\samples\classification_sample) + - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) + - Builds the Inference Engine classification_sample (samples\cpp\classification_sample) - Runs the sample with the car.png picture located in the demo folder The sample application prints top-10 inference results for the picture. @@ -51,22 +47,6 @@ The sample application prints top-10 inference results for the picture. For more information about the Inference Engine classification sample, refer to the documentation available in the sample folder. -Security Barrier Camera Demo -============================ - -The demo illustrates using the Inference Engine with pre-trained models to perform vehicle detection, vehicle attributes and license-plate recognition tasks. -As the sample produces visual output, it should be run in GUI mode. - -The demo script does the following: - -- Builds the Inference Engine security barrier camera sample (inference_engine\samples\security_barrier_camera_sample) -- Runs the sample with the car_1.bmp located in the demo folder - -The sample application displays the resulting frame with detections rendered as bounding boxes and text. - -For more information about the Inference Engine security barrier camera sample, refer to the documentation available in the sample folder. - - Benchmark Demo Using SqueezeNet =============================== @@ -74,27 +54,12 @@ The demo illustrates how to use the Benchmark Application to estimate deep learn The demo script does the following: - - Downloads a public SqueezeNet model using the Model Downloader (open_model_zoo\tools\downloader\downloader.py) + - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) - Installs all prerequisites required for running the Model Optimizer using the scripts from the "model_optimizer\install_prerequisites" folder - - Converts SqueezeNet to an IR using the Model Optimizer (model_optimizer\mo.py) via the Model Converter (open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine benchmark tool (inference_engine\samples\demo_benchmark_app) + - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) + - Builds the Inference Engine benchmark tool (samples\cpp\benchmark_app) - Runs the tool with the car.png picture located in the demo folder The benchmark app prints performance counters, resulting latency, and throughput values. 
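Once the demo has produced the IR, the benchmark tool can also be invoked by hand (a sketch only; the build and model paths below are the defaults the demo scripts are assumed to use and may differ on your machine):

```sh
# build_samples.sh drops binaries under ~/inference_engine_cpp_samples_build;
# run 100 iterations on CPU against the converted SqueezeNet IR.
cd ~/inference_engine_cpp_samples_build/intel64/Release
./benchmark_app -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml \
                -d CPU -niter 100
```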
For more information about the Inference Engine benchmark app, refer to the documentation available in the sample folder. - -Speech Recognition Demo Using LibriSpeech models -================================================ - -The demo illustrates live speech recognition - transcribing speech from microphone or offline (from wave file). -The demo is also capable of live close captioning of an audio clip or movie, where signal is intercepted from the speaker. - -The demo script does the following: - - - Downloads US English models trained on LibriSpeech dataset prepared for direct usage by the Inference Engine - - Installs the required components - - Runs the command line offline demo - - As a final step, runs live speech recognition application with graphical interface - -The GUI application prints the speech transcribed from input signal in window. Up to two channels can be transcribed in parallel: microphone & speakers streams. diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 6c7dfcec9a12bb..f18d5f9ce1013b 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -49,15 +49,19 @@ if(ENABLE_PYTHON) # install cross_check_tool tool install(DIRECTORY cross_check_tool DESTINATION tools + USE_SOURCE_PERMISSIONS COMPONENT python_tools) # install benchmark_app tool - install(FILES benchmark_tool/benchmark_app.py - benchmark_tool/README.md + install(FILES benchmark_tool/README.md benchmark_tool/requirements.txt DESTINATION tools/benchmark_tool COMPONENT python_tools) + install(PROGRAMS benchmark_tool/benchmark_app.py + DESTINATION tools/benchmark_tool + COMPONENT python_tools) + # install openvino/tools/benchmark as a python package install(DIRECTORY benchmark_tool/openvino/tools/benchmark DESTINATION python/${PYTHON_VERSION}/openvino/tools From cf4da90c991c62d211acf5c5311e75c2d1450240 Mon Sep 17 00:00:00 2001 From: y Date: Sun, 12 Sep 2021 22:13:52 +0300 Subject: [PATCH 50/57] Samples --- inference-engine/CMakeLists.txt | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 8b00973bceb2b3..1e4de700ab0158 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -19,15 +19,19 @@ foreach(sample benchmark_app classification_sample_async hello_classification hello_nv12_input_classification hello_query_device hello_reshape_ssd ngraph_function_creation_sample object_detection_sample_ssd speech_sample style_transfer_sample hello_classification_c - object_detection_sample_ssd_c hello_nv12_input_classification_c - opencv_c_wrapper) + object_detection_sample_ssd_c hello_nv12_input_classification_c) if(TARGET ${sample}) install(TARGETS ${sample} - RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) endif() endforeach() +if(TARGET opencv_c_wrapper) + install(TARGETS opencv_c_wrapper + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) +endif() + openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) if(ENABLE_TESTS) From 5f34bb721b4feaca38efce0767a668939154b7a3 Mon Sep 17 00:00:00 2001 From: y Date: Mon, 13 Sep 2021 14:57:55 +0300 Subject: [PATCH 51/57] Keep source permissions for Python samples --- inference-engine/ie_bridges/python/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git 
a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt index 8c7056090bfb3b..a320d71a5230c2 100644 --- a/inference-engine/ie_bridges/python/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/CMakeLists.txt @@ -100,6 +100,7 @@ ie_cpack_add_component(python_samples) install(DIRECTORY sample/ DESTINATION samples/python + USE_SOURCE_PERMISSIONS COMPONENT python_samples) ie_cpack(${PYTHON_COMPONENT} python_samples) From c1e549ce62342817945b2395c7cdcdca43db6df7 Mon Sep 17 00:00:00 2001 From: y Date: Mon, 13 Sep 2021 17:35:59 +0300 Subject: [PATCH 52/57] Fixed python --- model-optimizer/mo/utils/find_ie_version.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/model-optimizer/mo/utils/find_ie_version.py b/model-optimizer/mo/utils/find_ie_version.py index 525b68677e7586..d627289e0dd378 100644 --- a/model-optimizer/mo/utils/find_ie_version.py +++ b/model-optimizer/mo/utils/find_ie_version.py @@ -90,10 +90,10 @@ def find_ie_version(silent=False): bindings_paths_windows = [ # Package { - "module": os.path.join(script_path, '../../../python/', python_version), + "module": os.path.join(script_path, '../../../../python/', python_version), "libs": [ - os.path.join(script_path, '../../../runtime/bin/intel64/Release'), - os.path.join(script_path, '../../../runtime/3rdparty/tbb/bin'), + os.path.join(script_path, '../../../../runtime/bin/intel64/Release'), + os.path.join(script_path, '../../../../runtime/3rdparty/tbb/bin'), ], }, # Local builds @@ -119,10 +119,10 @@ def find_ie_version(silent=False): bindings_paths_linux = [ # Package { - "module": os.path.join(script_path, '../../../python/', python_version), + "module": os.path.join(script_path, '../../../../python/', python_version), "libs": [ - os.path.join(script_path, '../../../runtime/lib/intel64'), - os.path.join(script_path, '../../../runtime/3rdparty/tbb/lib'), + os.path.join(script_path, '../../../../runtime/lib/intel64'), + os.path.join(script_path, '../../../../runtime/3rdparty/tbb/lib'), ], }, # Local builds From 54a912ef933f8b715f54e46199dbbdf20841ed2d Mon Sep 17 00:00:00 2001 From: y Date: Mon, 13 Sep 2021 19:35:03 +0300 Subject: [PATCH 53/57] Updated path to fast run scripts --- docs/get_started/get_started_linux.md | 28 +++++++++---------- docs/get_started/get_started_macos.md | 26 ++++++++--------- .../installing-openvino-docker-linux.md | 8 +++--- .../installing-openvino-linux.md | 2 +- .../installing-openvino-macos.md | 2 +- .../c/samples/hello_classification/README.md | 2 +- .../samples/benchmark_app/README.md | 8 +++--- .../samples/hello_classification/README.md | 2 +- scripts/CMakeLists.txt | 4 +-- scripts/demo/demo_benchmark_app.bat | 4 +-- scripts/demo/demo_benchmark_app.sh | 4 +-- scripts/demo/demo_security_barrier_camera.bat | 4 +-- scripts/demo/demo_security_barrier_camera.sh | 6 ++-- .../demo_squeezenet_download_convert_run.bat | 4 +-- .../demo_squeezenet_download_convert_run.sh | 4 +-- tools/benchmark_tool/README.md | 8 +++--- 16 files changed, 58 insertions(+), 58 deletions(-) diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 7381b778cc810c..08923d5c53a318 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `/opt/intel/openvino_2022/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. 
These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: +The demo scripts in `/opt/intel/openvino_2022/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit. * Download trained models. * Perform pipeline steps and see the output on the console. @@ -97,7 +97,7 @@ The script: To preview the image that the script will classify: ```sh -cd ${INTEL_OPENVINO_DIR}/extras/open_model_zoo/demo +cd ${INTEL_OPENVINO_DIR}/samples/scripts eog car.png ``` @@ -360,9 +360,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2022/extras/open_model_zoo/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels ```
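For illustration only (this sketch is not part of the toolkit): the labels file is plain text with one label per line, where line N holds the human-readable name for class id N, so a sample can translate numeric class ids like this:

```python
# Minimal sketch, assuming the one-label-per-line format of squeezenet1.1.labels.
def load_labels(path):
    with open(path) as label_file:
        return [line.strip() for line in label_file]

labels = load_labels("squeezenet1.1.labels")
for class_id in (817, 511, 479):  # hypothetical class ids from a top-10 result
    # fall back to the raw id when no label is available
    name = labels[class_id] if class_id < len(labels) else str(class_id)
    print(f"{class_id} -> {name}")
```

Without the labels file next to the IR, a sample can only print the numeric class ids.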
@@ -373,8 +373,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png` -* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp` +* `/opt/intel/openvino_2022/samples/scripts/car.png` +* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -397,32 +397,32 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU ``` **MYRIAD:** > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` **HDDL:** > **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. 
Below is a sample output with inference results on CPU:
@@ -480,14 +480,14 @@ To run the **Security Barrier Camera Demo Application** using an input image on
 
**CPU:**
```sh
-./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
+./security_barrier_camera_demo -i /opt/intel/openvino_2022/samples/scripts/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
```
 
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md).
```sh
-./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU
+./security_barrier_camera_demo -i /opt/intel/openvino_2022/samples/scripts/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU
```
 
**MYRIAD:**
@@ -530,7 +530,7 @@ To build all the demos and samples:
cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp
# to compile C samples, go here also: cd /inference_engine/samples/c
build_samples.sh
-cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
+cd $INTEL_OPENVINO_DIR/samples/scripts
build_demos.sh
```
diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md
index ca794ac98d41e5..4b6a3c928d9ace 100644
--- a/docs/get_started/get_started_macos.md
+++ b/docs/get_started/get_started_macos.md
@@ -66,7 +66,7 @@ The simplified OpenVINO™ workflow is:
 
## Use the Demo Scripts to Learn the Workflow
 
-The demo scripts in `/extras/open_model_zoo/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to:
+The demo scripts in `/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit * Download trained models * Perform pipeline steps and see the output on the console @@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png +Image /opt/intel/openvino_2022/samples/scripts/car.png classid probability label ------- ----------- ----- @@ -347,9 +347,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2022/extras/open_model_zoo/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels ```
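As a side note, the produced IR can also be loaded directly from Python; below is a minimal sketch using the Inference Engine Python API that ships with this package (the file names stand in for the IR produced in the step above):

```python
from openvino.inference_engine import IECore

ie = IECore()
# read the IR produced by the Model Optimizer and compile it for CPU
net = ie.read_network(model="squeezenet1.1.xml", weights="squeezenet1.1.bin")
exec_net = ie.load_network(network=net, device_name="CPU")
print("inputs:", list(net.input_info), "outputs:", list(net.outputs))
```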
@@ -360,8 +360,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png` -* `/opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp` +* `/opt/intel/openvino_2022/samples/scripts/car.png` +* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -384,11 +384,11 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/extras/open_model_zoo/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` @@ -396,14 +396,14 @@ The following commands run the Image Classification Code Sample using the `car.p > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. 
Below is a sample output with inference results on CPU:
```sh
Top 10 results:
 
-Image /opt/intel/openvino_2022/extras/open_model_zoo/demo/car.png
+Image /opt/intel/openvino_2022/samples/scripts/car.png
 
classid probability label
------- ----------- -----
@@ -448,7 +448,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on
 
**CPU:**
```sh
-./security_barrier_camera_demo -i /opt/intel/openvino_2022/extras/open_model_zoo/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
+./security_barrier_camera_demo -i /opt/intel/openvino_2022/samples/scripts/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
```
 
**MYRIAD:**
@@ -484,7 +484,7 @@ To build all the demos and samples:
cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp
# to compile C samples, go here also: cd /inference_engine/samples/c
build_samples.sh
-cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
+cd $INTEL_OPENVINO_DIR/samples/scripts
build_demos.sh
```
@@ -503,7 +503,7 @@ Template to call sample code or a demo application:
With the sample information specified, the command might look like this:
 
```sh
-cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos/object_detection_demo
+cd $INTEL_OPENVINO_DIR/samples/scripts/object_detection_demo
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
```
diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md
index b5092570c4ceb3..58580d1ed8d7bf 100644
--- a/docs/install_guides/installing-openvino-docker-linux.md
+++ b/docs/install_guides/installing-openvino-docker-linux.md
@@ -329,28 +329,28 @@ To run the Security Barrier Camera Demo on a specific inference device, run the
 
```sh
docker run -itu root:root --rm
-/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d CPU -sample-options -no_show"
+/bin/bash -c "apt update && apt install sudo && samples/scripts/demo_security_barrier_camera.sh -d CPU -sample-options -no_show"
```
 
**GPU**:
 
```sh
docker run -itu root:root --rm --device /dev/dri:/dev/dri
-/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d GPU -sample-options -no_show"
+/bin/bash -c "apt update && apt install sudo && samples/scripts/demo_security_barrier_camera.sh -d GPU -sample-options -no_show"
```
 
**MYRIAD**:
 
```sh
docker run -itu root:root --rm --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb
-/bin/bash -c "apt update && apt install sudo && extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show"
+/bin/bash -c "apt update && apt install sudo && samples/scripts/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show"
```
 
**HDDL**:
 
```sh
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp
-/bin/bash -c "apt update && apt install sudo &&
extras/open_model_zoo/demo/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show" +/bin/bash -c "apt update && apt install sudo && samples/scripts/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show" ``` ## Troubleshooting diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index f19f89252e0098..2ae97fad677bad 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -341,7 +341,7 @@ After configuration is done, you are ready to run the verification scripts with 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino_2022/extras/open_model_zoo/demo +cd /opt/intel/openvino_2022/samples/scripts ``` 2. Run the **Image Classification verification script**. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 87a1c0ee6a974f..36249196da0626 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -279,7 +279,7 @@ Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Tool ## Additional Resources -- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2022/extras/open_model_zoo/demo/`. +- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2022/samples/scripts/`. - For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page. diff --git a/inference-engine/ie_bridges/c/samples/hello_classification/README.md b/inference-engine/ie_bridges/c/samples/hello_classification/README.md index dae637e32fc732..8765c6e1428951 100644 --- a/inference-engine/ie_bridges/c/samples/hello_classification/README.md +++ b/inference-engine/ie_bridges/c/samples/hello_classification/README.md @@ -72,7 +72,7 @@ The application outputs top-10 inference results. ``` Top 10 results: -Image /opt/intel/openvino/extras/open_model_zoo/demo/car.png +Image /opt/intel/openvino/samples/scripts/car.png classid probability ------- ----------- diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index aa8c690d5eef24..c19fb5c4f70d56 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -148,7 +148,7 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's] ## Examples of Running the Tool -This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/extras/open_model_zoo/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/samples/scripts/` directory is used. > **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. 
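If you are behind a proxy, a quick way to confirm the environment is configured before running the steps below is to print the standard proxy variables (a generic check, not an OpenVINO tool):

```python
import os

# the de-facto standard proxy variables that most downloaders honor
for variable in ("http_proxy", "https_proxy", "no_proxy"):
    print(f"{variable} = {os.environ.get(variable, '<not set>')}")
```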
@@ -166,15 +166,15 @@ This section provides step-by-step instructions on how to run the Benchmark Tool ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` -3. Run the tool with specifying the `/extras/open_model_zoo/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: +3. Run the tool with specifying the `/samples/scripts/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: * On CPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /extras/open_model_zoo/demo/car.png -d CPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /samples/scripts/car.png -d CPU -api async --progress true ``` * On GPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /extras/open_model_zoo/demo/car.png -d GPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /samples/scripts/car.png -d GPU -api async --progress true ``` The application outputs the number of executed iterations, total duration of execution, latency, and throughput. diff --git a/inference-engine/samples/hello_classification/README.md b/inference-engine/samples/hello_classification/README.md index 8b24264269fb07..eebad1ef8ff2b5 100644 --- a/inference-engine/samples/hello_classification/README.md +++ b/inference-engine/samples/hello_classification/README.md @@ -72,7 +72,7 @@ The application outputs top-10 inference results. ``` Top 10 results: -Image /opt/intel/openvino/extras/open_model_zoo/demo/car.png +Image /opt/intel/openvino/samples/scripts/car.png classid probability ------- ----------- diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index a58babcbf37c44..c8449e26ae24ce 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -53,14 +53,14 @@ ie_cpack_add_component(demo_scripts DEPENDS core) if(UNIX) install(DIRECTORY demo/ - DESTINATION extras/open_model_zoo/demo + DESTINATION samples/scripts COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN demo_security_barrier_camera.* EXCLUDE PATTERN *.bat EXCLUDE) elseif(WIN32) install(DIRECTORY demo/ - DESTINATION extras/open_model_zoo/demo + DESTINATION samples/scripts COMPONENT demo_scripts USE_SOURCE_PERMISSIONS PATTERN demo_security_barrier_camera.* EXCLUDE diff --git a/scripts/demo/demo_benchmark_app.bat b/scripts/demo/demo_benchmark_app.bat index b466f04af0a9ac..82d3f053566039 100644 --- a/scripts/demo/demo_benchmark_app.bat +++ b/scripts/demo/demo_benchmark_app.bat @@ -52,8 +52,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\..\setupvars.bat" ( - call "%ROOT_DIR%..\..\..\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error diff --git a/scripts/demo/demo_benchmark_app.sh b/scripts/demo/demo_benchmark_app.sh index ae222ada043541..83c397111bbab7 100755 --- a/scripts/demo/demo_benchmark_app.sh +++ b/scripts/demo/demo_benchmark_app.sh @@ -69,8 +69,8 @@ target_image_path="$ROOT_DIR/car.png" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../../setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; 
then
+    setupvars_path="$ROOT_DIR/../../setupvars.sh"
else
    echo -ne "Error: setupvars.sh is not found\n"
fi
diff --git a/scripts/demo/demo_security_barrier_camera.bat b/scripts/demo/demo_security_barrier_camera.bat
index 67b1846e93567b..283db8d02f1d2e 100644
--- a/scripts/demo/demo_security_barrier_camera.bat
+++ b/scripts/demo/demo_security_barrier_camera.bat
@@ -43,8 +43,8 @@ set TARGET_PRECISION=FP16
 
echo target_precision = !TARGET_PRECISION!
 
-if exist "%ROOT_DIR%..\..\..\setupvars.bat" (
-    call "%ROOT_DIR%..\..\..\setupvars.bat"
+if exist "%ROOT_DIR%..\..\setupvars.bat" (
+    call "%ROOT_DIR%..\..\setupvars.bat"
) else (
    echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set
    goto error
diff --git a/scripts/demo/demo_security_barrier_camera.sh b/scripts/demo/demo_security_barrier_camera.sh
index 48c6ecb245815d..330f4b2f8988bf 100755
--- a/scripts/demo/demo_security_barrier_camera.sh
+++ b/scripts/demo/demo_security_barrier_camera.sh
@@ -55,8 +55,8 @@ target_image_path="$ROOT_DIR/car_1.bmp"
 
run_again="Then run the script again\n\n"
 
-if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then
-    setupvars_path="$ROOT_DIR/../../../setupvars.sh"
+if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then
+    setupvars_path="$ROOT_DIR/../../setupvars.sh"
else
    echo -ne "Error: setupvars.sh is not found\n"
fi
@@ -143,7 +143,7 @@ done < "$ROOT_DIR/demo_security_barrier_camera.conf"
# Step 2. Build samples
echo -ne "\n###############|| Build Inference Engine demos ||###############\n\n"
 
-demos_path="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/demos"
+demos_path="${INTEL_OPENVINO_DIR}/samples/scripts"
 
if ! command -v cmake &>/dev/null; then
    echo -ne "\n\nCMAKE is not installed. It is required to build Inference Engine demos. Please install it. ${run_again}"
diff --git a/scripts/demo/demo_squeezenet_download_convert_run.bat b/scripts/demo/demo_squeezenet_download_convert_run.bat
index 71de9057a8fa3d..bfe5e8e84d998b 100644
--- a/scripts/demo/demo_squeezenet_download_convert_run.bat
+++ b/scripts/demo/demo_squeezenet_download_convert_run.bat
@@ -48,8 +48,8 @@ set model_name=squeezenet1.1
 
set target_image_path=%ROOT_DIR%car.png
 
-if exist "%ROOT_DIR%..\..\..\setupvars.bat" (
-    call "%ROOT_DIR%..\..\..\setupvars.bat"
+if exist "%ROOT_DIR%..\..\setupvars.bat" (
+    call "%ROOT_DIR%..\..\setupvars.bat"
) else (
    echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set
    goto error
diff --git a/scripts/demo/demo_squeezenet_download_convert_run.sh b/scripts/demo/demo_squeezenet_download_convert_run.sh
index d3ef76f11cd944..b0f9e1a2bd8776 100755
--- a/scripts/demo/demo_squeezenet_download_convert_run.sh
+++ b/scripts/demo/demo_squeezenet_download_convert_run.sh
@@ -65,8 +65,8 @@ target_image_path="$ROOT_DIR/car.png"
 
run_again="Then run the script again\n\n"
 
-if [ -e "$ROOT_DIR/../../../setupvars.sh" ]; then
-    setupvars_path="$ROOT_DIR/../../../setupvars.sh"
+if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then
+    setupvars_path="$ROOT_DIR/../../setupvars.sh"
else
    echo -ne "Error: setupvars.sh is not found\n"
fi
diff --git a/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md
index 16e91309ec630a..cccf1aaca0bd76 100644
--- a/tools/benchmark_tool/README.md
+++ b/tools/benchmark_tool/README.md
@@ -171,7 +171,7 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's]
 
## Examples of Running the Tool
 
-This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices.
As an input, the `car.png` file from the `/extras/open_model_zoo/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/samples/scripts/` directory is used. > **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. @@ -189,15 +189,15 @@ This section provides step-by-step instructions on how to run the Benchmark Tool ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` -3. Run the tool with specifying the `/extras/open_model_zoo/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: +3. Run the tool with specifying the `/samples/scripts/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: * On CPU: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /extras/open_model_zoo/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /samples/scripts/car.png --progress true -b 1 ``` * On GPU: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d GPU -api async -i /extras/open_model_zoo/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d GPU -api async -i /samples/scripts/car.png --progress true -b 1 ``` The application outputs number of executed iterations, total duration of execution, latency and throughput. 
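For repeated comparisons, the two invocations above can also be driven from a small wrapper script; below is a sketch, assuming the model and image paths are substituted for the placeholders (the flags are taken verbatim from the commands above):

```python
import subprocess

MODEL = "googlenet-v1.xml"  # placeholder for <ir_dir>/googlenet-v1.xml
IMAGE = "car.png"           # placeholder for <INSTALL_DIR>/samples/scripts/car.png

for device in ("CPU", "GPU"):
    # one run per device, same options as the manual commands above
    subprocess.run(
        ["python3", "benchmark_app.py", "-m", MODEL, "-d", device,
         "-api", "async", "-i", IMAGE, "--progress", "true", "-b", "1"],
        check=True,
    )
```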
From 40adf0c91029c364cfb9f2cb20d6bfdb2bf21780 Mon Sep 17 00:00:00 2001 From: y Date: Tue, 14 Sep 2021 00:34:44 +0300 Subject: [PATCH 54/57] Fixed C samples tests --- inference-engine/CMakeLists.txt | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 1e4de700ab0158..70d17980991242 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -26,11 +26,13 @@ foreach(sample benchmark_app classification_sample_async hello_classification endif() endforeach() -if(TARGET opencv_c_wrapper) - install(TARGETS opencv_c_wrapper - RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) -endif() +foreach(samples_library opencv_c_wrapper format_reader) + if(TARGET ${samples_library}) + install(TARGETS ${samples_library} + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) + endif() +endforeach() openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) @@ -65,15 +67,6 @@ elseif(WIN32) PATTERN .clang-format EXCLUDE) endif() -install(TARGETS format_reader - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) - -if(TARGET benchmark_app) - install(TARGETS benchmark_app - RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) -endif() - # install C samples ie_cpack_add_component(c_samples DEPENDS core_c) From a92b3151c00d1b619dac7a414a1e2b0747d75dfd Mon Sep 17 00:00:00 2001 From: y Date: Tue, 14 Sep 2021 00:43:51 +0300 Subject: [PATCH 55/57] Removed debug output --- model-optimizer/mo/utils/find_ie_version.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/model-optimizer/mo/utils/find_ie_version.py b/model-optimizer/mo/utils/find_ie_version.py index d627289e0dd378..af0df60dc20d3d 100644 --- a/model-optimizer/mo/utils/find_ie_version.py +++ b/model-optimizer/mo/utils/find_ie_version.py @@ -60,8 +60,6 @@ def try_to_import_ie(module="", libs=[], silent=False): cmd_args.append("--silent") status = subprocess.run(cmd_args, env=os.environ) - print(status.stderr) - print(status.stdout) if status.returncode == 0: return True else: From 46f4d8c9374574a43c0d2f15532b0575504dcca5 Mon Sep 17 00:00:00 2001 From: y Date: Tue, 14 Sep 2021 09:33:35 +0300 Subject: [PATCH 56/57] Small fixes --- docs/get_started/get_started_linux.md | 2 +- docs/get_started/get_started_macos.md | 2 +- tests/utils/path_utils.py | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 08923d5c53a318..a923d320389d2f 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -235,7 +235,7 @@ This guide uses the Model Downloader to get pre-trained models. 
You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2022/extras/open_model_zoo/extras/open_model_zoo/tools/downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 4b6a3c928d9ace..4893bfd8ad923d 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -219,7 +219,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2022/extras/open_model_zoo/extras/open_model_zoo/tools/downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all diff --git a/tests/utils/path_utils.py b/tests/utils/path_utils.py index 4e004bd9bf759b..8e9864059ad46c 100644 --- a/tests/utils/path_utils.py +++ b/tests/utils/path_utils.py @@ -37,14 +37,14 @@ def get_lib_path(lib_name): os_name = get_os_name() all_libs = { 'inference_engine_transformations': { - 'Windows': Path('runtime/bin/inference_engine_transformations.dll'), - 'Linux': Path('runtime/lib/libinference_engine_transformations.so')}, + 'Windows': Path('runtime/bin/intel64/Release/inference_engine_transformations.dll'), + 'Linux': Path('runtime/lib/intel64/libinference_engine_transformations.so')}, 'MKLDNNPlugin': { - 'Windows': Path('runtime/bin/MKLDNNPlugin.dll'), - 'Linux': Path('runtime/lib/libMKLDNNPlugin.so')}, + 'Windows': Path('runtime/bin/intel64/Release/MKLDNNPlugin.dll'), + 'Linux': Path('runtime/lib/intel64/libMKLDNNPlugin.so')}, 'ngraph': { - 'Windows': Path('runtime/bin/ngraph.dll'), - 'Linux': Path('runtime/lib/libngraph.so')} + 'Windows': Path('runtime/bin/intel64/Release/ngraph.dll'), + 'Linux': Path('runtime/lib/intel64/libngraph.so')} } return all_libs[lib_name][os_name] From 5feff6acb00572edd4d42cf9cb90913fa4051b2e Mon Sep 17 00:00:00 2001 From: y Date: Tue, 14 Sep 2021 13:06:19 +0300 Subject: [PATCH 57/57] Try to unify prefix --- inference-engine/ie_bridges/python/wheel/setup.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index 715043236f6f40..436baa7a7c923a 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -56,37 +56,37 @@ }, 'hetero_plugin': { 'name': 'hetero', - 'prefix': 'libs.plugins', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'gpu_plugin': { 'name': 'gpu', - 'prefix': 'libs.plugins', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'cpu_plugin': { 'name': 'cpu', - 'prefix': 'libs.plugins', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'multi_plugin': { 'name': 'multi', - 'prefix': 'libs.plugins', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'myriad_plugin': { 'name': 'myriad', - 'prefix': 'libs.plugins', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'ngraph_libs': { 'name': 'ngraph', - 'prefix': 'libs.ngraph', + 'prefix': 'libs.core', 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, },
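The practical effect of the change above is that every native library shares the single `libs.core` package prefix instead of being split across `libs.plugins` and `libs.ngraph`. Below is a sketch of how such a table can be consumed; the dictionary name and the consuming loop are hypothetical illustrations, not the wheel's actual logic:

```python
# hypothetical consumer of the install table shown in the patch
LIB_INSTALL_CFG = {
    "cpu_plugin":  {"name": "cpu",    "prefix": "libs.core"},
    "gpu_plugin":  {"name": "gpu",    "prefix": "libs.core"},
    "ngraph_libs": {"name": "ngraph", "prefix": "libs.core"},
}

for component, config in LIB_INSTALL_CFG.items():
    # with a unified prefix, every component resolves to one package directory
    package_dir = config["prefix"].replace(".", "/")
    print(f"{component}: {package_dir}/{config['name']}")
```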