From 6a55fb87d83f025ff71f6f6a74515fe9480b1c58 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 15 Sep 2021 16:49:11 +0300 Subject: [PATCH] New IRC package structure (#6255) * OV new package structure * Fixes * More fixes * Fixed code style in ngraph tests * Fixes * Paths to setupvars inside demo scripts * Fixed demo_security_barrier_camera.sh * Added setupvars.sh to old location as well * Fixed path * Fixed MO install path in .co * Fixed install of public headers * Fixed frontends installation * Updated DM config files * Keep opencv in the root * Improvements * Fixes for demo scripts * Added path to TBB * Fix for MO unit-tests * Fixed tests on Windows * Reverted arch * Removed arch * Reverted arch back: second attemp * System type * Fix for Windows * Resolve merge conflicts * Fixed path * Path for Windows * Added debug for Windows * Added requirements_dev.txt to install * Fixed wheel's setup.py * Fixed lin build * Fixes after merge * Fix 2 * Fixes * Frontends path * Fixed deployment manager * Fixed Windows * Added cldnn unit tests installation * Install samples * Fix samples * Fix path for samples * Proper path * Try to fix MO hardcodes * samples binary location * MO print * Added install for libopencv_c_wrapper.so * Added library destination * Fixed install rule for samples * Updated demo scripts readme.md * Samples * Keep source permissions for Python samples * Fixed python * Updated path to fast run scripts * Fixed C samples tests * Removed debug output * Small fixes * Try to unify prefix --- .ci/azure/linux.yml | 12 ++-- .ci/azure/linux_conditional_compilation.yml | 2 +- .ci/azure/linux_onnxruntime.yml | 14 ++-- .ci/azure/mac.yml | 2 +- .ci/azure/windows.yml | 6 +- .ci/azure/windows_conditional_compilation.yml | 2 +- .ci/openvino-onnx/Dockerfile | 4 +- CMakeLists.txt | 1 - cmake/dependencies.cmake | 2 +- cmake/developer_package/packaging.cmake | 13 ++-- docs/HOWTO/Custom_Layers_Guide.md | 2 +- docs/IE_DG/Cross_Check_Tool.md | 4 +- ...Deep_Learning_Inference_Engine_DevGuide.md | 4 +- docs/IE_DG/Extensibility_DG/GPU_Kernel.md | 2 +- docs/IE_DG/Extensibility_DG/VPU_Kernel.md | 12 ++-- ...grate_with_customer_application_new_API.md | 4 +- docs/IE_DG/Samples_Overview.md | 30 ++++---- docs/IE_DG/Tools_Overview.md | 10 +-- docs/IE_DG/inference_engine_intro.md | 4 +- .../prepare_model/Config_Model_Optimizer.md | 10 +-- .../prepare_model/Model_Optimizer_FAQ.md | 6 +- .../convert_model/Convert_Model_From_Caffe.md | 2 +- .../convert_model/Convert_Model_From_Kaldi.md | 2 +- .../convert_model/Convert_Model_From_MxNet.md | 2 +- .../convert_model/Convert_Model_From_ONNX.md | 2 +- .../Convert_Model_From_Paddle.md | 2 +- .../Convert_Model_From_TensorFlow.md | 10 +-- .../convert_model/Converting_Model.md | 2 +- .../convert_model/Converting_Model_General.md | 2 +- .../convert_model/Cutting_Model.md | 2 +- .../Convert_EfficientDet_Models.md | 4 +- .../Convert_Object_Detection_API_Models.md | 8 +-- .../Convert_YOLO_From_Tensorflow.md | 6 +- ...odel_Optimizer_with_Caffe_Python_Layers.md | 2 +- docs/get_started/get_started_linux.md | 42 +++++------ docs/get_started/get_started_macos.md | 42 +++++------ docs/get_started/get_started_raspbian.md | 8 +-- docs/get_started/get_started_windows.md | 42 +++++------ docs/how_tos/MonoDepth_how_to.md | 12 ++-- docs/how_tos/POT_how_to_example.md | 24 +++---- .../install_guides/deployment-manager-tool.md | 14 ++-- .../installing-openvino-docker-linux.md | 14 ++-- .../installing-openvino-docker-windows.md | 4 +- .../installing-openvino-linux-ivad-vpu.md | 6 +- 
.../installing-openvino-linux.md | 18 ++--- .../installing-openvino-macos.md | 18 ++--- .../installing-openvino-raspbian.md | 14 ++-- .../installing-openvino-windows.md | 14 ++-- docs/install_guides/movidius-setup-guide.md | 2 +- .../dldt_optimization_guide.md | 2 +- inference-engine/CMakeLists.txt | 41 +++++++---- .../ie_bridges/c/docs/api_overview.md | 2 +- .../c/samples/hello_classification/README.md | 2 +- .../ie_bridges/c/src/CMakeLists.txt | 4 +- .../ie_bridges/python/CMakeLists.txt | 3 +- .../ie_bridges/python/docs/api_overview.md | 10 +-- .../ngraph_function_creation_sample/README.md | 2 +- .../openvino/inference_engine/CMakeLists.txt | 6 +- .../offline_transformations/CMakeLists.txt | 8 ++- .../ie_bridges/python/wheel/.env.in | 4 +- .../ie_bridges/python/wheel/CMakeLists.txt | 7 +- .../ie_bridges/python/wheel/setup.py | 32 ++++----- .../samples/benchmark_app/README.md | 12 ++-- inference-engine/samples/build_samples.sh | 10 ++- .../samples/build_samples_msvc.bat | 12 ++-- .../samples/hello_classification/README.md | 2 +- .../src/gna_plugin/CMakeLists.txt | 2 +- .../src/inference_engine/CMakeLists.txt | 31 ++++---- inference-engine/src/vpu/CMakeLists.txt | 3 +- .../src/vpu/myriad_plugin/CMakeLists.txt | 2 +- .../thirdparty/clDNN/tests/CMakeLists.txt | 5 ++ model-optimizer/CMakeLists.txt | 19 +++-- model-optimizer/README.md | 2 +- .../extensions/analysis/tf_retinanet.py | 2 +- .../extensions/analysis/tf_yolo.py | 4 +- model-optimizer/mo/utils/find_ie_version.py | 10 ++- .../mock_mo_ngraph_frontend/CMakeLists.txt | 6 +- ngraph/CMakeLists.txt | 2 - ngraph/core/CMakeLists.txt | 14 ++-- ngraph/frontend/CMakeLists.txt | 2 +- .../frontend/frontend_manager/CMakeLists.txt | 6 +- ngraph/frontend/ir/CMakeLists.txt | 6 +- ngraph/frontend/onnx/frontend/CMakeLists.txt | 6 +- ngraph/frontend/paddlepaddle/CMakeLists.txt | 6 +- ngraph/test/frontend/CMakeLists.txt | 4 +- ngraph/test/runtime/CMakeLists.txt | 4 +- ngraph/test/runtime/ie/CMakeLists.txt | 4 +- .../test/runtime/interpreter/CMakeLists.txt | 4 +- runtime/bindings/python/BUILDING.md | 6 +- .../mock_py_ngraph_frontend/CMakeLists.txt | 4 +- scripts/CMakeLists.txt | 10 +-- scripts/demo/README.txt | 16 ++--- scripts/demo/run_sample_benchmark_app.bat | 14 ++-- scripts/demo/run_sample_benchmark_app.sh | 14 ++-- scripts/demo/run_sample_squeezenet.bat | 16 ++--- scripts/demo/run_sample_squeezenet.sh | 14 ++-- .../install_NCS_udev_rules.sh | 6 +- scripts/setupvars/setupvars.bat | 59 ++++++--------- scripts/setupvars/setupvars.sh | 72 ++++++++----------- tests/utils/install_pkg.py | 4 +- tests/utils/path_utils.py | 12 ++-- thirdparty/CMakeLists.txt | 6 +- tools/CMakeLists.txt | 16 +++-- tools/benchmark_tool/README.md | 12 ++-- tools/compile_tool/CMakeLists.txt | 4 +- tools/compile_tool/README.md | 2 +- tools/deployment_manager/configs/darwin.json | 39 +++++----- tools/deployment_manager/configs/linux.json | 63 ++++++++-------- tools/deployment_manager/configs/windows.json | 64 ++++++++--------- 109 files changed, 608 insertions(+), 614 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index d949cf656b4541..bfed994a137547 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -33,7 +33,7 @@ jobs: INSTALL_DIR: $(WORK_DIR)/install_pkg INSTALL_TEST_DIR: $(INSTALL_DIR)/tests LAYER_TESTS_DIR: $(INSTALL_TEST_DIR)/layer_tests - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | @@ -171,11 +171,11 @@ jobs: - script: ls -alR $(INSTALL_DIR) displayName: 'List install files' - - script: 
$(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh + - script: $(INSTALL_DIR)/samples/cpp/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build cpp samples' - - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/c/build_samples.sh + - script: $(INSTALL_DIR)/samples/c/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' @@ -189,8 +189,8 @@ jobs: continueOnError: false - script: | - export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer - . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml + export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer + . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml displayName: 'Model Optimizer UT' continueOnError: false @@ -258,7 +258,7 @@ jobs: - script: | . $(SETUPVARS) python3 -m pip install -r requirements.txt - export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer + export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH python3 -m pytest tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=TEST-tf_Roll.xmlTEST workingDirectory: $(LAYER_TESTS_DIR) diff --git a/.ci/azure/linux_conditional_compilation.yml b/.ci/azure/linux_conditional_compilation.yml index a4063d2c9031f0..1a69b7c3dcd9a2 100644 --- a/.ci/azure/linux_conditional_compilation.yml +++ b/.ci/azure/linux_conditional_compilation.yml @@ -17,7 +17,7 @@ jobs: WORK_DIR: $(Pipeline.Workspace)/_w BUILD_DIR: $(WORK_DIR)/build INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml index dd9d8b44429021..ad5e630820ddbf 100644 --- a/.ci/azure/linux_onnxruntime.yml +++ b/.ci/azure/linux_onnxruntime.yml @@ -110,44 +110,44 @@ jobs: displayName: 'Install' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh CXXFLAGS="-Wno-error=deprecated-declarations" ./build.sh --config RelWithDebInfo --use_openvino CPU_FP32 --build_shared_lib --parallel --skip_tests --build_dir $(ONNXRUNTIME_BUILD_DIR) workingDirectory: $(ONNXRUNTIME_REPO_DIR) displayName: 'Build Lin ONNX Runtime' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh skip_tests=`tr -s '\n ' ':' < $(ONNXRUNTIME_UTILS)/skip_tests` ./onnxruntime_test_all --gtest_filter=-$skip_tests workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_test_all' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_shared_lib_test workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_shared_lib_test' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_global_thread_pools_test workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_global_thread_pools_test' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnxruntime_api_tests_without_env workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run onnxruntime_api_tests_without_env' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnx_test_runner 
"$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run pytorch-converted tests' - script: | - source $(INSTALL_DIR)/bin/setupvars.sh + source $(INSTALL_DIR)/setupvars.sh ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo displayName: 'Run pytorch-operator tests' diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index cffca56ccfa364..4534c08b0651d5 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -30,7 +30,7 @@ jobs: BUILD_DIR: $(WORK_DIR)/build INSTALL_DIR: $(WORK_DIR)/install_pkg INSTALL_TEST_DIR: $(INSTALL_DIR)/tests - SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh + SETUPVARS: $(INSTALL_DIR)/setupvars.sh steps: - script: | diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index e5ec0486f9bcd2..62e0fa7c712cee 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -33,7 +33,7 @@ jobs: MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe INSTALL_DIR: $(WORK_DIR)\install_pkg INSTALL_TEST_DIR: $(INSTALL_DIR)\tests - SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + SETUPVARS: $(INSTALL_DIR)\setupvars.bat IB_DIR: C:\Program Files (x86)\IncrediBuild IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe @@ -132,11 +132,11 @@ jobs: - script: dir $(INSTALL_DIR) /s displayName: 'List install files' - - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat + - script: $(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build cpp samples' - - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\c\build_samples_msvc.bat + - script: $(INSTALL_DIR)\samples\c\build_samples_msvc.bat workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' diff --git a/.ci/azure/windows_conditional_compilation.yml b/.ci/azure/windows_conditional_compilation.yml index 9024ede46f6018..80c89e8d20f28e 100644 --- a/.ci/azure/windows_conditional_compilation.yml +++ b/.ci/azure/windows_conditional_compilation.yml @@ -19,7 +19,7 @@ jobs: MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe INSTALL_DIR: $(WORK_DIR)\install_pkg - SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat + SETUPVARS: $(INSTALL_DIR)\setupvars.bat steps: - script: | diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index dae27a71177403..8e2365e4ebc506 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -75,7 +75,7 @@ RUN make -j $(nproc) install # Run tests via tox WORKDIR /openvino/runtime/bindings/python -ENV OpenVINO_DIR=/openvino/dist/deployment_tools/inference_engine/share -ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib +ENV OpenVINO_DIR=/openvino/dist/runtime/cmake +ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib:/openvino/dist/runtime/3rdparty/tbb/lib ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/lib/python_api/python3.8:${PYTHONPATH} CMD tox diff --git a/CMakeLists.txt b/CMakeLists.txt index 328090276e3fb9..7ead5a53212142 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,7 +85,6 @@ include(cmake/test_model_zoo.cmake) add_subdirectory(thirdparty) add_subdirectory(openvino) 
add_subdirectory(ngraph) - add_subdirectory(runtime) add_subdirectory(inference-engine) diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index 046e9a83d57f8b..b4946d85d11d85 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -89,7 +89,7 @@ if(THREADING STREQUAL "OMP") ie_cpack_add_component(omp REQUIRED) file(GLOB_RECURSE source_list "${OMP}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") install(FILES ${source_list} - DESTINATION "deployment_tools/inference_engine/external/omp/lib" + DESTINATION "runtime/3rdparty/omp/lib" COMPONENT omp) endif() diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake index 4095a16157c068..7708de5c77b921 100644 --- a/cmake/developer_package/packaging.cmake +++ b/cmake/developer_package/packaging.cmake @@ -10,16 +10,15 @@ include(CPackComponent) # # Set library directory for cpack # -set(IE_CPACK_IE_DIR deployment_tools/inference_engine) function(ie_cpack_set_library_dir) if(WIN32) - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/$ PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$ PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$ PARENT_SCOPE) else() - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE) endif() endfunction() diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md index cda4ed1c968f47..4bea76f5902baa 100644 --- a/docs/HOWTO/Custom_Layers_Guide.md +++ b/docs/HOWTO/Custom_Layers_Guide.md @@ -313,7 +313,7 @@ operation for the CPU plugin. The code of the library is described in the [Exte To build the extension, run the following:
```bash mkdir build && cd build -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh cmake .. -DCMAKE_BUILD_TYPE=Release make --jobs=$(nproc) ``` diff --git a/docs/IE_DG/Cross_Check_Tool.md b/docs/IE_DG/Cross_Check_Tool.md index 495afa790fcccc..d53d3dddfe47de 100644 --- a/docs/IE_DG/Cross_Check_Tool.md +++ b/docs/IE_DG/Cross_Check_Tool.md @@ -8,11 +8,11 @@ The Cross Check Tool can compare metrics per layer or all over the model. On Linux* OS, before running the Cross Check Tool binary, make sure your application can find the Deep Learning Inference Engine libraries. -Navigate to the `/deployment_tools/inference_engine/bin` folder and run the `setvars.sh` script to +Navigate to the `` folder and run the `setupvars.sh` script to set all necessary environment variables: ```sh -source setvars.sh +source setupvars.sh ``` ## Running the Cross Check Tool diff --git a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md index f8362188ab2366..e2960c5dd87394 100644 --- a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md +++ b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md @@ -79,9 +79,9 @@ Make sure those libraries are in your computer's path or in the place you pointe * Windows: `PATH` * macOS: `DYLD_LIBRARY_PATH` -On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables. +On Linux and macOS, use the script `setupvars.sh` to set the environment variables. -On Windows, run the `bin\setupvars.bat` batch file to set the environment variables. +On Windows, run the `setupvars.bat` batch file to set the environment variables. To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter. diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md index d9fd809f8e4227..f206c2c0bcb41e 100644 --- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md @@ -4,7 +4,7 @@ The GPU codepath abstracts many details about OpenCL\*. You need to provide the There are two options of using the custom operation configuration file: -* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder +* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/runtime/bin` folder * Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin: @snippet snippets/GPU_Kernel.cpp part0 diff --git a/docs/IE_DG/Extensibility_DG/VPU_Kernel.md b/docs/IE_DG/Extensibility_DG/VPU_Kernel.md index 033097598317bf..4dca14ce50233e 100644 --- a/docs/IE_DG/Extensibility_DG/VPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/VPU_Kernel.md @@ -15,18 +15,18 @@ To customize your topology with an OpenCL layer, follow the steps below: > **NOTE:** OpenCL compiler, targeting Intel® Neural Compute Stick 2 for the SHAVE* processor only, is redistributed with OpenVINO. OpenCL support is provided by ComputeAorta*, and is distributed under a license agreement between Intel® and Codeplay* Software Ltd. 
-The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `/deployment_tools/tools/cl_compiler`. +The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `/tools/cl_compiler`. > **NOTE:** By design, custom OpenCL layers support any OpenCL kernels written with 1.2 version assumed. It also supports half float extension and is optimized for this type, because it is a native type for Intel® Movidius™ VPUs. 1. Prior to running a compilation, make sure that the following variables are set: - * `SHAVE_MA2X8XLIBS_DIR=/deployment_tools/tools/cl_compiler/lib/` - * `SHAVE_LDSCRIPT_DIR=/deployment_tools/tools/cl_compiler/ldscripts/` - * `SHAVE_MYRIAD_LD_DIR=/deployment_tools/tools/cl_compiler/bin/` - * `SHAVE_MOVIASM_DIR=/deployment_tools/tools/cl_compiler/bin/` + * `SHAVE_MA2X8XLIBS_DIR=/tools/cl_compiler/lib/` + * `SHAVE_LDSCRIPT_DIR=/tools/cl_compiler/ldscripts/` + * `SHAVE_MYRIAD_LD_DIR=/tools/cl_compiler/bin/` + * `SHAVE_MOVIASM_DIR=/tools/cl_compiler/bin/` 2. Run the compilation with the command below. You should use `--strip-binary-header` to make an OpenCL runtime-agnostic binary runnable with the Inference Engine. ```bash -cd /deployment_tools/tools/cl_compiler/bin +cd /tools/cl_compiler/bin ./clc --strip-binary-header custom_layer.cl -o custom_layer.bin ``` diff --git a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md index 044c1c62ad9672..870d840c95cd21 100644 --- a/docs/IE_DG/Integrate_with_customer_application_new_API.md +++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md @@ -173,7 +173,7 @@ Note that casting `Blob` to `TBlob` via `std::dynamic_pointer_cast` is not the r ## Build Your Application For details about building your application, refer to the CMake files for the sample applications. -All samples source code is located in the `/openvino/inference_engine/samples` directory, where `INSTALL_DIR` is the OpenVINO™ installation directory. +All samples source code is located in the `/samples` directory, where `INSTALL_DIR` is the OpenVINO™ installation directory. ### CMake project creation @@ -199,7 +199,7 @@ add_executable(${PROJECT_NAME} src/main.cpp) target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime ${OpenCV_LIBS}) ``` 3. **To build your project** using CMake with the default build tools currently available on your machine, execute the following commands: -> **NOTE**: Make sure you set environment variables first by running `/bin/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls. +> **NOTE**: Make sure you set environment variables first by running `/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls. 
```sh cd build/ cmake ../project diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 6d3cb495831096..db989aac76f596 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -3,9 +3,9 @@ The Inference Engine sample applications are simple console applications that show how to utilize specific Inference Engine capabilities within an application, assist developers in executing specific tasks such as loading a model, running inference, querying specific device capabilities and etc. After installation of Intel® Distribution of OpenVINO™ toolkit, С, C++ and Python* sample applications are available in the following directories, respectively: -* `/inference_engine/samples/c` -* `/inference_engine/samples/cpp` -* `/inference_engine/samples/python` +* `/samples/c` +* `/samples/cpp` +* `/samples/python` Inference Engine sample applications include the following: @@ -64,7 +64,7 @@ The officially supported Linux* build environment is the following: > **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode). -To build the C or C++ sample applications for Linux, go to the `/inference_engine/samples/c` or `/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script: +To build the C or C++ sample applications for Linux, go to the `/samples/c` or `/samples/cpp` directory, respectively, and run the `build_samples.sh` script: ```sh build_samples.sh ``` @@ -91,11 +91,11 @@ cd build 3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples: - For release configuration: ```sh - cmake -DCMAKE_BUILD_TYPE=Release /inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release /samples/cpp ``` - For debug configuration: ```sh - cmake -DCMAKE_BUILD_TYPE=Debug /inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Debug /samples/cpp ``` 4. Run `make` to build the samples: ```sh @@ -114,7 +114,7 @@ The recommended Windows* build environment is the following: > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. -To build the C or C++ sample applications on Windows, go to the `\inference_engine\samples\c` or `\inference_engine\samples\cpp` directory, respectively, and run the `build_samples_msvc.bat` batch file: +To build the C or C++ sample applications on Windows, go to the `\samples\c` or `\samples\cpp` directory, respectively, and run the `build_samples_msvc.bat` batch file: ```sh build_samples_msvc.bat ``` @@ -123,7 +123,7 @@ By default, the script automatically detects the highest Microsoft Visual Studio a solution for a sample code. Optionally, you can also specify the preferred Microsoft Visual Studio version to be used by the script. Supported versions are `VS2017` and `VS2019`. For example, to build the C++ samples using the Microsoft Visual Studio 2017, use the following command: ```sh -\inference_engine\samples\cpp\build_samples_msvc.bat VS2017 +\samples\cpp\build_samples_msvc.bat VS2017 ``` Once the build is completed, you can find sample binaries in the following folders: @@ -144,7 +144,7 @@ The officially supported macOS* build environment is the following: > **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode). 
-To build the C or C++ sample applications for macOS, go to the `/inference_engine/samples/c` or `/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script: +To build the C or C++ sample applications for macOS, go to the `/samples/c` or `/samples/cpp` directory, respectively, and run the `build_samples.sh` script: ```sh build_samples.sh ``` @@ -177,11 +177,11 @@ cd build 3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples: - For release configuration: ```sh - cmake -DCMAKE_BUILD_TYPE=Release /inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release /samples/cpp ``` - For debug configuration: ```sh - cmake -DCMAKE_BUILD_TYPE=Debug /inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Debug /samples/cpp ``` 4. Run `make` to build the samples: ```sh @@ -199,7 +199,7 @@ Before running compiled binary files, make sure your application can find the Inference Engine and OpenCV libraries. Run the `setupvars` script to set all necessary environment variables: ```sh -source /bin/setupvars.sh +source /setupvars.sh ``` **(Optional)**: The OpenVINO environment variables are removed when you close the @@ -212,7 +212,7 @@ vi /.bashrc 2. Add this line to the end of the file: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 3. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key. @@ -228,7 +228,7 @@ Before running compiled binary files, make sure your application can find the Inference Engine and OpenCV libraries. Use the `setupvars` script, which sets all necessary environment variables: ```sh -\bin\setupvars.bat +\setupvars.bat ``` To debug or run the samples on Windows in Microsoft Visual Studio, make sure you @@ -240,7 +240,7 @@ For example, for the **Debug** configuration, go to the project's variable in the **Environment** field to the following: ```sh -PATH=\deployment_tools\inference_engine\bin\intel64\Debug;\opencv\bin;%PATH% +PATH=\runtime\bin;\opencv\bin;%PATH% ``` where `` is the directory in which the OpenVINO toolkit is installed. 
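For reference, a minimal sketch of the sample workflow under the relocated layout documented above (illustrative only, not part of the patch; the `/opt/intel/openvino_2022` prefix, the SqueezeNet IR path, and the sample build directory are assumptions borrowed from the get-started guides later in this change):

```sh
# Illustrative sketch, not part of the patch: build and run a sample using the
# new top-level samples/ and setupvars.sh locations from this change.
source /opt/intel/openvino_2022/setupvars.sh           # previously <INSTALL_DIR>/bin/setupvars.sh
/opt/intel/openvino_2022/samples/cpp/build_samples.sh  # previously .../deployment_tools/inference_engine/samples/cpp
cd ~/inference_engine_cpp_samples_build/intel64/Release
./classification_sample_async \
    -i /opt/intel/openvino_2022/samples/scripts/car.png \
    -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml \
    -d CPU
```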
diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index 0d408ebf1d3bb4..e3acfa7fb483b7 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -6,11 +6,11 @@ The OpenVINO™ toolkit installation includes the following tools: |Tool | Location in the Installation Directory| |-----------------------------------------------------------------------------|---------------------------------------| -|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/deployment_tools/tools/open_model_zoo/tools/accuracy_checker`| -|[Post-Training Optimization Tool](@ref pot_README) | `/deployment_tools/tools/post_training_optimization_toolkit`| -|[Model Downloader](@ref omz_tools_downloader) | `/deployment_tools/tools/model_downloader`| -|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `/deployment_tools/tools/cross_check_tool`| -|[Compile Tool](../../tools/compile_tool/README.md) | `/deployment_tools/inference_engine/lib/intel64/`| +|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `/tools/accuracy_checker`| +|[Post-Training Optimization Tool](@ref pot_README) | `/tools/post_training_optimization_toolkit`| +|[Model Downloader](@ref omz_tools_downloader) | `/extras/open_model_zoo/tools/downloader`| +|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `/tools/cross_check_tool`| +|[Compile Tool](../../tools/compile_tool/README.md) | `/tools/compile_tool`| ## See Also diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index 89d80654fe4480..3ad44b99144736 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -84,9 +84,9 @@ Make sure those libraries are in your computer's path or in the place you pointe * Windows: `PATH` * macOS: `DYLD_LIBRARY_PATH` -On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables. +On Linux and macOS, use the script `setupvars.sh` to set the environment variables. -On Windows, run the `bin\setupvars.bat` batch file to set the environment variables. +On Windows, run the `setupvars.bat` batch file to set the environment variables. To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter. diff --git a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md index 3b190dd6272b33..186b8ddabd5b4e 100644 --- a/docs/MO_DG/prepare_model/Config_Model_Optimizer.md +++ b/docs/MO_DG/prepare_model/Config_Model_Optimizer.md @@ -10,7 +10,7 @@ dependencies and provide the fastest and easiest way to configure the Model Optimizer. To configure all three frameworks, go to the -`/deployment_tools/model_optimizer/install_prerequisites` +`/tools/model_optimizer/install_prerequisites` directory and run: * For Linux\* OS: @@ -35,7 +35,7 @@ install_prerequisites.bat ``` To configure a specific framework, go to the -`/deployment_tools/model_optimizer/install_prerequisites` +`/tools/model_optimizer/install_prerequisites` directory and run: * For Caffe\* on Linux: @@ -101,7 +101,7 @@ framework at a time. 1. Go to the Model Optimizer directory: ```shell -cd /deployment_tools/model_optimizer/ +cd /tools/model_optimizer/ ``` 2. **Strongly recommended for all global Model Optimizer dependency installations**: Create and activate a virtual environment. 
While not required, this step is @@ -181,7 +181,7 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp On Windows, pre-built protobuf packages for Python versions 3.4, 3.5, 3.6, and 3.7 are provided with the installation package and can be found in the -`\deployment_tools\model_optimizer\install_prerequisites` +`\tools\model_optimizer\install_prerequisites` folder. Please note that they are not installed with the `install_prerequisites.bat` installation script due to possible issues with `pip`, and you can install them at your own discretion. Make sure @@ -198,7 +198,7 @@ To install the protobuf package: 1. Open the command prompt as administrator. 2. Go to the `install_prerequisites` folder of the OpenVINO toolkit installation directory: ```sh -cd \deployment_tools\model_optimizer\install_prerequisites +cd \tools\model_optimizer\install_prerequisites ``` 3. Run the following command to install the protobuf for Python 3.6. If diff --git a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md index bb599cf93b5632..cd41e9da21d0a8 100644 --- a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md +++ b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md @@ -28,7 +28,7 @@ For example, to add the description of the `CustomReshape` layer, which is an ar 2. Generate a new parser: ```shell -cd /deployment_tools/model_optimizer/mo/front/caffe/proto +cd /tools/model_optimizer/mo/front/caffe/proto python3 generate_caffe_pb2.py --input_proto /src/caffe/proto/caffe.proto ``` where `PATH_TO_CUSTOM_CAFFE` is the path to the root directory of custom Caffe\*. @@ -66,7 +66,7 @@ The mean file that you provide for the Model Optimizer must be in a `.binaryprot #### 7. What does the message "Invalid proto file: there is neither 'layer' nor 'layers' top-level messages" mean? -The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: +The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: ``` message NetParameter { // ... some other parameters @@ -81,7 +81,7 @@ This means that any topology should contain layers as top-level structures in `p #### 8. What does the message "Old-style inputs (via 'input_dims') are not supported. Please specify inputs via 'input_shape'" mean? -The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure: +The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. 
There you can find the structure: ```sh message NetParameter { diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md index 4c257d1689ea23..229205f7b68166 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md @@ -38,7 +38,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert a Caffe\* model: -1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory. +1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory. 2. Use the `mo.py` script to simply convert a model, specifying the path to the input model `.caffemodel` file and the path to an output directory with write permissions: ```sh python3 mo.py --input_model .caffemodel --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md index 20f2511dcbf148..3aac41fbd67874 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md @@ -33,7 +33,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert a Kaldi\* model: -1. Go to the `/deployment_tools/model_optimizer` directory. +1. Go to the `/tools/model_optimizer` directory. 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` or `.mdl` file and to an output directory where you have write permissions: ```sh python3 mo.py --input_model .nnet --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md index 85218eaf1a0a8c..6ac304aa5c236b 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md @@ -43,7 +43,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert an MXNet\* model: -1. Go to the `/deployment_tools/model_optimizer` directory. +1. Go to the `/tools/model_optimizer` directory. 2. To convert an MXNet\* model contained in a `model-file-symbol.json` and `model-file-0000.params`, run the Model Optimizer launch script `mo.py`, specifying a path to the input model file and a path to an output directory with write permissions: ```sh python3 mo_mxnet.py --input_model model-file-0000.params --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md index 79f740b55ecdd4..6ab9ef30e43782 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md @@ -59,7 +59,7 @@ The Model Optimizer process assumes you have an ONNX model that was directly dow To convert an ONNX\* model: -1. Go to the `/deployment_tools/model_optimizer` directory. +1. Go to the `/tools/model_optimizer` directory. 2. 
Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` file and an output directory where you have write permissions: ```sh python3 mo.py --input_model .onnx --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md index 65f5c8fbbab1ba..d2d75aefb08541 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md @@ -29,7 +29,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit To convert a Paddle\* model: -1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory. +1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory. 2. Use the `mo.py` script to simply convert a model, specifying the framework, the path to the input model `.pdmodel` file and the path to an output directory with write permissions: ```sh python3 mo.py --input_model .pdmodel --output_dir --framework=paddle diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md index 17465ef6e62d8a..d5124fab21b0e6 100644 --- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md +++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md @@ -178,7 +178,7 @@ There are three ways to store non-frozen TensorFlow models and load them to the To convert such a TensorFlow model: - 1. Go to the `/deployment_tools/model_optimizer` directory + 1. Go to the `/tools/model_optimizer` directory 2. Run the `mo_tf.py` script with the path to the checkpoint file to convert a model and an output directory where you have write permissions: * If input model is in `.pb` format:
@@ -200,7 +200,7 @@ python3 mo_tf.py --input_model .pbtxt --input_checkpoint /deployment_tools/model_optimizer` directory + 1. Go to the `/tools/model_optimizer` directory 2. Run the `mo_tf.py` script with a path to the MetaGraph `.meta` file and a writable output directory to convert a model:
```sh python3 mo_tf.py --input_meta_graph .meta --output_dir @@ -212,7 +212,7 @@ python3 mo_tf.py --input_meta_graph .meta --output_dir /deployment_tools/model_optimizer` directory + 1. Go to the `/tools/model_optimizer` directory 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory to convert a model:
```sh python3 mo_tf.py --saved_model_dir --output_dir @@ -251,7 +251,7 @@ Where: To convert a TensorFlow model: -1. Go to the `/deployment_tools/model_optimizer` directory +1. Go to the `/tools/model_optimizer` directory 2. Use the `mo_tf.py` script to simply convert a model with the path to the input model `.pb` file and a writable output directory: ```sh python3 mo_tf.py --input_model .pb --output_dir @@ -342,7 +342,7 @@ Below are the instructions on how to convert each of them. A model in the SavedModel format consists of a directory with a `saved_model.pb` file and two subfolders: `variables` and `assets`. To convert such a model: -1. Go to the `/deployment_tools/model_optimizer` directory. +1. Go to the `/tools/model_optimizer` directory. 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory: ```sh python3 mo_tf.py --saved_model_dir --output_dir diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md index 60ab7e2ac71eaf..78acbd694e139c 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md @@ -1,6 +1,6 @@ # Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model} -Use the mo.py script from the `/deployment_tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR): +Use the mo.py script from the `/tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR): ```sh python3 mo.py --input_model INPUT_MODEL --output_dir ``` diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md index 913278a8e2ac0e..114bf7a3ce0f68 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md @@ -5,7 +5,7 @@ To simply convert a model trained by any supported framework, run the Model Opti python3 mo.py --input_model INPUT_MODEL --output_dir ``` -The script is in `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/`. The output directory must have write permissions, so you can run mo.py from the output directory or specify an output path with the `--output_dir` option. +The script is in `$INTEL_OPENVINO_DIR/tools/model_optimizer/`. The output directory must have write permissions, so you can run mo.py from the output directory or specify an output path with the `--output_dir` option. > **NOTE:** The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion specifying the command-line parameter: `--reverse_input_channels`. Otherwise, inference results may be incorrect. For details, refer to [When to Reverse Input Channels](#when_to_reverse_input_channels). 
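As an illustration of the Model Optimizer relocation described above (a sketch only, not part of the patch; the SqueezeNet model file and output directory are placeholders taken from the get-started guides in this change):

```sh
# Illustrative sketch, not part of the patch: mo.py now lives under
# <INSTALL_DIR>/tools/model_optimizer instead of deployment_tools/model_optimizer.
# Add --reverse_input_channels only if the channel order differs, per the note above.
source /opt/intel/openvino_2022/setupvars.sh
cd /opt/intel/openvino_2022/tools/model_optimizer
python3 mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel \
              --data_type FP16 \
              --output_dir ~/models/public/squeezenet1.1/ir
```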
diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md index 203fc94862a7fa..d0248d149bc7cd 100644 --- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md @@ -39,7 +39,7 @@ In the TensorBoard, it looks the following way together with some predecessors: Convert this model and put the results in a writable output directory: ```sh -${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer +${INTEL_OPENVINO_DIR}/tools/model_optimizer python3 mo.py --input_model inception_v1.pb -b 1 --output_dir ``` (The other examples on this page assume that you first cd to the `model_optimizer` directory and add the `--output_dir` argument with a directory where you have write permissions.) diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md index b78ec640cba19c..fe829c1c21cbd3 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md @@ -47,9 +47,9 @@ As a result the frozen model file `savedmodeldir/efficientdet-d4_frozen.pb` will To generate the IR of the EfficientDet TensorFlow model, run:
```sh -python3 $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/mo.py \ +python3 $INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py \ --input_model savedmodeldir/efficientdet-d4_frozen.pb \ ---transformations_config $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \ +--transformations_config $INTEL_OPENVINO_DIR/tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \ --input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \ --reverse_input_channels ``` diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md index fa2084f5ef9b46..076fe4716cc205 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md @@ -11,10 +11,10 @@ You can download TensorFlow\* Object Detection API models from the YOLOv3 architecture overview section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on a model) configuration file with custom operations located in the `/deployment_tools/model_optimizer/extensions/front/tf` repository. +To solve the problems explained in the YOLOv3 architecture overview section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on a model) configuration file with custom operations located in the `/tools/model_optimizer/extensions/front/tf` repository. It consists of several attributes:
```sh @@ -206,7 +206,7 @@ Converted TensorFlow YOLO model is missing `Region` layer and its parameters. Or file under the `[region]` title. To recreate the original model structure, use the corresponding yolo `.json` configuration file with custom operations and `Region` layer -parameters when converting the model to the IR. This file is located in the `/deployment_tools/model_optimizer/extensions/front/tf` directory. +parameters when converting the model to the IR. This file is located in the `/tools/model_optimizer/extensions/front/tf` directory. If chosen model has specific values of this parameters, create another configuration file with custom operations and use it for conversion. @@ -217,7 +217,7 @@ python3 ./mo_tf.py --input_model /.pb \ --batch 1 \ --scale 255 \ ---transformations_config /deployment_tools/model_optimizer/extensions/front/tf/.json +--transformations_config /tools/model_optimizer/extensions/front/tf/.json ``` where: diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md index e4a71a8fdc9298..579437aeb5a98a 100644 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md +++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md @@ -35,7 +35,7 @@ operation. Here is a simplified example of the extractor for the custom operation Proposal from Faster-R-CNN model mentioned above. The full code with additional checks is provided in the -`/deployment_tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses +`/tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses operation `ProposalOp` which corresponds to `Proposal` operation described in the [Available Operations Sets](../../../ops/opset.md) document. Refer to the source code below for a detailed explanation of the extractor. diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 4294a31c8c1525..10b1b79aebe946 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -27,11 +27,11 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` -For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/` +For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/` If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. -The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2022/tools` directory.
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Sample Scripts to Learn the Workflow -The sample scripts in `/opt/intel/openvino_2021/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to: +The sample scripts in `/opt/intel/openvino_2022/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit. * Download trained models. * Perform pipeline steps and see the output on the console. @@ -97,7 +97,7 @@ The script: To preview the image that the script will classify: ```sh -cd ${INTEL_OPENVINO_DIR}/deployment_tools/demo +cd ${INTEL_OPENVINO_DIR}/samples/scripts eog car.png ``` @@ -206,7 +206,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -276,7 +276,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -289,7 +289,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -297,9 +297,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels ```
@@ -310,8 +310,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` -* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2022/samples/scripts/car.png` +* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -321,7 +321,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -334,32 +334,32 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU ``` **MYRIAD:** > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` **HDDL:** > **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: @@ -398,7 +398,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 2. 
Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` @@ -413,10 +413,10 @@ This section explains how to build and use the sample and demo applications prov To build all the demos and samples: ```sh -cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp -# to compile C samples, go here also: cd /inference_engine/samples/c +cd $INTEL_OPENVINO_DIR/samples/cpp +# to compile C samples, go here also: cd /samples/c build_samples.sh -cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos +cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos build_demos.sh ``` diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index deb7dcdf715406..2f8461ce0f9ea6 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -27,11 +27,11 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` -For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/`. +For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/`. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. -The primary tools for deploying your models and applications are installed to the `/deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `/tools` directory.
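For orientation, the reorganized top-level layout that these guides now reference can be sketched as follows. The listing is assembled from the paths used elsewhere in this patch and is illustrative only; the exact set of entries may differ between packages and operating systems:

```sh
ls /opt/intel/openvino_2022/
# setupvars.sh           environment script, now at the package root
# runtime/               Inference Engine binaries and 3rdparty dependencies
# tools/                 model_optimizer, deployment_manager, and related tools
# samples/               cpp/, c/, python/ code samples and scripts/ (former demo scripts)
# extras/                open_model_zoo demos and downloader
# install_dependencies/  dependency installation scripts
```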
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -66,7 +66,7 @@ The simplified OpenVINO™ workflow is: ## Use the Sample Scripts to Learn the Workflow -The sample scripts in `/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to: +The sample scripts in `/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit * Download trained models * Perform pipeline steps and see the output on the console @@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image /opt/intel/openvino_2021/deployment_tools/demo/car.png +Image /opt/intel/openvino_2022/samples/scripts/car.png classid probability label ------- ----------- ----- @@ -190,7 +190,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -258,7 +258,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -271,7 +271,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer + cd /opt/intel/openvino_2022/tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -279,9 +279,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels ```
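Before moving on, you can double-check that the conversion above produced the expected files by listing the output directory; this is a minimal check based on the file names mentioned above:

```sh
ls ~/models/public/squeezenet1.1/ir
# expected output from the Model Optimizer run above:
# squeezenet1.1.bin  squeezenet1.1.xml
```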
@@ -292,8 +292,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` -* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2022/samples/scripts/car.png` +* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -303,7 +303,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -316,11 +316,11 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` @@ -328,14 +328,14 @@ The following commands run the Image Classification Code Sample using the `car.p > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md). ```sh - ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```sh Top 10 results: -Image /opt/intel/openvino_2021/deployment_tools/demo/car.png +Image /opt/intel/openvino_2022/samples/scripts/car.png classid probability label ------- ----------- ----- @@ -361,7 +361,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 2. 
Have the directory path for the following:
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
@@ -376,10 +376,10 @@ This section explains how to build and use the sample and demo applications prov
To build all the demos and samples:
```sh
-cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp
-# to compile C samples, go here also: cd /inference_engine/samples/c
+cd $INTEL_OPENVINO_DIR/samples/cpp
+# to compile C samples, go here also: cd /samples/c
build_samples.sh
-cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos
+cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
build_demos.sh
```
@@ -398,7 +398,7 @@ Template to call sample code or a demo application:
With the sample information specified, the command might look like this:
```sh
-cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos/object_detection_demo
+cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos/object_detection_demo
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
```
diff --git a/docs/get_started/get_started_raspbian.md b/docs/get_started/get_started_raspbian.md
index 77698eeebe1478..398131290c6af6 100644
--- a/docs/get_started/get_started_raspbian.md
+++ b/docs/get_started/get_started_raspbian.md
@@ -22,9 +22,9 @@ This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit
The OpenVINO toolkit for Raspbian* OS is distributed without installer. This document refers to the directory to which you unpacked the toolkit package as ``.
-The primary tools for deploying your models and applications are installed to the `/deployment_tools` directory.
+The primary tools for deploying your models and applications are installed to the `/tools` directory.
- Click for the deployment_tools directory structure + Click for the tools directory structure | Directory         | Description | @@ -62,7 +62,7 @@ Follow the steps below to run pre-trained Face Detection network using Inference ``` 2. Build the Object Detection Sample with the following command: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp make -j2 object_detection_sample_ssd ``` 3. Download the pre-trained Face Detection model with the [Model Downloader tool](@ref omz_tools_downloader): @@ -84,7 +84,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /bin/setupvars.sh +source /setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index e7decc8659195c..90e15e09772125 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -24,11 +24,11 @@ In addition, sample scripts, code samples and demo applications are provided to ## Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Windows*](../install_guides/installing-openvino-windows.md). -By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_`, referred to as ``. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. +By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_`, referred to as ``. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2022`. -The primary tools for deploying your models and applications are installed to the `\deployment_tools` directory. +The primary tools for deploying your models and applications are installed to the `\tools` directory.
- Click for the deployment_tools directory structure + Click for the tools directory structure | Directory         | Description | @@ -63,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Sample Scripts to Learn the Workflow -The sample scripts in `\deployment_tools\demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to: +The sample scripts in `\samples\scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit * Download trained models * Perform pipeline steps and see the output on the console @@ -107,7 +107,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png classid probability label ------- ----------- ----- @@ -189,7 +189,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```bat -cd \deployment_tools\tools\model_downloader\ +cd \tools\model_downloader\ ``` ```bat python info_dumper.py --print_all @@ -254,7 +254,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M 3. Run the Model Optimizer script: ```bat - cd \deployment_tools\model_optimizer + cd \tools\model_optimizer ``` ```bat python .\mo.py --input_model \ --data_type --output_dir @@ -267,7 +267,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `C:\Users\\Documents\models\public\squeezenet1.1\ir` output directory: ```bat - cd \deployment_tools\model_optimizer + cd \tools\model_optimizer ``` ```bat python .\mo.py --input_model C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir @@ -275,9 +275,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\\Documents\models\public\squeezenet1.1\ir` directory. -Copy the `squeezenet1.1.labels` file from the `\deployment_tools\demo\` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `\samples\scripts\` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```batch - cp \deployment_tools\demo\squeezenet1.1.labels + cp \samples\scripts\squeezenet1.1.labels ```
@@ -288,8 +288,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `\deployment_tools\demo\car.png` -* `\deployment_tools\demo\car_1.bmp` +* `\samples\scripts\car.png` +* `\samples\scripts\car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -299,7 +299,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```bat - \openvino\bin\setupvars.sh + \setupvars.sh ``` 2. Go to the code samples build directory: ```bat @@ -312,31 +312,31 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `\deployment_tools\demo` directory as an input image, the IR of your model from `C:\Users\\Documents\models\public\squeezenet1.1\ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `\samples\scripts` directory as an input image, the IR of your model from `C:\Users\\Documents\models\public\squeezenet1.1\ir` and on different hardware devices: **CPU:** ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU + .\classification_sample_async -i \samples\scripts\car.png -m C:\Users\\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md). ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU + .\classification_sample_async -i \samples\scripts\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU ``` **MYRIAD:** ```bat - .\classification_sample_async -i \deployment_tools\demo\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD + .\classification_sample_async -i \samples\scripts\car.png -m C:\Users\\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```bat Top 10 results: -Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png classid probability label ------- ----------- ----- @@ -362,7 +362,7 @@ Below you can find basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```bat -\bin\setupvars.bat +\setupvars.bat ``` 2. Make sure to have the directory path for the following: - Code Sample binaries located in `C:\Users\\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release` @@ -378,9 +378,9 @@ To build all the demos and samples: ```sh cd $INTEL_OPENVINO_DIR\inference_engine_samples\cpp -# to compile C samples, go here also: cd \inference_engine\samples\c +# to compile C samples, go here also: cd \samples\c build_samples_msvc.bat -cd $INTEL_OPENVINO_DIR\deployment_tools\open_model_zoo\demos +cd $INTEL_OPENVINO_DIR\extras\open_model_zoo\demos build_demos_msvc.bat ``` diff --git a/docs/how_tos/MonoDepth_how_to.md b/docs/how_tos/MonoDepth_how_to.md index 329eac9e063b49..69f2feba9d7c43 100644 --- a/docs/how_tos/MonoDepth_how_to.md +++ b/docs/how_tos/MonoDepth_how_to.md @@ -11,7 +11,7 @@ Tested on OpenVINO™ 2021, Ubuntu 18.04. Define the OpenVINO™ install directory: ``` -export OV=/opt/intel/openvino_2021/ +export OV=/opt/intel/openvino_2022/ ``` Define the working directory. 
Make sure the directory exist: ``` @@ -22,19 +22,19 @@ export WD=~/MonoDepth_Python/ Initialize OpenVINO™: ``` -source $OV/bin/setupvars.sh +source $OV/setupvars.sh ``` Install the Model Optimizer prerequisites: ``` -cd $OV/deployment_tools/model_optimizer/install_prerequisites/ +cd $OV/tools/model_optimizer/install_prerequisites/ sudo ./install_prerequisites.sh ``` Install the Model Downloader prerequisites: ``` -cd $OV/deployment_tools/tools/model_downloader/ +cd $OV/extras/open_model_zoo/tools/downloader/ python3 -mpip install --user -r ./requirements.in sudo python3 -mpip install --user -r ./requirements-pytorch.in sudo python3 -mpip install --user -r ./requirements-caffe2.in @@ -44,7 +44,7 @@ sudo python3 -mpip install --user -r ./requirements-caffe2.in Download all models from the Demo Models list: ``` -python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD +python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD ``` ## 4. Convert Models to Intermediate Representation (IR) @@ -52,7 +52,7 @@ python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/dep Use the convert script to convert the models to ONNX*, and then to IR format: ``` cd $WD -python3 $OV/deployment_tools/tools/model_downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst +python3 $OV/extras/open_model_zoo/tools/downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst ``` ## 5. Run Demo diff --git a/docs/how_tos/POT_how_to_example.md b/docs/how_tos/POT_how_to_example.md index 28adc19062b4a6..ea06a7a322f533 100644 --- a/docs/how_tos/POT_how_to_example.md +++ b/docs/how_tos/POT_how_to_example.md @@ -16,21 +16,21 @@ Install OpenVINO™ toolkit and Model Optimizer, Accuracy Checker, and Post-trai 1. Define the OpenVINO™ install directory: ``` -export OV=/opt/intel/openvino_2021/ +export OV=/opt/intel/openvino_2022/ ``` 2. Install the Model Optimizer prerequisites: ``` -cd $OV/deployment_tools/model_optimizer/install_prerequisites +cd $OV/tools/model_optimizer/install_prerequisites sudo ./install_prerequisites.sh ``` 3. Install the Accuracy Checker requirements: ``` -cd $OV/deployment_tools/open_model_zoo/tools/accuracy_checker +cd $OV/tools/accuracy_checker sudo python3 setup.py install ``` 4. Install the Post-training Optimization Tool: ``` -cd $OV/deployment_tools/tools/post_training_optimization_toolkit +cd $OV/tools/post_training_optimization_toolkit sudo python3 setup.py install ``` @@ -46,14 +46,14 @@ mkdir ~/POT cd ~/POT ``` ``` -python3 $OV/deployment_tools/tools/model_downloader/downloader.py --name mobilenet-v2-pytorch -o . +python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --name mobilenet-v2-pytorch -o . ``` ## 3. 
Prepare Model for Inference Install requirements for PyTorch using the commands below: ``` -cd $OV/deployment_tools/open_model_zoo/tools/downloader +cd $OV/extras/open_model_zoo/tools/downloader ``` ``` python3 -mpip install --user -r ./requirements-pytorch.in @@ -61,13 +61,13 @@ python3 -mpip install --user -r ./requirements-pytorch.in You can find the parameters for Mobilnet v2 conversion here: ``` -vi /opt/intel/openvino_2021/deployment_tools/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml +vi /opt/intel/openvino_2022/extras/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml ``` Convert the model from PyTorch to ONNX*: ``` cd ~/POT/public/mobilenet-v2-pytorch -python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ +python3 /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/pytorch_to_onnx.py \ --model-name=MobileNetV2 \ --model-path=. \ --weights=mobilenet-v2.pth \ @@ -100,17 +100,17 @@ mv mobilenet-v2.bin ~/POT/model.bin Edit the configuration files: ``` -sudo vi $OV/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +sudo vi $OV/tools/accuracy_checker/dataset_definitions.yml (edit imagenet_1000_classes) ``` ``` -export DEFINITIONS_FILE=/opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml +export DEFINITIONS_FILE=/opt/intel/openvino_2022/tools/accuracy_checker/dataset_definitions.yml ``` Copy the JSON file to my directory and edit: ``` -cp $OV/deployment_tools/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT +cp $OV/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT ``` ``` vi mobilenetV2_pytorch_int8.json @@ -119,7 +119,7 @@ vi mobilenetV2_pytorch_int8.json Copy the YML file to my directory and edit: ``` -cp /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT +cp /opt/intel/openvino_2022/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT ``` ``` vi mobilenet-v2.yml diff --git a/docs/install_guides/deployment-manager-tool.md b/docs/install_guides/deployment-manager-tool.md index 0989a3d5929c57..a7a7783767b0fd 100644 --- a/docs/install_guides/deployment-manager-tool.md +++ b/docs/install_guides/deployment-manager-tool.md @@ -2,7 +2,7 @@ The Deployment Manager of Intel® Distribution of OpenVINO™ creates a deployment package by assembling the model, IR files, your application, and associated dependencies into a runtime package for your target device. -The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `/deployment_tools/tools/deployment_manager` directory. +The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `/tools/deployment_manager` directory. ## Pre-Requisites @@ -32,7 +32,7 @@ Interactive mode provides a user-friendly command-line interface that will guide 1. 
To launch the Deployment Manager in the interactive mode, open a new terminal window, go to the Deployment Manager tool directory and run the tool script without parameters: ```sh - /deployment_tools/tools/deployment_manager + /tools/deployment_manager ``` ```sh ./deployment_manager.py @@ -92,9 +92,9 @@ To deploy the Inference Engine components from the development machine to the ta ``` * For Windows, use an archiver your prefer. - The package is unpacked to the destination directory and the following subdirectories are created: - * `bin` — Snapshot of the `bin` directory from the OpenVINO installation directory. - * `deployment_tools/inference_engine` — Contains the Inference Engine binary files. + The package is unpacked to the destination directory and the following files and subdirectories are created: + * `setupvars.sh` — copy of `setupvars.sh` + * `runtime` — Contains the OpenVINO runtime binary files. * `install_dependencies` — Snapshot of the `install_dependencies` directory from the OpenVINO installation directory. * `` — The directory with the user data (IRs, datasets, etc.) you specified while configuring the package. 3. For Linux, to run inference on a target Intel® GPU, Intel® Movidius™ VPU, or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you need to install additional dependencies by running the `install_openvino_dependencies.sh` script: @@ -110,14 +110,14 @@ To deploy the Inference Engine components from the development machine to the ta cd /openvino/ ``` ```sh - source ./bin/setupvars.sh + source ./setupvars.sh ``` * For Windows: ``` cd \openvino\ ``` ``` - .\bin\setupvars.bat + .\setupvars.bat ``` Congratulations, you have finished the deployment of the Inference Engine components to the target host. \ No newline at end of file diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md index cf299767dbe9c0..1c3030fa9ae1e6 100644 --- a/docs/install_guides/installing-openvino-docker-linux.md +++ b/docs/install_guides/installing-openvino-docker-linux.md @@ -193,7 +193,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ ldconfig ``` - **CentOS 7**: @@ -223,11 +223,11 @@ RUN /bin/mkdir -p '/usr/local/lib' && \ /bin/mkdir -p '/usr/local/include/libusb-1.0' && \ /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \ /bin/mkdir -p '/usr/local/lib/pkgconfig' && \ - printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021/bin/setupvars.sh + printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2022/setupvars.sh WORKDIR /opt/libusb-1.0.22/ RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ - cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ + cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \ ldconfig ``` 2. 
Run the Docker* image: @@ -329,28 +329,28 @@ To run the Classification Demo Using SqueezeNet on a specific inference device, ```sh docker run -itu root:root --rm -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/run_sample_squeezenet.sh -d CPU" +/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d CPU" ``` **GPU**: ```sh docker run -itu root:root --rm --device /dev/dri:/dev/dri -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/run_sample_squeezenet.sh -d GPU" +/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d GPU" ``` **MYRIAD**: ```sh docker run -itu root:root --rm --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/run_sample_squeezenet.sh -d MYRIAD" +/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d MYRIAD" ``` **HDDL**: ```sh docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp -/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/run_sample_squeezenet.sh -d HDDL" +/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d HDDL" ``` ## Troubleshooting diff --git a/docs/install_guides/installing-openvino-docker-windows.md b/docs/install_guides/installing-openvino-docker-windows.md index b7eb56e750586c..31348f393b1366 100644 --- a/docs/install_guides/installing-openvino-docker-windows.md +++ b/docs/install_guides/installing-openvino-docker-windows.md @@ -83,7 +83,7 @@ docker run -it --rm If you want to try some demos then run image with the root privileges (some additional 3-rd party dependencies will be installed): ```bat -docker run -itu ContainerAdministrator --rm cmd /S /C "cd deployment_tools\demo && run_sample_squeezenet.bat -d CPU" +docker run -itu ContainerAdministrator --rm cmd /S /C "cd samples\scripts && run_sample_squeezenet.bat -d CPU" ``` ## Configure and Run the Docker* Image for GPU @@ -140,7 +140,7 @@ GPU Acceleration in Windows containers feature requires to meet Windows host, Op ``` 3. For example, run the `run_sample_squeezenet` demo with the command below: ```bat - cd bin && setupvars.bat && cd ../ && cd deployment_tools\demo && run_sample_squeezenet.bat -d GPU + cd samples\scripts && run_sample_squeezenet.bat -d GPU ``` > **NOTE**: Addittional third-party dependencies will be installed. diff --git a/docs/install_guides/installing-openvino-linux-ivad-vpu.md b/docs/install_guides/installing-openvino-linux-ivad-vpu.md index cd86804307c7fe..9e7135bdfc3593 100644 --- a/docs/install_guides/installing-openvino-linux-ivad-vpu.md +++ b/docs/install_guides/installing-openvino-linux-ivad-vpu.md @@ -11,9 +11,9 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi 1. Set the environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` -> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/deployment_tools/inference_engine/external/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//deployment_tools/inference_engine/external/hddl`. +> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `/runtime/3rdparty/hddl`. 
If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2022/runtime/3rdparty/hddl`. 2. Install dependencies: ```sh @@ -52,7 +52,7 @@ E: [ncAPI] [ 965618] [MainThread] ncDeviceOpen:677 Failed to find a device, ```sh kill -9 $(pidof hddldaemon autoboot) pidof hddldaemon autoboot # Make sure none of them is alive -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ${HDDL_INSTALL_DIR}/bin/bsl_reset ``` diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index 2f36a0739018f3..8cf4f4dc213273 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -133,7 +133,7 @@ sudo ./install.sh -s silent.cfg By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as ``: * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2021/`. + For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2022/`. 8. **Optional**: You can choose **Customize** to change the installation directory or the components you want to install: > **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for next installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions. @@ -156,7 +156,7 @@ These dependencies are required for: 1. Change to the `install_dependencies` directory: ```sh -cd /opt/intel/openvino_2021/install_dependencies +cd /opt/intel/openvino_2022/install_dependencies ``` 2. Run a script to download and install the external software dependencies: ```sh @@ -169,7 +169,7 @@ sudo -E ./install_openvino_dependencies.sh You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` **Optional:** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: @@ -181,7 +181,7 @@ vi /.bashrc 2. Add this line to the end of the file: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` 3. Save and close the file: press the **Esc** key and type `:wq`. @@ -217,7 +217,7 @@ You can choose to either configure all supported frameworks at once **OR** confi 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -231,7 +231,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. 
You can run more than one script: @@ -281,7 +281,7 @@ The steps in this section are required only if you want to enable the toolkit co 1. Go to the install_dependencies directory: ```sh -cd /opt/intel/openvino_2021/install_dependencies/ +cd /opt/intel/openvino_2022/install_dependencies/ ``` 2. Install the **Intel® Graphics Compute Runtime for OpenCL™** driver components required to use the GPU plugin and write custom layers for Intel® Integrated Graphics. The drivers are not included in the package and must be installed separately. @@ -315,7 +315,7 @@ sudo usermod -a -G users "$(whoami)" Log out and log in for it to take effect. 2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows: ```sh - sudo cp /opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ + sudo cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ ``` ```sh sudo udevadm control --reload-rules @@ -341,7 +341,7 @@ After configuration is done, you are ready to run the verification scripts with 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino_2021/deployment_tools/demo +cd /opt/intel/openvino_2022/samples/scripts ``` 2. Run the **Image Classification verification script**. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index f4b3f177713939..36249196da0626 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -119,7 +119,7 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom * For root or administrator: `/opt/intel/openvino_/` * For regular users: `/home//intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/`. + For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2022/`. 9. If needed, click **Customize** to change the installation directory or the components you want to install: ![](../img/openvino-install-macos-04.png) > **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for next installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions. @@ -138,10 +138,10 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` -If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. +If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory. Optional: The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows: @@ -153,10 +153,10 @@ If you didn't choose the default installation option, replace `/opt/intel/openvi 3. 
Add this line to the end of the file: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` -If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory. +If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory. 4. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key. @@ -189,7 +189,7 @@ You can choose to either configure the Model Optimizer for all supported framewo 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -203,7 +203,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: @@ -272,14 +272,14 @@ Now you are ready to get started. To continue, see the following pages: Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Toolkit from your system: -1. From the installation directory (by default, `/opt/intel/openvino_2021`), locate and open `openvino_toolkit_uninstaller.app`. +1. From the installation directory (by default, `/opt/intel/openvino_2022`), locate and open `openvino_toolkit_uninstaller.app`. 2. Follow the uninstallation wizard instructions. 3. When uninstallation is complete, click **Finish**. ## Additional Resources -- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/deployment_tools/demo/`. +- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2022/samples/scripts/`. - For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page. diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 338beda73c813a..af6ee21c7cc44b 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_.tgz`. 3. Create an installation folder. ```sh - sudo mkdir -p /opt/intel/openvino_2021 + sudo mkdir -p /opt/intel/openvino_2022 ``` 4. Unpack the archive: ```sh - sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino_2021 + sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino_2022 ``` Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules. @@ -97,12 +97,12 @@ CMake is installed. Continue to the next section to set the environment variable You must update several environment variables before you can compile and run OpenVINO toolkit applications. 
Run the following script to temporarily set the environment variables: ```sh -source /opt/intel/openvino_2021/bin/setupvars.sh +source /opt/intel/openvino_2022/setupvars.sh ``` **(Optional)** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: ```sh -echo "source /opt/intel/openvino_2021/bin/setupvars.sh" >> ~/.bashrc +echo "source /opt/intel/openvino_2022/setupvars.sh" >> ~/.bashrc ``` To test your change, open a new terminal. You will see the following: @@ -120,11 +120,11 @@ This task applies only if you have an Intel® Neural Compute Stick 2 device. Log out and log in for it to take effect. 2. If you didn't modify `.bashrc` to permanently set the environment variables, run `setupvars.sh` again after logging in: ```sh - source /opt/intel/openvino_2021/bin/setupvars.sh + source /opt/intel/openvino_2022/setupvars.sh ``` 3. To perform inference on the Intel® Neural Compute Stick 2, install the USB rules running the `install_NCS_udev_rules.sh` script: ```sh - sh /opt/intel/openvino_2021/install_dependencies/install_NCS_udev_rules.sh + sh /opt/intel/openvino_2022/install_dependencies/install_NCS_udev_rules.sh ``` 4. Plug in your Intel® Neural Compute Stick 2. @@ -140,7 +140,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc ``` 2. Build the Object Detection Sample: ```sh - cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp ``` ```sh make -j2 object_detection_sample_ssd diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 7b99292e32688f..d82044529ea5ea 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -107,7 +107,7 @@ The following components are installed by default: 1. If you have not downloaded the Intel® Distribution of OpenVINO™ toolkit, [download the latest version](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html). By default, the file is saved to the `Downloads` directory as `w_openvino_toolkit_p_.exe`. 2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_p_.exe`. A window opens to let you choose your installation directory and components. ![](../img/openvino-install-windows-01.png) - The default installation directory is `C:\Program Files (x86)\Intel\openvino_`, for simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. If you choose a different installation directory, the installer will create the directory for you. + The default installation directory is `C:\Program Files (x86)\Intel\openvino_`, for simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2022`. If you choose a different installation directory, the installer will create the directory for you. > **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for next installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions. 3. Click **Next**. 4. You are asked if you want to provide consent to gather information. 
Choose the option of your choice. Click **Next**.
@@ -126,7 +126,7 @@ The screen example below indicates you are missing two dependencies:
You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables:
```sh
-"C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat"
+"C:\Program Files (x86)\Intel\openvino_2022\setupvars.bat"
```
> **IMPORTANT**: Windows PowerShell* is not recommended to run the configuration commands, please use the Command Prompt instead.
@@ -147,7 +147,7 @@ The Model Optimizer is a key component of the Intel® Distribution of OpenVINO
The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware.
-The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
+The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page.
@@ -162,7 +162,7 @@ You can configure the Model Optimizer either for all supported frameworks at onc
> **NOTE**:
> In the steps below:
-> - If you you want to use the Model Optimizer from another installed versions of Intel® Distribution of OpenVINO™ toolkit installed, replace `openvino_2021` with `openvino_`, where `` is the required version.
+> - If you want to use the Model Optimizer from another installed version of the Intel® Distribution of OpenVINO™ toolkit, replace `openvino_2022` with `openvino_`, where `` is the required version.
> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\Intel` with the directory where you installed the software.
These steps use a command prompt to make sure you see error messages.
@@ -176,7 +176,7 @@ Type commands in the opened window:
2. Go to the Model Optimizer prerequisites directory.
```sh -cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer\install_prerequisites ``` 3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:
@@ -188,7 +188,7 @@ install_prerequisites.bat 1. Go to the Model Optimizer prerequisites directory:
```sh -cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer\install_prerequisites ``` 2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one: @@ -269,7 +269,7 @@ To perform inference on Intel® Vision Accelerator Design with Intel® Movidius 1. Download and install Visual C++ Redistributable for Visual Studio 2017 2. Check with a support engineer if your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires SMBUS connection to PCIe slot (most unlikely). Install the SMBUS driver only if confirmed (by default, it's not required): - 1. Go to the `\deployment_tools\inference-engine\external\hddl\drivers\SMBusDriver` directory, where `` is the directory in which the Intel Distribution of OpenVINO toolkit is installed. + 1. Go to the `\runtime\3rdparty\hddl\drivers\SMBusDriver` directory, where `` is the directory in which the Intel Distribution of OpenVINO toolkit is installed. 2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop-up menu. You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. diff --git a/docs/install_guides/movidius-setup-guide.md b/docs/install_guides/movidius-setup-guide.md index c26ebbda38d9de..0bb5de0fe69024 100644 --- a/docs/install_guides/movidius-setup-guide.md +++ b/docs/install_guides/movidius-setup-guide.md @@ -46,7 +46,7 @@ The `hddldaemon` is a system service, a binary executable that is run to manage `` refers to the following default OpenVINO™ Inference Engine directories: - **Linux:** ``` - /opt/intel/openvino_2021/inference_engine + /opt/intel/openvino_2022/inference_engine ``` - **Windows:** ``` diff --git a/docs/optimization_guide/dldt_optimization_guide.md b/docs/optimization_guide/dldt_optimization_guide.md index 73d8df9d308f8f..698e9f7952adc2 100644 --- a/docs/optimization_guide/dldt_optimization_guide.md +++ b/docs/optimization_guide/dldt_optimization_guide.md @@ -305,7 +305,7 @@ Other than that, when implementing the kernels, you can try the methods from the ### A Few Device-Specific Tips - As already outlined in the CPU Checklist, align the threading model that you use in your CPU kernels with the model that the rest of the Inference Engine compiled with. -- For CPU extensions, consider kernel flavor that supports blocked layout, if your kernel is in the hotspots (see Internal Inference Performance Counters). Since Intel MKL-DNN internally operates on the blocked layouts, this would save you a data packing (Reorder) on tensor inputs/outputs of your kernel. For example of the blocked layout support, please, refer to the extensions in `/deployment_tools/samples/extension/`. +- For CPU extensions, consider kernel flavor that supports blocked layout, if your kernel is in the hotspots (see Internal Inference Performance Counters). Since Intel MKL-DNN internally operates on the blocked layouts, this would save you a data packing (Reorder) on tensor inputs/outputs of your kernel. 
## Plugging Inference Engine to Applications diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index fdefac633c77c1..70d17980991242 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -14,6 +14,26 @@ endif() add_subdirectory(samples) +# TODO: remove this +foreach(sample benchmark_app classification_sample_async hello_classification + hello_nv12_input_classification hello_query_device hello_reshape_ssd + ngraph_function_creation_sample object_detection_sample_ssd + speech_sample style_transfer_sample hello_classification_c + object_detection_sample_ssd_c hello_nv12_input_classification_c) + if(TARGET ${sample}) + install(TARGETS ${sample} + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) + endif() +endforeach() + +foreach(samples_library opencv_c_wrapper format_reader) + if(TARGET ${samples_library}) + install(TARGETS ${samples_library} + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) + endif() +endforeach() + openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils) if(ENABLE_TESTS) @@ -31,7 +51,7 @@ ie_cpack_add_component(cpp_samples DEPENDS cpp_samples_deps core) if(UNIX) install(DIRECTORY samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp + DESTINATION samples/cpp COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN *.bat EXCLUDE @@ -39,7 +59,7 @@ if(UNIX) PATTERN .clang-format EXCLUDE) elseif(WIN32) install(DIRECTORY samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp + DESTINATION samples/cpp COMPONENT cpp_samples USE_SOURCE_PERMISSIONS PATTERN *.sh EXCLUDE @@ -47,35 +67,26 @@ elseif(WIN32) PATTERN .clang-format EXCLUDE) endif() -install(TARGETS format_reader - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) - -if(TARGET benchmark_app) - install(TARGETS benchmark_app - RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) -endif() - # install C samples ie_cpack_add_component(c_samples DEPENDS core_c) if(UNIX) install(PROGRAMS samples/build_samples.sh - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) elseif(WIN32) install(PROGRAMS samples/build_samples_msvc.bat - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) endif() install(DIRECTORY ie_bridges/c/samples/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples PATTERN ie_bridges/c/samples/CMakeLists.txt EXCLUDE PATTERN ie_bridges/c/samples/.clang-format EXCLUDE) install(FILES samples/CMakeLists.txt - DESTINATION ${IE_CPACK_IE_DIR}/samples/c + DESTINATION samples/c COMPONENT c_samples) diff --git a/inference-engine/ie_bridges/c/docs/api_overview.md b/inference-engine/ie_bridges/c/docs/api_overview.md index f2d9dd92b32df7..298ff4f3e650c1 100644 --- a/inference-engine/ie_bridges/c/docs/api_overview.md +++ b/inference-engine/ie_bridges/c/docs/api_overview.md @@ -22,7 +22,7 @@ Supported Python* versions: To configure the environment for the Inference Engine C* API, run: -- On Ubuntu 16.04: `source /bin/setupvars.sh .` +- On Ubuntu 16.04: `source /setupvars.sh .` - On Windows 10: XXXX The script automatically detects latest installed C* version and configures required environment if the version is supported. 
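The install rules in this change place the C++ and C sample sources under `samples/cpp` and `samples/c`, with `setupvars.sh` at the package root. A possible build flow under the new layout is sketched below; it assumes the default `/opt/intel/openvino_2022` install prefix and the `build_samples.sh` script shipped with the samples:

```sh
# set up the environment from the package root
source /opt/intel/openvino_2022/setupvars.sh

# build the C++ samples from their installed location
cd /opt/intel/openvino_2022/samples/cpp
./build_samples.sh

# build the C samples; build_samples.sh is installed into samples/c as well
cd /opt/intel/openvino_2022/samples/c
./build_samples.sh
```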
diff --git a/inference-engine/ie_bridges/c/samples/hello_classification/README.md b/inference-engine/ie_bridges/c/samples/hello_classification/README.md index 7d1b45d9c3f008..8765c6e1428951 100644 --- a/inference-engine/ie_bridges/c/samples/hello_classification/README.md +++ b/inference-engine/ie_bridges/c/samples/hello_classification/README.md @@ -72,7 +72,7 @@ The application outputs top-10 inference results. ``` Top 10 results: -Image C:\images\car.bmp +Image /opt/intel/openvino/samples/scripts/car.png classid probability ------- ----------- diff --git a/inference-engine/ie_bridges/c/src/CMakeLists.txt b/inference-engine/ie_bridges/c/src/CMakeLists.txt index d09b267731641a..1a5f914b7d634b 100644 --- a/inference-engine/ie_bridges/c/src/CMakeLists.txt +++ b/inference-engine/ie_bridges/c/src/CMakeLists.txt @@ -39,8 +39,8 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + INCLUDES DESTINATION runtime/include/ie) install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/ - DESTINATION ${IE_CPACK_IE_DIR}/include/ie + DESTINATION runtime/include/ie COMPONENT core_c_dev) diff --git a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt index b5e535f87405a8..a320d71a5230c2 100644 --- a/inference-engine/ie_bridges/python/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/CMakeLists.txt @@ -99,7 +99,8 @@ install(PROGRAMS src/openvino/__init__.py ie_cpack_add_component(python_samples) install(DIRECTORY sample/ - DESTINATION ${IE_CPACK_IE_DIR}/samples/python + DESTINATION samples/python + USE_SOURCE_PERMISSIONS COMPONENT python_samples) ie_cpack(${PYTHON_COMPONENT} python_samples) diff --git a/inference-engine/ie_bridges/python/docs/api_overview.md b/inference-engine/ie_bridges/python/docs/api_overview.md index 577edcc080c181..3938c71b1480f2 100644 --- a/inference-engine/ie_bridges/python/docs/api_overview.md +++ b/inference-engine/ie_bridges/python/docs/api_overview.md @@ -26,11 +26,11 @@ Supported Python* versions: ## Set Up the Environment To configure the environment for the Inference Engine Python\* API, run: - * On Ubuntu\* 18.04 or 20.04: `source /bin/setupvars.sh .` - * On CentOS\* 7.4: `source /bin/setupvars.sh .` - * On macOS\* 10.x: `source /bin/setupvars.sh .` - * On Raspbian\* 9,: `source /bin/setupvars.sh .` - * On Windows\* 10: `call \bin\setupvars.bat` + * On Ubuntu\* 18.04 or 20.04: `source /setupvars.sh .` + * On CentOS\* 7.4: `source /setupvars.sh .` + * On macOS\* 10.x: `source /setupvars.sh .` + * On Raspbian\* 9,: `source /setupvars.sh .` + * On Windows\* 10: `call \setupvars.bat` The script automatically detects latest installed Python\* version and configures required environment if the version is supported. 
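A small follow-up check, under the same layout, that the Python bindings resolve after sourcing the relocated script; the install prefix and the use of `python3` are assumptions:

```sh
# Source the relocated setupvars.sh, then import the Inference Engine Python API
# and list the devices it can see (assumed prefix; adjust as needed).
source /opt/intel/openvino_2022/setupvars.sh
python3 -c "from openvino.inference_engine import IECore; print(IECore().available_devices)"
```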
If you want to use certain version of Python\*, set the environment variable `PYTHONPATH=/python/` diff --git a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md index 43a60b3a331db3..588efb3d22617c 100644 --- a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md +++ b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md @@ -85,7 +85,7 @@ The sample application logs each step in a standard output stream and outputs to ``` [ INFO ] Creating Inference Engine -[ INFO ] Loading the network using ngraph function with weights from c:\openvino\deployment_tools\inference_engine\samples\python\ngraph_function_creation_sample\lenet.bin +[ INFO ] Loading the network using ngraph function with weights from c:\openvino\samples\python\ngraph_function_creation_sample\lenet.bin [ INFO ] Configuring input and output blobs [ INFO ] Loading the model to the plugin [ WARNING ] Image c:\images\3.png is inverted to white over black diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt index 5fcdd37c790c77..c55ae9cd1e84c6 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt @@ -72,8 +72,10 @@ add_custom_command(TARGET ${TARGET_NAME} # install install(TARGETS ${INSTALLED_TARGETS} - RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT} - LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT}) + RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine + COMPONENT ${PYTHON_COMPONENT} + LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine + COMPONENT ${PYTHON_COMPONENT}) install(PROGRAMS __init__.py DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt index 5aeb5224aa14eb..0aa8280bc5c5d1 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt @@ -59,9 +59,11 @@ add_custom_command(TARGET ${TARGET_NAME} # ie_cpack_add_component(${PYTHON_VERSION}_dev DEPENDS ${PYTHON_COMPONENT}) install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT} - LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}) + RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations + COMPONENT ${PYTHON_COMPONENT} + LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations + COMPONENT ${PYTHON_COMPONENT}) install(PROGRAMS __init__.py - DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations + DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}) diff --git 
a/inference-engine/ie_bridges/python/wheel/.env.in b/inference-engine/ie_bridges/python/wheel/.env.in index 9ba0660d5d2557..760f8bcb358f29 100644 --- a/inference-engine/ie_bridges/python/wheel/.env.in +++ b/inference-engine/ie_bridges/python/wheel/.env.in @@ -9,8 +9,6 @@ WHEEL_REQUIREMENTS=@WHEEL_REQUIREMENTS@ WHEEL_OVERVIEW=@WHEEL_OVERVIEW@ CMAKE_BUILD_DIR=@CMAKE_BINARY_DIR@ -CORE_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@ -PLUGINS_LIBS_DIR=@PLUGINS_LIBS_DIR@ -NGRAPH_LIBS_DIR=@NGRAPH_LIBS_DIR@ +OV_RUNTIME_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@ TBB_LIBS_DIR=@TBB_LIBS_DIR@ PY_PACKAGES_DIR=@PY_PACKAGES_DIR@ diff --git a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt index 1b1931c08a493d..00deb5b0887cab 100644 --- a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt @@ -18,11 +18,8 @@ set(WHEEL_OVERVIEW "${CMAKE_CURRENT_SOURCE_DIR}/meta/pypi_overview.md" CACHE STR set(SETUP_PY "${CMAKE_CURRENT_SOURCE_DIR}/setup.py") set(SETUP_ENV "${CMAKE_CURRENT_SOURCE_DIR}/.env.in") -set(CORE_LIBS_DIR ${IE_CPACK_RUNTIME_PATH}) -set(PLUGINS_LIBS_DIR ${IE_CPACK_RUNTIME_PATH}) -set(NGRAPH_LIBS_DIR deployment_tools/ngraph/lib) set(PY_PACKAGES_DIR ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}) -set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/lib) +set(TBB_LIBS_DIR runtime/3rdparty/tbb/lib) if(APPLE) set(WHEEL_PLATFORM macosx_10_15_x86_64) @@ -30,7 +27,7 @@ elseif(UNIX) set(WHEEL_PLATFORM manylinux2014_x86_64) elseif(WIN32) set(WHEEL_PLATFORM win_amd64) - set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/bin) + set(TBB_LIBS_DIR runtime/3rdparty/tbb/bin) else() message(FATAL_ERROR "This platform is not supported") endif() diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index 3a8827b388a316..436baa7a7c923a 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -42,10 +42,8 @@ # The following variables can be defined in environment or .env file CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.') -CORE_LIBS_DIR = config('CORE_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}') -PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}') -NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', 'deployment_tools/ngraph/lib') -TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'deployment_tools/inference_engine/external/tbb/{LIBS_DIR}') +OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', f'runtime/{LIBS_DIR}/{ARCH}/{CONFIG}') +TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'runtime/3rdparty/tbb/{LIBS_DIR}') PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}') LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path' @@ -53,43 +51,43 @@ 'ie_libs': { 'name': 'core', 'prefix': 'libs.core', - 'install_dir': CORE_LIBS_DIR, + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'hetero_plugin': { 'name': 'hetero', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'gpu_plugin': { 'name': 'gpu', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'cpu_plugin': { 'name': 'cpu', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': 
OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'multi_plugin': { 'name': 'multi', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'myriad_plugin': { 'name': 'myriad', - 'prefix': 'libs.plugins', - 'install_dir': PLUGINS_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'ngraph_libs': { 'name': 'ngraph', - 'prefix': 'libs.ngraph', - 'install_dir': NGRAPH_LIBS_DIR, + 'prefix': 'libs.core', + 'install_dir': OV_RUNTIME_LIBS_DIR, 'rpath': LIBS_RPATH, }, 'tbb_libs': { diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md index d2edfc77b162e2..c19fb5c4f70d56 100644 --- a/inference-engine/samples/benchmark_app/README.md +++ b/inference-engine/samples/benchmark_app/README.md @@ -148,33 +148,33 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's] ## Examples of Running the Tool -This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/deployment_tools/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/samples/scripts/` directory is used. > **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. 1. Download the model. Go to the the Model Downloader directory and run the `downloader.py` script with specifying the model name and directory to download the model to: ```sh - cd /deployment_tools/open_model_zoo/tools/downloader + cd /extras/open_model_zoo/tools/downloader ``` ```sh python3 downloader.py --name googlenet-v1 -o ``` 2. Convert the model to the Inference Engine IR format. Go to the Model Optimizer directory and run the `mo.py` script with specifying the path to the model, model format (which must be FP32 for CPU and FPG) and output directory to generate the IR files: ```sh - cd /deployment_tools/model_optimizer + cd /tools/model_optimizer ``` ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` -3. Run the tool with specifying the `/deployment_tools/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: +3. Run the tool with specifying the `/samples/scripts/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. 
The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: * On CPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d CPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /samples/scripts/car.png -d CPU -api async --progress true ``` * On GPU: ```sh - ./benchmark_app -m /googlenet-v1.xml -i /deployment_tools/demo/car.png -d GPU -api async --progress true + ./benchmark_app -m /googlenet-v1.xml -i /samples/scripts/car.png -d GPU -api async --progress true ``` The application outputs the number of executed iterations, total duration of execution, latency, and throughput. diff --git a/inference-engine/samples/build_samples.sh b/inference-engine/samples/build_samples.sh index d584a11011985d..ad92920ae50be8 100755 --- a/inference-engine/samples/build_samples.sh +++ b/inference-engine/samples/build_samples.sh @@ -19,12 +19,10 @@ SAMPLES_PATH="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )" printf "\nSetting environment variables for building samples...\n" if [ -z "$INTEL_OPENVINO_DIR" ]; then - if [ -e "$SAMPLES_PATH/../../../bin/setupvars.sh" ]; then - setvars_path="$SAMPLES_PATH/../../../bin/setupvars.sh" - elif [ -e "$SAMPLES_PATH/../../../../bin/setupvars.sh" ]; then - setvars_path="$SAMPLES_PATH/../../../../bin/setupvars.sh" + if [ -e "$SAMPLES_PATH/../../setupvars.sh" ]; then + setvars_path="$SAMPLES_PATH/../../setupvars.sh" else - printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source /bin/setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n" + printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source /setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n" exit 1 fi if ! source "$setvars_path" ; then @@ -33,7 +31,7 @@ if [ -z "$INTEL_OPENVINO_DIR" ]; then fi else # case for run with `sudo -E` - source "$INTEL_OPENVINO_DIR/bin/setupvars.sh" + source "$INTEL_OPENVINO_DIR/setupvars.sh" fi if ! command -v cmake &>/dev/null; then diff --git a/inference-engine/samples/build_samples_msvc.bat b/inference-engine/samples/build_samples_msvc.bat index d7f0bce1dd4c1d..d986d7277d0667 100644 --- a/inference-engine/samples/build_samples_msvc.bat +++ b/inference-engine/samples/build_samples_msvc.bat @@ -9,7 +9,6 @@ set "ROOT_DIR=%~dp0" FOR /F "delims=\" %%i IN ("%ROOT_DIR%") DO set SAMPLES_TYPE=%%~nxi set "SOLUTION_DIR64=%USERPROFILE%\Documents\Intel\OpenVINO\inference_engine_%SAMPLES_TYPE%_samples_build" -if "%InferenceEngine_DIR%"=="" set "InferenceEngine_DIR=%ROOT_DIR%\..\share" set MSBUILD_BIN= set VS_PATH= @@ -30,19 +29,16 @@ if not "%1" == "" ( ) if "%INTEL_OPENVINO_DIR%"=="" ( - if exist "%ROOT_DIR%\..\..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%\..\..\..\bin\setupvars.bat" + if exist "%ROOT_DIR%\..\..\setupvars.bat" ( + call "%ROOT_DIR%\..\..\setupvars.bat" ) else ( - if exist "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat" - ) else ( echo Failed to set the environment variables automatically - echo To fix, run the following command: ^\bin\setupvars.bat + echo To fix, run the following command: ^\setupvars.bat echo where INSTALL_DIR is the OpenVINO installation directory. 
GOTO errorHandling ) ) -) +) if "%PROCESSOR_ARCHITECTURE%" == "AMD64" ( set "PLATFORM=x64" diff --git a/inference-engine/samples/hello_classification/README.md b/inference-engine/samples/hello_classification/README.md index 1ab5e2537913b5..eebad1ef8ff2b5 100644 --- a/inference-engine/samples/hello_classification/README.md +++ b/inference-engine/samples/hello_classification/README.md @@ -72,7 +72,7 @@ The application outputs top-10 inference results. ``` Top 10 results: -Image C:\images\car.bmp +Image /opt/intel/openvino/samples/scripts/car.png classid probability ------- ----------- diff --git a/inference-engine/src/gna_plugin/CMakeLists.txt b/inference-engine/src/gna_plugin/CMakeLists.txt index 36b9d6d5cc0b8e..f90cfce5c8a229 100644 --- a/inference-engine/src/gna_plugin/CMakeLists.txt +++ b/inference-engine/src/gna_plugin/CMakeLists.txt @@ -81,5 +81,5 @@ set_target_properties(${TARGET_NAME} ${TARGET_NAME}_test_static file(GLOB_RECURSE source_list "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") install(FILES ${source_list} - DESTINATION ${IE_CPACK_IE_DIR}/external/gna/lib + DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT gna) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 0b35dd96fe2703..1162adf49c6fce 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -248,25 +248,26 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND TBBROOT MATCHES ${TEMP}) list(APPEND core_components tbb) install(DIRECTORY "${TBB}/include" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) install(DIRECTORY "${TBB}/lib" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) + # Windows only if(EXISTS "${TBB}/bin") install(DIRECTORY "${TBB}/bin" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) endif() install(FILES "${TBB}/LICENSE" - DESTINATION ${IE_CPACK_IE_DIR}/external/tbb + DESTINATION runtime/3rdparty/tbb COMPONENT tbb) - set(IE_TBB_DIR_INSTALL "external/tbb/cmake") + set(IE_TBB_DIR_INSTALL "3rdparty/tbb/cmake") install(FILES "${TBB}/cmake/TBBConfig.cmake" "${TBB}/cmake/TBBConfigVersion.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/${IE_TBB_DIR_INSTALL} + DESTINATION runtime/${IE_TBB_DIR_INSTALL} COMPONENT tbb) endif() @@ -275,7 +276,7 @@ endif() ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) ie_cpack_add_component(core_dev REQUIRED core ngraph_dev) -install(DIRECTORY "${PUBLIC_HEADERS_DIR}" DESTINATION ${IE_CPACK_IE_DIR} +install(DIRECTORY "${PUBLIC_HEADERS_DIR}/" DESTINATION runtime/include COMPONENT core_dev) set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME runtime) @@ -283,9 +284,9 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core - INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include + INCLUDES DESTINATION runtime/include # TODO: remove later once samples are updated - ${IE_CPACK_IE_DIR}/include/ie) + runtime/include/ie) install(FILES $/plugins.xml DESTINATION ${IE_CPACK_RUNTIME_PATH} @@ -305,7 +306,7 @@ endif() install(EXPORT OpenVINOTargets FILE OpenVINOTargets.cmake NAMESPACE openvino:: - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) set(IE_NGRAPH_DIR 
"${CMAKE_BINARY_DIR}/ngraph") @@ -323,13 +324,13 @@ configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOCo PATH_VARS ${PATH_VARS}) set(IE_INCLUDE_DIR "include/ie") -set(IE_NGRAPH_DIR "../ngraph/cmake") +set(IE_NGRAPH_DIR ".") set(IE_TBB_DIR "${IE_TBB_DIR_INSTALL}") -set(IE_PARALLEL_CMAKE "share/ie_parallel.cmake") +set(IE_PARALLEL_CMAKE "cmake/ie_parallel.cmake") configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig.cmake.in" "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" - INSTALL_DESTINATION share + INSTALL_DESTINATION cmake PATH_VARS ${PATH_VARS}) configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig.cmake.in" @@ -345,10 +346,10 @@ configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cm install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" "${InferenceEngine_SOURCE_DIR}/cmake/ie_parallel.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) install(FILES "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" - DESTINATION ${IE_CPACK_IE_DIR}/share + DESTINATION runtime/cmake COMPONENT core_dev) diff --git a/inference-engine/src/vpu/CMakeLists.txt b/inference-engine/src/vpu/CMakeLists.txt index 3a11a33509736c..b50739c72615f1 100644 --- a/inference-engine/src/vpu/CMakeLists.txt +++ b/inference-engine/src/vpu/CMakeLists.txt @@ -24,8 +24,9 @@ if(ENABLE_MYRIAD) DESTINATION ${IE_CPACK_LIBRARY_PATH}/vpu_custom_kernels COMPONENT myriad) install(DIRECTORY ${VPU_CLC_MA2X8X_ROOT}/ - DESTINATION deployment_tools/tools/cl_compiler + DESTINATION tools/cl_compiler COMPONENT myriad + USE_SOURCE_PERMISSIONS PATTERN ie_dependency.info EXCLUDE) endif() endif() diff --git a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt index 66e15697baaa07..c3c1d7f2a5675a 100644 --- a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt +++ b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt @@ -55,6 +55,6 @@ set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_REL # install if (LINUX) install(FILES ${mvnc_SOURCE_DIR}/src/97-myriad-usbboot.rules - DESTINATION ${IE_CPACK_IE_DIR}/external + DESTINATION runtime/3rdparty COMPONENT myriad) endif() diff --git a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt index 9d865973a0442d..02189c28ffc01c 100644 --- a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt @@ -116,3 +116,8 @@ endif() ie_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(${__CLDNN_AllSources} PROPERTIES COMPILE_FLAGS "${sse4_2_flags}") + +install(TARGETS "${CLDNN_BUILD__PROJ}" + RUNTIME DESTINATION tests + COMPONENT tests + EXCLUDE_FROM_ALL) diff --git a/model-optimizer/CMakeLists.txt b/model-optimizer/CMakeLists.txt index 1cb74d6d67a7ad..220388cc87186e 100644 --- a/model-optimizer/CMakeLists.txt +++ b/model-optimizer/CMakeLists.txt @@ -23,7 +23,7 @@ configure_file( @ONLY) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ - DESTINATION deployment_tools/model_optimizer + DESTINATION tools/model_optimizer USE_SOURCE_PERMISSIONS COMPONENT model_optimizer PATTERN ".*" EXCLUDE @@ -31,13 +31,13 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ PATTERN "requirements_dev.txt" EXCLUDE PATTERN "README.md" 
EXCLUDE PATTERN "CMakeLists.txt" EXCLUDE - + PATTERN "extensions/front/caffe/CustomLayersMapping.xml" EXCLUDE PATTERN "mo/utils/convert.py" EXCLUDE PATTERN "unit_tests" EXCLUDE PATTERN "openvino_mo.egg-info" EXCLUDE PATTERN "build" EXCLUDE - + REGEX ".*__pycache__.*" EXCLUDE REGEX ".*\\.pyc$" EXCLUDE REGEX ".*\\.swp" EXCLUDE @@ -45,12 +45,17 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ REGEX ".*_test\.py$" EXCLUDE ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/unit_tests - DESTINATION deployment_tools/model_optimizer +install(FILES requirements_dev.txt + DESTINATION tools/model_optimizer + COMPONENT tests + EXCLUDE_FROM_ALL) + +install(DIRECTORY unit_tests + DESTINATION tools/model_optimizer COMPONENT tests EXCLUDE_FROM_ALL) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/automation - DESTINATION deployment_tools/model_optimizer +install(DIRECTORY automation + DESTINATION tools/model_optimizer COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/model-optimizer/README.md b/model-optimizer/README.md index bedd7dea26b8bf..7731565ad01e61 100644 --- a/model-optimizer/README.md +++ b/model-optimizer/README.md @@ -10,7 +10,7 @@ Model Optimizer requires: 1. Go to the Model Optimizer folder:
-    cd PATH_TO_INSTALL_DIR/deployment_tools/model_optimizer
+    cd PATH_TO_INSTALL_DIR/tools/model_optimizer
 
2. Create virtual environment and activate it. This option is strongly recommended as it creates a Python sandbox and diff --git a/model-optimizer/extensions/analysis/tf_retinanet.py b/model-optimizer/extensions/analysis/tf_retinanet.py index 35fdcf9d49c04a..6d78c81f5905bd 100644 --- a/model-optimizer/extensions/analysis/tf_retinanet.py +++ b/model-optimizer/extensions/analysis/tf_retinanet.py @@ -59,7 +59,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /.pb\n" \ "\t--input_shape [1,600,600,3]\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/retinanet.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/retinanet.json\n" \ "\t--reverse_input_channels" return {'model_type': {'TF_RetinaNet': result}}, message diff --git a/model-optimizer/extensions/analysis/tf_yolo.py b/model-optimizer/extensions/analysis/tf_yolo.py index 626187b25f508b..f409cf283135b0 100644 --- a/model-optimizer/extensions/analysis/tf_yolo.py +++ b/model-optimizer/extensions/analysis/tf_yolo.py @@ -72,7 +72,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide TensorFlow YOLOv1 or YOLOv2 Model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /.pb\n" \ "\t--batch 1\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/.json\n" \ "All detailed information about conversion of this model can be found at\n" \ "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message @@ -99,7 +99,7 @@ def analyze(self, graph: Graph): "To generate the IR, provide TensorFlow YOLOv3 Model to the Model Optimizer with the following parameters:\n" \ "\t--input_model /yolo_v3.pb\n" \ "\t--batch 1\n" \ - "\t--tensorflow_use_custom_operations_config /deployment_tools/model_optimizer/extensions/front/tf/yolo_v3.json\n" \ + "\t--tensorflow_use_custom_operations_config /tools/model_optimizer/extensions/front/tf/yolo_v3.json\n" \ "Detailed information about conversion of this model can be found at\n" \ "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message diff --git a/model-optimizer/mo/utils/find_ie_version.py b/model-optimizer/mo/utils/find_ie_version.py index 59fada431fe58a..af0df60dc20d3d 100644 --- a/model-optimizer/mo/utils/find_ie_version.py +++ b/model-optimizer/mo/utils/find_ie_version.py @@ -90,9 +90,8 @@ def find_ie_version(silent=False): { "module": os.path.join(script_path, '../../../../python/', python_version), "libs": [ - os.path.join(script_path, '../../../inference_engine/bin/intel64/Release'), - os.path.join(script_path, '../../../inference_engine/external/tbb/bin'), - os.path.join(script_path, '../../../ngraph/lib'), + os.path.join(script_path, '../../../../runtime/bin/intel64/Release'), + os.path.join(script_path, '../../../../runtime/3rdparty/tbb/bin'), ], }, # Local builds @@ -120,9 +119,8 @@ def find_ie_version(silent=False): { "module": os.path.join(script_path, '../../../../python/', python_version), "libs": [ - os.path.join(script_path, 
'../../../inference_engine/lib/intel64'), - os.path.join(script_path, '../../../inference_engine/external/tbb/lib'), - os.path.join(script_path, '../../../ngraph/lib'), + os.path.join(script_path, '../../../../runtime/lib/intel64'), + os.path.join(script_path, '../../../../runtime/3rdparty/tbb/lib'), ], }, # Local builds diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt index 0017302b851b7c..18a1550e758963 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt @@ -20,8 +20,6 @@ target_link_libraries(${TARGET_FE_NAME} PUBLIC ngraph PRIVATE ngraph::builder) add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) -set(NGRAPH_INSTALL_LIB "deployment_tools/ngraph/lib") - install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/ngraph/CMakeLists.txt b/ngraph/CMakeLists.txt index a5573427a89e03..163c395842735b 100644 --- a/ngraph/CMakeLists.txt +++ b/ngraph/CMakeLists.txt @@ -16,8 +16,6 @@ project (ngraph) # Installation logic... #----------------------------------------------------------------------------------------------- -set(NGRAPH_INSTALL_LIB "deployment_tools/ngraph/lib") -set(NGRAPH_INSTALL_INCLUDE "deployment_tools/ngraph/include") set(NGRAPH_TARGETS_FILE "${CMAKE_CURRENT_BINARY_DIR}/ngraphTargets.cmake") add_definitions(-DPROJECT_ROOT_DIR="${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index d570edd1249e35..726bc4c7402595 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -93,13 +93,13 @@ export(TARGETS ngraph NAMESPACE openvino:: #----------------------------------------------------------------------------------------------- install(TARGETS ngraph EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph + INCLUDES DESTINATION runtime/include) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ - DESTINATION ${NGRAPH_INSTALL_INCLUDE} + DESTINATION "runtime/include" COMPONENT ngraph_dev FILES_MATCHING PATTERN "*.hpp" @@ -107,7 +107,7 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ PATTERN "*version.in.hpp" EXCLUDE) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/ngraph/version.hpp - DESTINATION ${NGRAPH_INSTALL_INCLUDE}/ngraph + DESTINATION "runtime/include/ngraph" COMPONENT ngraph_dev) configure_package_config_file(${OpenVINO_SOURCE_DIR}/cmake/templates/ngraphConfig.cmake.in @@ -120,5 +120,5 @@ write_basic_package_version_file(${ngraph_BINARY_DIR}/ngraphConfigVersion.cmake install(FILES ${ngraph_BINARY_DIR}/ngraphConfig.cmake ${ngraph_BINARY_DIR}/ngraphConfigVersion.cmake - DESTINATION "deployment_tools/ngraph/cmake" + DESTINATION 
"runtime/cmake" COMPONENT ngraph_dev) diff --git a/ngraph/frontend/CMakeLists.txt b/ngraph/frontend/CMakeLists.txt index 7689778a115a4f..6ab49ec2f706b6 100644 --- a/ngraph/frontend/CMakeLists.txt +++ b/ngraph/frontend/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(FRONTEND_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend") +set(FRONTEND_INSTALL_INCLUDE "runtime/include/ngraph/frontend") add_subdirectory(frontend_manager) diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index ca37a566f29977..feee88b695b849 100644 --- a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -52,9 +52,9 @@ endif() set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::manager) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/frontend_manager DESTINATION ${FRONTEND_INSTALL_INCLUDE} diff --git a/ngraph/frontend/ir/CMakeLists.txt b/ngraph/frontend/ir/CMakeLists.txt index 5ec3a54036127c..b9c9b3d32abb48 100644 --- a/ngraph/frontend/ir/CMakeLists.txt +++ b/ngraph/frontend/ir/CMakeLists.txt @@ -47,6 +47,6 @@ add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS}) install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) diff --git a/ngraph/frontend/onnx/frontend/CMakeLists.txt b/ngraph/frontend/onnx/frontend/CMakeLists.txt index 3ba52a443cb71d..72838ee735eaf2 100644 --- a/ngraph/frontend/onnx/frontend/CMakeLists.txt +++ b/ngraph/frontend/onnx/frontend/CMakeLists.txt @@ -59,9 +59,9 @@ endif() set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::onnx) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_frontend ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_import diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 6b63e527d9659a..1f347f9c9d241d 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -84,9 +84,9 @@ add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::paddlepaddle) install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE 
DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ngraph + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ngraph + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ngraph) install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/paddlepaddle_frontend DESTINATION ${FRONTEND_INSTALL_INCLUDE} diff --git a/ngraph/test/frontend/CMakeLists.txt b/ngraph/test/frontend/CMakeLists.txt index 901d917edc7295..798118442b4357 100644 --- a/ngraph/test/frontend/CMakeLists.txt +++ b/ngraph/test/frontend/CMakeLists.txt @@ -24,5 +24,5 @@ add_dependencies(unit-test mock1_ngraph_frontend) add_clang_format_target(mock1_ngraph_frontend_clang FOR_TARGETS mock1_ngraph_frontend) install(TARGETS mock1_ngraph_frontend - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/ngraph/test/runtime/CMakeLists.txt b/ngraph/test/runtime/CMakeLists.txt index d17a37a8b7080a..8bd949c46d0cb7 100644 --- a/ngraph/test/runtime/CMakeLists.txt +++ b/ngraph/test/runtime/CMakeLists.txt @@ -47,8 +47,8 @@ endif() target_include_directories(ngraph_backend PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) install(TARGETS ngraph_backend - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) add_subdirectory(interpreter) add_subdirectory(ie) diff --git a/ngraph/test/runtime/ie/CMakeLists.txt b/ngraph/test/runtime/ie/CMakeLists.txt index 938b2d32ce5f6b..d89b723ee24981 100644 --- a/ngraph/test/runtime/ie/CMakeLists.txt +++ b/ngraph/test/runtime/ie/CMakeLists.txt @@ -29,5 +29,5 @@ add_dependencies(ie_backend inference_engine) target_link_libraries(ie_backend PUBLIC ngraph_backend inference_engine) install(TARGETS ie_backend - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/ngraph/test/runtime/interpreter/CMakeLists.txt b/ngraph/test/runtime/interpreter/CMakeLists.txt index 58ca96a1c6b54e..9977342702b01a 100644 --- a/ngraph/test/runtime/interpreter/CMakeLists.txt +++ b/ngraph/test/runtime/interpreter/CMakeLists.txt @@ -24,5 +24,5 @@ target_compile_definitions(interpreter_backend PRIVATE INTERPRETER_BACKEND_EXPOR target_link_libraries(interpreter_backend PUBLIC ngraph_backend) install(TARGETS interpreter_backend - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/runtime/bindings/python/BUILDING.md b/runtime/bindings/python/BUILDING.md index 57a923d5cc66c7..4be330e95783a0 100644 --- a/runtime/bindings/python/BUILDING.md +++ b/runtime/bindings/python/BUILDING.md @@ -57,7 
+57,7 @@ set the mentioned flags to `ON`. Note the `CMAKE_INSTALL_PREFIX`, which defaults The Python module is installed in the `${OPENVINO_BASEDIR}/openvino_dist/python/python/` folder. Set up the OpenVINO™ environment in order to add the module path to `PYTHONPATH`: - source ${OPENVINO_BASEDIR}/openvino_dist/bin/setupvars.sh + source ${OPENVINO_BASEDIR}/openvino_dist/setupvars.sh If you would like to use a specific version of Python, or use a virtual environment, you can set the `PYTHON_EXECUTABLE` variable. For example: @@ -136,7 +136,7 @@ adjust the number of threads used in the building process to your machine's capa Set up the OpenVINO™ environment in order to add a module path to `PYTHONPATH`: - %OPENVINO_BASEDIR%\openvino_dist\bin\setupvars.bat + %OPENVINO_BASEDIR%\openvino_dist\setupvars.bat ### Build an nGraph Python Wheel on Windows @@ -173,7 +173,7 @@ You should now be able to run tests. You may need to run the `setupvars` script from the OpenVINO™ Toolkit to set paths to OpenVINO™ components. - source ${OPENVINO_BASEDIR}/openvino_dist/bin/setupvars.sh + source ${OPENVINO_BASEDIR}/openvino_dist/setupvars.sh Now you can run tests using `pytest`: diff --git a/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt index fac5294a572fdc..62fb1879de5c87 100644 --- a/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt +++ b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt @@ -20,5 +20,5 @@ target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager::static) add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT tests EXCLUDE_FROM_ALL) + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 4a56a98229a8d3..c8449e26ae24ce 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -30,11 +30,11 @@ ie_cpack_add_component(setupvars REQUIRED) if(UNIX) install(PROGRAMS setupvars/setupvars.sh - DESTINATION bin + DESTINATION . COMPONENT setupvars) elseif(WIN32) install(PROGRAMS setupvars/setupvars.bat - DESTINATION bin + DESTINATION . 
COMPONENT setupvars) endif() @@ -53,14 +53,16 @@ ie_cpack_add_component(demo_scripts DEPENDS core) if(UNIX) install(DIRECTORY demo/ - DESTINATION deployment_tools/demo + DESTINATION samples/scripts COMPONENT demo_scripts USE_SOURCE_PERMISSIONS + PATTERN demo_security_barrier_camera.* EXCLUDE PATTERN *.bat EXCLUDE) elseif(WIN32) install(DIRECTORY demo/ - DESTINATION deployment_tools/demo + DESTINATION samples/scripts COMPONENT demo_scripts USE_SOURCE_PERMISSIONS + PATTERN demo_security_barrier_camera.* EXCLUDE PATTERN *.sh EXCLUDE) endif() diff --git a/scripts/demo/README.txt b/scripts/demo/README.txt index b5632723a2b733..a013aad62cce48 100644 --- a/scripts/demo/README.txt +++ b/scripts/demo/README.txt @@ -36,10 +36,10 @@ Classification Sample Using SqueezeNet The sample illustrates the general workflow of using the Intel(R) Deep Learning Deployment Toolkit and performs the following: - - Downloads a public SqueezeNet model using the Model Downloader (open_model_zoo\tools\downloader\downloader.py) - - Installs all prerequisites required for running the Model Optimizer using the scripts from the "model_optimizer\install_prerequisites" folder - - Converts SqueezeNet to an IR using the Model Optimizer (model_optimizer\mo.py) via the Model Converter (open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine classification_sample (inference_engine\samples\classification_sample) + - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) + - Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder + - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) + - Builds the Inference Engine classification_sample (samples\cpp\classification_sample) - Runs the sample with the car.png picture located in the demo folder The sample application prints top-10 inference results for the picture. @@ -53,10 +53,10 @@ The sample illustrates how to use the Benchmark Application to estimate deep lea The sample script does the following: - - Downloads a public SqueezeNet model using the Model Downloader (open_model_zoo\tools\downloader\downloader.py) - - Installs all prerequisites required for running the Model Optimizer using the scripts from the "model_optimizer\install_prerequisites" folder - - Converts SqueezeNet to an IR using the Model Optimizer (model_optimizer\mo.py) via the Model Converter (open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine benchmark tool (inference_engine\samples\benchmark_app) + - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) + - Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder + - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) + - Builds the Inference Engine benchmark tool (samples\benchmark_app) - Runs the tool with the car.png picture located in the demo folder The benchmark app prints performance counters, resulting latency, and throughput values. 
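For readers following the relocated demo scripts, here is a rough shell equivalent of the steps listed above under the new layout. The model name, output directories, and the benchmark_app build location are illustrative assumptions; the relocated tool paths themselves come from this patch:

```sh
# Rough manual equivalent of the SqueezeNet benchmark demo with the new paths.
source /opt/intel/openvino_2022/setupvars.sh   # assumed install prefix

# 1. Download a public SqueezeNet model with the relocated Model Downloader.
cd "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader"
python3 downloader.py --name squeezenet1.1 -o "$HOME/openvino_models"

# 2. Convert it to IR via the Model Converter, pointing at the relocated mo.py.
python3 converter.py --mo "$INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py" \
    --name squeezenet1.1 -d "$HOME/openvino_models" -o "$HOME/openvino_irs" \
    --precisions FP16

# 3. Run a previously built benchmark_app (assumed build directory) on the
#    demo image from its new samples/scripts location.
"$HOME/inference_engine_cpp_samples_build/x86_64/Release/benchmark_app" \
    -m "$HOME/openvino_irs/public/squeezenet1.1/FP16/squeezenet1.1.xml" \
    -i "$INTEL_OPENVINO_DIR/samples/scripts/car.png" -d CPU -api async
```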
diff --git a/scripts/demo/run_sample_benchmark_app.bat b/scripts/demo/run_sample_benchmark_app.bat index cfa22e452d2f00..2476a75fef260a 100644 --- a/scripts/demo/run_sample_benchmark_app.bat +++ b/scripts/demo/run_sample_benchmark_app.bat @@ -52,8 +52,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%..\..\bin\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error @@ -108,7 +108,7 @@ python -m pip install -r "%ROOT_DIR%..\open_model_zoo\tools\downloader\requireme if ERRORLEVEL 1 GOTO errorHandling -set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader +set downloader_dir=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader for /F "tokens=* usebackq" %%d in ( `python "%downloader_dir%\info_dumper.py" --name "%model_name%" ^| @@ -138,7 +138,7 @@ echo. echo ###############^|^| Install Model Optimizer prerequisites ^|^|############### echo. CALL :delay 3 -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer" +cd /d "%INTEL_OPENVINO_DIR%\tools\model_optimizer" python -m pip install -r requirements.txt if ERRORLEVEL 1 GOTO errorHandling @@ -149,8 +149,8 @@ echo. CALL :delay 3 ::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp -echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" -python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 @@ -230,7 +230,7 @@ set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... && ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" 
-A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/run_sample_benchmark_app.sh b/scripts/demo/run_sample_benchmark_app.sh index fd30df6e832e40..3b2d90a371ad6d 100644 --- a/scripts/demo/run_sample_benchmark_app.sh +++ b/scripts/demo/run_sample_benchmark_app.sh @@ -69,8 +69,8 @@ target_image_path="$ROOT_DIR/car.png" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi @@ -120,12 +120,12 @@ fi . "$VENV_DIR/bin/activate" python -m pip install -U pip -python -m pip install -r "$ROOT_DIR/../open_model_zoo/tools/downloader/requirements.in" +python -m pip install -r "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/requirements.in" # Step 1. Download the Caffe model and the prototxt of the model echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n" -downloader_dir="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/downloader" +downloader_dir="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/tools/downloader" model_dir=$(python "$downloader_dir/info_dumper.py" --name "$model_name" | python -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])') @@ -139,14 +139,14 @@ ir_dir="${irs_path}/${model_dir}/${target_precision}" if [ ! -e "$ir_dir" ]; then # Step 2. Configure Model Optimizer echo -ne "\n###############|| Install Model Optimizer dependencies ||###############\n\n" - cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer" + cd "${INTEL_OPENVINO_DIR}/tools/model_optimizer" python -m pip install -r requirements.txt cd "$PWD" # Step 3. 
Convert a model with Model Optimizer echo -ne "\n###############|| Convert a model with Model Optimizer ||###############\n\n" - mo_path="${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py" + mo_path="${INTEL_OPENVINO_DIR}/tools/model_optimizer/mo.py" export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp print_and_run python "$downloader_dir/converter.py" --mo "$mo_path" --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision" @@ -166,7 +166,7 @@ if [ "$OS_PATH" == "x86_64" ]; then NUM_THREADS="-j8" fi -samples_path="${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/samples/cpp" +samples_path="${INTEL_OPENVINO_DIR}/samples/cpp" build_dir="$HOME/inference_engine_cpp_samples_build" binaries_dir="${build_dir}/${OS_PATH}/Release" diff --git a/scripts/demo/run_sample_squeezenet.bat b/scripts/demo/run_sample_squeezenet.bat index 79ba38ea3b2156..e07707e22e34d4 100644 --- a/scripts/demo/run_sample_squeezenet.bat +++ b/scripts/demo/run_sample_squeezenet.bat @@ -48,8 +48,8 @@ set model_name=squeezenet1.1 set target_image_path=%ROOT_DIR%car.png -if exist "%ROOT_DIR%..\..\bin\setupvars.bat" ( - call "%ROOT_DIR%..\..\bin\setupvars.bat" +if exist "%ROOT_DIR%..\..\setupvars.bat" ( + call "%ROOT_DIR%..\..\setupvars.bat" ) else ( echo setupvars.bat is not found, INTEL_OPENVINO_DIR can't be set goto error @@ -104,7 +104,7 @@ python -m pip install -r "%ROOT_DIR%..\open_model_zoo\tools\downloader\requireme if ERRORLEVEL 1 GOTO errorHandling -set downloader_dir=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader +set downloader_dir=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader for /F "tokens=* usebackq" %%d in ( `python "%downloader_dir%\info_dumper.py" --name "%model_name%" ^| @@ -134,7 +134,7 @@ echo. echo ###############^|^| Install Model Optimizer prerequisites ^|^|############### echo. CALL :delay 3 -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer" +cd /d "%INTEL_OPENVINO_DIR%\tools\model_optimizer" python -m pip install -r requirements.txt if ERRORLEVEL 1 GOTO errorHandling @@ -145,8 +145,8 @@ echo. CALL :delay 3 ::set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp -echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" -python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +echo python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" +python "%downloader_dir%\converter.py" --mo "%INTEL_OPENVINO_DIR%\tools\model_optimizer\mo.py" --name "%model_name%" -d "%models_path%" -o "%irs_path%" --precisions "%TARGET_PRECISION%" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 @@ -226,7 +226,7 @@ set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... && ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" 
-A %PLATFORM% "%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\samples\cpp" +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 @@ -250,7 +250,7 @@ CALL :delay 3 copy /Y "%ROOT_DIR%%model_name%.labels" "%ir_dir%" cd /d "%SOLUTION_DIR64%\intel64\Release" if not exist classification_sample_async.exe ( - cd /d "%INTEL_OPENVINO_DIR%\inference_engine\samples\cpp\intel64\Release" + cd /d "%INTEL_OPENVINO_DIR%\samples\cpp\intel64\Release" ) echo classification_sample_async.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -d !TARGET! !SAMPLE_OPTIONS! classification_sample_async.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -d !TARGET! !SAMPLE_OPTIONS! diff --git a/scripts/demo/run_sample_squeezenet.sh b/scripts/demo/run_sample_squeezenet.sh index ae0cc0398d54e2..fc75592f3415a3 100644 --- a/scripts/demo/run_sample_squeezenet.sh +++ b/scripts/demo/run_sample_squeezenet.sh @@ -65,8 +65,8 @@ target_image_path="$ROOT_DIR/car.png" run_again="Then run the script again\n\n" -if [ -e "$ROOT_DIR/../../bin/setupvars.sh" ]; then - setupvars_path="$ROOT_DIR/../../bin/setupvars.sh" +if [ -e "$ROOT_DIR/../../setupvars.sh" ]; then + setupvars_path="$ROOT_DIR/../../setupvars.sh" else echo -ne "Error: setupvars.sh is not found\n" fi @@ -116,12 +116,12 @@ fi . "$VENV_DIR/bin/activate" python -m pip install -U pip -python -m pip install -r "$ROOT_DIR/../open_model_zoo/tools/downloader/requirements.in" +python -m pip install -r "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/requirements.in" # Step 1. Download the Caffe model and the prototxt of the model echo -ne "\n###############|| Downloading the Caffe model and the prototxt ||###############\n\n" -downloader_dir="${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/downloader" +downloader_dir="${INTEL_OPENVINO_DIR}/extras/open_model_zoo/tools/downloader" model_dir=$(python "$downloader_dir/info_dumper.py" --name "$model_name" | python -c 'import sys, json; print(json.load(sys.stdin)[0]["subdirectory"])') @@ -135,14 +135,14 @@ ir_dir="${irs_path}/${model_dir}/${target_precision}" if [ ! -e "$ir_dir" ]; then # Step 2. Configure Model Optimizer echo -ne "\n###############|| Install Model Optimizer dependencies ||###############\n\n" - cd "${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer" + cd "${INTEL_OPENVINO_DIR}/tools/model_optimizer" python -m pip install -r requirements.txt cd "$PWD" # Step 3. 
Convert a model with Model Optimizer echo -ne "\n###############|| Convert a model with Model Optimizer ||###############\n\n" - mo_path="${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py" + mo_path="${INTEL_OPENVINO_DIR}/tools/model_optimizer/mo.py" export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp print_and_run python "$downloader_dir/converter.py" --mo "$mo_path" --name "$model_name" -d "$models_path" -o "$irs_path" --precisions "$target_precision" @@ -162,7 +162,7 @@ if [ "$OS_PATH" == "x86_64" ]; then NUM_THREADS="-j8" fi -samples_path="${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/samples/cpp" +samples_path="${INTEL_OPENVINO_DIR}/samples/cpp" build_dir="$HOME/inference_engine_cpp_samples_build" binaries_dir="${build_dir}/${OS_PATH}/Release" diff --git a/scripts/install_dependencies/install_NCS_udev_rules.sh b/scripts/install_dependencies/install_NCS_udev_rules.sh index 7062e9d753db92..b05342d6cf4b6d 100755 --- a/scripts/install_dependencies/install_NCS_udev_rules.sh +++ b/scripts/install_dependencies/install_NCS_udev_rules.sh @@ -6,14 +6,14 @@ echo "Updating udev rules..." if [ -z "$INTEL_OPENVINO_DIR" ]; then - echo "Please set up your environment. Run 'source /bin/setupvars.sh'." + echo "Please set up your environment. Run 'source /setupvars.sh'." exit -1 fi -if [ -f "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/97-myriad-usbboot.rules" ]; then +if [ -f "$INTEL_OPENVINO_DIR/runtime/3rdparty/97-myriad-usbboot.rules" ]; then sudo usermod -a -G users "$(whoami)" - sudo cp "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/97-myriad-usbboot.rules" /etc/udev/rules.d/ + sudo cp "$INTEL_OPENVINO_DIR/runtime/3rdparty/97-myriad-usbboot.rules" /etc/udev/rules.d/ sudo udevadm control --reload-rules sudo udevadm trigger sudo ldconfig diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index e0579fde533371..364c651be48816 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -4,7 +4,6 @@ :: SPDX-License-Identifier: Apache-2.0 set ROOT=%~dp0 -call :GetFullPath "%ROOT%\.." 
ROOT set SCRIPT_NAME=%~nx0 set "INTEL_OPENVINO_DIR=%ROOT%" @@ -32,49 +31,35 @@ set "PATH=%INTEL_OPENVINO_DIR%\opencv\x64\vc14\bin;%PATH%" ) :: Model Optimizer -if exist %INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer ( -set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer;%PYTHONPATH% -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer;%PATH%" +if exist %INTEL_OPENVINO_DIR%\tools\model_optimizer ( +set PYTHONPATH=%INTEL_OPENVINO_DIR%\tools\model_optimizer;%PYTHONPATH% +set "PATH=%INTEL_OPENVINO_DIR%\tools\model_optimizer;%PATH%" ) :: Model Downloader -if exist %INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader ( -set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader;%PYTHONPATH% -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader;%PATH%" +if exist %INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader ( +set PYTHONPATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PYTHONPATH% +set "PATH=%INTEL_OPENVINO_DIR%\extras\open_model_zoo\tools\downloader;%PATH%" ) -:: Inference Engine -set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share" -set "OpenVINO_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share" -set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl" -set "OPENMP_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\omp\lib" -set "GNA_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\gna\lib" - -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENMP_DIR%;%GNA_DIR%;%OPENVINO_LIB_PATHS%" -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( -set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions -) -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( -set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions -) +:: OpenVINO runtime +set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" +set "ngraph_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" +set "OpenVINO_DIR=%INTEL_OPENVINO_DIR%\runtime\cmake" +set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\hddl" +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENVINO_LIB_PATHS%" +set "OV_FRONTEND_PATH=%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Release;%INTEL_OPENVINO_DIR%\runtime\bin\intel64\Debug;%OV_FRONTEND_PATH%" :: TBB -if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb ( -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin;%OPENVINO_LIB_PATHS%" -set "TBB_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\cmake" -) - -:: nGraph -if exist %INTEL_OPENVINO_DIR%\deployment_tools\ngraph ( -set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib;%OPENVINO_LIB_PATHS%" -set "ngraph_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\cmake" -set "OV_FRONTEND_PATH=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib;%OV_FRONTEND_PATH%" +if exist %INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb ( +set "OPENVINO_LIB_PATHS=%INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb\bin;%OPENVINO_LIB_PATHS%" +set 
"TBB_DIR=%INTEL_OPENVINO_DIR%\runtime\3rdparty\tbb\cmake" ) :: Compile tool -if exist %INTEL_OPENVINO_DIR%\deployment_tools\tools\compile_tool ( -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\tools\compile_tool;%PATH%" +if exist %INTEL_OPENVINO_DIR%\tools\compile_tool ( +set "PATH=%INTEL_OPENVINO_DIR%\tools\compile_tool;%PATH%" ) :: Add libs dirs to the PATH @@ -129,12 +114,12 @@ if not "%bitness%"=="64" ( set PYTHONPATH=%INTEL_OPENVINO_DIR%\python\python%pyversion_major%.%pyversion_minor%;%INTEL_OPENVINO_DIR%\python\python3;%PYTHONPATH% -if exist %INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker ( - set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker;%PYTHONPATH% +if exist %INTEL_OPENVINO_DIR%\tools\accuracy_checker ( + set PYTHONPATH=%INTEL_OPENVINO_DIR%\tools\accuracy_checker;%PYTHONPATH% ) -if exist %INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit ( - set PYTHONPATH=%INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit;%PYTHONPATH% +if exist %INTEL_OPENVINO_DIR%\post_training_optimization_toolkit ( + set PYTHONPATH=%INTEL_OPENVINO_DIR%\post_training_optimization_toolkit;%PYTHONPATH% ) echo [setupvars.bat] OpenVINO environment initialized diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index e44d8bbecc0ead..fc6251211e6378 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -4,13 +4,8 @@ # SPDX-License-Identifier: Apache-2.0 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" >/dev/null 2>&1 && pwd )" -BASE_DIR="$( dirname "$SCRIPT_DIR" )" - -INSTALLDIR="${BASE_DIR}" - - +INSTALLDIR="${SCRIPT_DIR}" export INTEL_OPENVINO_DIR="$INSTALLDIR" -export INTEL_CVSDK_DIR="$INTEL_OPENVINO_DIR" # parse command line options while [[ $# -gt 0 ]] @@ -29,47 +24,40 @@ esac shift done -if [ -e "$INSTALLDIR/deployment_tools/inference_engine" ]; then - export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share - export OpenVINO_DIR=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share - system_type=$(ls "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/") - IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/$system_type +if [ -e "$INSTALLDIR/runtime" ]; then + export InferenceEngine_DIR=$INSTALLDIR/runtime/cmake + export ngraph_DIR=$INSTALLDIR/runtime/cmake + export OpenVINO_DIR=$INSTALLDIR/runtime/cmake - if [[ -e ${IE_PLUGINS_PATH}/arch_descriptions ]]; then - export ARCH_ROOT_DIR=${IE_PLUGINS_PATH}/arch_descriptions - fi + system_type=$(ls "$INSTALLDIR/runtime/lib/") + IE_PLUGINS_PATH=$INSTALLDIR/runtime/lib/$system_type + export OV_FRONTEND_PATH=${IE_PLUGINS_PATH}${OV_FRONTEND_PATH:+:$OV_FRONTEND_PATH} - export HDDL_INSTALL_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl + export HDDL_INSTALL_DIR=$INSTALLDIR/runtime/3rdparty/hddl if [[ "$OSTYPE" == "darwin"* ]]; then - export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export DYLD_LIBRARY_PATH=${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} + export 
LD_LIBRARY_PATH=${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} else - export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/gna/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_lnx/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi - HDDL_UNITE_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl_unite + HDDL_UNITE_DIR=$INSTALLDIR/runtime/3rdparty/hddl_unite if [ -e "$HDDL_UNITE_DIR" ]; then export LD_LIBRARY_PATH=$HDDL_UNITE_DIR/lib:$HDDL_UNITE_DIR/thirdparty/XLink/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi fi -if [ -e "$INSTALLDIR/deployment_tools/inference_engine/external/tbb" ]; then +if [ -e "$INSTALLDIR/runtime/3rdparty/tbb" ]; then if [[ "$OSTYPE" == "darwin"* ]]; then - export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} + export DYLD_LIBRARY_PATH=$INSTALLDIR/runtime/3rdparty/tbb/lib:${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} fi - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} - export TBB_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/cmake -fi - -if [ -e "$INSTALLDIR/deployment_tools/tools/compile_tool" ]; then - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/tools/compile_tool${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export LD_LIBRARY_PATH=$INSTALLDIR/runtime/3rdparty/tbb/lib:${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export TBB_DIR=$INSTALLDIR/runtime/3rdparty/tbb/cmake fi -if [ -e "$INSTALLDIR/deployment_tools/ngraph" ]; then - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/ngraph/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} - export ngraph_DIR=$INSTALLDIR/deployment_tools/ngraph/cmake - export OV_FRONTEND_PATH=$INSTALLDIR/deployment_tools/ngraph/lib${OV_FRONTEND_PATH:+:$OV_FRONTEND_PATH} +if [ -e "$INSTALLDIR/tools/compile_tool" ]; then + export LD_LIBRARY_PATH=$INSTALLDIR/tools/compile_tool${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi if [ -e "$INSTALLDIR/opencv" ]; then @@ -83,26 +71,26 @@ if [ -e "$INSTALLDIR/opencv" ]; then fi -if [ -f "$INTEL_OPENVINO_DIR/data_processing/dl_streamer/bin/setupvars.sh" ]; then - source "$INTEL_OPENVINO_DIR/data_processing/dl_streamer/bin/setupvars.sh" +if [ -f "$INTEL_OPENVINO_DIR/extras/dl_streamer/setupvars.sh" ]; then + source "$INTEL_OPENVINO_DIR/extras/dl_streamer/setupvars.sh" fi -export PATH="$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer${PATH:+:$PATH}" -export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer${PYTHONPATH:+:$PYTHONPATH}" +export PATH="$INTEL_OPENVINO_DIR/tools/model_optimizer${PATH:+:$PATH}" +export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/model_optimizer${PYTHONPATH:+:$PYTHONPATH}" -if [ -e "$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/downloader" ]; then - export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/downloader:$PYTHONPATH" - export PATH="$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/downloader:$PATH" +if [ -e "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader" ]; then + export PYTHONPATH="$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader:$PYTHONPATH" + export PATH="$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader:$PATH" fi -if [ -e 
"$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/accuracy_checker" ]; then - export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/tools/accuracy_checker:$PYTHONPATH" +if [ -e "$INTEL_OPENVINO_DIR/tools/accuracy_checker" ]; then + export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/accuracy_checker:$PYTHONPATH" fi -if [ -e "$INTEL_OPENVINO_DIR/deployment_tools/tools/post_training_optimization_toolkit" ]; then - export PYTHONPATH="$INTEL_OPENVINO_DIR/deployment_tools/tools/post_training_optimization_toolkit:$PYTHONPATH" +if [ -e "$INTEL_OPENVINO_DIR/tools/post_training_optimization_toolkit" ]; then + export PYTHONPATH="$INTEL_OPENVINO_DIR/tools/post_training_optimization_toolkit:$PYTHONPATH" fi if [ -z "$python_version" ]; then diff --git a/tests/utils/install_pkg.py b/tests/utils/install_pkg.py index c45d985c10a111..aca427c5db3726 100644 --- a/tests/utils/install_pkg.py +++ b/tests/utils/install_pkg.py @@ -16,10 +16,10 @@ def get_openvino_environment(install_prefix: Path): """ Get OpenVINO environment variables """ if sys.platform == "win32": - script = install_prefix / "bin" / "setupvars.bat" + script = install_prefix / "setupvars.bat" cmd = f"{script} && set" else: - script = install_prefix / "bin" / "setupvars.sh" + script = install_prefix / "setupvars.sh" # setupvars.sh is not compatible with /bin/sh. Using bash. cmd = f'bash -c ". {script} && env"' diff --git a/tests/utils/path_utils.py b/tests/utils/path_utils.py index 718849fbb43be9..8e9864059ad46c 100644 --- a/tests/utils/path_utils.py +++ b/tests/utils/path_utils.py @@ -37,14 +37,14 @@ def get_lib_path(lib_name): os_name = get_os_name() all_libs = { 'inference_engine_transformations': { - 'Windows': Path('deployment_tools/inference_engine/bin/intel64/Release/inference_engine_transformations.dll'), - 'Linux': Path('deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.so')}, + 'Windows': Path('runtime/bin/intel64/Release/inference_engine_transformations.dll'), + 'Linux': Path('runtime/lib/intel64/libinference_engine_transformations.so')}, 'MKLDNNPlugin': { - 'Windows': Path('deployment_tools/inference_engine/bin/intel64/Release/MKLDNNPlugin.dll'), - 'Linux': Path('deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so')}, + 'Windows': Path('runtime/bin/intel64/Release/MKLDNNPlugin.dll'), + 'Linux': Path('runtime/lib/intel64/libMKLDNNPlugin.so')}, 'ngraph': { - 'Windows': Path('deployment_tools/ngraph/lib/ngraph.dll'), - 'Linux': Path('deployment_tools/ngraph/lib/libngraph.so')} + 'Windows': Path('runtime/bin/intel64/Release/ngraph.dll'), + 'Linux': Path('runtime/lib/intel64/libngraph.so')} } return all_libs[lib_name][os_name] diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index ef0acd6559b804..3924fd210440b1 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -123,16 +123,16 @@ endif() ie_cpack_add_component(cpp_samples_deps) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/gflags - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp/thirdparty + DESTINATION "samples/cpp/thirdparty" COMPONENT cpp_samples_deps USE_SOURCE_PERMISSIONS) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/zlib - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp/thirdparty + DESTINATION "samples/cpp/thirdparty" COMPONENT cpp_samples_deps USE_SOURCE_PERMISSIONS) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cnpy - DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp/thirdparty + DESTINATION "samples/cpp/thirdparty" COMPONENT cpp_samples_deps USE_SOURCE_PERMISSIONS) diff --git 
a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 4d52674546e73f..f18d5f9ce1013b 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -21,8 +21,8 @@ endif() # install deployment_manager ie_cpack_add_component(deployment_manager REQUIRED) -install(DIRECTORY deployment_manager/ - DESTINATION deployment_tools/tools/deployment_manager +install(DIRECTORY deployment_manager + DESTINATION tools COMPONENT deployment_manager USE_SOURCE_PERMISSIONS) @@ -48,14 +48,18 @@ if(ENABLE_PYTHON) # install cross_check_tool tool install(DIRECTORY cross_check_tool - DESTINATION deployment_tools/tools + DESTINATION tools + USE_SOURCE_PERMISSIONS COMPONENT python_tools) # install benchmark_app tool - install(FILES benchmark_tool/benchmark_app.py - benchmark_tool/README.md + install(FILES benchmark_tool/README.md benchmark_tool/requirements.txt - DESTINATION deployment_tools/tools/benchmark_tool + DESTINATION tools/benchmark_tool + COMPONENT python_tools) + + install(PROGRAMS benchmark_tool/benchmark_app.py + DESTINATION tools/benchmark_tool COMPONENT python_tools) # install openvino/tools/benchmark as a python package
diff --git a/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md index d53e54808b0b1d..cccf1aaca0bd76 100644 --- a/tools/benchmark_tool/README.md +++ b/tools/benchmark_tool/README.md @@ -171,33 +171,33 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's] ## Examples of Running the Tool -This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/deployment_tools/demo/` directory is used. +This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `/samples/scripts/` directory is used.
> **NOTE:** Internet access is required to execute the following steps successfully. If you can access the Internet only through a proxy server, make sure that the proxy is configured in your OS environment.
1. Download the model. Go to the Model Downloader directory and run the `downloader.py` script, specifying the model name and the directory to download the model to: ```sh - cd /deployment_tools/open_model_zoo/tools/downloader + cd /extras/open_model_zoo/tools/downloader ``` ```sh python3 downloader.py --name googlenet-v1 -o ```
2. Convert the model to the Inference Engine IR format. Go to the Model Optimizer directory and run the `mo.py` script, specifying the path to the model, the model format (which must be FP32 for CPU and FPGA), and the output directory for the generated IR files: ```sh - cd /deployment_tools/model_optimizer + cd /tools/model_optimizer ``` ```sh python3 mo.py --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ```
-3. Run the tool with specifying the `/deployment_tools/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: +3. Run the tool, specifying the `/samples/scripts/car.png` file as an input image, the IR of the `googlenet-v1` model, and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices: * On CPU: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /deployment_tools/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d CPU -api async -i /samples/scripts/car.png --progress true -b 1 ``` * On GPU: ```sh - python3 benchmark_app.py -m /googlenet-v1.xml -d GPU -api async -i /deployment_tools/demo/car.png --progress true -b 1 + python3 benchmark_app.py -m /googlenet-v1.xml -d GPU -api async -i /samples/scripts/car.png --progress true -b 1 ``` The application outputs the number of executed iterations, total duration of execution, latency and throughput.
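For convenience, the three steps above can also be chained in a single shell session under the new package layout introduced by this change. This is a minimal sketch, not part of the patch; it assumes an install root of `/opt/intel/openvino` and hypothetical `~/models` and `~/ir` output directories, so adjust both to your environment:

```sh
# Set up the environment from the new top-level location of setupvars.sh
source /opt/intel/openvino/setupvars.sh

# Step 1: download googlenet-v1 with the Model Downloader (now under extras/)
python3 "$INTEL_OPENVINO_DIR/extras/open_model_zoo/tools/downloader/downloader.py" \
    --name googlenet-v1 -o ~/models

# Step 2: convert the model to IR with Model Optimizer (now under tools/)
python3 "$INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py" \
    --input_model ~/models/public/googlenet-v1/googlenet-v1.caffemodel \
    --data_type FP32 --output_dir ~/ir

# Step 3: benchmark the IR on CPU, using the sample image from samples/scripts/
python3 "$INTEL_OPENVINO_DIR/tools/benchmark_tool/benchmark_app.py" \
    -m ~/ir/googlenet-v1.xml -d CPU -api async \
    -i "$INTEL_OPENVINO_DIR/samples/scripts/car.png" --progress true -b 1
```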
diff --git a/tools/compile_tool/CMakeLists.txt b/tools/compile_tool/CMakeLists.txt index 704b05cfcdeeeb..3f9459174be836 100644 --- a/tools/compile_tool/CMakeLists.txt +++ b/tools/compile_tool/CMakeLists.txt @@ -38,9 +38,9 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) ie_cpack_add_component(core_tools DEPENDS core) install(TARGETS compile_tool - RUNTIME DESTINATION deployment_tools/tools/compile_tool + RUNTIME DESTINATION tools/compile_tool COMPONENT core_tools) install(FILES README.md - DESTINATION deployment_tools/tools/compile_tool + DESTINATION tools/compile_tool COMPONENT core_tools)
diff --git a/tools/compile_tool/README.md b/tools/compile_tool/README.md index 0cdd097a15b638..279425f7f77572 100644 --- a/tools/compile_tool/README.md +++ b/tools/compile_tool/README.md @@ -8,7 +8,7 @@ The tool compiles networks for the following target devices using corresponding The tool is delivered as an executable file that can be run on both Linux* and Windows*. -The tool is located in the `/deployment_tools/tools/compile_tool` directory. +The tool is located in the `/tools/compile_tool` directory.
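To illustrate the relocated layout, a hypothetical invocation of the Compile tool from its new `tools/compile_tool` directory might look as follows. This is only a sketch, not part of the patch: the install root, the IR path, and the `MYRIAD` target are placeholder assumptions, and the tool is called with its usual `-m`, `-d`, and `-o` options:

```sh
# Environment setup from the install root (new setupvars.sh location)
source /opt/intel/openvino/setupvars.sh

# Compile an IR into a device-specific blob with the relocated tool
cd "$INTEL_OPENVINO_DIR/tools/compile_tool"
./compile_tool -m ~/ir/googlenet-v1.xml -d MYRIAD -o ~/ir/googlenet-v1.blob
```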
The workflow of the Compile tool is as follows: diff --git a/tools/deployment_manager/configs/darwin.json b/tools/deployment_manager/configs/darwin.json index b00e38cc041e0f..452b14c0ef4c1a 100644 --- a/tools/deployment_manager/configs/darwin.json +++ b/tools/deployment_manager/configs/darwin.json @@ -4,7 +4,7 @@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.sh" ] }, "openvino_license": { @@ -16,17 +16,20 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/lib/intel64/libinference_engine.dylib", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.dylib", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.dylib", - "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", - "deployment_tools/inference_engine/lib/intel64/plugins.xml", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/lib/intel64/libinference_engine.dylib", + "runtime/lib/intel64/libinference_engine_transformations.dylib", + "runtime/lib/intel64/libinference_engine_preproc.so", + "runtime/lib/intel64/libinference_engine_ir_reader.so", + "runtime/lib/intel64/libinference_engine_c_api.dylib", + "runtime/lib/intel64/libHeteroPlugin.so", + "runtime/lib/intel64/libMultiDevicePlugin.so", + "runtime/lib/intel64/libngraph.dylib", + "runtime/lib/intel64/libfrontend_manager.dylib", + "runtime/lib/intel64/libir_ngraph_frontend.dylib", + "runtime/lib/intel64/libonnx_ngraph_frontend.dylib", + "runtime/lib/intel64/libpaddlepaddle_ngraph_frontend.dylib", + "runtime/lib/intel64/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -34,8 +37,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.dylib", - "deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so" + "runtime/lib/intel64/libinference_engine_lp_transformations.dylib", + "runtime/lib/intel64/libMKLDNNPlugin.so" ] }, "vpu": { @@ -43,10 +46,10 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libmyriadPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.dylib", - "deployment_tools/inference_engine/lib/intel64/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/pcie-ma2x8x.mvcmd" + "runtime/lib/intel64/libmyriadPlugin.so", + "runtime/lib/intel64/libinference_engine_legacy.dylib", + "runtime/lib/intel64/usb-ma2x8x.mvcmd", + "runtime/lib/intel64/pcie-ma2x8x.mvcmd" ] }, "opencv": { diff --git a/tools/deployment_manager/configs/linux.json b/tools/deployment_manager/configs/linux.json index cbcd82c850f324..29912e3ff3e389 100644 --- a/tools/deployment_manager/configs/linux.json +++ b/tools/deployment_manager/configs/linux.json @@ -4,7 +4,7 @@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.sh" ] }, "openvino_dependencies": { @@ -22,17 +22,20 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/lib/intel64/libinference_engine.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_transformations.so", - 
"deployment_tools/inference_engine/lib/intel64/libinference_engine_preproc.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_ir_reader.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_c_api.so", - "deployment_tools/inference_engine/lib/intel64/libHeteroPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libMultiDevicePlugin.so", - "deployment_tools/inference_engine/lib/intel64/plugins.xml", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/lib/intel64/libinference_engine.so", + "runtime/lib/intel64/libinference_engine_transformations.so", + "runtime/lib/intel64/libinference_engine_preproc.so", + "runtime/lib/intel64/libinference_engine_ir_reader.so", + "runtime/lib/intel64/libinference_engine_c_api.so", + "runtime/lib/intel64/libHeteroPlugin.so", + "runtime/lib/intel64/libMultiDevicePlugin.so", + "runtime/lib/intel64/libngraph.so", + "runtime/lib/intel64/libfrontend_manager.so", + "runtime/lib/intel64/libir_ngraph_frontend.so", + "runtime/lib/intel64/libonnx_ngraph_frontend.so", + "runtime/lib/intel64/libpaddlepaddle_ngraph_frontend.so", + "runtime/lib/intel64/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -40,8 +43,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.so", - "deployment_tools/inference_engine/lib/intel64/libMKLDNNPlugin.so" + "runtime/lib/intel64/libinference_engine_lp_transformations.so", + "runtime/lib/intel64/libMKLDNNPlugin.so" ] }, "gpu": { @@ -49,9 +52,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/lib/intel64/cache.json", - "deployment_tools/inference_engine/lib/intel64/libclDNNPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_lp_transformations.so", + "runtime/lib/intel64/cache.json", + "runtime/lib/intel64/libclDNNPlugin.so", + "runtime/lib/intel64/libinference_engine_lp_transformations.so", "install_dependencies/install_NEO_OCL_driver.sh" ] }, @@ -60,12 +63,12 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/external/97-myriad-usbboot.rules", - "deployment_tools/inference_engine/lib/intel64/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/pcie-ma2x8x.mvcmd", - "deployment_tools/inference_engine/lib/intel64/libmyriadPlugin.so", - "deployment_tools/inference_engine/lib/intel64/vpu_custom_kernels", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", + "runtime/3rdparty/97-myriad-usbboot.rules", + "runtime/lib/intel64/usb-ma2x8x.mvcmd", + "runtime/lib/intel64/pcie-ma2x8x.mvcmd", + "runtime/lib/intel64/libmyriadPlugin.so", + "runtime/lib/intel64/vpu_custom_kernels", + "runtime/lib/intel64/libinference_engine_legacy.so", "install_dependencies/install_NCS_udev_rules.sh" ] }, @@ -74,9 +77,11 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/external/gna", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", - "deployment_tools/inference_engine/lib/intel64/libGNAPlugin.so" + "runtime/lib/intel64/libgna.so", + "runtime/lib/intel64/libgna.so.2", + "runtime/lib/intel64/libgna.so.2.0.0.1226", + "runtime/lib/intel64/libinference_engine_legacy.so", + "runtime/lib/intel64/libGNAPlugin.so" ] }, "hddl": { @@ -84,9 +89,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - 
"deployment_tools/inference_engine/lib/intel64/libHDDLPlugin.so", - "deployment_tools/inference_engine/lib/intel64/libinference_engine_legacy.so", - "deployment_tools/inference_engine/external/hddl" + "runtime/lib/intel64/libHDDLPlugin.so", + "runtime/lib/intel64/libinference_engine_legacy.so", + "runtime/3rdparty/hddl" ] }, "opencv": { diff --git a/tools/deployment_manager/configs/windows.json b/tools/deployment_manager/configs/windows.json index 6ee8e12cc6e843..f49d9d3b16a81a 100644 --- a/tools/deployment_manager/configs/windows.json +++ b/tools/deployment_manager/configs/windows.json @@ -4,7 +4,7 @@ "setupvars": { "mandatory" : "yes", "files": [ - "bin" + "setupvars.bat" ] }, "openvino_license": { @@ -16,20 +16,20 @@ "ie_core": { "group": ["ie"], "files": [ - "deployment_tools/inference_engine/version.txt", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_preproc.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_ir_reader.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_c_api.dll", - "deployment_tools/inference_engine/lib/intel64/Release/HeteroPlugin.dll", - "deployment_tools/inference_engine/lib/intel64/Release/MultiDevicePlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/plugins.xml", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine.lib", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine_transformations.lib", - "deployment_tools/inference_engine/lib/intel64/Release/inference_engine_c_api.lib", - "deployment_tools/inference_engine/external/tbb", - "deployment_tools/ngraph/lib" + "runtime/bin/intel64/Release/inference_engine.dll", + "runtime/bin/intel64/Release/inference_engine_transformations.dll", + "runtime/bin/intel64/Release/inference_engine_preproc.dll", + "runtime/bin/intel64/Release/inference_engine_ir_reader.dll", + "runtime/bin/intel64/Release/inference_engine_c_api.dll", + "runtime/bin/intel64/Release/HeteroPlugin.dll", + "runtime/bin/intel64/Release/MultiDevicePlugin.dll", + "runtime/bin/intel64/Release/ngraph.dll", + "runtime/bin/intel64/Release/frontend_manager.dll", + "runtime/bin/intel64/Release/ir_ngraph_frontend.dll", + "runtime/bin/intel64/Release/onnx_ngraph_frontend.dll", + "runtime/bin/intel64/Release/paddlepaddle_ngraph_frontend.dll", + "runtime/bin/intel64/Release/plugins.xml", + "runtime/3rdparty/tbb" ] }, "cpu": { @@ -37,8 +37,8 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_lp_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/MKLDNNPlugin.dll" + "runtime/bin/intel64/Release/inference_engine_lp_transformations.dll", + "runtime/bin/intel64/Release/MKLDNNPlugin.dll" ] }, "gpu": { @@ -46,9 +46,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/cache.json", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_lp_transformations.dll", - "deployment_tools/inference_engine/bin/intel64/Release/clDNNPlugin.dll" + "runtime/bin/intel64/Release/cache.json", + "runtime/bin/intel64/Release/inference_engine_lp_transformations.dll", + "runtime/bin/intel64/Release/clDNNPlugin.dll" ] }, "vpu": { @@ -56,10 +56,10 @@ "group": ["ie"], "dependencies" : 
["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/usb-ma2x8x.mvcmd", - "deployment_tools/inference_engine/bin/intel64/Release/pcie-ma2x8x.elf", - "deployment_tools/inference_engine/bin/intel64/Release/myriadPlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll" + "runtime/bin/intel64/Release/usb-ma2x8x.mvcmd", + "runtime/bin/intel64/Release/pcie-ma2x8x.elf", + "runtime/bin/intel64/Release/myriadPlugin.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll" ] }, "gna": { @@ -67,9 +67,9 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/gna.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll", - "deployment_tools/inference_engine/bin/intel64/Release/GNAPlugin.dll" + "runtime/bin/intel64/Release/gna.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll", + "runtime/bin/intel64/Release/GNAPlugin.dll" ] }, "hddl": { @@ -77,11 +77,11 @@ "group": ["ie"], "dependencies" : ["ie_core"], "files": [ - "deployment_tools/inference_engine/bin/intel64/Release/HDDLPlugin.dll", - "deployment_tools/inference_engine/bin/intel64/Release/inference_engine_legacy.dll", - "deployment_tools/inference_engine/bin/intel64/Release/hddl_perfcheck.exe", - "deployment_tools/inference_engine/external/MovidiusDriver", - "deployment_tools/inference_engine/external/hddl" + "runtime/bin/intel64/Release/HDDLPlugin.dll", + "runtime/bin/intel64/Release/inference_engine_legacy.dll", + "runtime/bin/intel64/Release/hddl_perfcheck.exe", + "runtime/3rdparty/MovidiusDriver", + "runtime/3rdparty/hddl" ] }, "opencv": {