Merge branch 'master' into do_not_parse_input_for_pyapi
pavel-esir authored Sep 19, 2023
2 parents 4fed660 + f926e0e commit 78631a4
Showing 271 changed files with 9,147 additions and 10,053 deletions.
2 changes: 1 addition & 1 deletion .ci/azure/linux_debian.yml
@@ -228,7 +228,7 @@ jobs:
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get update
sudo apt-get install openvino -y
# install our local one and make sure the conflicts are resolved
sudo apt-get install --no-install-recommends dpkg-dev -y
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/good_first_issue.yml
@@ -27,7 +27,7 @@ body:
- type: textarea
id: example_prs
attributes:
label: Exmaple Pull Requests
label: Example Pull Requests
description: |
Provide example Pull requests, if there are any.
validations:
19 changes: 14 additions & 5 deletions .github/workflows/linux.yml
@@ -585,7 +585,7 @@ jobs:
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_zoo_models.py \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_backend.py
- name: Python API snippets
run: |
source ${{ env.INSTALL_DIR }}/setupvars.sh
@@ -774,19 +774,27 @@ jobs:
python3 -m pip install --upgrade pip
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/functional_test_utils/requirements.txt
- name: Cache Tests Execution Time
id: tests-functional-cpu-cache
uses: actions/cache@v3
- name: Restore tests execution time
uses: actions/cache/restore@v3
with:
path: ${{ env.PARALLEL_TEST_CACHE }}
key: ${{ runner.os }}-tests-functional-cpu-cache
key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
restore-keys: |
${{ runner.os }}-tests-functional-cpu-stamp
- name: Intel CPU plugin func tests (parallel)
run: |
source ${{ env.INSTALL_DIR }}/setupvars.sh
python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
timeout-minutes: 25

- name: Save tests execution time
uses: actions/cache/save@v3
if: github.ref_name == 'master'
with:
path: ${{ env.PARALLEL_TEST_CACHE }}
key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}

- name: Upload Test Results
uses: actions/upload-artifact@v3
if: ${{ always() }}
@@ -912,6 +920,7 @@ jobs:
- name: PyTorch Models Tests
run: |
python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/requirements.txt
python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/requirements_secondary.txt
export PYTHONPATH=${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}:$PYTHONPATH
python3 -m pytest ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/ -m ${{ env.TYPE }} --html=${{ env.INSTALL_TEST_DIR }}/TEST-torch_model_tests.html --self-contained-html
env:
2 changes: 1 addition & 1 deletion .github/workflows/linux_debian.yml
@@ -219,7 +219,7 @@ jobs:
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get update
sudo apt-get install openvino -y
# install our local one and make sure the conflicts are resolved
sudo apt-get install --no-install-recommends dpkg-dev -y
2 changes: 0 additions & 2 deletions CMakeLists.txt
@@ -106,8 +106,6 @@ function(openvino_developer_export_targets)
if(TARGET "${target_name}")
get_target_property(original_name ${target_name} ALIASED_TARGET)
if(TARGET "${original_name}")
message(STATUS "The name ${target_name} is an ALIAS for ${original_name}. "
"It will be exported to the OpenVINODeveloperPackage with the original name.")
list(REMOVE_ITEM ${EXPORT_COMPONENT} ${target_name})
list(APPEND ${EXPORT_COMPONENT} ${original_name})
endif()
4 changes: 4 additions & 0 deletions cmake/developer_package/plugins/plugins.cmake
@@ -117,6 +117,10 @@ function(ov_add_plugin)
# install rules
if(NOT OV_PLUGIN_SKIP_INSTALL OR NOT BUILD_SHARED_LIBS)
string(TOLOWER "${OV_PLUGIN_DEVICE_NAME}" install_component)
if(NOT BUILD_SHARED_LIBS)
# in case of static libs everything is installed to 'core'
set(install_component ${OV_CPACK_COMP_CORE})
endif()

if(OV_PLUGIN_PSEUDO_DEVICE)
set(plugin_hidden HIDDEN)
2 changes: 1 addition & 1 deletion cmake/features.cmake
@@ -23,7 +23,7 @@ endif()

ie_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF)

if (ANDROID OR MINGW OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) OR NOT BUILD_SHARED_LIBS)
if (ANDROID OR MINGW OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) OR (NOT BUILD_SHARED_LIBS AND ENABLE_INTEL_CPU))
# oneDNN doesn't support old compilers and android builds for now, so we'll build GPU plugin without oneDNN
# also, in case of static build CPU's and GPU's oneDNNs will conflict, so we are disabling GPU's one in this case
set(ENABLE_ONEDNN_FOR_GPU_DEFAULT OFF)
1 change: 1 addition & 0 deletions cmake/packaging/debian.cmake
@@ -91,6 +91,7 @@ macro(ov_cpack_settings)
# - 2022.3 is the first release where Debian updated packages are introduced, others 2022.3.X are LTS
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
2023.0.0 2023.0.1 2023.0.2 2023.0.3
2023.1.0
)

#
1 change: 1 addition & 0 deletions cmake/packaging/rpm.cmake
@@ -77,6 +77,7 @@ macro(ov_cpack_settings)
# - 2022.3 is the first release where RPM updated packages are introduced, others 2022.3.X are LTS
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
2023.0.0 2023.0.1 2023.0.2 2023.0.3
2023.1.0
)

find_host_program(rpmlint_PROGRAM NAMES rpmlint DOC "Path to rpmlint")
63 changes: 52 additions & 11 deletions cmake/templates/OpenVINOConfig.cmake.in
@@ -223,6 +223,10 @@ macro(_ov_find_tbb)
PATHS ${_tbb_bind_dir}
NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH)
if(TARGET TBBbind::tbbbind_2_5)
# To solve https://cmake.org/cmake/help/latest/policy/CMP0111.html warnings
set_property(TARGET TBBbind::tbbbind_2_5 PROPERTY IMPORTED_CONFIGURATIONS RELEASE DEBUG)
endif()
unset(_tbb_bind_dir)
endif()
unset(install_tbbbind)
@@ -343,11 +347,15 @@ endmacro()
macro(_ov_find_intel_cpu_dependencies)
set(_OV_ENABLE_CPU_ACL "@DNNL_USE_ACL@")
if(_OV_ENABLE_CPU_ACL)
set(_ov_in_install_tree "@PACKAGE_ARM_COMPUTE_LIB_DIR@")
set(_ov_in_install_tree "@PACKAGE_OPENVINO_LIB_DIR@")
if(_ov_in_install_tree)
set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_ARM_COMPUTE_LIB_DIR@")
set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_OPENVINO_LIB_DIR@")
set(ACL_DIR "${CMAKE_CURRENT_LIST_DIR}")
else()
if(NOT TARGET arm_compute::arm_compute)
# for case when build tree is used separately, e.g. OpenVINODeveloperPackageConfig.cmake
set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_CMAKE_ARCHIVE_OUTPUT_DIRECTORY@")
endif()
set_and_check(ACL_DIR "@PACKAGE_FIND_ACL_PATH@")
endif()

@@ -363,16 +371,50 @@ macro(_ov_find_intel_gpu_dependencies)
set(_OV_ENABLE_INTEL_GPU "@ENABLE_INTEL_GPU@")
set(_OV_ENABLE_SYSTEM_OPENCL "@ENABLE_SYSTEM_OPENCL@")
if(_OV_ENABLE_INTEL_GPU AND _OV_ENABLE_SYSTEM_OPENCL)
set(_OV_OpenCLICDLoader_FOUND "@OpenCLICDLoader_FOUND@")
if(_OV_OpenCLICDLoader_FOUND)
_ov_find_dependency(OpenCLICDLoader)
else()
_ov_find_dependency(OpenCL)
endif()
unset(_OV_OpenCLICDLoader_FOUND)
_ov_find_dependency(OpenCL)
endif()
unset(_OV_ENABLE_INTEL_GPU)
unset(_OV_ENABLE_SYSTEM_OPENCL)

set(_OV_ENABLE_ONEDNN_FOR_GPU "@ENABLE_ONEDNN_FOR_GPU@")
if(_OV_ENABLE_ONEDNN_FOR_GPU AND NOT TARGET onednn_gpu_tgt)
set(_OV_DNNL_GPU_LIBRARY_NAME "@DNNL_GPU_LIBRARY_NAME@")

set(_ov_in_install_tree "@PACKAGE_OPENVINO_LIB_DIR@")
if(_ov_in_install_tree)
set(onednn_gpu_lib "${CMAKE_STATIC_LIBRARY_PREFIX}${_OV_DNNL_GPU_LIBRARY_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
set_and_check(onednn_gpu_lib_root "@PACKAGE_OPENVINO_LIB_DIR@")
if(WIN32)
if(OV_GENERATOR_MULTI_CONFIG)
set(extra_args PATH_SUFFIXES ${CMAKE_CONFIGURATION_TYPES})
else()
set(extra_args PATH_SUFFIXES ${CMAKE_BUILD_TYPE})
endif()
endif()

find_library(onednn_gpu_lib_path
NAMES ${_OV_DNNL_GPU_LIBRARY_NAME}
PATHS ${onednn_gpu_lib_root}
${extra_args})

if(NOT onednn_gpu_lib_path)
message(FATAL_ERROR "Internal error: failed to find '${_OV_DNNL_GPU_LIBRARY_NAME}' in '${onednn_gpu_lib_root}'")
endif()

unset(extra_args)
unset(onednn_gpu_lib)
else()
set_and_check(onednn_gpu_lib_path "@PACKAGE_ONEDNN_GPU_LIB_PATH@")
endif()

set_target_properties(openvino::onednn_gpu_tgt PROPERTIES
INTERFACE_LINK_LIBRARIES "${onednn_gpu_lib_path}")

unset(onednn_gpu_lib_path)
unset(_ov_in_install_tree)
unset(_OV_DNNL_GPU_LIBRARY_NAME)
endif()
unset(_OV_ENABLE_ONEDNN_FOR_GPU)
endmacro()

macro(_ov_find_intel_gna_dependencies)
@@ -455,6 +497,7 @@ set(_OV_ENABLE_OPENVINO_BUILD_SHARED "@BUILD_SHARED_LIBS@")

if(NOT TARGET openvino)
set(_ov_as_external_package ON)
include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake")
endif()

if(NOT _OV_ENABLE_OPENVINO_BUILD_SHARED)
@@ -487,8 +530,6 @@ set(_ov_imported_libs openvino::runtime openvino::runtime::c
openvino::frontend::pytorch openvino::frontend::tensorflow_lite)

if(_ov_as_external_package)
include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake")

foreach(target IN LISTS _ov_imported_libs)
if(TARGET ${target})
get_target_property(imported_configs ${target} IMPORTED_CONFIGURATIONS)
41 changes: 28 additions & 13 deletions docs/Documentation/model_introduction.md
@@ -15,18 +15,37 @@
openvino_docs_OV_Converter_UG_prepare_model_convert_model_Converting_Model


Every deep learning workflow begins with obtaining a model. You can choose to prepare a custom one, use a ready-made solution and adjust it to your needs, or even download and run a pre-trained network from an online database, such as `TensorFlow Hub <https://tfhub.dev/>`__, `Hugging Face <https://huggingface.co/>`__, or `Torchvision models <https://pytorch.org/hub/>`__.

OpenVINO™ :doc:`supports several model formats <Supported_Model_Formats>` and can convert them into its own representation, `openvino.Model <api/ie_python_api/_autosummary/openvino.Model.html>`__ (`ov.Model <api/ie_python_api/_autosummary/openvino.runtime.Model.html>`__), providing a conversion API. Converted models can be used for inference with one or multiple OpenVINO Hardware plugins. There are two ways to use the conversion API: using a Python script or calling the ``ovc`` command line tool.
Every deep learning workflow begins with obtaining a model. You can choose to prepare
a custom one, use a ready-made solution and adjust it to your needs, or even download
and run a pre-trained network from an online database, such as
`TensorFlow Hub <https://tfhub.dev/>`__, `Hugging Face <https://huggingface.co/>`__,
or `Torchvision models <https://pytorch.org/hub/>`__.

If your selected model is in one of the :doc:`OpenVINO™ supported model formats <Supported_Model_Formats>`,
you can use it directly, without the need to save it as the OpenVINO IR
(`openvino.Model <api/ie_python_api/_autosummary/openvino.Model.html>`__,
also exposed as `ov.Model <api/ie_python_api/_autosummary/openvino.runtime.Model.html>`__).
For this purpose, you can use the ``openvino.Core.read_model`` and ``openvino.Core.compile_model``
methods, so that conversion is performed automatically before inference, for
maximum convenience. Note that working with PyTorch differs slightly, as the Python API
is the only option there, while TensorFlow may present additional considerations, described in
:doc:`TensorFlow Frontend Capabilities and Limitations <openvino_docs_MO_DG_TensorFlow_Frontend>`.


For better performance and more optimization options, OpenVINO offers a conversion
API with two possible approaches: the Python API functions (``openvino.convert_model``
and ``openvino.save_model``) and the ``ovc`` command line tool, which are described in detail in this article.
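
As an illustration, here is a minimal sketch of both approaches; the model file names are hypothetical, and only the APIs named above are assumed:

.. code-block:: py

   import openvino as ov

   core = ov.Core()

   # Direct use: compile a supported format; conversion happens automatically
   compiled_model = core.compile_model("model.onnx", "CPU")

   # Explicit conversion: convert once and save the OpenVINO IR for later reuse
   ov_model = ov.convert_model("model.onnx")
   ov.save_model(ov_model, "model.xml")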

.. note::

Prior to OpenVINO 2023.1, model conversion API was exposed as the ``openvino.tools.mo.convert_model``
function and the ``mo`` command line tool. Now, a new and simplified API is used: the
``openvino.convert_model`` function and the ``ovc`` command line tool.
Model conversion API prior to OpenVINO 2023.1 is considered deprecated.
Both existing and new projects are recommended to transition to the new
solutions, keeping in mind that they are not fully backwards compatible
with ``openvino.tools.mo.convert_model`` or the ``mo`` CLI tool.
For more details, see the :doc:`Model Conversion API Transition Guide <openvino_docs_OV_Converter_UG_prepare_model_convert_model_MO_OVC_transition>`.



All new projects are recommended to use the new tools, keeping in mind that they are not fully
backwards compatible. For more details, consult the :doc:`Model Conversion API Transition Guide <openvino_docs_OV_Converter_UG_prepare_model_convert_model_MO_OVC_transition>`.

Convert a Model in Python: ``convert_model``
##############################################
@@ -202,19 +221,15 @@ The figure below illustrates the typical workflow for deploying a trained deep-l
Convert a Model in CLI: ``ovc``
###############################

Another option for model conversion is to use ``ovc`` command-line tool, which stands for OpenVINO Model Converter. The tool combines both ``openvino.convert_model`` and ``openvino.save_model`` functionalities. It is convenient to use when the original model is ready for inference and is in one of the supported file formats: ONNX, TensorFlow, TensorFlow Lite, or PaddlePaddle. As a result, ``ovc`` produces an OpenVINO IR, consisting of ``.xml`` and ``.bin`` files, which needs to be read with the ``ov.read_model()`` method. You can compile and infer the ``ov.Model`` later with :doc:`OpenVINO™ Runtime <openvino_docs_OV_UG_OV_Runtime_User_Guide>`
Another option for model conversion is to use ``ovc`` command-line tool, which stands for OpenVINO Model Converter. The tool combines both ``openvino.convert_model`` and ``openvino.save_model`` functionalities. It is convenient to use when the original model is ready for inference and is in one of the supported file formats: ONNX, TensorFlow, TensorFlow Lite, or PaddlePaddle. As a result, ``ovc`` produces an OpenVINO IR, consisting of ``.xml`` and ``.bin`` files, which needs to be read with the ``openvino.Core.read_model`` method. You can compile and infer the ``ov.Model`` later with :doc:`OpenVINO™ Runtime <openvino_docs_OV_UG_OV_Runtime_User_Guide>`

.. note::
PyTorch models cannot be converted with ``ovc``; use ``openvino.convert_model`` instead.

The results of both ``ovc`` and ``openvino.convert_model``/``openvino.save_model`` conversion methods are the same. You can choose either of them based on your convenience. Note that there should not be any differences in the results of model conversion if the same set of parameters is used and the model is saved into OpenVINO IR.
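
For instance, a model converted with ``ovc`` on the command line can later be loaded and compiled as follows (file names are hypothetical; this is a sketch, not the article's full example):

.. code-block:: py

   # assuming the IR was produced beforehand, e.g. with: ovc model.onnx
   import openvino as ov

   core = ov.Core()
   ov_model = core.read_model("model.xml")
   compiled_model = core.compile_model(ov_model, "CPU")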

Cases when Model Preparation is not Required
############################################

If a model is represented as a single file from ONNX, PaddlePaddle, TensorFlow and TensorFlow Lite (check :doc:`TensorFlow Frontend Capabilities and Limitations <openvino_docs_MO_DG_TensorFlow_Frontend>`), it does not require a separate conversion and IR-saving step, that is ``openvino.convert_model`` and ``openvino.save_model``, or ``ovc``.

OpenVINO provides C++ and Python APIs for reading such models by just calling the ``openvino.Core.read_model`` or ``openvino.Core.compile_model`` methods. These methods perform conversion of the model from the original representation. While this conversion may take extra time compared to using prepared OpenVINO IR, it is convenient when you need to read a model in the original format in C++, since ``openvino.convert_model`` is only available in Python. However, for efficient model deployment with the OpenVINO Runtime, it is still recommended to prepare OpenVINO IR and then use it in your inference application.

Additional Resources
####################
1 change: 1 addition & 0 deletions docs/Documentation/openvino_ecosystem.md
@@ -27,6 +27,7 @@ More resources:
* :doc:`Documentation <tmo_introduction>`
* `GitHub <https://github.com/openvinotoolkit/nncf>`__
* `PyPI <https://pypi.org/project/nncf/>`__
* `Conda Forge <https://anaconda.org/conda-forge/nncf/>`__


**OpenVINO™ Training Extensions**
@@ -58,7 +58,7 @@ parameter to be set, for example:

Sometimes ``convert_model`` will produce inputs of the model with dynamic rank or dynamic type.
Such model may not be supported by the hardware chosen for inference. To avoid this issue,
use the ``input`` argument of ``convert_model``. For more information, refer to `Convert Models Represented as Python Objects <openvino_docs_MO_DG_Python_API>`.
use the ``input`` argument of ``convert_model``. For more information, refer to :doc:`Convert Models Represented as Python Objects <openvino_docs_MO_DG_Python_API>`.
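
A minimal sketch of pinning an input to a static shape with the ``input`` argument; the model path, input name, and shape here are illustrative assumptions:

.. code-block:: py

   import openvino as ov

   # fix the input named "x" (hypothetical) to a static 4D shape to avoid dynamic rank
   ov_model = ov.convert_model("model.onnx", input=[("x", [1, 3, 224, 224])])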

.. important::

@@ -89,7 +89,7 @@ Some PaddlePaddle models may require setting ``example_input`` or ``output`` for

* Example of converting ``paddle.fluid.dygraph.layers.Layer`` format model:

``example_input`` is required while ``output`` is optional, which accept the following formats:
``example_input`` is required while ``output`` is optional. ``example_input`` accepts the following formats:

``list`` with tensor (``paddle.Tensor``) or InputSpec (``paddle.static.input.InputSpec``)

@@ -40,8 +40,8 @@ The value for the ``example_input`` parameter can be easily derived from knowing
import torch
import openvino as ov

model = torchvision.models.resnet50(pretrained=True)
ov_model = ov.convert_model(model, example_input=example_input=torch.rand(1, 3, 224, 224))
model = torchvision.models.resnet50(weights='DEFAULT')
ov_model = ov.convert_model(model, example_input=torch.rand(1, 3, 224, 224))
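
Continuing from the snippet above, a minimal sketch of running inference on the converted model (the device choice and random input are illustrative):

.. code-block:: py

   import numpy as np

   core = ov.Core()
   compiled_model = core.compile_model(ov_model, "CPU")
   result = compiled_model(np.random.rand(1, 3, 224, 224).astype(np.float32))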

In practice, the code to evaluate or test the PyTorch model is usually provided with the model itself and can be used to generate a proper ``example_input`` value. A modified example of using ``resnet50`` model from ``torchvision`` is presented below. It demonstrates how to switch inference in the existing PyTorch application to OpenVINO and how to get value for ``example_input``:

1 change: 1 addition & 0 deletions docs/OV_Runtime_UG/deployment/local-distribution.md
@@ -137,6 +137,7 @@ OpenVINO Runtime uses frontend libraries dynamically to read models in different
- ``openvino_tensorflow_lite_frontend`` is used to read the TensorFlow Lite file format.
- ``openvino_onnx_frontend`` is used to read the ONNX file format.
- ``openvino_paddle_frontend`` is used to read the Paddle file format.
- ``openvino_pytorch_frontend`` is used to convert PyTorch model via ``openvino.convert_model`` API.

Depending on the model format types that are used in the application in `ov::Core::read_model <classov_1_1Core.html#doxid-classov-1-1-core-1ae0576a95f841c3a6f5e46e4802716981>`__, select the appropriate libraries.
