diff --git a/.ci/azure/linux_debian.yml b/.ci/azure/linux_debian.yml
index 96db6129c72ee1..e94a0819705bd7 100644
--- a/.ci/azure/linux_debian.yml
+++ b/.ci/azure/linux_debian.yml
@@ -228,7 +228,7 @@ jobs:
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
- sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
+ sudo apt-get update
sudo apt-get install openvino -y
# install our local one and make sure the conflicts are resolved
sudo apt-get install --no-install-recommends dpkg-dev -y
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 74661a77af5794..80e95ea5a9fe73 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -154,6 +154,7 @@ updates:
time: "09:00"
timezone: "Asia/Dubai"
assignees:
- - "ilyachur"
+ - "akashchi"
+ - "mryzhov"
- "ilya-lavrenov"
open-pull-requests-limit: 3
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 486cc879ec02d0..d7cba0bc390e59 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -585,7 +585,7 @@ jobs:
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_zoo_models.py \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_backend.py
-
+
- name: Python API snippets
run: |
source ${{ env.INSTALL_DIR }}/setupvars.sh
@@ -774,12 +774,13 @@ jobs:
python3 -m pip install --upgrade pip
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/functional_test_utils/requirements.txt
- - name: Cache Tests Execution Time
- id: tests-functional-cpu-cache
- uses: actions/cache@v3
+ - name: Restore tests execution time
+ uses: actions/cache/restore@v3
with:
path: ${{ env.PARALLEL_TEST_CACHE }}
- key: ${{ runner.os }}-tests-functional-cpu-cache
+ key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
+ restore-keys: |
+ ${{ runner.os }}-tests-functional-cpu-stamp
- name: Intel CPU plugin func tests (parallel)
run: |
@@ -787,6 +788,13 @@ jobs:
python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
timeout-minutes: 25
+ - name: Save tests execution time
+ uses: actions/cache/save@v3
+ if: github.ref_name == 'master'
+ with:
+ path: ${{ env.PARALLEL_TEST_CACHE }}
+ key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
+
- name: Upload Test Results
uses: actions/upload-artifact@v3
if: ${{ always() }}
diff --git a/.github/workflows/linux_debian.yml b/.github/workflows/linux_debian.yml
index b517a368c46ced..bff429be48c90a 100644
--- a/.github/workflows/linux_debian.yml
+++ b/.github/workflows/linux_debian.yml
@@ -219,7 +219,7 @@ jobs:
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
- sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
+ sudo apt-get update
sudo apt-get install openvino -y
# install our local one and make sure the conflicts are resolved
sudo apt-get install --no-install-recommends dpkg-dev -y
diff --git a/.github/workflows/linux_onnxruntime.yml b/.github/workflows/linux_onnxruntime.yml
new file mode 100644
index 00000000000000..178aae4fefac6b
--- /dev/null
+++ b/.github/workflows/linux_onnxruntime.yml
@@ -0,0 +1,182 @@
+name: Linux ONNX Runtime (Ubuntu 20.04, Python 3.11)
+on:
+ workflow_dispatch:
+ schedule:
+ # run daily at 00:00
+ - cron: '0 0 * * *'
+# pull_request:
+# paths-ignore:
+# - '**/docs/**'
+# - 'docs/**'
+# - '**/**.md'
+# - '**.md'
+# - '**/layer_tests_summary/**'
+# - '**/conformance/**'
+# push:
+# paths-ignore:
+# - '**/docs/**'
+# - 'docs/**'
+# - '**/**.md'
+# - '**.md'
+# - '**/layer_tests_summary/**'
+# - '**/conformance/**'
+# branches:
+# - master
+
+concurrency:
+ group: ${{ github.head_ref || github.run_id }}-linux-onnx-runtime
+ cancel-in-progress: true
+
+jobs:
+ Build:
+ # TODO: remove. Temporary measure to prevent the workflow from scheduling on forks.
+ if: ${{ github.repository_owner == 'openvinotoolkit' }}
+ defaults:
+ run:
+ shell: bash
+ runs-on: ubuntu-20.04-8-cores
+ env:
+ CMAKE_BUILD_TYPE: 'Release'
+ CMAKE_GENERATOR: 'Ninja'
+ CMAKE_CXX_COMPILER_LAUNCHER: ccache
+ CMAKE_C_COMPILER_LAUNCHER: ccache
+ CMAKE_CXX_LINKER_LAUNCHER: ccache
+ CMAKE_C_LINKER_LAUNCHER: ccache
+ BUILD_TYPE: Release
+ OPENVINO_REPO: ${{ github.workspace }}/openvino
+ ONNX_RUNTIME_REPO: ${{ github.workspace }}/onnxruntime
+ ONNX_RUNTIME_UTILS: ${{ github.workspace }}/openvino/.ci/azure/ci_utils/onnxruntime
+ ONNX_RUNTIME_BUILD_DIR: ${{ github.workspace }}/onnxruntime/build
+ BUILD_DIR: ${{ github.workspace }}/build
+ INSTALL_DIR: ${{ github.workspace }}/install/openvino
+ steps:
+ - name: Clone OpenVINO
+ uses: actions/checkout@v4
+ with:
+ path: 'openvino'
+ submodules: 'true'
+
+ - name: Clone ONNX Runtime
+ run: |
+ branch=`tr -s '\n ' < ${{ env.ONNX_RUNTIME_UTILS }}/version`
+ git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${{ env.ONNX_RUNTIME_REPO }}
+
+ - name: Create Directories
+ run: |
+ mkdir -p ${{ env.BUILD_DIR }}
+ mkdir -p ${{ env.INSTALL_DIR }}
+
+ - name: Setup Python 3.11
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.11'
+
+ #
+ # Dependencies
+ #
+
+ - name: Install build dependencies
+ run: |
+ sudo -E ${{ env.OPENVINO_REPO }}/install_build_dependencies.sh
+
+ - name: Setup ccache
+ uses: hendrikmuhs/ccache-action@v1.2
+ with:
+ max-size: "2000M"
+ # Should save cache only if run in the master branch of the base repo
+ # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
+ save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
+ verbose: 2
+ key: ${{ github.job }}-linux-onnx-runtime
+ restore-keys: |
+ ${{ github.job }}-linux-onnx-runtime
+
+ #
+ # Build
+ #
+
+ - name: Get number of CPU cores
+ uses: SimenB/github-actions-cpu-cores@v2
+ id: cpu-cores
+
+ - name: CMake configure
+ run: |
+ cmake \
+ -GNinja \
+ -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
+ -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \
+ -DENABLE_INTEL_GNA=OFF \
+ -DENABLE_INTEL_GPU=OFF \
+ -DENABLE_CPPLINT=OFF \
+ -DENABLE_PROFILING_ITT=OFF \
+ -DENABLE_SAMPLES=OFF \
+ -DENABLE_OV_TF_FRONTEND=OFF \
+ -DENABLE_OV_TF_LITE_FRONTEND=OFF \
+ -DENABLE_OV_PADDLE_FRONTEND=OFF \
+ -DENABLE_OV_PYTORCH_FRONTEND=OFF \
+ -S ${{ env.OPENVINO_REPO }} \
+ -B ${{ env.BUILD_DIR }}
+
+ - name: Clean ccache stats
+ run: ccache --zero-stats --show-config
+
+ - name: Build
+ run: cmake --build ${{ env.BUILD_DIR }} --parallel ${{ steps.cpu-cores.outputs.count }} --config ${{ env.BUILD_TYPE }}
+
+ - name: Show ccache stats
+ run: ccache --show-stats
+
+ - name: Install OpenVINO
+ run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake
+
+ - name: Build Lin ONNX Runtime
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+
+ ${{ env.ONNX_RUNTIME_REPO }}/build.sh \
+ --config RelWithDebInfo \
+ --use_openvino CPU_FP32 \
+ --build_shared_lib \
+ --parallel \
+ --skip_tests \
+ --compile_no_warning_as_error \
+ --build_dir ${{ env.ONNX_RUNTIME_BUILD_DIR }}
+ env:
+ CXXFLAGS: "-Wno-error=deprecated-declarations"
+
+ - name: Run onnxruntime_test_all
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ skip_tests=$(tr -s '\n ' ':' < ${{ env.ONNX_RUNTIME_UTILS }}/skip_tests)
+ ./onnxruntime_test_all --gtest_filter=-$skip_tests
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
+
+ - name: Run onnxruntime_shared_lib_test
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
+
+ - name: Run onnxruntime_global_thread_pools_test
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ ./onnxruntime_global_thread_pools_test
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
+
+ - name: Run onnxruntime_api_tests_without_env
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ ./onnxruntime_api_tests_without_env
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
+
+ - name: Run pytorch-converted tests
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ ./onnx_test_runner "${{ env.ONNX_RUNTIME_REPO }}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted"
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
+
+ - name: Run pytorch-operator tests
+ run: |
+ source ${{ env.INSTALL_DIR }}/setupvars.sh
+ ./onnx_test_runner "${{ env.ONNX_RUNTIME_REPO }}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator"
+ working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0b552b3da3406c..e9d4760eb86916 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -106,8 +106,6 @@ function(openvino_developer_export_targets)
if(TARGET "${target_name}")
get_target_property(original_name ${target_name} ALIASED_TARGET)
if(TARGET "${original_name}")
- message(STATUS "The name ${target_name} is an ALIAS for ${original_name}. "
- "It will be exported to the OpenVINODeveloperPackage with the original name.")
list(REMOVE_ITEM ${EXPORT_COMPONENT} ${target_name})
list(APPEND ${EXPORT_COMPONENT} ${original_name})
endif()
diff --git a/cmake/packaging/debian.cmake b/cmake/packaging/debian.cmake
index 48adc7acf3980a..d359a40aa6df51 100644
--- a/cmake/packaging/debian.cmake
+++ b/cmake/packaging/debian.cmake
@@ -91,6 +91,7 @@ macro(ov_cpack_settings)
# - 2022.3 is the first release where Debian updated packages are introduced, others 2022.3.X are LTS
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
2023.0.0 2023.0.1 2023.0.2 2023.0.3
+ 2023.1.0
)
#
diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake
index ad1172b6c833ae..077e970d138bc4 100644
--- a/cmake/packaging/rpm.cmake
+++ b/cmake/packaging/rpm.cmake
@@ -77,6 +77,7 @@ macro(ov_cpack_settings)
# - 2022.3 is the first release where RPM updated packages are introduced, others 2022.3.X are LTS
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
2023.0.0 2023.0.1 2023.0.2 2023.0.3
+ 2023.1.0
)
find_host_program(rpmlint_PROGRAM NAMES rpmlint DOC "Path to rpmlint")
diff --git a/cspell.json b/cspell.json
index cfbab3df891ec3..f59d00a6a052f6 100644
--- a/cspell.json
+++ b/cspell.json
@@ -4,80 +4,407 @@
"dictionaryDefinitions": [],
"dictionaries": [],
"words": [
+ "aarch64",
+ "acdadcfa",
+ "acea",
+ "abmrd",
+ "acfb",
+ "acosh",
+ "Acosh",
+ "adfcd",
+ "addcmul",
+ "addif",
+ "addmm",
+ "aeaa",
+ "agem",
+ "agew",
+ "armeabi",
"armhf",
+ "artefacts",
+ "ARTEFACTS",
+ "Asinh",
+ "asynch",
+ "Atanh",
+ "autodoc",
+ "Autograd",
+ "autoplugin",
+ "AUTOPLUGIN",
"autoremove",
+ "autosummary",
+ "bace",
+ "Backprop",
"bblayers",
+ "Beautif",
+ "Bilat",
+ "bindir",
"bitbake",
+ "BFYX",
+ "BFXY",
+ "bkgr",
+ "brctl",
+ "Bucketize",
+ "BUILDDIR",
"buildtools",
+ "buildsystems",
+ "BYXF",
+ "bvalue",
+ "bvlc",
"caffe",
+ "caffemodel",
+ "camvid",
+ "cbba",
+ "cbcd",
+ "cdad",
+ "cdrom",
"chrpath",
+ "classov",
+ "cldnn",
+ "clumber",
+ "codepath",
+ "codepaths",
+ "coeffs",
+ "concat",
+ "Concat",
+ "Conts",
+ "constexpr",
+ "consts",
+ "Consts",
+ "conv",
+ "Convolutional",
+ "CPPLINT",
+ "cpplint",
+ "crbegin",
+ "crend",
+ "ctest",
+ "ctput",
+ "CVAT",
+ "cython",
+ "dadb",
+ "DANDROID",
+ "DARM",
+ "Datumaro",
+ "datumaro",
+ "DBUILD",
+ "DCMAKE",
+ "ddepth",
+ "Depthwise",
+ "dearmor",
+ "devicesupport",
+ "dequantization",
+ "Dequantization",
+ "deeplabv",
+ "deeced",
+ "DENABLE",
+ "delif",
+ "denormal",
+ "DENORMAL",
+ "denormalized",
+ "Detectron",
+ "Dequantize",
"devel",
"devtoolset",
"dgpu",
"diffstat",
+ "dldt",
+ "dlstreamer",
+ "dkms",
"Dockerfiles",
+ "DOPENVINO",
+ "downscript",
+ "doxid",
+ "doxygen",
+ "Doxygen",
+ "doxygensnippet",
+ "DTHREADING",
"dpkg",
+ "DPYTHON",
+ "DSELECTIVE",
+ "dylib",
"DWORD",
+ "efficientdet",
+ "Efficientdet",
+ "Einsum",
+ "Elems",
+ "Elementwise",
+ "elementwise",
+ "Eltwise",
"endsphinxdirective",
+ "enumov",
+ "emcmake",
+ "emmake",
+ "emod",
+ "emom",
+ "emow",
+ "Emscripten",
+ "emscripten",
+ "emsdk",
"epel",
"ERRORLEVEL",
+ "evolutionally",
"executionpolicy",
"fafe",
+ "fdupes",
+ "flatbuffers",
+ "FLATBUFFERS",
+ "frontends",
+ "Frontends",
+ "FYXB",
+ "gaddb",
+ "GAPI",
+ "gapi",
+ "Gaussed",
+ "gcompoundkernel",
+ "gcomputation",
+ "GCPU",
+ "gcpukernel",
+ "Gelu",
+ "GELU",
+ "Geti",
+ "getitem",
+ "gimg",
+ "gitee",
+ "gflags",
"globbing",
"gmmlib",
"GNAs",
+ "gmock",
+ "gnueabihf",
"googlenet",
"gpgcheck",
"gpgkey",
+ "graphviz",
+ "Graphviz",
+ "groupov",
+ "gtest",
+ "hardtanh",
"hashfile",
+ "HDDL",
"HKLM",
"HOSTTOOLS",
+ "Hotspots",
+ "hotspots",
+ "hostnet",
+ "hwloc",
+ "hwquote",
+ "idbf",
+ "IDFT",
"iigd",
+ "ifdef",
+ "ifdown",
+ "ifup",
+ "imgproc",
+ "imshow",
+ "inet",
+ "INTEGRITYCHECK",
+ "ILSVRC",
+ "inferenced",
+ "Informations",
"insmod",
"intelocl",
+ "INTERPROCEDURAL",
+ "INSTALLDIR",
+ "IRDFT",
+ "jemalloc",
"kaldi",
+ "Keras",
+ "keypress",
+ "keyrings",
"Khronos",
+ "KROIs",
+ "Landm",
+ "landm",
+ "Latency",
+ "Lcov",
"ldconfig",
+ "libc",
"libopencl",
+ "libopencv",
"libpython",
+ "libtbb",
+ "libtbbbind",
+ "libtpm",
+ "libvirtd",
"linmac",
+ "Liskov",
+ "lowlatency",
"LTSC",
+ "LSTM",
+ "makefiles",
+ "malloc",
+ "memleaks",
+ "manylinux",
"maxdepth",
+ "miktext",
+ "Mish",
"mklink",
+ "mmap",
+ "mobilenet",
+ "Mobilenet",
"monodepth",
+ "mozallowfullscreen",
+ "msallowfullscreen",
+ "MSVC",
+ "msvc",
+ "Multiclass",
+ "muxed",
"mxnet",
+ "namespaceov",
+ "NCHW",
+ "ncpu",
+ "netdev",
+ "netplan",
+ "ngraph",
+ "nireq",
+ "NNCF",
+ "nncf",
"nocache",
"noglob",
"nohup",
+ "nlohmann",
"norestart",
+ "noqueue",
+ "nproc",
+ "NUMA",
+ "numpy",
+ "Numpy",
+ "oallowfullscreen",
"ocloc",
+ "OCSP",
+ "oneapi",
+ "onetbb",
"onnx",
"opencl",
"openembedded",
"openvino",
+ "Opset",
+ "opset",
+ "opsets",
+ "OVMS",
+ "ovms",
+ "ovsa",
+ "OVSA",
+ "ovsatool",
+ "OVTF",
"PACKAGECONFIG",
+ "paddlepaddle",
+ "parameterizable",
+ "partitioner",
"patchelf",
+ "passpattern",
+ "Pexels",
+ "pdmodel",
+ "PDPD",
"pkgdata",
"pkgs",
+ "pkill",
+ "polylines",
+ "postproc",
+ "postprocess",
+ "preprocess",
+ "Preprocess",
+ "protobuf",
+ "Protobuf",
+ "PROTOBUF",
+ "prototxt",
+ "PSROI",
+ "Pugi",
+ "pugixml",
+ "PUGIXML",
"pypi",
+ "PYTHONPATH",
"pzstd",
+ "qcow",
+ "qlen",
+ "QSPECTRE",
+ "Qspectre",
"quantizer",
+ "Rects",
+ "Relu",
+ "relu",
+ "rcnn",
+ "RCNN",
+ "RDFT",
"Redistributable",
"remotesigned",
"repolist",
+ "reproject",
+ "reshapable",
+ "Requantize",
+ "retval",
+ "RHODS",
"rmmod",
+ "runtool",
+ "scons",
+ "SCONS",
+ "segm",
+ "Selu",
"servercore",
+ "setuptools",
"setupvars",
"SETX",
+ "SIMD",
+ "Softmax",
"skylake",
"sphinxdirective",
+ "Strided",
+ "squeezenet",
+ "SWTPM",
+ "swtpm",
+ "TBBBIND",
+ "TBBROOT",
+ "Tensro",
+ "texlive",
+ "textrm",
+ "tflite",
+ "thirdparty",
+ "Thresholded",
"toctree",
+ "toolset",
+ "Torchvision",
+ "tpmrm",
+ "tpmstate",
+ "tput",
+ "Tunables",
+ "unet",
"Uninstallation",
+ "unixio",
+ "unsharp",
+ "Unsharp",
+ "Unsh",
+ "Unsqueeze",
+ "Usecase",
+ "usecases",
+ "USERPROFILE",
"userspace",
+ "VAAPI",
+ "valgrind",
+ "vcpkg",
+ "vcvars",
"venv",
+ "virbr",
+ "virsh",
+ "virt",
+ "virtio",
+ "VMHWM",
+ "VMRSS",
+ "VNNI",
+ "vtune",
+ "vtunesummary",
+ "vtunebottonup",
+ "WHOLEARCHIVE",
"WDDM",
"WORKDIR",
+ "WORKSIZE",
+ "xbyak",
+ "Xbyak",
+ "xdot",
+ "xvfz",
"yocto",
+ "yolo",
+ "YOLO",
+ "yolov",
+ "Yolov",
+ "YXFB",
"zstd"
],
"ignoreWords": [],
diff --git a/docs/Documentation/model_introduction.md b/docs/Documentation/model_introduction.md
index ad38d118a01922..cadd407ba0b6a6 100644
--- a/docs/Documentation/model_introduction.md
+++ b/docs/Documentation/model_introduction.md
@@ -15,18 +15,37 @@
openvino_docs_OV_Converter_UG_prepare_model_convert_model_Converting_Model
-Every deep learning workflow begins with obtaining a model. You can choose to prepare a custom one, use a ready-made solution and adjust it to your needs, or even download and run a pre-trained network from an online database, such as `TensorFlow Hub `__, `Hugging Face `__, or `Torchvision models `__.
-
-OpenVINO™ :doc:`supports several model formats ` and can convert them into its own representation, `openvino.Model `__ (`ov.Model `__), providing a conversion API. Converted models can be used for inference with one or multiple OpenVINO Hardware plugins. There are two ways to use the conversion API: using a Python script or calling the ``ovc`` command line tool.
+Every deep learning workflow begins with obtaining a model. You can choose to prepare
+a custom one, use a ready-made solution and adjust it to your needs, or even download
+and run a pre-trained network from an online database, such as
+`TensorFlow Hub `__, `Hugging Face `__,
+or `Torchvision models `__.
+
+If your selected model is in one of the :doc:`OpenVINO™ supported model formats `,
+you can use it directly, without the need to save as the OpenVINO IR.
+(`openvino.Model `__ -
+`ov.Model `__).
+For this purpose, you can use ``openvino.Core.read_model`` and ``openvino.Core.compile_model``
+methods, so that conversion is performed automatically before inference, for
+maximum convenience (note that working with PyTorch differs slightly, the Python API
+being the only option, while TensorFlow may present additional considerations
+:doc:`TensorFlow Frontend Capabilities and Limitations `).
+
+
+For better performance and more optimization options, OpenVINO offers a conversion
+API with two possible approaches: the Python API functions (``openvino.convert_model``
+and ``openvino.save_model``) and the ``ovc`` command line tool, which are described in detail in this article.
.. note::
- Prior to OpenVINO 2023.1, model conversion API was exposed as the ``openvino.tools.mo.convert_model``
- function and the ``mo`` command line tool. Now, a new and simplified API is used: the
- ``openvino.convert_model`` function and the ``ovc`` command line tool.
+ Model conversion API prior to OpenVINO 2023.1 is considered deprecated.
+ Both existing and new projects are recommended to transition to the new
+ solutions, keeping in mind that they are not fully backwards compatible
+ with ``openvino.tools.mo.convert_model`` or the ``mo`` CLI tool.
+ For more details, see the :doc:`Model Conversion API Transition Guide `.
+
+
- All new projects are recommended to use the new tools, keeping in mind that they are not fully
- backwards compatible. For more details, consult the :doc:`Model Conversion API Transition Guide `.
Convert a Model in Python: ``convert_model``
##############################################
@@ -202,19 +221,15 @@ The figure below illustrates the typical workflow for deploying a trained deep-l
Convert a Model in CLI: ``ovc``
###############################
-Another option for model conversion is to use ``ovc`` command-line tool, which stands for OpenVINO Model Converter. The tool combines both ``openvino.convert_model`` and ``openvino.save_model`` functionalities. It is convenient to use when the original model is ready for inference and is in one of the supported file formats: ONNX, TensorFlow, TensorFlow Lite, or PaddlePaddle. As a result, ``ovc`` produces an OpenVINO IR, consisting of ``.xml`` and ``.bin`` files, which needs to be read with the ``ov.read_model()`` method. You can compile and infer the ``ov.Model`` later with :doc:`OpenVINO™ Runtime `
+Another option for model conversion is to use ``ovc`` command-line tool, which stands for OpenVINO Model Converter. The tool combines both ``openvino.convert_model`` and ``openvino.save_model`` functionalities. It is convenient to use when the original model is ready for inference and is in one of the supported file formats: ONNX, TensorFlow, TensorFlow Lite, or PaddlePaddle. As a result, ``ovc`` produces an OpenVINO IR, consisting of ``.xml`` and ``.bin`` files, which needs to be read with the ``openvino.Core.read_model`` method. You can compile and infer the ``ov.Model`` later with :doc:`OpenVINO™ Runtime `
.. note::
PyTorch models cannot be converted with ``ovc``, use ``openvino.convert_model`` instead.
The results of both ``ovc`` and ``openvino.convert_model``/``openvino.save_model`` conversion methods are the same. You can choose either of them based on your convenience. Note that there should not be any differences in the results of model conversion if the same set of parameters is used and the model is saved into OpenVINO IR.
-Cases when Model Preparation is not Required
-############################################
-If a model is represented as a single file from ONNX, PaddlePaddle, TensorFlow and TensorFlow Lite (check :doc:`TensorFlow Frontend Capabilities and Limitations `), it does not require a separate conversion and IR-saving step, that is ``openvino.convert_model`` and ``openvino.save_model``, or ``ovc``.
-OpenVINO provides C++ and Python APIs for reading such models by just calling the ``openvino.Core.read_model`` or ``openvino.Core.compile_model`` methods. These methods perform conversion of the model from the original representation. While this conversion may take extra time compared to using prepared OpenVINO IR, it is convenient when you need to read a model in the original format in C++, since ``openvino.convert_model`` is only available in Python. However, for efficient model deployment with the OpenVINO Runtime, it is still recommended to prepare OpenVINO IR and then use it in your inference application.
Additional Resources
####################
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
index 055e94049a78ed..0cafd3066535ab 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
@@ -58,7 +58,7 @@ parameter to be set, for example:
Sometimes ``convert_model`` will produce inputs of the model with dynamic rank or dynamic type.
Such model may not be supported by the hardware chosen for inference. To avoid this issue,
-use the ``input`` argument of ``convert_model``. For more information, refer to `Convert Models Represented as Python Objects `.
+use the ``input`` argument of ``convert_model``. For more information, refer to :doc:`Convert Models Represented as Python Objects `.
.. important::
diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md
index dd3f821229bf99..8b4c549547ebbd 100644
--- a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md
+++ b/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md
@@ -89,7 +89,7 @@ Some PaddlePaddle models may require setting ``example_input`` or ``output`` for
* Example of converting ``paddle.fluid.dygraph.layers.Layer`` format model:
- ``example_input`` is required while ``output`` is optional, which accept the following formats:
+ ``example_input`` is required while ``output`` is optional. ``example_input`` accepts the following formats:
``list`` with tensor (``paddle.Tensor``) or InputSpec (``paddle.static.input.InputSpec``)
diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md b/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
index 83005b7e978e8c..6fcd6d7c03aaa8 100644
--- a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
+++ b/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
@@ -40,8 +40,8 @@ The value for the ``example_input`` parameter can be easily derived from knowing
import torch
import openvino as ov
- model = torchvision.models.resnet50(pretrained=True)
- ov_model = ov.convert_model(model, example_input=example_input=torch.rand(1, 3, 224, 224))
+ model = torchvision.models.resnet50(weights='DEFAULT')
+ ov_model = ov.convert_model(model, example_input=torch.rand(1, 3, 224, 224))
In practice, the code to evaluate or test the PyTorch model is usually provided with the model itself and can be used to generate a proper ``example_input`` value. A modified example of using ``resnet50`` model from ``torchvision`` is presented below. It demonstrates how to switch inference in the existing PyTorch application to OpenVINO and how to get value for ``example_input``:
diff --git a/docs/dev/build_windows.md b/docs/dev/build_windows.md
index e63d4830904086..e598fdd33f04e7 100644
--- a/docs/dev/build_windows.md
+++ b/docs/dev/build_windows.md
@@ -25,29 +25,17 @@ Supported configurations:
```sh
git clone https://github.com/openvinotoolkit/openvino.git
cd openvino
- git submodule update --init --recursive
- ```
- (Extra for WoA) To build on Windows on ARM with ARM plugin:
- ```sh
- git clone https://github.com/openvinotoolkit/openvino_contrib.git
- cd openvino_contrib
- git submodule update --init --recursive
+ git submodule update --init
```
2. Create build directory:
```sh
mkdir build && cd build
```
-3. In the `build` directory, run `cmake` to fetch project dependencies and generate a Visual Studio solution.
+3. In the `build` directory, run `cmake` to fetch project dependencies and generate a Visual Studio solution:
- On Windows x86 64-bits:
- ```sh
- cmake -G "Visual Studio 16 2019" -DCMAKE_BUILD_TYPE=Release
- ```
-
- On Windows on ARM for ARM64 architecture:
```sh
- cmake -G "Visual Studio 16 2019" -DOPENVINO_EXTRA_MODULES=/modules/arm_plugin -DCMAKE_BUILD_TYPE=Release
+ cmake -G "Visual Studio 17 2022"
```
> **HINT**: **Generating PDB Files and Debugging Your Build**
@@ -62,16 +50,8 @@ Supported configurations:
### Additional Build Options
-- Internal JIT GEMM implementation is used by default.
-
-- Threading Building Blocks (TBB) is used by default. To build Inference Engine with OpenMP threading, set the `-DTHREADING=OMP` option.
-
-- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you want to use the automatically-downloaded packages but you have already installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command; otherwise they won'tnbe downloaded and the build may fail if incompatible versions were installed.
-
-- If the CMake-based build script can not find and download the OpenCV package that is supported on your platform, or if you want to use a custom build of the OpenCV library, refer to the [Use Custom OpenCV Builds](./cmake_options_for_custom_compilation.md#Building-with-custom-OpenCV) section for details.
-
- To build the OpenVINO Runtime Python API:
- 1. First, install all additional packages (e.g., cython and opencv) listed in the file:
+ 1. First, install all additional packages (e.g., cython) listed in the file:
```sh
pip install -r \src\bindings\python\src\compatibility\openvino\requirements-dev.txt
```
@@ -95,15 +75,12 @@ Supported configurations:
pip install build/wheel/openvino-2023.0.0-9612-cp11-cp11-win_arm64.whl
```
-- OpenVINO runtime compilation options:
- `-DENABLE_OV_ONNX_FRONTEND=ON` enables the building of the ONNX importer.
-
### Building OpenVINO with Ninja* Build System
```sh
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\VC\Auxiliary\Build\vcvars64.bat"
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
-cmake --build . --config Release
+ninja
```
## See also
diff --git a/docs/dev/cmake_options_for_custom_compilation.md b/docs/dev/cmake_options_for_custom_compilation.md
index 1b4f3b7eb5752b..847997e067d5c3 100644
--- a/docs/dev/cmake_options_for_custom_compilation.md
+++ b/docs/dev/cmake_options_for_custom_compilation.md
@@ -114,6 +114,9 @@ This document provides description and default values for CMake options that can
* `OFF` is default, because it increases binary size.
* `SELECTIVE_BUILD` enables [[Conditional compilation|ConditionalCompilation]] feature.
* `OFF` is default.
+* `ENABLE_MLAS_FOR_CPU` enables MLAS library for CPU plugin
+ * `ON` is default for x86_64 and AARCH64 platforms
+ * Affects only OpenVINO CPU plugin
## Building with OpenCV
diff --git a/docs/dev/static_libaries.md b/docs/dev/static_libaries.md
index e001b6f24c5fc1..e538a37555f558 100644
--- a/docs/dev/static_libaries.md
+++ b/docs/dev/static_libaries.md
@@ -135,7 +135,7 @@ cmake -DCMAKE_TOOLCHAIN_FILE=/cmake/toolchains/mt.runtime.w
* The enabled and tested capabilities of OpenVINO Runtime in a static build:
* OpenVINO common runtime - work with `ov::Model`, perform model loading on particular device
- * CPU and GNA inference plugins (**GPU and MYRIAD are not enabled**)
+ * CPU and GNA inference plugins (**GPU is not enabled**)
* MULTI, HETERO, AUTO, and BATCH inference modes
* IR, ONNX, PDPD, and TF frontends to read `ov::Model`
* Static build support for building static libraries only for OpenVINO Runtime libraries. All other third-party prebuilt dependencies remain in the same format:
diff --git a/docs/documentation.md b/docs/documentation.md
index a25e784165b78f..276e4e6e0930a6 100644
--- a/docs/documentation.md
+++ b/docs/documentation.md
@@ -12,9 +12,9 @@
:hidden:
API Reference
- OpenVINO IR format and Operation Sets
+ OpenVINO IR format and Operation Sets
+ Legacy Features
Tool Ecosystem
- Legacy Features
OpenVINO Extensibility
Media Processing and CV Libraries
OpenVINO™ Security
diff --git a/docs/home.rst b/docs/home.rst
index d8f359e65aaa5a..4ed32d3aea261b 100644
--- a/docs/home.rst
+++ b/docs/home.rst
@@ -24,10 +24,10 @@ OpenVINO 2023.0
An open-source toolkit for optimizing and deploying deep learning models. Boost your AI deep-learning inference performance!
-
Use PyTorch models directly, without converting them first.
+
Use PyTorch models directly, without converting them first. Learn more...
-
OpenVINO via PyTorch 2.0 torch.compile() Use OpenVINO directly in PyTorch-native applications!
+
OpenVINO via PyTorch 2.0 torch.compile() Use OpenVINO directly in PyTorch-native applications! Learn more...
Do you like Generative AI? You will love how it performs with OpenVINO!
diff --git a/docs/install_guides/installing-openvino-linux-header.md b/docs/install_guides/installing-openvino-linux-header.md
index f0bb87d87f0ade..a45b11d20e2f5e 100644
--- a/docs/install_guides/installing-openvino-linux-header.md
+++ b/docs/install_guides/installing-openvino-linux-header.md
@@ -22,14 +22,15 @@
Use Docker
-If you want to install OpenVINO™ Runtime on your Linux machine, these are your options:
+If you want to install OpenVINO™ Runtime on Linux, you have the following options:
-* :doc:`Install OpenVINO Runtime using an Archive File `
+* :doc:`Install OpenVINO using an Archive File `
* :doc:`Install OpenVINO using PyPI `
-* :doc:`Install OpenVINO Runtime using APT `
-* :doc:`Install OpenVINO Runtime using YUM `
-* :doc:`Install OpenVINO Runtime using Conda Forge `
-* :doc:`Install OpenVINO Runtime using Homebrew `
+* :doc:`Install OpenVINO using APT `
+* :doc:`Install OpenVINO using YUM `
+* :doc:`Install OpenVINO using Conda Forge `
+* :doc:`Install OpenVINO using vcpkg `
+* :doc:`Install OpenVINO using Homebrew `
* :doc:`Install OpenVINO using Docker `
diff --git a/docs/install_guides/installing-openvino-macos-header.md b/docs/install_guides/installing-openvino-macos-header.md
index dff827ce9a89e8..2e0d70b61d04be 100644
--- a/docs/install_guides/installing-openvino-macos-header.md
+++ b/docs/install_guides/installing-openvino-macos-header.md
@@ -12,19 +12,21 @@
:maxdepth: 3
:hidden:
- From Archive
- Using Homebrew
- From PyPI
- Using Conda Forge
+ Use Archive
+ Use Homebrew
+ Use PyPI
+ Use Conda Forge
Use vcpkg
-If you want to install OpenVINO™ Runtime on macOS, there are a few ways to accomplish this. We prepared following options for you:
+If you want to install OpenVINO™ Runtime on macOS, you have the following options:
-* :doc:`Install OpenVINO Runtime from an Archive File `
-* :doc:`Install OpenVINO from PyPI `
-* :doc:`Install OpenVINO Runtime using Conda Forge `
-* :doc:`Install OpenVINO Runtime via Homebrew `
+
+* :doc:`Install OpenVINO using an Archive File `
+* :doc:`Install OpenVINO using PyPI `
+* :doc:`Install OpenVINO using Conda Forge `
+* :doc:`Install OpenVINO using Homebrew `
+* :doc:`Install OpenVINO using vcpkg `
diff --git a/docs/install_guides/installing-openvino-windows-header.md b/docs/install_guides/installing-openvino-windows-header.md
index 3044c2accef729..65b1803ec711ff 100644
--- a/docs/install_guides/installing-openvino-windows-header.md
+++ b/docs/install_guides/installing-openvino-windows-header.md
@@ -22,9 +22,10 @@
If you want to install OpenVINO™ Runtime on Windows, you have the following options:
-* :doc:`Install OpenVINO Runtime from an Archive File `
-* :doc:`Install OpenVINO Runtime using PyPI `
-* :doc:`Install OpenVINO Runtime using Conda Forge `
+* :doc:`Install OpenVINO using an Archive File `
+* :doc:`Install OpenVINO using PyPI `
+* :doc:`Install OpenVINO using Conda Forge `
+* :doc:`Install OpenVINO using vcpkg `
* :doc:`Install OpenVINO using Docker `
diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt
index 737dcc3d272a1e..e491424cb27afb 100644
--- a/src/bindings/c/src/CMakeLists.txt
+++ b/src/bindings/c/src/CMakeLists.txt
@@ -8,11 +8,12 @@ set(TARGET_NAME openvino_c)
ov_deprecated_no_errors()
add_definitions(-DIN_OV_COMPONENT)
-file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
-file(GLOB_RECURSE HEADERS ${OpenVINO_C_API_SOURCE_DIR}/include/*.h)
+file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
+file(GLOB_RECURSE LEGACY_HEADERS ${OpenVINO_C_API_SOURCE_DIR}/include/c_api/*.h)
+file(GLOB_RECURSE HEADERS ${OpenVINO_C_API_SOURCE_DIR}/include/openvino/*.h)
# create library
-add_library(${TARGET_NAME} ${HEADERS} ${SOURCES})
+add_library(${TARGET_NAME} ${LEGACY_HEADERS} ${HEADERS} ${SOURCES})
add_library(openvino::runtime::c ALIAS ${TARGET_NAME})
target_link_libraries(${TARGET_NAME} PRIVATE openvino openvino::util)
@@ -24,7 +25,7 @@ if(NOT BUILD_SHARED_LIBS)
target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY)
endif()
-ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
+ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${HEADERS} ${SOURCES})
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
diff --git a/src/bindings/c/src/common.h b/src/bindings/c/src/common.h
index 92e024a7123d65..ff2e0bcac07312 100644
--- a/src/bindings/c/src/common.h
+++ b/src/bindings/c/src/common.h
@@ -17,7 +17,7 @@
#define CATCH_IE_EXCEPTION(StatusCode, ExceptionType) \
catch (const InferenceEngine::ExceptionType&) { \
return ov_status_e::StatusCode; \
- } \
+ }
#define CATCH_OV_EXCEPTION(StatusCode, ExceptionType) \
catch (const ov::ExceptionType&) { \
@@ -42,7 +42,7 @@
CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) \
catch (...) { \
return ov_status_e::UNKNOW_EXCEPTION; \
- } \
+ }
#define GET_PROPERTY_FROM_ARGS_LIST \
std::string property_key = va_arg(args_ptr, char*); \
diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt
index 4c4c9bce4d7804..b46eaaf9883e16 100644
--- a/src/bindings/python/CMakeLists.txt
+++ b/src/bindings/python/CMakeLists.txt
@@ -20,8 +20,7 @@ endif()
# Check python requirements
#
-set(ov_python_req "${OpenVINOPython_SOURCE_DIR}/requirements.txt")
-set(ie_python_req "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/requirements-dev.txt")
+set(ie_build_python_req "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/requirements-dev.txt")
function(ov_check_python_build_conditions)
# user explicitly specified ENABLE_PYTHON=ON
@@ -34,7 +33,7 @@ function(ov_check_python_build_conditions)
endif()
# Try to find python3 and its libs
- find_host_package(PythonInterp 3 ${find_package_mode})
+ find_package(PythonInterp 3 ${find_package_mode})
if(PYTHONINTERP_FOUND)
if(PYTHON_VERSION_MINOR GREATER_EQUAL 11)
set(pybind11_min_version 2.9.2)
@@ -57,14 +56,14 @@ function(ov_check_python_build_conditions)
if(EXISTS ${pybind11_tools_dir})
list(APPEND CMAKE_MODULE_PATH ${pybind11_tools_dir})
else()
- find_host_package(pybind11 ${pybind11_min_version} QUIET)
+ find_package(pybind11 ${pybind11_min_version} QUIET)
if(pybind11_FOUND)
list(APPEND CMAKE_MODULE_PATH "${pybind11_DIR}")
endif()
endif()
# use libraries with the same version as python itself
set(PYBIND11_PYTHON_VERSION ${PYTHON_VERSION_STRING})
- find_host_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} EXACT ${find_package_mode})
+ find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} EXACT ${find_package_mode})
set(PYTHONLIBSNEW_FOUND ${PYTHONLIBS_FOUND} PARENT_SCOPE)
endfunction()
# try to find python libraries
@@ -72,7 +71,7 @@ function(ov_check_python_build_conditions)
if(PYTHONLIBSNEW_FOUND)
# clear Python_ADDITIONAL_VERSIONS to find only python library matching PYTHON_EXECUTABLE
unset(Python_ADDITIONAL_VERSIONS CACHE)
- find_host_package(PythonLibs ${PYTHON_VERSION_STRING} EXACT ${find_package_mode})
+ find_package(PythonLibs ${PYTHON_VERSION_STRING} EXACT ${find_package_mode})
endif()
if(NOT PYTHONLIBS_FOUND)
message(${message_mode} "Python development libraries are not found. OpenVINO Python API will be turned off (ENABLE_PYTHON is OFF)")
@@ -81,31 +80,23 @@ function(ov_check_python_build_conditions)
message(${message_mode} "Python 3.x interpreter is not found. OpenVINO Python API will be turned off (ENABLE_PYTHON is OFF)")
endif()
- # check pyopenvino requirements to OV 2.0 API
- ov_check_pip_packages(REQUIREMENTS_FILE ${ov_python_req}
- RESULT_VAR ov_python_req_FOUND
- WARNING_MESSAGE "install python3 -m pip install -r ${ov_python_req} for OV API 2.0 requirements"
- MESSAGE_MODE TRACE)
- # ov_python_req are not mandatory for build
- set(ov_python_req_FOUND ON)
-
# check for Cython requirement for build IE API 1.0
- ov_check_pip_packages(REQUIREMENTS_FILE ${ie_python_req}
- RESULT_VAR ie_python_req_FOUND
- WARNING_MESSAGE "install python3 -m pip install -r ${ie_python_req} for IE API 1.0 requirements"
+ ov_check_pip_packages(REQUIREMENTS_FILE ${ie_build_python_req}
+ RESULT_VAR ie_build_python_req_FOUND
+ WARNING_MESSAGE "install python3 -m pip install -r ${ie_build_python_req} for IE API 1.0 requirements"
MESSAGE_MODE TRACE)
# cython can be installed as a debian package, so pip requirements can be unsatisfied
# so, let's check to find cython anyway
- if(NOT ie_python_req_FOUND)
+ if(NOT ie_build_python_req_FOUND)
find_package(Cython QUIET
PATHS "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/cmake"
NO_CMAKE_FIND_ROOT_PATH
NO_DEFAULT_PATH)
if(CYTHON_VERSION VERSION_GREATER_EQUAL 0.29)
- set(ie_python_req_FOUND ON)
+ set(ie_build_python_req_FOUND ON)
else()
- message(${message_mode} "Python requirements '${ie_python_req}' are missed, IE Python API 1.0 will not be built (ENABLE_PYTHON is OFF)")
+ message(${message_mode} "Python requirements '${ie_build_python_req}' are missed, IE Python API 1.0 will not be built (ENABLE_PYTHON is OFF)")
endif()
endif()
@@ -116,20 +107,51 @@ function(ov_check_python_build_conditions)
set(python_debug OFF)
endif()
- if(PYTHONLIBS_FOUND AND ov_python_req_FOUND AND ie_python_req_FOUND AND NOT python_debug)
+ if(PYTHONLIBS_FOUND AND ie_build_python_req_FOUND AND NOT python_debug)
set(ENABLE_PYTHON_DEFAULT ON PARENT_SCOPE)
else()
set(ENABLE_PYTHON_DEFAULT OFF PARENT_SCOPE)
endif()
# to disable API 1.0
- set(ie_python_req_FOUND ${ie_python_req_FOUND} PARENT_SCOPE)
+ set(ie_build_python_req_FOUND ${ie_build_python_req_FOUND} PARENT_SCOPE)
# set pybind11 minimal version
set(pybind11_min_version ${pybind11_min_version} PARENT_SCOPE)
endfunction()
ov_check_python_build_conditions()
+# check __init__.py files alignment
+
+function(ov_check_init_files_alignment)
+ # check the files in pairs
+ list(APPEND init_files
+ "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py"
+ "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py"
+ "${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py"
+ "${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py"
+ "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py"
+ "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py"
+ "${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py")
+
+ list(LENGTH init_files init_files_count)
+ math(EXPR file_loop_range "${init_files_count}-2")
+ foreach(init_file_idx RANGE 0 ${file_loop_range})
+ math(EXPR init_file_idx_next "${init_file_idx}+1")
+ list(GET init_files ${init_file_idx} file1)
+ list(GET init_files ${init_file_idx_next} file2)
+
+ execute_process(COMMAND ${CMAKE_COMMAND} -E compare_files ${file1} ${file2}
+ RESULT_VARIABLE compare_result
+ )
+ if(compare_result EQUAL 1)
+ message(FATAL_ERROR "The __init__.py files are misaligned: ${file1} and ${file2}")
+ endif()
+ endforeach()
+endfunction()
+
+ov_check_init_files_alignment()
+
ie_option(ENABLE_PYTHON "Enables OpenVINO Python API build" ${ENABLE_PYTHON_DEFAULT})
#
@@ -178,7 +200,7 @@ ie_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_
if(NOT ENABLE_PYTHON)
if(CMAKE_SOURCE_DIR STREQUAL OpenVINOPython_SOURCE_DIR)
- message(FATAL_ERROR "Python OpenVINO API requirements are not satisfied. Please, install ${ie_python_req} and ${ov_python_req}")
+ message(FATAL_ERROR "Python OpenVINO API requirements are not satisfied. Please, install ${ie_build_python_req}")
else()
return()
endif()
@@ -201,7 +223,7 @@ endif()
add_subdirectory(src/compatibility/pyngraph)
add_subdirectory(src/pyopenvino)
-if(ie_python_req_FOUND)
+if(ie_build_python_req_FOUND)
add_subdirectory(src/compatibility/openvino)
else()
message(WARNING "NOTE: Python API for OpenVINO 1.0 is disabled")
diff --git a/src/bindings/python/constraints.txt b/src/bindings/python/constraints.txt
index 30be494ea327bc..1762fa681551d3 100644
--- a/src/bindings/python/constraints.txt
+++ b/src/bindings/python/constraints.txt
@@ -1,5 +1,5 @@
# used in multiple components
-numpy>=1.16.6,<1.26 # Python bindings, frontends
+numpy>=1.16.6,<1.27 # Python bindings, frontends
# pytest
pytest>=5.0,<7.5
diff --git a/src/bindings/python/src/compatibility/openvino/__init__.py b/src/bindings/python/src/compatibility/openvino/__init__.py
index 8f0113d5bcaf6c..90552e0befed68 100644
--- a/src/bindings/python/src/compatibility/openvino/__init__.py
+++ b/src/bindings/python/src/compatibility/openvino/__init__.py
@@ -57,6 +57,6 @@
# Tools
try:
# Model Conversion API - ovc should reside in the main namespace
- from openvino.tools.ovc import convert_model, InputCutInfo
+ from openvino.tools.ovc import convert_model
except ImportError:
pass
diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py b/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py
index ac7bb04958a3e4..29dc6b1ad58973 100644
--- a/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py
+++ b/src/bindings/python/src/openvino/frontend/tensorflow/graph_iterator.py
@@ -40,6 +40,10 @@ def get_input_names(self) -> list:
return []
inp_ops = filter(lambda op: op.type == "Placeholder", self.m_graph.get_operations())
inp_names = []
+ if hasattr(self.m_graph, 'inputs') and self.m_graph.inputs:
+ for inp in self.m_graph.inputs:
+ inp_names.append(inp.op.name)
+ return inp_names
for inp in inp_ops:
assert isinstance(inp, tf.Operation), "Unknown node type. Expected tf.Operation, got {}".format(type(inp))
assert hasattr(inp, "node_def") and isinstance(inp.node_def, tf.compat.v1.NodeDef), \
@@ -58,11 +62,13 @@ def get_output_names(self) -> list:
# Note: used only for the library functions
if not self.m_inner_graph:
return []
- # tf.Graph has ordered outputs which are stored in 'outputs' field,
- # but using this field results in mismatch of outputs in inner graph and outputs in outer graph
- # during the injection of subgraph.
- # For this reason only nodes without outputs are considered graph outputs here
- # as this approach does not lead to conflicts.
+
+ if hasattr(self.m_graph, 'outputs') and self.m_graph.outputs:
+ outputs = []
+ for out in self.m_graph.outputs:
+ outputs.append(out.name)
+ return outputs
+ # If graph has no 'outputs' field, find nodes without outputs and consider them graph outputs.
# The order of outputs is important and wrong order may lead to conversion error.
non_outputs = set()
for op in self.m_graph.get_operations():
diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py
index 0ecffc4040abe6..4fab05c17de380 100644
--- a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py
+++ b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py
@@ -54,6 +54,7 @@ def __init__(self, operation: tf.Operation, share_weights: bool, inner_graph: bo
self.m_operation = operation
self.m_inner_graph = inner_graph
self.m_data_type = None
+ self.m_parsed_content = None
# Copies value from inner buffer of TF_Operation to NodeDef class.
self.m_node_def = self.m_operation.node_def
@@ -87,11 +88,11 @@ def __init__(self, operation: tf.Operation, share_weights: bool, inner_graph: bo
if self.m_operation.type == "Placeholder":
self.m_data_type = tf.dtypes.DType(self.m_node_def.attr["dtype"].type).name
- if self.m_data_type == "resource" and not self.m_inner_graph:
+ if not self.m_inner_graph:
variable_value = TFGraphNodeDecoder.get_variable(self.m_operation)
if variable_value is not None:
# does not copy data
- self.m_parsed_content = variable_value.value().__array__()
+ self.m_parsed_content = variable_value.__array__()
if isinstance(self.m_parsed_content, bytes):
self.m_data_type = "string"
@@ -103,7 +104,7 @@ def get_op_name(self) -> str:
def get_op_type(self) -> str:
if self.m_operation.type == "Placeholder":
type_attr = tf.dtypes.DType(self.m_node_def.attr["dtype"].type)
- if type_attr.name == "resource" and not self.m_inner_graph:
+ if not self.m_inner_graph and self.m_parsed_content is not None:
if TFGraphNodeDecoder.get_variable(self.m_operation) is not None:
return "Const"
raise Exception("Could not get variable for resource Placeholder {0}".format(self.m_operation.name))
@@ -116,10 +117,11 @@ def get_variable(operation):
return None
for var_tensor, op_tensor in tf_graph.captures:
if operation.outputs[0].name == op_tensor.name:
- resource_name = var_tensor._name
+ if var_tensor.dtype.name != 'resource':
+ return var_tensor
for variable_value in operation.graph.variables:
- if variable_value.name == resource_name:
- return variable_value
+ if id(variable_value.handle) == id(var_tensor):
+ return variable_value.value()
return None
return None
diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py
index 056e0904b4c22f..f4ac5b0a121565 100644
--- a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py
+++ b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py
@@ -339,7 +339,7 @@ def create_tf_graph_iterator(input_model, placeholder_shapes, placeholder_data_t
if hasattr(input_model, 'outputs') and hasattr(input_model, 'structured_outputs') and \
isinstance(input_model.structured_outputs, dict):
external_names = sorted(list(input_model.structured_outputs.keys()))
- internal_names = sorted([tensor.name for tensor in input_model.outputs])
+ internal_names = [tensor.name for tensor in input_model.outputs]
if len(external_names) == len(internal_names):
for external_name, internal_name in zip(external_names, internal_names):
output_names_map = output_names_map or {}
diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp
index 13de82c7acd3aa..d1eb84eb014d46 100644
--- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp
+++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp
@@ -19,7 +19,17 @@ void regclass_graph_op_If(py::module m) {
py::class_, ov::Node> cls(m, "if_op");
cls.doc() = "openvino.impl.op.If wraps ov::op::v0::If";
cls.def(py::init<>());
- cls.def(py::init&>(), py::arg("execution_condition"));
+ cls.def(py::init&>(),
+ py::arg("execution_condition"),
+ R"(
+ Constructs If with condition.
+
+ :param execution_condition: condition node.
+ :type execution_condition: openvino.runtime.Output
+
+ :rtype: openvino.impl.op.If
+ )");
+
cls.def(py::init([](const std::shared_ptr& execution_condition) {
if (MultiSubgraphHelpers::is_constant_or_parameter(execution_condition)) {
return std::make_shared(execution_condition->output(0));
@@ -29,18 +39,114 @@ void regclass_graph_op_If(py::module m) {
return std::make_shared();
}
}),
- py::arg("execution_condition"));
- cls.def("get_else_body", &ov::op::v8::If::get_else_body);
- cls.def("set_then_body", &ov::op::v8::If::set_then_body, py::arg("body"));
- cls.def("set_else_body", &ov::op::v8::If::set_else_body, py::arg("body"));
+ py::arg("execution_condition"),
+ R"(
+ Constructs If with condition.
+
+ :param execution_condition: condition node.
+ :type execution_condition: openvino.runtime.Node
+
+ :rtype: openvino.impl.op.If
+ )");
+
+ cls.def("get_else_body",
+ &ov::op::v8::If::get_else_body,
+ R"(
+ Gets else_body as Model object.
+
+ :return: else_body as Model object.
+ :rtype: openvino.Model
+ )");
+
+ cls.def("set_then_body",
+ &ov::op::v8::If::set_then_body,
+ py::arg("body"),
+ R"(
+ Sets new Model object as new then_body.
+
+ :param body: new body for 'then' branch.
+ :type body: openvino.Model
+
+ :rtype: None
+ )");
+
+ cls.def("set_else_body",
+ &ov::op::v8::If::set_else_body,
+ py::arg("body"),
+ R"(
+ Sets new Model object as new else_body.
+
+ :param body: new body for 'else' branch.
+ :type body: openvino.Model
+
+ :rtype: None
+ )");
+
cls.def("set_input",
&ov::op::v8::If::set_input,
py::arg("value"),
py::arg("then_parameter"),
- py::arg("else_parameter"));
- cls.def("set_output", &ov::op::v8::If::set_output, py::arg("then_result"), py::arg("else_result"));
- cls.def("get_function", &ov::op::util::MultiSubGraphOp::get_function, py::arg("index"));
- cls.def("set_function", &ov::op::util::MultiSubGraphOp::set_function, py::arg("index"), py::arg("func"));
+ py::arg("else_parameter"),
+ R"(
+ Sets new input to the operation associated with parameters of each sub-graph.
+
+ :param value: input to operation.
+ :type value: openvino.runtime.Output
+
+ :param then_parameter: parameter for then_body or nullptr.
+ :type then_parameter: openvino.runtime.Node
+
+ :param else_parameter: parameter for else_body or nullptr.
+ :type else_parameter: openvino.runtime.Node
+
+ :rtype: None
+ )");
+
+ cls.def("set_output",
+ &ov::op::v8::If::set_output,
+ py::arg("then_result"),
+ py::arg("else_result"),
+ R"(
+ Sets new output from the operation associated with results of each sub-graph.
+
+ :param then_result: result from then_body.
+ :type then_result: openvino.runtime.Node
+
+ :param else_result: result from else_body.
+ :type else_result: openvino.runtime.Node
+
+ :return: output from operation.
+ :rtype: openvino.runtime.Output
+ )");
+
+ cls.def("get_function",
+ &ov::op::util::MultiSubGraphOp::get_function,
+ py::arg("index"),
+ R"(
+ Gets internal sub-graph by index in MultiSubGraphOp.
+
+ :param index: sub-graph's index in op.
+ :type index: int
+
+ :return: Model with sub-graph.
+ :rtype: openvino.Model
+ )");
+
+ cls.def("set_function",
+ &ov::op::util::MultiSubGraphOp::set_function,
+ py::arg("index"),
+ py::arg("func"),
+ R"(
+ Adds sub-graph to MultiSubGraphOp.
+
+ :param index: index of new sub-graph.
+ :type index: int
+
+ :param func: new sub-graph as a Model.
+ :type func: openvino.Model
+
+ :rtype: None
+ )");
cls.def(
"set_input_descriptions",
@@ -48,7 +154,20 @@ void regclass_graph_op_If(py::module m) {
self->set_input_descriptions(index, MultiSubgraphHelpers::list_to_input_descriptor(inputs));
},
py::arg("index"),
- py::arg("inputs"));
+ py::arg("inputs"),
+ R"(
+ Sets list with connections between operation inputs and internal sub-graph parameters.
+
+ :param index: index of internal sub-graph.
+ :type index: int
+
+ :param inputs: list of input descriptions.
+ :type inputs: list[Union[openvino.runtime.op.util.MergedInputDescription,
+ openvino.runtime.op.util.InvariantInputDescription,
+ openvino.runtime.op.util.SliceInputDescription]]
+
+ :rtype: None
+ )");
cls.def(
"set_output_descriptions",
@@ -56,7 +175,19 @@ void regclass_graph_op_If(py::module m) {
self->set_output_descriptions(index, MultiSubgraphHelpers::list_to_output_descriptor(outputs));
},
py::arg("index"),
- py::arg("outputs"));
+ py::arg("outputs"),
+ R"(
+ Sets list with connections between operation outputs and internal sub-graph results.
+
+ :param index: index of internal sub-graph.
+ :type index: int
+
+ :param outputs: list of output descriptions.
+ :type outputs: list[Union[openvino.runtime.op.util.BodyOutputDescription,
+ openvino.runtime.op.util.ConcatOutputDescription]]
+
+ :rtype: None
+ )");
cls.def(
"get_output_descriptions",
@@ -69,7 +200,17 @@ void regclass_graph_op_If(py::module m) {
return result;
},
- py::arg("index"));
+ py::arg("index"),
+ R"(
+ Gets list with connections between operation outputs and internal sub-graph results.
+
+ :param index: index of internal sub-graph.
+ :type index: int
+
+ :return: list of output descriptions.
+ :rtype: list[Union[openvino.runtime.op.util.BodyOutputDescription,
+ openvino.runtime.op.util.ConcatOutputDescription]]
+ )");
cls.def(
"get_input_descriptions",
@@ -82,7 +223,18 @@ void regclass_graph_op_If(py::module m) {
return result;
},
- py::arg("index"));
+ py::arg("index"),
+ R"(
+ Gets list with connections between operation inputs and internal sub-graph parameters.
+
+ :param index: index of internal sub-graph.
+ :type index: int
+
+ :return: list of input descriptions.
+ :rtype: list[Union[openvino.runtime.op.util.MergedInputDescription,
+ openvino.runtime.op.util.InvariantInputDescription,
+ openvino.runtime.op.util.SliceInputDescription]]
+ )");
cls.def("__repr__", [](const ov::op::v8::If& self) {
std::stringstream shapes_ss;
diff --git a/src/common/conditional_compilation/docs/develop_cc_for_new_component.md b/src/common/conditional_compilation/docs/develop_cc_for_new_component.md
index 4166a907434b13..32c10478dc6310 100644
--- a/src/common/conditional_compilation/docs/develop_cc_for_new_component.md
+++ b/src/common/conditional_compilation/docs/develop_cc_for_new_component.md
@@ -69,7 +69,6 @@ It checks whether the code region in this module is active or inactive by the ma
There is an example of `conditional_compilation_gen.h`:
```
-#define ov_pass_FixRtInfo_run_on_function 1
#define ov_pass_GraphRewrite_run_on_model 1
#define ov_pass_InitNodeInfo_run_on_function 1
#define ov_pass_ConstantFolding_run_on_model 1
diff --git a/src/common/snippets/src/pass/hash.cpp b/src/common/snippets/src/pass/hash.cpp
index 8d509b568867fc..48dd9586ae4337 100644
--- a/src/common/snippets/src/pass/hash.cpp
+++ b/src/common/snippets/src/pass/hash.cpp
@@ -62,7 +62,7 @@ static uint64_t hash_combine(uint64_t seed, const T &v) {
namespace rt_info {
// some node attr is not type of ov::RuntimeAttribute, need dedicate visitor.
-const std::vector list_of_names{
+static const std::vector list_of_names{
"PrimitivesPriority",
"alt_width",
};
diff --git a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp
index d4ff490bd2c388..7f2e533c5b79ed 100644
--- a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp
+++ b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp
@@ -41,7 +41,11 @@ class ov::pass::GroupedGatherElimination : public ov::pass::MatcherPass {
class ov::pass::SimplifyShapeOfSubGraph : public ov::pass::ModelPass {
public:
OPENVINO_RTTI("SimplifyShapeOfSubGraph", "0");
+ explicit SimplifyShapeOfSubGraph(bool use_shapes = true) : m_use_shapes(use_shapes){};
bool run_on_model(const std::shared_ptr& m) override;
+
+private:
+ bool m_use_shapes;
};
/**
diff --git a/src/common/transformations/include/transformations/fix_rt_info.hpp b/src/common/transformations/include/transformations/fix_rt_info.hpp
deleted file mode 100644
index a9c33645633074..00000000000000
--- a/src/common/transformations/include/transformations/fix_rt_info.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-/**
- * @brief Defines initialize node runtime information pass
- * @file init_node_info.hpp
- */
-
-#include
-#include
-
-#include "openvino/pass/graph_rewrite.hpp"
-#include "transformations_visibility.hpp"
-
-namespace ov {
-namespace pass {
-
-class TRANSFORMATIONS_API FixRtInfo;
-
-} // namespace pass
-} // namespace ov
-
-/**
- * @ingroup ie_transformation_common_api
- * @brief FixRtInfo transformation helps to fix info attributes in a single place.
- * User can pass runtime attribute using various types.
- * This Pass should generalize them runtime info representation.
- *
- * Used to extract runtime attributes from shared pointer to `ov::RuntimeAttributeWrapper` to standard or trivial types
- */
-class ov::pass::FixRtInfo : public ov::pass::ModelPass {
-public:
- OPENVINO_RTTI("FixRtInfo", "0");
- bool run_on_model(const std::shared_ptr& m) override;
-};
diff --git a/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp b/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp
new file mode 100644
index 00000000000000..caa2586ebeeea4
--- /dev/null
+++ b/src/common/transformations/include/transformations/symbolic_transformations/chained_maximum.hpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+
+namespace ov {
+namespace pass {
+class TRANSFORMATIONS_API ChainedMaximumOptimization;
+} // namespace pass
+} // namespace ov
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Optimizes graphs based on value labels / symbols
+ * Maximum(Maximum(A, B), B) -> Maximum(A, B)
+ * Maximum(Maximum(A, B), A) -> Maximum(A, B)
+ */
+class ov::pass::ChainedMaximumOptimization : public ov::pass::MatcherPass {
+public:
+ OPENVINO_RTTI("ChainedMaximumOptimization", "0");
+ ChainedMaximumOptimization();
+};
\ No newline at end of file
diff --git a/src/common/transformations/include/transformations/symbolic_transformations/label_optimization.hpp b/src/common/transformations/include/transformations/symbolic_transformations/label_optimization.hpp
new file mode 100644
index 00000000000000..9260be86d2744a
--- /dev/null
+++ b/src/common/transformations/include/transformations/symbolic_transformations/label_optimization.hpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include
+#include
+#include
+
+namespace ov {
+namespace pass {
+class TRANSFORMATIONS_API ApplyTableOfEquivalence;
+class TRANSFORMATIONS_API OptimizeLabelsUsedAsValues;
+} // namespace pass
+} // namespace ov
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Resets symbols / labels on output shapes and values according to table of symbol / label equivalence. It
+ * allows reducing the number of labels used in the model and disambiguating label values.
+ */
+class ov::pass::ApplyTableOfEquivalence : public ov::pass::ModelPass {
+public:
+ OPENVINO_RTTI("ApplyTableOfEquivalence", "0");
+ bool run_on_model(const std::shared_ptr& m) override;
+};
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Collects sources where each symbol / label initially appeared (on shape or shape sub-graph) and attaches all
+ * value usages of this label to this initial source
+ */
+class ov::pass::OptimizeLabelsUsedAsValues : public ov::pass::ModelPass {
+public:
+ OPENVINO_RTTI("OptimizeLabelsUsedAsValues", "0");
+ bool run_on_model(const std::shared_ptr& m) override;
+};
\ No newline at end of file
diff --git a/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp b/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp
new file mode 100644
index 00000000000000..c6356277a42e81
--- /dev/null
+++ b/src/common/transformations/include/transformations/symbolic_transformations/nop_broadcast.hpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+
+namespace ov {
+namespace pass {
+class TRANSFORMATIONS_API NopBroadcast;
+} // namespace pass
+} // namespace ov
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Optimizes out Broadcast(data, Maximum(shape, ones)) if labels on data and shape are equal
+ * Use case with data being empty should not be considered here since original graph has Maximum with ones
+ */
+class ov::pass::NopBroadcast : public ov::pass::MatcherPass {
+public:
+ OPENVINO_RTTI("NopBroadcast", "0");
+ NopBroadcast();
+};
\ No newline at end of file
diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp
new file mode 100644
index 00000000000000..1cf3cf9577dc78
--- /dev/null
+++ b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp
@@ -0,0 +1,50 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace ov {
+namespace pass {
+class TRANSFORMATIONS_API SymbolicOptimizations;
+class TRANSFORMATIONS_API SymbolicPropagation;
+} // namespace pass
+} // namespace ov
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Runs optimizations which are based on symbolic shape inference
+ */
+class ov::pass::SymbolicOptimizations : public ov::pass::ModelPass {
+public:
+ OPENVINO_RTTI("SymbolicOptimizations", "0");
+ explicit SymbolicOptimizations(bool full_run = true);
+ bool run_on_model(const std::shared_ptr& m) override;
+ std::shared_ptr get_manager() {
+ return m_manager;
+ };
+
+private:
+ std::shared_ptr m_manager;
+};
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Assigns labels / symbols to all tensors on shapes and values. Uses shape inference and other special rules to
+ * propagate labels / symbols
+ */
+class ov::pass::SymbolicPropagation : public ov::pass::ModelPass {
+public:
+ OPENVINO_RTTI("SymbolicPropagation");
+ SymbolicPropagation();
+ bool run_on_model(const std::shared_ptr& m) override;
+
+private:
+ std::shared_ptr m_te;
+};
diff --git a/src/common/transformations/include/transformations/symbolic_transformations/utils.hpp b/src/common/transformations/include/transformations/symbolic_transformations/utils.hpp
new file mode 100644
index 00000000000000..2f3d84dfe825ff
--- /dev/null
+++ b/src/common/transformations/include/transformations/symbolic_transformations/utils.hpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+
+#include "openvino/core/descriptor/tensor.hpp"
+#include "openvino/core/dimension.hpp"
+#include "openvino/core/partial_shape.hpp"
+#include "openvino/core/type/element_type.hpp"
+
+namespace ov {
+namespace symbol {
+namespace util {
+
+/// \brief Collects labels from shape. Labels of static dimensions are guaranteed to be ov::no_labels
+///
+/// \param shape Shape object to collect labels from
+/// \param labels TensorLabel object to collect labels to
+///
+/// \return Status of collecting the labels (false if rank is static else true)
+TRANSFORMATIONS_API bool get_labels(const ov::PartialShape& shape, ov::TensorLabel& labels);
+
+/// \brief Collects labels from tensor of Output object
+///
+/// \param output Output object to collect labels from
+/// \param labels TensorLabel object to collect labels to
+///
+/// \return Status of collecting the labels (false if tensor has no labels else true)
+TRANSFORMATIONS_API bool get_labels(const ov::Output<ov::Node>& output, ov::TensorLabel& labels);
+
+/// \brief Compares two TensorLabel objects element-wise, checking that corresponding labels are equal and meaningful
+///
+/// \param lhs TensorLabel object to compare
+/// \param rhs TensorLabel object to compare
+///
+/// \return true if labels are unique and equal between lhs and rhs else false
+TRANSFORMATIONS_API bool are_unique_and_equal_labels(const ov::TensorLabel& lhs, const ov::TensorLabel& rhs);
+} // namespace util
+} // namespace symbol
+} // namespace ov
diff --git a/src/common/transformations/include/transformations/utils/utils.hpp b/src/common/transformations/include/transformations/utils/utils.hpp
index 1af4dfbf1551e8..9a2036fff1b20d 100644
--- a/src/common/transformations/include/transformations/utils/utils.hpp
+++ b/src/common/transformations/include/transformations/utils/utils.hpp
@@ -214,11 +214,13 @@ TRANSFORMATIONS_API std::vector> get_node_target_inputs(const std::s
TRANSFORMATIONS_API std::shared_ptr node_to_get_shape_value_of_indices_from_shape_node(
const std::shared_ptr& shape_node,
- const std::vector& indices);
+ const std::vector& indices,
+ const std::vector>& copy_rt_info_from = {});
TRANSFORMATIONS_API std::shared_ptr node_to_get_shape_value_of_indices_from_shape_source(
const Output& shape_source,
- const std::vector& indices);
+ const std::vector& indices,
+ const std::vector>& copy_rt_info_from = {});
TRANSFORMATIONS_API bool is_dequantization_subgraph(const Output& node);
@@ -230,6 +232,28 @@ TRANSFORMATIONS_API bool is_constant_and_all_values_equal_int(const Output
TRANSFORMATIONS_API bool is_on_constant_path(const ov::Output& output);
+template <typename T>
+ov::pass::pattern::op::ValuePredicate constant_predicate(std::function<bool(const std::vector<T>&)> predicate) {
+    return pass::pattern::op::as_value_predicate([=](std::shared_ptr<Node> n) -> bool {
+        if (auto constant = as_type_ptr<v0::Constant>(n)) {
+            auto values = constant->cast_vector<T>();
+            return predicate(values);
+        }
+        return false;
+    });
+}
} // namespace util
} // namespace op
} // namespace ov
+
+#define INT_CONSTANT_WITH_PREDICATE(expression)                                                   \
+    pattern::wrap_type<ov::op::v0::Constant>(                                                     \
+        ov::op::util::constant_predicate<int64_t>([](const std::vector<int64_t>& value) -> bool { \
+            return expression;                                                                    \
+        }))
+
+#define FLOAT_CONSTANT_WITH_PREDICATE(expression)                                                 \
+    pattern::wrap_type<ov::op::v0::Constant>(                                                     \
+        ov::op::util::constant_predicate<float>([](const std::vector<float>& value) -> bool {     \
+            return expression;                                                                    \
+        }))
diff --git a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp
index 1638f2c4301e2b..8ad09f95dede3e 100644
--- a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp
+++ b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp
@@ -111,6 +111,7 @@
#include "transformations/op_conversions/softmax_decomposition.hpp"
#include "transformations/op_conversions/softsign_decomposition.hpp"
#include "transformations/op_conversions/unique_decomposition.hpp"
+#include "transformations/symbolic_transformations/symbolic_optimizations.hpp"
bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptr& f) {
RUN_ON_FUNCTION_SCOPE(CommonOptimizations);
@@ -230,7 +231,8 @@ bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptr();
+ REGISTER_PASS(manager, StridesOptimization)
+ REGISTER_PASS(manager, SymbolicOptimizations)
REGISTER_PASS(manager, Validate)
manager.run_passes(f);
diff --git a/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp b/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp
index 0bdae82810317a..068e1f27a291e9 100644
--- a/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp
+++ b/src/common/transformations/src/transformations/common_optimizations/moc_transformations.cpp
@@ -147,7 +147,7 @@ bool ov::pass::MOCTransformations::run_on_model(const std::shared_ptr
REGISTER_PASS(manager, Validate)
}
REGISTER_PASS(manager, ConvertQuantizeDequantize)
- REGISTER_PASS(manager, SimplifyShapeOfSubGraph)
+ REGISTER_PASS(manager, SimplifyShapeOfSubGraph, m_use_shapes)
if (!m_use_shapes) {
manager.register_pass();
diff --git a/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp
index 9cbf24a6dae0e3..9ce5ea86b901ab 100644
--- a/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp
+++ b/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp
@@ -833,7 +833,8 @@ ov::pass::NopSliceBeforeGatherElements::NopSliceBeforeGatherElements() {
ov::pass::PrepareShapeOpsForEliminationAroundBE::PrepareShapeOpsForEliminationAroundBE() {
MATCHER_SCOPE(PrepareShapeOpsForEliminationAroundBE);
- auto first_label = pattern::wrap_type(pattern::rank_equals(0));
+ auto first_label = pattern::wrap_type(
+ pattern::rank_equals(0));
auto other_input_label = pattern::any_input(pattern::rank_equals(0));
auto binary_op_label = pattern::wrap_type& f
Manager manager;
manager.set_per_pass_validation(false);
+ REGISTER_PASS(manager, PrepareShapeOpsForEliminationAroundBE)
REGISTER_PASS(manager, SharedOpOptimization)
REGISTER_PASS(manager, EliminateGatherUnsqueeze) // should run after SharedOpOptimization
+ REGISTER_PASS(manager, NopElimination, m_use_shapes)
REGISTER_PASS(manager, GroupedGatherElimination)
// GatherNopElimination depends on shape, so it requires shape propagation
// if previous transformations has resolved some dynamic shapes.
diff --git a/src/common/transformations/src/transformations/fix_rt_info.cpp b/src/common/transformations/src/transformations/fix_rt_info.cpp
deleted file mode 100644
index b70176ca7aecd1..00000000000000
--- a/src/common/transformations/src/transformations/fix_rt_info.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "transformations/fix_rt_info.hpp"
-
-#include
-#include
-
-#include "itt.hpp"
-#include "openvino/core/rt_info.hpp"
-#include "openvino/opsets/opset1.hpp"
-#include "transformations/rt_info/primitives_priority_attribute.hpp"
-
-bool ov::pass::FixRtInfo::run_on_model(const std::shared_ptr& f) {
- RUN_ON_FUNCTION_SCOPE(FixRtInfo);
-
- for (auto& node : f->get_ops()) {
- // Recursively apply transformation for sub-graph based operations
- if (auto sub_graph_node = std::dynamic_pointer_cast(node)) {
- if (auto sub_graph = sub_graph_node->get_function()) {
- run_on_model(sub_graph);
- }
- }
- auto& rt_info = node->get_rt_info();
- {
- auto it_info = rt_info.find("PrimitivesPriority");
- if (it_info != rt_info.end()) {
- if (it_info->second.is()) {
- rt_info.emplace(ov::PrimitivesPriority::get_type_info_static(),
- it_info->second.as());
- }
- if (it_info->second.is()) {
- rt_info.emplace(ov::PrimitivesPriority::get_type_info_static(),
- ov::PrimitivesPriority{it_info->second.as()});
- }
- rt_info.erase(it_info);
- }
- }
- }
- return false;
-}
diff --git a/src/common/transformations/src/transformations/init_node_info.cpp b/src/common/transformations/src/transformations/init_node_info.cpp
index 93c958d14f661b..efc2627199c723 100644
--- a/src/common/transformations/src/transformations/init_node_info.cpp
+++ b/src/common/transformations/src/transformations/init_node_info.cpp
@@ -8,9 +8,7 @@
#include
#include "itt.hpp"
-#include "openvino/core/rt_info.hpp"
-#include "openvino/opsets/opset1.hpp"
-#include "transformations/fix_rt_info.hpp"
+#include "openvino/op/util/sub_graph_base.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
@@ -27,6 +25,5 @@ bool ov::pass::InitNodeInfo::run_on_model(const std::shared_ptr& f) {
auto& rtInfo = node->get_rt_info();
rtInfo.emplace(FusedNames::get_type_info_static(), FusedNames{node->get_friendly_name()});
}
- FixRtInfo{}.run_on_model(f);
return false;
}
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/chained_maximum.cpp b/src/common/transformations/src/transformations/symbolic_transformations/chained_maximum.cpp
new file mode 100644
index 00000000000000..02510be34dcb4a
--- /dev/null
+++ b/src/common/transformations/src/transformations/symbolic_transformations/chained_maximum.cpp
@@ -0,0 +1,50 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/chained_maximum.hpp"
+
+#include
+#include
+
+#include "itt.hpp"
+#include "openvino/core/dimension_tracker.hpp"
+#include "transformations/symbolic_transformations/utils.hpp"
+
+using namespace ov::symbol::util;
+
+ov::pass::ChainedMaximumOptimization::ChainedMaximumOptimization() {
+ MATCHER_SCOPE(ChainedMaximumOptimization);
+ auto A_input = pattern::any_input();
+ auto B_input = pattern::any_input();
+ auto C_input = pattern::any_input();
+    auto first_maximum = pattern::wrap_type<ov::op::v1::Maximum>({A_input, B_input});
+    auto maximum = pattern::wrap_type<ov::op::v1::Maximum>({first_maximum, C_input});
+
+ ov::matcher_pass_callback matcher_pass_callback = [=](pattern::Matcher& m) {
+ const auto& vm = m.get_pattern_value_map();
+
+ auto A = vm.at(A_input), B = vm.at(B_input), C = vm.at(C_input);
+ auto output_to_replace = vm.at(first_maximum);
+
+ ov::TensorLabel A_labels, B_labels, C_labels;
+ bool A_read = get_labels(A, A_labels);
+ bool B_read = get_labels(B, B_labels);
+ bool C_read = get_labels(C, C_labels);
+
+ if (!A_read && !B_read && !C_read)
+ return false;
+
+ if (are_unique_and_equal_labels(A_labels, C_labels)) {
+ // Matched Maximum(Maximum(A, B), C) with A == C -> Maximum(B, C)
+ return ov::replace_output_update_name(output_to_replace, B);
+ } else if (are_unique_and_equal_labels(B_labels, C_labels)) {
+ // Matched Maximum(Maximum(A, B), C) with B == C -> Maximum(A, C)
+ return ov::replace_output_update_name(output_to_replace, A);
+ }
+ return false;
+ };
+
+    auto m = std::make_shared<pattern::Matcher>(maximum, matcher_name);
+ register_matcher(m, matcher_pass_callback);
+}
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp b/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp
new file mode 100644
index 00000000000000..049fd0e72c06c3
--- /dev/null
+++ b/src/common/transformations/src/transformations/symbolic_transformations/label_optimization.cpp
@@ -0,0 +1,265 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/label_optimization.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "itt.hpp"
+#include "openvino/core/rt_info.hpp"
+#include "openvino/op/util/multi_subgraph_base.hpp"
+
+namespace {
+void update_label(const ov::EqTable& table, ov::label_t& label) {
+ if (label != ov::no_label && table.count(label)) {
+ const auto& alternative_label = *table.at(label)->begin();
+ if (alternative_label != ov::no_label)
+ label = std::min(label, alternative_label);
+ }
+}
+
+void apply_table_of_equivalence_on_model(const std::shared_ptr& m, const ov::EqTable& table) {
+ for (const auto& op : m->get_ordered_ops()) {
+ // handle inner sub-graphs
+        if (auto multi_subgraph_op = std::dynamic_pointer_cast<ov::op::util::MultiSubGraphOp>(op))
+ for (const auto& sub_graph : multi_subgraph_op->get_functions())
+ if (sub_graph)
+ apply_table_of_equivalence_on_model(sub_graph, table);
+
+ for (auto& output : op->outputs()) {
+ // shape relabeling
+ auto shape = output.get_partial_shape();
+ for (auto& d : shape) {
+ if (d.is_static())
+ continue;
+ auto label = ov::DimensionTracker::get_label(d);
+ update_label(table, label);
+ if (label != ov::no_label)
+ ov::DimensionTracker::set_label(d, label);
+ }
+ op->set_output_type(output.get_index(), output.get_element_type(), shape);
+ // value relabeling
+ auto value_labels = output.get_tensor().get_value_label();
+ for (auto& label : value_labels)
+ update_label(table, label);
+ output.get_tensor().set_value_label(value_labels);
+ }
+ }
+}
+} // namespace
+
+bool ov::pass::ApplyTableOfEquivalence::run_on_model(const std::shared_ptr& m) {
+ RUN_ON_FUNCTION_SCOPE(ApplyTableOfEquivalence);
+ if (auto te = ov::table_of_equivalence(m))
+ apply_table_of_equivalence_on_model(m, te->get_equivalence_table());
+ return false;
+}
+
+// label to source map
+using LTS_map = std::unordered_map<ov::label_t, ov::Output<ov::Node>>;
+
+namespace {
+int64_t get_idx_of_label_in_source(const ov::Output& source, const ov::label_t& label) {
+ int64_t idx = -1;
+ if (label == ov::no_label)
+ return idx;
+ auto pshape = source.get_partial_shape();
+ auto rank = pshape.rank();
+ if (rank.is_dynamic())
+ return idx;
+ for (int64_t i = 0; i < rank.get_length(); ++i) {
+ auto l = ov::DimensionTracker::get_label(pshape[i]);
+ if (l == label) {
+ idx = i;
+ break;
+ }
+ }
+ return idx;
+}
+
+ov::Output alternative_source_from_existing_value(const ov::label_t& label,
+ const ov::Output& original_output,
+ LTS_map& label_value_source) {
+ auto alternative_source = ov::Output();
+ if (label_value_source.count(label)) {
+ alternative_source = label_value_source[label];
+ const auto &original_shape = original_output.get_shape(), &alternative_shape = alternative_source.get_shape();
+ const auto &original_et = original_output.get_element_type(),
+ &alternative_et = alternative_source.get_element_type();
+ if (alternative_shape != original_shape && (original_shape.empty() || original_shape == ov::Shape{0})) {
+ auto squeeze = std::make_shared(alternative_source);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), squeeze);
+ alternative_source = squeeze->output(0);
+ } else if (alternative_shape != original_shape) {
+ auto shape = ov::op::v0::Constant::create(ov::element::i64, {original_shape.size()}, original_shape);
+ auto reshape = std::make_shared(alternative_source, shape, false);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), reshape);
+ alternative_source = reshape->output(0);
+ }
+ if (alternative_et != original_et) {
+ auto convert = std::make_shared(alternative_source, original_et);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), convert);
+ alternative_source = convert->output(0);
+ }
+ }
+ return alternative_source;
+}
+
+ov::Output alternative_source_from_shape_source(const LTS_map& label_shape_source,
+ const ov::label_t& label,
+ const ov::Output& original_output,
+ LTS_map& label_value_source) {
+ auto alternative_source = ov::Output();
+ if (label_shape_source.count(label)) {
+ // replacing via constructing the label source and saving it for the future
+ const auto& source = label_shape_source.at(label);
+ const int64_t& idx = get_idx_of_label_in_source(source, label);
+ if (idx == -1)
+ return alternative_source;
+ const auto& original_et = original_output.get_element_type();
+ std::shared_ptr shape;
+ if (original_et == ov::element::i32 || original_et == ov::element::i64) {
+ shape = std::make_shared(source, original_et);
+ } else {
+ shape = std::make_shared(source);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), shape);
+ shape = std::make_shared(shape, original_et);
+ }
+ auto indices = ov::op::v0::Constant::create(ov::element::i64, original_output.get_shape(), {idx});
+ auto axis = ov::op::v0::Constant::create(ov::element::i64, {}, {0});
+ auto gather = std::make_shared(shape, indices, axis);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), {shape, indices, axis, gather});
+ alternative_source = gather;
+ label_value_source[label] = alternative_source;
+ }
+ return alternative_source;
+}
+
+ov::Output get_alternative_source_from_value_or_shape_source(const LTS_map& label_shape_source,
+ const ov::label_t& label,
+ const ov::Output& original_output,
+ LTS_map& label_value_source) {
+ auto alternative_source = ov::Output();
+ if (label == ov::no_label)
+ return alternative_source;
+ alternative_source = alternative_source_from_existing_value(label, original_output, label_value_source);
+ if (!alternative_source.get_node_shared_ptr())
+ alternative_source =
+ alternative_source_from_shape_source(label_shape_source, label, original_output, label_value_source);
+ return alternative_source;
+}
+
+ov::Output alternative_source_from_concat_input_sources(const LTS_map& label_shape_source,
+ const ov::label_t& label,
+ const ov::Output& original_output,
+ LTS_map& label_value_source) {
+ auto alternative_source = ov::Output();
+ if (label_shape_source.count(label)) {
+ const auto& source = label_shape_source.at(label);
+ auto concat = ov::as_type_ptr(source.get_node_shared_ptr());
+ if (!concat || concat->get_input_size() != 2)
+ return alternative_source;
+ int64_t idx = get_idx_of_label_in_source(source, label);
+ if (idx == -1 || idx != concat->get_concatenation_axis())
+ return alternative_source;
+ // optimize using the knowledge of the Concat SI and what happens on the axis
+ const auto& lhs_pshape = concat->get_input_partial_shape(0);
+ const auto& rhs_pshape = concat->get_input_partial_shape(1);
+ if (lhs_pshape.rank().is_static() && rhs_pshape.rank().is_static()) {
+ auto lhs_label = ov::DimensionTracker::get_label(lhs_pshape[idx]);
+ auto lhs_alternative = get_alternative_source_from_value_or_shape_source(label_shape_source,
+ lhs_label,
+ original_output,
+ label_value_source);
+
+ auto rhs_label = ov::DimensionTracker::get_label(rhs_pshape[idx]);
+ auto rhs_alternative = get_alternative_source_from_value_or_shape_source(label_shape_source,
+ rhs_label,
+ original_output,
+ label_value_source);
+
+ if (lhs_alternative.get_node_shared_ptr() && rhs_alternative.get_node_shared_ptr()) {
+ alternative_source = std::make_shared(lhs_alternative, rhs_alternative);
+ ov::copy_runtime_info(original_output.get_node_shared_ptr(), alternative_source.get_node_shared_ptr());
+ alternative_source.get_tensor().set_value_label({label});
+ label_value_source[label] = alternative_source;
+ }
+ }
+ }
+ return alternative_source;
+}
+
+void optimize_value_usage(ov::Output& output, LTS_map& label_shape_source, LTS_map& label_value_source) {
+ auto value_labels = output.get_tensor().get_value_label();
+ if (value_labels.size() != 1)
+ return;
+ auto label = value_labels[0];
+ if (label == ov::no_label)
+ return;
+ auto pshape = output.get_partial_shape();
+ if (pshape.is_dynamic() || ov::shape_size(pshape.to_shape()) != 1)
+ return;
+
+ ov::Output alternative_source =
+ alternative_source_from_concat_input_sources(label_shape_source, label, output, label_value_source);
+ if (!alternative_source.get_node_shared_ptr())
+ alternative_source =
+ get_alternative_source_from_value_or_shape_source(label_shape_source, label, output, label_value_source);
+
+ if (alternative_source.get_node_shared_ptr() != nullptr) {
+ evaluate_both_bounds(alternative_source);
+ output.replace(alternative_source);
+ } else {
+ // in case we can not optimize it -- it is label which appeared just now on the value path
+ label_value_source[label] = output;
+ }
+}
+
+void save_shape_sources(const ov::Output& output, LTS_map& label_shape_source) {
+ for (const auto& d : output.get_partial_shape()) {
+ if (d.is_static())
+ continue;
+ auto label = ov::DimensionTracker::get_label(d);
+ if (label == ov::no_label || label_shape_source.count(label))
+ continue;
+ label_shape_source[label] = output;
+ }
+}
+} // namespace
+
+bool ov::pass::OptimizeLabelsUsedAsValues::run_on_model(const std::shared_ptr& m) {
+ RUN_ON_FUNCTION_SCOPE(OptimizeLabelsUsedAsValues);
+ LTS_map label_shape_source;
+ LTS_map label_value_source;
+ for (const auto& op : m->get_ordered_ops()) {
+ // Result has output port which has shared (during validate_and_infer_type) tensor with input port.
+ // Transformations may replace input of Result. After replacement and before Result::validate_and_infer_type --
+ // output tensor of Result may contain inaccurate shape / labels due to the sharing with tensor which may be
+ // already detached from the model. To avoid creating ShapeOf from Result in these cases we exclude it from this
+ // optimization entirely
+ if (auto result = ov::as_type_ptr(op))
+ continue;
+
+ // LTS maps aren't shared with sub-graphs because inner graph can not access outer graph for label sources
+ if (auto multi_subgraph_op = std::dynamic_pointer_cast(op))
+ for (const auto& sub_graph : multi_subgraph_op->get_functions())
+ if (sub_graph)
+ run_on_model(sub_graph);
+
+ for (auto& output : op->outputs()) {
+ optimize_value_usage(output, label_shape_source, label_value_source);
+ save_shape_sources(output, label_shape_source);
+ }
+ }
+ return true;
+}
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/nop_broadcast.cpp b/src/common/transformations/src/transformations/symbolic_transformations/nop_broadcast.cpp
new file mode 100644
index 00000000000000..889bdd4b209d20
--- /dev/null
+++ b/src/common/transformations/src/transformations/symbolic_transformations/nop_broadcast.cpp
@@ -0,0 +1,61 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/nop_broadcast.hpp"
+
+#include
+#include
+#include
+#include
+#include
+
+#include "compare.hpp"
+#include "itt.hpp"
+#include "openvino/pass/pattern/op/or.hpp"
+#include "transformations/symbolic_transformations/utils.hpp"
+#include "transformations/utils/utils.hpp"
+
+using namespace std;
+using namespace ov;
+using namespace ov::op;
+using namespace ov::symbol::util;
+
+namespace {
+shared_ptr broadcast_label(const OutputVector& inputs) {
+ return ov::pass::pattern::wrap_type(inputs, [](Output output) {
+ const auto& op = output.get_node_shared_ptr();
+ auto data_rank = op->get_input_partial_shape(0).rank();
+ auto new_shape_shape = op->get_input_partial_shape(1);
+ return data_rank.is_static() && new_shape_shape.is_static() && data_rank == new_shape_shape[0];
+ });
+}
+} // namespace
+
+ov::pass::NopBroadcast::NopBroadcast() {
+ MATCHER_SCOPE(NopBroadcast);
+ auto data_label = pattern::any_input(pattern::has_static_rank());
+
+ auto shape_label = pattern::wrap_type();
+ auto ones = INT_CONSTANT_WITH_PREDICATE(std::all_of(value.begin(), value.end(), cmp::Equal(1)));
+ auto maximum = pattern::wrap_type({shape_label, ones});
+
+ auto broadcast_3_ins = broadcast_label({data_label, maximum, pattern::any_input()});
+ auto broadcast_2_ins = broadcast_label({data_label, maximum});
+    auto broadcast = make_shared<pattern::op::Or>(OutputVector{broadcast_2_ins, broadcast_3_ins});
+
+ ov::matcher_pass_callback matcher_pass_callback = [=](pattern::Matcher& m) {
+ const auto& vm = m.get_pattern_value_map();
+ auto data = vm.at(data_label);
+ auto shape = vm.at(shape_label);
+
+ ov::TensorLabel data_labels, shape_labels;
+ if (!get_labels(data.get_partial_shape(), data_labels) || !get_labels(shape, shape_labels) ||
+ !are_unique_and_equal_labels(data_labels, shape_labels))
+ return false;
+ return ov::replace_output_update_name(m.get_match_root(), data);
+ };
+
+    auto m = std::make_shared<pattern::Matcher>(broadcast, matcher_name);
+ register_matcher(m, matcher_pass_callback);
+}
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp b/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp
new file mode 100644
index 00000000000000..7451df397ba33c
--- /dev/null
+++ b/src/common/transformations/src/transformations/symbolic_transformations/symbolic_optimizations.cpp
@@ -0,0 +1,146 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/symbolic_optimizations.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "itt.hpp"
+
+namespace {
+void symbolic_set_up_for_shape(ov::DimensionTracker& dt, ov::PartialShape& shape) {
+ if (shape.rank().is_dynamic())
+ return;
+ for (auto& d : shape) {
+ bool is_static = d.is_static(), has_label = ov::DimensionTracker::has_label(d);
+ if (is_static && has_label)
+ dt.reset_tracking_info(d); // remove labels from static dims on shapes to reduce label clutter
+ if (is_static || has_label)
+ continue;
+ dt.set_up_for_tracking(d);
+ }
+}
+
+void special_case_range_label_propagation(const std::shared_ptr& node) {
+ /* Label propagation through specific Range operation
+ start shift
+ | \ /
+ | Add step == 1
+ \ / /
+ Range
+ */
+ if (!ov::is_type(node) && !ov::is_type(node))
+ return;
+
+ auto output_shape = node->get_output_partial_shape(0);
+ if (output_shape.rank().is_dynamic() || output_shape.size() != 1)
+ return;
+
+ OPENVINO_SUPPRESS_DEPRECATED_START
+ auto step_value = ov::get_constant_from_source(node->input_value(2));
+ OPENVINO_SUPPRESS_DEPRECATED_END
+ if (!step_value || step_value->cast_vector()[0] != 1)
+ return;
+
+ auto start_labels = node->get_input_tensor(0).get_value_label();
+ if (start_labels.size() != 1 || start_labels[0] == ov::no_label)
+ return;
+ auto start_label = start_labels[0];
+
+ auto stop_node = node->input_value(1).get_node_shared_ptr();
+ if (!ov::is_type(stop_node))
+ return;
+ auto add_in0_labels = stop_node->get_input_tensor(0).get_value_label();
+ if (add_in0_labels.size() != 1 || add_in0_labels[0] == ov::no_label)
+ return;
+ auto add_in0_label = add_in0_labels[0];
+
+ auto add_in1_labels = stop_node->get_input_tensor(1).get_value_label();
+ if (add_in1_labels.size() != 1 || add_in1_labels[0] == ov::no_label)
+ return;
+ auto add_in1_label = add_in1_labels[0];
+
+ if (add_in0_label == start_label)
+ ov::DimensionTracker::set_label(output_shape[0], add_in1_label);
+ else if (add_in1_label == start_label)
+ ov::DimensionTracker::set_label(output_shape[0], add_in0_label);
+ node->set_output_type(0, node->get_output_element_type(0), output_shape);
+}
+} // namespace
+
+ov::pass::SymbolicPropagation::SymbolicPropagation() {
+ m_te = std::make_shared();
+}
+
+bool ov::pass::SymbolicPropagation::run_on_model(const std::shared_ptr& m) {
+ RUN_ON_MODEL_SCOPE(SymbolicPropagation);
+
+ auto te = m_te;
+ ov::set_up_symbolic_info(m, te);
+ ov::DimensionTracker dt(te);
+
+ for (const auto& op : m->get_ordered_ops()) {
+ // since we disable invalidation with the following two lines, we have to invalidate manually here
+ op->invalidate_values();
+ for (auto& output : op->outputs())
+ ov::set_up_symbolic_info(output, te);
+ op->revalidate_and_infer_types();
+ // Recursively apply transformation for sub-graph based operations
+ if (auto multi_subgraph_op = std::dynamic_pointer_cast(op))
+ for (const auto& sub_graph : multi_subgraph_op->get_functions())
+ if (sub_graph)
+ run_on_model(sub_graph);
+
+ // additional label propagation rules must be triggered here
+ special_case_range_label_propagation(op);
+ // additional label propagation rules must be triggered here
+
+ for (auto& output : op->outputs()) {
+ auto shape = output.get_partial_shape();
+ symbolic_set_up_for_shape(dt, shape);
+ OPENVINO_SUPPRESS_DEPRECATED_START
+ output.get_tensor().set_tensor_type(output.get_element_type(), shape);
+ OPENVINO_SUPPRESS_DEPRECATED_END
+ }
+ }
+ return true;
+}
+
+ov::pass::SymbolicOptimizations::SymbolicOptimizations(bool full_run) {
+ m_manager = std::make_shared();
+ m_manager->set_per_pass_validation(false);
+
+#define REGISTER_SYMBOLIC(region, ...) m_manager->register_pass(__VA_ARGS__);
+
+ REGISTER_SYMBOLIC(SymbolicPropagation)
+ if (full_run) {
+ // symbolic based transformations allowing for better static dimension propagation
+ REGISTER_SYMBOLIC(ChainedMaximumOptimization)
+ REGISTER_SYMBOLIC(NopBroadcast)
+ // regular transformations which are needed right now since they clean up unnecessary operations
+ REGISTER_SYMBOLIC(NopElimination) // Broadcast (Tile) Ones + Remove Slice Before GatherElements
+ REGISTER_SYMBOLIC(SharedOpOptimization) // Shared GatherElements
+ }
+ // transformations which use labels for optimizations
+ REGISTER_SYMBOLIC(ApplyTableOfEquivalence)
+ if (full_run) {
+ REGISTER_SYMBOLIC(OptimizeLabelsUsedAsValues) // reduce shape sub-graphs
+ }
+}
+
+bool ov::pass::SymbolicOptimizations::run_on_model(const std::shared_ptr& m) {
+ RUN_ON_FUNCTION_SCOPE(SymbolicOptimizations);
+ m_manager->run_passes(m);
+ ov::remove_symbolic_info(m);
+ return true;
+}
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/utils.cpp b/src/common/transformations/src/transformations/symbolic_transformations/utils.cpp
new file mode 100644
index 00000000000000..3fedc3bd4c85be
--- /dev/null
+++ b/src/common/transformations/src/transformations/symbolic_transformations/utils.cpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/utils.hpp"
+
+#include
+#include
+#include
+
+bool ov::symbol::util::get_labels(const ov::PartialShape& shape, ov::TensorLabel& labels) {
+ if (shape.rank().is_dynamic())
+ return false;
+ labels.clear();
+ labels.reserve(shape.size());
+ for (const auto& d : shape)
+ labels.push_back((d.is_dynamic() ? ov::DimensionTracker::get_label(d) : ov::no_label));
+ return true;
+}
+
+bool ov::symbol::util::get_labels(const ov::Output& output, ov::TensorLabel& labels) {
+ const auto& tensor = output.get_tensor();
+ labels = tensor.get_value_label();
+ return !labels.empty();
+}
+
+bool ov::symbol::util::are_unique_and_equal_labels(const ov::TensorLabel& lhs, const ov::TensorLabel& rhs) {
+ if (rhs.size() != lhs.size() || rhs.empty())
+ return false;
+ for (size_t i = 0; i < lhs.size(); ++i)
+ if (lhs[i] != rhs[i] || lhs[i] == ov::no_label)
+ return false;
+ return true;
+}
diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp
index d7609c935f1d61..42fa6f85cc61cf 100644
--- a/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp
+++ b/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp
@@ -57,16 +57,17 @@ TSGatherForward::TSGatherForward() {
}
}
- size_t axis;
+ size_t order_axis;
if (axes[0] < 0) {
auto data_rank = main_node->get_input_partial_shape(0).rank();
if (data_rank.is_dynamic()) {
return false;
}
- axis = static_cast(axes[0] + data_rank.get_length());
+ order_axis = static_cast(axes[0] + data_rank.get_length());
} else {
- axis = static_cast(axes[0]);
+ order_axis = static_cast(axes[0]);
}
+ const size_t axis = order_val[order_axis];
/*
https://docs.openvino.ai/2023.0/openvino_docs_ops_movement_Gather_8.html
The Gather output shape has the same shape as the input,
@@ -136,8 +137,7 @@ TSGatherForward::TSGatherForward() {
if (!success) {
return false;
}
- auto new_axis =
- ov::op::v0::Constant::create(gather_axis->get_element_type(), gather_axis->get_shape(), {order_val[axis]});
+ auto new_axis = ov::op::v0::Constant::create(gather_axis->get_element_type(), gather_axis->get_shape(), {axis});
main_node->input(2).replace_source_output(new_axis);
copy_runtime_info(gather_axis, new_axis);
diff --git a/src/common/transformations/src/transformations/utils/utils.cpp b/src/common/transformations/src/transformations/utils/utils.cpp
index a62cd141b9f83b..62b1765e7ba275 100644
--- a/src/common/transformations/src/transformations/utils/utils.cpp
+++ b/src/common/transformations/src/transformations/utils/utils.cpp
@@ -162,16 +162,24 @@ std::vector> get_node_target_inputs(const std::shared_ptr& nod
std::shared_ptr<Node> node_to_get_shape_value_of_indices_from_shape_node(
const std::shared_ptr<Node>& shape_node,
- const std::vector<size_t>& indices) {
- return make_try_fold<v8::Gather>(shape_node,
- v0::Constant::create(ov::element::i64, {indices.size()}, indices),
- v0::Constant::create(ov::element::i64, {}, {0}));
+ const std::vector<size_t>& indices,
+ const std::vector<std::shared_ptr<Node>>& copy_rt_info_from) {
+ const auto& indices_op = v0::Constant::create(ov::element::i64, {indices.size()}, indices);
+ const auto& axis_op = v0::Constant::create(ov::element::i64, {}, {0});
+ auto op = make_try_fold<v8::Gather>(shape_node, indices_op, axis_op);
+ if (!copy_rt_info_from.empty())
+ ov::copy_runtime_info(copy_rt_info_from, {op, indices_op, axis_op});
+ return op;
}
-std::shared_ptr<Node> node_to_get_shape_value_of_indices_from_shape_source(const ov::Output<Node>& shape_source,
- const std::vector<size_t>& indices) {
+std::shared_ptr<Node> node_to_get_shape_value_of_indices_from_shape_source(
+ const ov::Output<Node>& shape_source,
+ const std::vector<size_t>& indices,
+ const std::vector<std::shared_ptr<Node>>& copy_rt_info_from) {
const auto& shape_node = make_try_fold<v3::ShapeOf>(shape_source);
- return node_to_get_shape_value_of_indices_from_shape_node(shape_node, indices);
+ if (!copy_rt_info_from.empty())
+ ov::copy_runtime_info(copy_rt_info_from, shape_node);
+ return node_to_get_shape_value_of_indices_from_shape_node(shape_node, indices, copy_rt_info_from);
}
bool shapes_equal_except_dynamic_expected_batch(const ov::PartialShape& expected, const ov::PartialShape& actual) {
diff --git a/src/common/transformations/tests/symbolic_transformations/chained_maximum.cpp b/src/common/transformations/tests/symbolic_transformations/chained_maximum.cpp
new file mode 100644
index 00000000000000..f0507897fc4a36
--- /dev/null
+++ b/src/common/transformations/tests/symbolic_transformations/chained_maximum.cpp
@@ -0,0 +1,134 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/symbolic_transformations/chained_maximum.hpp"
+
+#include <gtest/gtest.h>
+
+#include
+#include
+#include