diff --git a/.github/components.yml b/.github/components.yml index 31952e2b87c114..f0764d50d9f812 100644 --- a/.github/components.yml +++ b/.github/components.yml @@ -110,7 +110,6 @@ IR_FE: ONNX_FE: revalidate: - - MO - OVC - ONNX_RT build: @@ -119,7 +118,6 @@ ONNX_FE: PDPD_FE: revalidate: - - MO - OVC build: - CPU @@ -127,7 +125,6 @@ PDPD_FE: TF_FE: revalidate: - - MO - OVC build: - CPU @@ -136,7 +133,6 @@ TF_FE: TFL_FE: revalidate: - - MO - OVC build: - CPU @@ -144,7 +140,6 @@ TFL_FE: PyTorch_FE: revalidate: - - MO - OVC build: - CPU @@ -153,7 +148,6 @@ PyTorch_FE: JAX_FE: revalidate: - - MO - OVC build: - CPU @@ -171,7 +165,6 @@ C_API: Python_API: revalidate: - samples - - MO - OVC - tools - TF_FE @@ -228,14 +221,6 @@ OVC: - Python_API - TOKENIZERS # TF_FE tests depends on tokenizers build -MO: - revalidate: - - PyTorch_FE - - TF_FE - build: - - Python_API - - TOKENIZERS # TF_FE tests depends on tokenizers build - tools: build: - CPU diff --git a/.github/github_org_control/config.json b/.github/github_org_control/config.json index 7fc23b7888c170..52a29f2790481b 100644 --- a/.github/github_org_control/config.json +++ b/.github/github_org_control/config.json @@ -36,7 +36,6 @@ "openvino-tf-frontend-maintainers": "category: TF FE", "openvino-onnx-frontend-maintainers": "category: ONNX FE", "openvino-ie-tests-maintainers": "category: IE Tests", - "openvino-mo-maintainers": "category: MO", "openvino-ovc-maintainers": "category: OVC", "openvino-ngraph-maintainers": "category: Core", "openvino-scripts-maintainers": "category: build", diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index e1532d530ff2db..b7c538cd58e6da 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -133,11 +133,11 @@ jobs: --ignore=${INSTALL_TEST_DIR}/onnx/test_python/test_zoo_models.py - name: OVC unit tests - if: fromJSON(inputs.affected-components).MO.test + if: fromJSON(inputs.affected-components).OVC.test run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml - name: OVC Python API Tests - if: fromJSON(inputs.affected-components).MO.test + if: fromJSON(inputs.affected-components).OVC.test run: | # Import 'test_utils' installed in '/tests/python/openvino' export PYTHONPATH=${INSTALL_TEST_DIR}/python diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index de33f2603d7430..5708b529f25acc 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -358,7 +358,7 @@ jobs: --ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py - name: OVC Python API Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test + if: fromJSON(needs.smart_ci.outputs.affected_components).OVC.test shell: cmd run: | :: Used for 'test_utils' installed in '\python\openvino\test_utils' @@ -377,7 +377,7 @@ jobs: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml - name: OVC unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test + if: fromJSON(needs.smart_ci.outputs.affected_components).OVC.test shell: cmd run: python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/ovc/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-OpenVinoConversion.xml diff --git a/.gitignore b/.gitignore index 2815d16cf28392..3ce289a4a2abf7 100644 --- a/.gitignore +++ 
b/.gitignore @@ -60,17 +60,6 @@ coverage .npm # Artifacts -/tools/mo/*.bin -/tools/mo/*.xml -/tools/mo/*.json -/tools/mo/*.so -/tools/mo/*.txt -/tools/mo/*.pb -/tools/mo/*.pbtxt -/tools/mo/!CMakeLists.txt -/tools/mo/*.mapping -/tools/mo/*.dat -/tools/mo/*.svg /src/plugins/intel_cpu/tools/commit_slider/*.json /src/plugins/intel_cpu/tools/commit_slider/slider_cache/* /src/plugins/intel_cpu/thirdparty/ComputeLibrary/build/* diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index 260e45b89778d0..ae0bcde8793e5d 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -4,7 +4,6 @@ cmake_policy(SET CMP0054 NEW) -# TODO: fix it, outside of source dir MO cannot find TBB dependency ov_set_temp_directory(TEMP "${CMAKE_SOURCE_DIR}") ## Intel OMP package diff --git a/docs/dev/ci/github_actions/adding_tests.md b/docs/dev/ci/github_actions/adding_tests.md index f3e3ed7b5c77c2..464abc4f79faaa 100644 --- a/docs/dev/ci/github_actions/adding_tests.md +++ b/docs/dev/ci/github_actions/adding_tests.md @@ -41,13 +41,13 @@ An example step from [`job_python_unit_tests.yml`](./../../../../.github/workflo steps: ... - name: OVC unit tests - if: fromJSON(inputs.affected-components).MO.test + if: fromJSON(inputs.affected-components).OVC.test run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml ... ``` The step includes: * a `name`: `OVC unit tests`. -* an `if` condition: `fromJSON(inputs.affected-components).MO.test` +* an `if` condition: `fromJSON(inputs.affected-components).OVC.test` * This step is executed only if the condition is `true`. * This is a part of the Smart CI system implemented for the OpenVINO workflow. Read the [Smart CI Overview](./smart_ci.md) to learn about the system and its usage. * a `run` section with commands to be executed. diff --git a/docs/dev/ci/github_actions/smart_ci.md b/docs/dev/ci/github_actions/smart_ci.md index d9f17595f682af..8a9c2264d3db05 100644 --- a/docs/dev/ci/github_actions/smart_ci.md +++ b/docs/dev/ci/github_actions/smart_ci.md @@ -59,7 +59,7 @@ files inside src/frontends/tensorflow: changed_component_names: {'TF_FE'} # TF_FE is an alias we chose for TensorFlow Frontend component affected_components={ "TF_FE": {"test": true, "build": true}, - "MO": {"test": true, "build": true}, + "OVC": {"test": true, "build": true}, "CPU": {"build": true}, "Python_API": {"build": true}, ... 
@@ -115,7 +115,7 @@ This file describes the relationships between components, for example: ```yaml PyTorch_FE: # Component name revalidate: # Defines the list of components to revalidate (build + test) if the component above was changed - - MO # This component depends on PyTorch_FE and requires full revalidation + - OVC # This component depends on PyTorch_FE and requires full revalidation build: # Defines the list of components to build if the PyTorch_FE was changed (test runs for them are skipped) - CPU # This component and the component below must be built if PyTorch_FE was changed - Python_API @@ -124,8 +124,8 @@ For the example above, the following pipeline will be executed on changes applie * Build for PyTorch_FE * Tests for PyTorch_FE -* Build for MO -* Tests for MO +* Build for OVC +* Tests for OVC * Build for CPU * Build for Python_API diff --git a/docs/dev/pypi_publish/pypi-openvino-dev.md b/docs/dev/pypi_publish/pypi-openvino-dev.md deleted file mode 100644 index 868a7298b10a14..00000000000000 --- a/docs/dev/pypi_publish/pypi-openvino-dev.md +++ /dev/null @@ -1,190 +0,0 @@ -# OpenVINO™ Development Tools - - -> **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. - -> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2025.0 release. To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2024/documentation/legacy-features.html). - -Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. - -OpenVINO™ Development Tools enables you to download models from Open Model Zoo, convert your own models to OpenVINO IR, as well as optimize and tune pre-trained deep learning models. See [What's in the Package](#whats-in-the-package) for more information. - -## System Requirements - -Before you start the installation, check the supported operating systems and required Python* versions. The complete list of supported hardware is available in the [System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html). - -**C++ libraries** are also required for the installation on Windows*. To install that, you can [download the Visual Studio Redistributable file (.exe)](https://aka.ms/vs/17/release/vc_redist.x64.exe). - -> **NOTE**: This package can be installed on other versions of macOS, Linux and Windows, but only the specific versions above are fully validated. - -## Install the OpenVINO™ Development Tools Package - -There are two options to install OpenVINO Development Tools: installation into an existing environment with a deep learning framework used for model training or creation; -or installation in a new environment. 
- -### Installation into an Existing Environment with the Source Deep Learning Framework - -To install OpenVINO Development Tools (see the [What's in the Package](#whats-in-the-package) section of this article) into an existing environment -with the source deep learning framework used for model training or creation, run the following command: -``` -pip install openvino-dev -``` - -### Installation in a New Environment - -If you do not have an environment with the source deep learning framework for the input model or you encounter any compatibility issues between OpenVINO and your version of deep learning framework, -you may install OpenVINO Development Tools with validated versions of frameworks into a new environment. - -#### Step 1. Set Up Python Virtual Environment - -Use a virtual environment to avoid dependency conflicts. - -To create a virtual environment, use the following commands: - -On Windows: -```sh -python -m venv openvino_env -``` - -On Linux and macOS: -```sh -python3 -m venv openvino_env -``` - -> **NOTE**: On Linux and macOS, you may need to [install pip](https://pip.pypa.io/en/stable/installation/). For example, on Ubuntu execute the following command to get pip installed: `sudo apt install python3-venv python3-pip`. - -#### Step 2. Activate Virtual Environment - -On Linux and macOS: -```sh -source openvino_env/bin/activate -``` -On Windows: -```sh -openvino_env\Scripts\activate -``` - -#### Step 3. Set Up and Update PIP to the Highest Version - -Run the command below: -```sh -python -m pip install --upgrade pip -``` - -#### Step 4. Install the Package - -Use the following command: -```sh -pip install openvino-dev[extras] -``` - where `extras` is the source deep learning framework for the input model and is one or more of the following values separated with "," : - -| Extras Value | DL Framework | -| :-------------------------------| :------------------------------------------------------------------------------- | -| caffe | [Caffe*](https://caffe.berkeleyvision.org/) | -| kaldi | [Kaldi*](https://github.com/kaldi-asr/kaldi) | -| onnx | [ONNX*](https://github.com/microsoft/onnxruntime/) | -| pytorch | [PyTorch*](https://pytorch.org/) | -| tensorflow | [TensorFlow* 1.x](https://www.tensorflow.org/versions#tensorflow_1) | -| tensorflow2 | [TensorFlow* 2.x](https://www.tensorflow.org/versions#tensorflow_2) | - -For example, to install and configure the components for working with TensorFlow 2.x and ONNX models, use the following command: - ```sh - pip install openvino-dev[tensorflow2,onnx] - ``` -> **NOTE**: Model conversion API support for TensorFlow 1.x environment has been deprecated. Use TensorFlow 2.x environment to convert both TensorFlow 1.x and 2.x models. - -> **NOTE**: On macOS, you may need to enclose the package name in quotes: `pip install "openvino-dev[extras]"`. - -## How to Verify that the Package Is Installed - -- To verify that the **developer package** is properly installed, run the command below (this may take a few seconds): - ```sh - mo -h - ``` - You will see the help message for ``mo`` if installation finished successfully. - -- To verify that OpenVINO Runtime from the **runtime package** is available, run the command below: - ```sh - python -c "from openvino import Core; print(Core().available_devices)" - ``` - If installation was successful, you will see a list of available devices. - - - -## What's in the Package? 
- -> **NOTE**: The openvino-dev package installs [OpenVINO™ Runtime](https://pypi.org/project/openvino) as a dependency, which is the engine that runs the deep learning model and includes a set of libraries for an easy inference integration into your applications. - -**In addition, the openvino-dev package installs the following components by default:** - -| Component | Console Script | Description | -|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Legacy Model conversion API](https://docs.openvino.ai/2024/documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api.html) | `mo` |**Model conversion API** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, PaddlePaddle\*, and ONNX\*. | | -| [Model Downloader and other Open Model Zoo tools](https://docs.openvino.ai/2024/omz_tools_downloader.html)| `omz_downloader`
`omz_converter`
`omz_quantizer`
`omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](@ref omz_models_group_public) and [Intel](@ref omz_models_group_intel)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with model conversion API. A number of additional tools are also provided to automate the process of working with downloaded models:
**Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the OpenVINO Intermediate Representation (IR) using model conversion API.
**Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool.
**Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format. | - -## Troubleshooting - -For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2024/get-started/troubleshooting-install-config.html). The following sections also provide explanations to several error messages. - -### Errors with Installing via PIP for Users in China - -Users in China might encounter errors while downloading sources via PIP during OpenVINO™ installation. To resolve the issues, try the following solution: - -* Add the download source using the ``-i`` parameter with the Python ``pip`` command. For example: - - ``` sh - pip install openvino-dev -i https://mirrors.aliyun.com/pypi/simple/ - ``` - Use the ``--trusted-host`` parameter if the URL above is ``http`` instead of ``https``. - You can also run the following command to install openvino-dev with specific frameworks. For example: - - ``` - pip install openvino-dev[tensorflow2] -i https://mirrors.aliyun.com/pypi/simple/ - ``` - -### zsh: no matches found : openvino-dev[...] - -If you use zsh (Z shell) interpreter, that is the default shell for macOS starting with version 10.15 (Catalina), you may encounter the following error while installing `openvino-dev` package with extras: - -```sh -pip install openvino-dev[tensorflow2,caffe] -zsh: no matches found: openvino-dev[tensorflow2,caffe] -``` - -By default zsh interprets square brackets as an expression for pattern matching. To resolve this issue, you need to escape the command with quotes: - -```sh -pip install 'openvino-dev[tensorflow2,caffe]' -``` - -To avoid such issues you can also disable globbing for PIP commands by defining an alias in `~/.zshrc` file: - -```sh -alias pip='noglob pip' -``` - -### ERROR:root:Could not find OpenVINO Python API. - -On Windows*, some libraries are necessary to run OpenVINO. To resolve this issue, install the [C++ redistributable (.exe)](https://aka.ms/vs/17/release/vc_redist.x64.exe). You can also view a full download list on the [official support page](https://docs.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist). - -### ImportError: libpython3.8.so.1.0: cannot open shared object file: No such file or directory - -To resolve missing external dependency on Ubuntu* 18.04, execute the following command: -```sh -sudo apt-get install libpython3.8 -``` - -## Additional Resources - -- [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit) -- [OpenVINO™ Documentation](https://docs.openvino.ai/) -- [OpenVINO™ Notebooks](https://github.com/openvinotoolkit/openvino_notebooks) -- [OpenVINO Installation Selector Tool](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) - -Copyright © 2018-2024 Intel Corporation -> **LEGAL NOTICE**: Your use of this software and any required dependent software (the -“Software Package”) is subject to the terms and conditions of the [Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0.html) for the Software Package, which may also include notices, disclaimers, or -license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details. 
- ->Intel is committed to the respect of human rights and avoiding complicity in human rights abuses, a policy reflected in the [Intel Global Human Rights Principles](https://www.intel.com/content/www/us/en/policy/policy-human-rights.html). Accordingly, by accessing the Intel material on this platform you agree that you will not use the material in a product or application that causes or contributes to a violation of an internationally recognized human right. diff --git a/scripts/utils/create_package.py b/scripts/utils/create_package.py deleted file mode 100644 index 2db714b407267d..00000000000000 --- a/scripts/utils/create_package.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -from shutil import rmtree - -from utils import Automation - -parser = argparse.ArgumentParser() -parser.add_argument("--build_number", type=int, help="Build number to be added to package version", default=0, ) -args = parser.parse_args() - -auto = Automation() -base_dir = os.path.dirname(__file__) -bom_path = os.path.join(base_dir, "package_BOM.txt") -bom = auto.parse_bom(bom_path=bom_path) -dir_to_tar = auto.copy_files_from_bom(root_path=os.path.join(os.path.dirname(__file__), ".."), bom=bom) diff --git a/src/bindings/python/docs/requirements_management.md b/src/bindings/python/docs/requirements_management.md index bb4ebf08ddb65b..f8e4faff412acf 100644 --- a/src/bindings/python/docs/requirements_management.md +++ b/src/bindings/python/docs/requirements_management.md @@ -71,9 +71,6 @@ If a package version differs between `requirements.txt` files, it can't be unifi - Exclude this package from `constraints.txt` and keep its version in `requirements.txt` - Exclude this `requirements.txt` file from the constraints system -#### 3. Lack of support in distutils -`distutils` is a package used for building Python wheels. It does not offer support for constraints files, so custom requirement parsers had to be added. A parser example can be found [here](https://github.com/openvinotoolkit/openvino/blob/master/tools/mo/setup.py) - functions `read_constraints()` and `read_requirements()`. - ## Implementation in OpenVINO The implementation in OpenVINO is a subject to change. At the time of writing, there are three `constraints.txt` files with the following requirement coverage: diff --git a/src/bindings/python/setup.cfg b/src/bindings/python/setup.cfg index 89fabcb659c108..8f28d0291bbd4c 100644 --- a/src/bindings/python/setup.cfg +++ b/src/bindings/python/setup.cfg @@ -7,7 +7,6 @@ skip_install=True deps = -rrequirements.txt -rrequirements_test.txt - -r /openvino/tools/mo/requirements.txt # for torchvision -> OV preprocess converter -r /openvino/src/frontends/onnx/tests/requirements.txt setenv = OV_BACKEND = {env:OV_BACKEND:"CPU"} diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index c385e5467224c0..b08a055d68e721 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -199,7 +199,7 @@ PYBIND11_MODULE(_pyopenvino, m) { R"( Save model into IR files (xml and bin). Floating point weights are compressed to FP16 by default. This method saves a model to IR applying all necessary transformations that usually applied - in model conversion flow provided by mo tool. Paricularly, floatting point weights are + in model conversion flow provided by OVC tool. 
Particularly, floating point weights are compressed to FP16, debug information in model nodes are cleaned up, etc. :param model: model which will be converted to IR representation diff --git a/src/core/include/openvino/core/graph_util.hpp b/src/core/include/openvino/core/graph_util.hpp index f5694ca89fee51..7db3d65f8269b2 100644 --- a/src/core/include/openvino/core/graph_util.hpp +++ b/src/core/include/openvino/core/graph_util.hpp @@ -312,7 +312,7 @@ void serialize(const std::shared_ptr& m, /// \brief Save given model into IR. Floating point weights are compressed to FP16 by default. /// This method saves a model to IR applying all necessary transformations that usually applied -/// in model conversion flow provided by mo tool. Particularly, floating point weights are compressed to FP16. +/// in model conversion flow provided by OVC tool. Particularly, floating point weights are compressed to FP16. /// \param model Model which will be converted to IR representation. /// \param output_model Path to the output model file, must have extension .xml /// \param compress_to_fp16 Whether to compress floating point weights to FP16 (true by default) diff --git a/src/core/include/openvino/pass/manager.hpp b/src/core/include/openvino/pass/manager.hpp index 333512e905e1a6..90c6555c4de574 100644 --- a/src/core/include/openvino/pass/manager.hpp +++ b/src/core/include/openvino/pass/manager.hpp @@ -80,7 +80,7 @@ class OPENVINO_API Manager { /// configuration. /// This object allows to disable/enable transformations execution, set callback to /// particular - /// transformation. For mo details see PassConfig class. + /// transformation. For more details see PassConfig class. std::shared_ptr get_pass_config() { return m_pass_config; } diff --git a/src/docs/architecture.md b/src/docs/architecture.md index e4857202e39435..2d37258506ed46 100644 --- a/src/docs/architecture.md +++ b/src/docs/architecture.md @@ -16,11 +16,9 @@ Each OpenVINO component is projected with using DOTADIW (Do One Thing And Do It ```mermaid flowchart TB subgraph tools [Tools] - mo{{Model Optimizer}} - pot{{PoT}} + ovc{{OpenVINO model converter}} - style mo fill:#6c9f7f - style pot fill:#6c9f7f + style ovc fill:#6c9f7f end subgraph tutorials [Tutorials] samples[Samples] diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 62caebeee7d355..c60b4bf0dda9ce 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -577,7 +577,7 @@ std::shared_ptr ov::XmlDeserializer::parse_function(const pugi::xml_n // Read meta data from legacy representation if (root.child("rt_info").empty()) { // Legacy representation - // meta_data - MO meta + // meta_data - IR meta // quantization_parameters - NNCF quantization section std::unordered_set meta_names = {"meta_data", "quantization_parameters"}; read_legacy_meta_data(function, meta_names, root); diff --git a/src/frontends/onnx/README.md b/src/frontends/onnx/README.md index 5392511f5e7f1e..aecb94634d635b 100644 --- a/src/frontends/onnx/README.md +++ b/src/frontends/onnx/README.md @@ -19,7 +19,7 @@ In case of any questions, review and merge requests, contact the [openvino-onnx- ONNX Frontend implements an interface common to all frontends defined in the [Frontends API](../common/include/openvino/frontend). For backward compatibility reasons, the ONNX importer API (more lower-level abstraction approach) is still maintained. You can find it in the [ONNX Importer](./frontend/include/onnx_import/onnx.hpp). 
-The API of ONNX Frontend can be called directly. It is also used internally by [Model Optimizer](../../../tools/mo) during the conversion from ONNX to Intermediate Representation (IR). The capabilities of ONNX Frontend are used by the [ONNX Runtime via OpenVINO Execution Provider](https://onnxruntime.ai/docs/build/eps.html#openvino). +The API of ONNX Frontend can be called directly. It is also used internally by [OpenVINO Model Converter](../../../tools/ovc) during the conversion from ONNX to Intermediate Representation (IR). The capabilities of ONNX Frontend are used by the [ONNX Runtime via OpenVINO Execution Provider](https://onnxruntime.ai/docs/build/eps.html#openvino). Both `C++` and `Python` tests are implemented for the ONNX Frontend. Read the [ONNX Frontend tests](./docs/tests.md#onnx-frontend-testing-places) page for more details. diff --git a/src/frontends/onnx/docs/how_to_add_op.md b/src/frontends/onnx/docs/how_to_add_op.md index 623f670ec2d726..de8a0eab6e87fa 100644 --- a/src/frontends/onnx/docs/how_to_add_op.md +++ b/src/frontends/onnx/docs/how_to_add_op.md @@ -95,7 +95,7 @@ If an OpenVINO Core operation provides exactly what you need (without decomposit ```cpp core.add_extension(ov::frontend::onnx::OpExtension("org.openvinotoolkit", "CustomAdd")); ``` -If you need to register an custom operation for a [Model Optimizer](../../../../tools/mo) scenario, you should consider `SOExtension`. More details about it can be found in [Library with Extensions](../../../../docs/Extensibility_UG/Intro.md#create-a-library-with-extensions). +If you need to register a custom operation for an [OpenVINO Model Converter](../../../../tools/ovc) scenario, you should consider `SOExtension`. More details about it can be found in [Library with Extensions](../../../../docs/Extensibility_UG/Intro.md#create-a-library-with-extensions). ### Python-based extensions C++ based extensions have their equivalents in Python. For `ConversionExtension`, an example of usage can look like: ```python diff --git a/src/frontends/tensorflow/README.md b/src/frontends/tensorflow/README.md index 13db207ca6c6c9..d6aea567cc100a 100644 --- a/src/frontends/tensorflow/README.md +++ b/src/frontends/tensorflow/README.md @@ -2,7 +2,7 @@ The TensorFlow Frontend (TF FE) is a C++ based OpenVINO Frontend component that is responsible for reading and converting a TensorFlow model to an `ov::Model` object that further can be serialized into the Intermediate Representation (IR) format. -This is an internal API for OpenVINO that is used to implement user-facing API such as MO tool, Model Conversion API, and OpenVINO Runtime `read_model` function +This is an internal API for OpenVINO that is used to implement user-facing API such as OVC tool, Model Conversion API, and OpenVINO Runtime `read_model` function for reading TensorFlow models of the original format in run-time. Also, OpenVINO Model Server uses the frontend for serving models. Regular users should not use the frontend directly. @@ -21,16 +21,16 @@ flowchart BT tf_fe(TensorFlow Frontend) style tf_fe fill:#ee9a4d fem(Frontend Manager) - mo(Model Optimizer) + ovc(OpenVINO Model Converter) ov_runtime(OpenVINO Runtime) - model --> mo --> fem --> tf_fe + model --> ovc --> fem --> tf_fe model2 --> ov_runtime --> fem model3 --> ovms --> ov_runtime tf_fe --> ov_model click ovms "https://github.com/openvinotoolkit/model_server" ``` -The MO tool and model conversion API now use the TensorFlow Frontend as the default path for conversion to IR. 
+The OVC tool and model conversion API now use the TensorFlow Frontend as the default path for conversion to IR. ## Key contacts diff --git a/src/inference/src/dev/icompiled_model.cpp b/src/inference/src/dev/icompiled_model.cpp index 3f4a8d397ab4d9..230ddd209c0df5 100644 --- a/src/inference/src/dev/icompiled_model.cpp +++ b/src/inference/src/dev/icompiled_model.cpp @@ -64,7 +64,7 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr& model OPENVINO_ASSERT(leaf_names.find(param_name) == leaf_names.end() || param->output(0).get_names().find(param_name) != param->output(0).get_names().end(), "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.insert(param_name); param->output(0).get_tensor().add_names({param_name}); new_param->output(0).get_tensor().add_names({param_name}); @@ -96,7 +96,7 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr& model OPENVINO_ASSERT(leaf_names.find(res_name) == leaf_names.end() || result->output(0).get_names().find(res_name) != result->output(0).get_names().end(), "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.insert(res_name); result->output(0).get_tensor().add_names({res_name}); new_result->output(0).get_tensor().add_names({res_name}); diff --git a/src/inference/src/model_reader.cpp b/src/inference/src/model_reader.cpp index 7babef019b5802..ecfcfc03b11f53 100644 --- a/src/inference/src/model_reader.cpp +++ b/src/inference/src/model_reader.cpp @@ -46,7 +46,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal for (const auto& name : inputs[i].get_names()) { OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(), "Model tensor names have collisions.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.emplace(name, inputs[i].get_tensor_ptr()); } } @@ -62,7 +62,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal auto tensor_it = leaf_names.find(name); OPENVINO_ASSERT(tensor_it == leaf_names.end() || tensor_it->second == outputs[i].get_tensor_ptr(), "Model tensor names have collisions.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.emplace(name, outputs[i].get_tensor_ptr()); } } @@ -84,7 +84,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal OPENVINO_ASSERT(leaf_names.find(res_name) == leaf_names.end() || result->output(0).get_names().find(res_name) != result->output(0).get_names().end(), "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.emplace(res_name, nullptr); result->input(0).get_tensor().add_names({std::move(res_name)}); } @@ -93,7 +93,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal OPENVINO_ASSERT(leaf_names.find(param_name) == leaf_names.end() || param->output(0).get_names().find(param_name) != 
param->output(0).get_names().end(), "Model operation names have collisions with tensor names.", - " Please use MO to generate new IR version, it should allow to avoid the issue"); + " Please use OVC to generate new IR version, it should allow to avoid the issue"); leaf_names.emplace(param_name, nullptr); param->output(0).get_tensor().add_names({param_name}); } diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json index 20150fcc2e952b..4251201fbc99d8 100644 --- a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json @@ -4,7 +4,6 @@ "envVars" : [ {"name" : "PYTHONPATH", "val" : "{gitPath}/bin/intel64/Release/python/"}, {"name" : "LD_LIBRARY_PATH", "val" : "{gitPath}/bin/intel64/Release/"}, - {"name" : "MO_ROOT", "val" : "{gitPath}/tools/mo/openvino/tools/"}, {"name" : "OPENVINO_ROOT_DIR", "val" : "{gitPath}/"} ], "makeCmd" : "cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=ON -DPython3_EXECUTABLE=/usr/bin/python3.8 -DTHREADING=TBB -DENABLE_INTEL_GPU=OFF -DENABLE_SAMPLES=OFF -DENABLE_TESTS=OFF -DENABLE_CPU_DEBUG_CAPS=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_CPU_DEBUG_CAPS=OFF -DENABLE_DEBUG_CAPS=OFF -DENABLE_OPENVINO_DEBUG=OFF -DCMAKE_CXX_FLAGS=-Wno-deprecated -DCMAKE_C_FLAGS=-Wno-deprecated -DCMAKE_CXX_FLAGS=-Wno-deprecated-declarations -DCMAKE_C_FLAGS=-Wno-deprecated-declarations ..", diff --git a/src/plugins/intel_gpu/src/plugin/ops/normalize_l2.cpp b/src/plugins/intel_gpu/src/plugin/ops/normalize_l2.cpp index 670780f7617785..7733d0f1fdc847 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/normalize_l2.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/normalize_l2.cpp @@ -27,7 +27,7 @@ static void CreateNormalizeL2Op(ProgramBuilder& p, const std::shared_ptrget_eps(); - // WA for MO outputting %.6f + // WA for OVC outputting %.6f if (eps == 0.0f) { eps = 1e-10f; } diff --git a/tests/e2e_tests/test_base.py b/tests/e2e_tests/test_base.py index 66e5e773269b59..e26dfc459f84a6 100644 --- a/tests/e2e_tests/test_base.py +++ b/tests/e2e_tests/test_base.py @@ -95,7 +95,7 @@ def _test_run(instance, pregen_irs, record_property, prepare_test_info, inferenc log.error('IR pre-generation failed. 
IR will be generated in runtime ...') else: if not mo_log: - log.warning('IR was collected successfully, but MO log was not saved.') + log.warning('IR was collected successfully, but OVC log was not saved.') else: with open(mo_log, "r") as file: mo_output = file.read() diff --git a/tests/layer_tests/mo_python_api_tests/conftest.py b/tests/layer_tests/mo_python_api_tests/conftest.py deleted file mode 100644 index c90f835efc5df4..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/conftest.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import inspect - -from common.layer_test_class import get_params - - -def pytest_generate_tests(metafunc): - test_gen_attrs_names = list(inspect.signature(get_params).parameters) - params = get_params() - - metafunc.parametrize(test_gen_attrs_names, params, scope="function") diff --git a/tests/layer_tests/mo_python_api_tests/mo_convert_help.py b/tests/layer_tests/mo_python_api_tests/mo_convert_help.py deleted file mode 100644 index 3c8c9155b07054..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/mo_convert_help.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo import convert_model - -if __name__ == "__main__": - convert_model(help=True) diff --git a/tests/layer_tests/mo_python_api_tests/mo_convert_legacy_extensions_test_actual.py b/tests/layer_tests/mo_python_api_tests/mo_convert_legacy_extensions_test_actual.py deleted file mode 100644 index 35690528ff4a3a..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/mo_convert_legacy_extensions_test_actual.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import openvino.runtime as ov -import os -import tempfile -import tensorflow as tf -import unittest - -from openvino.runtime import PartialShape, Model -from openvino.test_utils import compare_functions -from common.utils.common_utils import generate_ir - - -def create_tf_model(): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - return tf_net - - -def create_ref_model_1(): - shape = [1, 2, 3] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape) - param2 = ov.opset10.parameter(shape) - add = ov.opset10.add(param1, param2) - relu = ov.opset10.relu(add) - sin = ov.opset10.sin(relu) - sigm = ov.opset10.sigmoid(sin) - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return ref_model - - -def create_ref_model_2(): - shape = [1, 2, 3] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape) - param2 = ov.opset10.parameter(shape) - add = ov.opset10.add(param1, param2) - relu = ov.opset10.relu(add) - sin = ov.opset10.sin(relu) - sigm = ov.opset10.sigmoid(sin) - tanh = ov.opset10.tanh(sigm) - parameter_list = [param1, param2] - ref_model = Model([tanh], parameter_list, "test") - return ref_model - - -class LegacyExtTest(unittest.TestCase): - test_directory = os.path.dirname(os.path.realpath(__file__)) - def test_legacy_extensions(self): - from openvino.tools.mo import 
convert_model - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - ext_path1 = os.path.join(os.path.dirname(__file__), "test_legacy_exts/test_exts_dir1") - ext_path2 = os.path.join(os.path.dirname(__file__), "test_legacy_exts/test_exts_dir2") - tf_model = create_tf_model() - tf.io.write_graph(tf_model, tmpdir, 'model.pb', False) - model = os.path.join(tmpdir, 'model.pb') - - # tests for convert_model() - ov_model = convert_model(model, extensions=ext_path1) - flag, msg = compare_functions(ov_model, create_ref_model_1(), False) - assert flag, msg - - ov_model = convert_model(model, extensions=[ext_path1, ext_path2]) - flag, msg = compare_functions(ov_model, create_ref_model_2(), False) - assert flag, msg - - ov_model = convert_model(model, extensions=','.join([ext_path1, ext_path2])) - flag, msg = compare_functions(ov_model, create_ref_model_2(), False) - assert flag, msg - - from openvino.runtime import Core - core = Core() - - # tests for MO cli tool - exit_code, stderr = generate_ir(coverage=False, **{"input_model": model, - "extensions": ext_path1, - "output_dir": tmpdir}) - assert not exit_code - - ov_model = core.read_model(os.path.join(tmpdir, "model.xml")) - flag, msg = compare_functions(ov_model, create_ref_model_1(), False) - assert flag, msg - - exit_code, stderr = generate_ir(coverage=False, **{"input_model": model, - "extensions": ','.join([ext_path1, ext_path2]), - "output_dir": tmpdir}) - assert not exit_code - - ov_model = core.read_model(os.path.join(tmpdir, "model.xml")) - flag, msg = compare_functions(ov_model, create_ref_model_2(), False) - assert flag, msg diff --git a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_config_transform/front/custom_transform.py b/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_config_transform/front/custom_transform.py deleted file mode 100644 index b3b9cdf72fe1e4..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_config_transform/front/custom_transform.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error - - -class ConfigBasedTestReplacement(FrontReplacementFromConfigFileGeneral): - replacement_id = 'ConfigBasedTestReplacement' - run_not_recursively = True - - def transform_graph(self, graph: Graph, replacement_descriptions): - sigmoid_nodes = graph.get_op_nodes(op='Sigmoid') - assert len(sigmoid_nodes) > 0, "Error while applying ConfigBasedTestReplacement." 
diff --git a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir1/front/ext1.py b/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir1/front/ext1.py deleted file mode 100644 index 123dea71adf203..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir1/front/ext1.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.activation_ops import Sin - - -class DummyExt1(FrontReplacementPattern): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='ReLU'): - new_node = Sin(graph, {'name': node.soft_get('name') + '/sin'}).create_node() - node.out_port(0).get_connection().insert_node(new_node) diff --git a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir2/front/ext2.py b/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir2/front/ext2.py deleted file mode 100644 index 003aa577ed709d..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_legacy_exts/test_exts_dir2/front/ext2.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.activation_ops import Tanh - - -class DummyExt2(FrontReplacementPattern): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Sigmoid'): - new_node = Tanh(graph, {'name': node.soft_get('name') + '/tanh'}).create_node() - node.out_port(0).get_connection().insert_node(new_node) diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py deleted file mode 100644 index 58c5fff36b2fe2..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import os -import pytest -from openvino.runtime import Model, Layout, PartialShape, Shape, layout_helpers, Type, Dimension -from openvino.tools.mo import LayoutMap, InputCutInfo -import openvino.runtime as ov -from common.mo_convert_test_class import CommonMOConvertTest -from common.utils.tf_utils import save_to_pb - - -class TestComplexParams(CommonMOConvertTest): - def create_tf_model(self, tmp_dir): - # - # Create Tensorflow model with multiple inputs/outputs - # - - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input2') - inp3 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input3') - - relu1 = tf.nn.relu(inp1, name='Relu1') - relu2 = tf.nn.relu(inp2, name='Relu2') - relu3 = tf.nn.relu(inp3, name='Relu3') - - concat = tf.concat([relu1, relu2, relu3], axis=0, name='Concat') - - outputs = tf.split(concat, 3) - outputs_list = [] - for i, output in enumerate(outputs): - outputs_list.append(tf.nn.sigmoid(output, name='Sigmoid_{}'.format(i))) - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # save 
model to .pb and return path to the model - return save_to_pb(tf_net, tmp_dir) - - def create_tf_model_no_concat(self, tmp_dir): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input2') - inp3 = tf.compat.v1.placeholder(tf.bool, [], 'Input3') - output2 = inp3 - - relu1 = tf.nn.sigmoid(inp1, name='Relu1') - relu2 = tf.nn.sigmoid(inp2, name='Relu2') - output = relu1 + relu2 - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # save model to .pb and return path to the model - return save_to_pb(tf_net, tmp_dir) - - def create_tf_model_single_input_output(self, tmp_dir): - # - # Create Tensorflow model with single input/output - # - - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input') - - relu = tf.nn.relu(inp, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # save model to .pb and return path to the model - return save_to_pb(tf_net, tmp_dir) - - def create_tf_model_no_sigmoid(self, tmp_dir): - # - # Create Tensorflow model without Sigmoid nodes - # - - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # save model to .pb and return path to the model - return save_to_pb(tf_net, tmp_dir) - - def create_tf_param_res_model(self, tmp_dir): - # - # Create Tensorflow model with following pattern: - # Input ---\ - # Add --> Identity - # Input1 ---/ - # - # This graph is needed for transform test. Input and Identity are replaced with ReadValue and Assign ops. 
- - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input') - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 3, 2, 2], 'Input1') - sum1 = tf.add(inp, inp1, "Add1") - result = tf.identity(sum1, name='Identity') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # save model to .pb and return path to the model - return save_to_pb(tf_net, tmp_dir) - - test_data = [ - {'params_test': {'input_shape': [PartialShape([2, 3, 4]), - [2, 3, 4], - [Dimension(2), Dimension(3), Dimension(4)]], - 'input':['Input1', 'Input2', 'Relu3'], 'compress_to_fp16': True}, - 'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Relu3'}}, - {'params_test': {'input_shape': [PartialShape([Dimension(), Dimension(1, 3), Dimension(4, -1), Dimension(-1, 5)]), - [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)], - [Dimension(), 3, Dimension(4, -1), Dimension(-1, 5)]], - 'compress_to_fp16': True, - 'input':['Input1', 'Input2', 'Relu3']}, - 'params_ref': {'input_shape': "[?,1..3,4..,..5],[?,1..3,4,..5],[?,3,4..,..5]", 'input': 'Input1,Input2,Relu3'}}, - {'params_test': {'input': [InputCutInfo("Relu1", Shape([3, 2]), Type(np.int32)), - InputCutInfo("Relu2", PartialShape([Dimension(3, 10), Dimension(2, -1)]), np.int32), - InputCutInfo("Relu3", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])]}, - 'params_ref': {'input': "Relu1[3 2]{i32},Relu2[3..10 2..]{i32},Relu3[3 2]{i32}->[1 2 3 4 5 6]"}}, - {'params_test': {'input': [("Relu1", Shape([3, 2]), Type(np.int32)), - (np.int32, "Relu2", PartialShape([Dimension(3, 10), Dimension(2, -1)])), - ([3, 2],"Relu3", Type(np.int32))]}, - 'params_ref': {'input': "Relu1[3 2]{i32},Relu2[3..10 2..]{i32},Relu3[3 2]{i32}"}}, - {'params_test': {'output': ["Sigmoid_0", "Sigmoid_2"]}, - 'params_ref': {'output': "Sigmoid_0,Sigmoid_2"}}, - {'params_test': {'mean_values': {'Input1:0': [0.5,1.3,0.67], 'Input2:0':[4.2, 6.7, 3.15], 'Input3:0':[0.757, 4.6, 7.3]}, - 'compress_to_fp16': True}, - 'params_ref': {'mean_values': "Input1:0[0.5,1.3,0.67],Input2:0[4.2,6.7,3.15],Input3:0[0.757,4.6,7.3]"}}, - {'params_test': { - 'mean_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'compress_to_fp16': True}, - 'params_ref': {'mean_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}}, - {'params_test': {'scale_values': {'Input1:0': [0.5,1.3,0.67], 'Input2:0':[4.2, 6.7, 3.15], 'Input3:0':[0.757, 4.6, 7.3]}, - 'compress_to_fp16': True}, - 'params_ref': {'scale_values': "Input1:0[0.5,1.3,0.67],Input2:0[4.2,6.7,3.15],Input3:0[0.757,4.6,7.3]"}}, - {'params_test': { - 'scale_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'compress_to_fp16': True}, - 'params_ref': {'scale_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}}, - {'params_test': { - 'source_layout': {'Input1:0': Layout("nchw"), 'Input2:0': "nchw", 'Input3:0': "nc??"}, 'compress_to_fp16': True}, - 'params_ref': {'source_layout': "Input1:0(nchw),Input2:0(nchw),Input3:0(nc??)"}}, - {'params_test': { - 'target_layout': {'Input1:0': Layout("nhwc"), 'Input2:0': "nhwc", 'Input3:0': "n??c"}, 'compress_to_fp16': True}, - 'params_ref': {'target_layout': "Input1:0(nhwc),Input2:0(nhwc),Input3:0(n??c)"}}, - {'params_test': { - 'layout': {'Input1:0': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"), - 'Input2:0': LayoutMap(source_layout="nc??", target_layout=Layout("n??c")), - 'Input3:0': LayoutMap(source_layout="abcd", target_layout="acdb")}, 
'compress_to_fp16': True}, - 'params_ref': {'layout': "Input1:0(nchw->nhwc),Input2:0(nc??->n??c),Input3:0(abcd->acdb)"}}, - {'params_test': {'input': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]]}, - 'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}}, - {'params_test': {'input': [np.int32, Type(np.int32), np.int32]}, - 'params_ref': {'input': 'Input1:0{i32},Input2:0{i32},Input3:0{i32}'}}, - {'params_test': {'input': [InputCutInfo(shape=[1], type=np.int32, value=[10]), - InputCutInfo(shape=[1], type=np.int32, value=[20]), - InputCutInfo(shape=[1], type=np.int32, value=[30])]}, - 'params_ref': {'input': 'Input1[1]{i32}->[10],Input2[1]{i32}->[20],Input3[1]{i32}->[30]'}} - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - tf_net_path = self.create_tf_model(temp_dir) - - test_params = params['params_test'] - ref_params = params['params_ref'] - test_params.update({'use_convert_model_from_mo': True}) - test_params.update({'input_model': tf_net_path}) - ref_params.update({'input_model': tf_net_path}) - self._test(temp_dir, test_params, ref_params) - - test_data = [ - {'params_test': {'input_shape': [[Dimension(1), 2, 3], [Dimension(1), 2, 3]], - 'freeze_placeholder_with_value': 'Input3->[1]'}, - - 'params_ref': {'input_shape': '[1,2,3],[1,2,3]', - 'freeze_placeholder_with_value': 'Input3->[1]'}}, - {'params_test': {'input': [PartialShape([Dimension(-1), 5, 6]), [-1, 5, 6]], - 'freeze_placeholder_with_value': 'Input3->[1]'}, - - 'params_ref': {'input': 'Input1:0[?,5,6],Input2:0[?,5,6]', - 'freeze_placeholder_with_value': 'Input3->[1]'}}, - {'params_test': {'input': [np.float16, np.float16], - 'input_shape': [[10, 20], [10, 20]], - 'freeze_placeholder_with_value': 'Input3->[1]'}, - - 'params_ref': {'input': 'Input1:0{f16},Input2:0{f16}', - 'input_shape': "[10,20],[10,20]", - 'freeze_placeholder_with_value': 'Input3->[1]'}}, - - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - tf_net_path = self.create_tf_model_no_concat(temp_dir) - - test_params = params['params_test'] - ref_params = params['params_ref'] - test_params.update({'input_model': tf_net_path}) - test_params.update({'use_convert_model_from_mo': True, 'compress_to_fp16': True}) - ref_params.update({'input_model': tf_net_path}) - self._test(temp_dir, test_params, ref_params) - - test_data = [ - # When use_convert_model_from_mo=True legacy openvino.tools.mo.convert_model is used - # By default compress_to_fp16 in Python API is False but for mo cli tool (used for params_ref) it's True. - # compress_to_fp16 should be specified explicitly either in 'param_test' or 'params_ref' (or in both) - # Check all args combinations. 
- {'params_test': {'input_shape': PartialShape([2, 3, 4]), 'compress_to_fp16': True}, - 'params_ref': {'input_shape': "[2,3,4]"}}, - {'params_test': {'input_shape': PartialShape([2, 3, 4])}, - 'params_ref': {'input_shape': "[2,3,4]", 'compress_to_fp16': False}}, - {'params_test': {'input_shape': PartialShape([2, 3, 4]), 'compress_to_fp16': True}, - 'params_ref': {'input_shape': "[2,3,4]", 'compress_to_fp16': True}}, - {'params_test': {'input_shape': PartialShape([2, 3, 4]), 'compress_to_fp16': False}, - 'params_ref': {'input_shape': "[2,3,4]", 'compress_to_fp16': False}}, - - # ovc.convert_model with save_model are used, by default save_model compresses to fp16 same as cli tool. - # Check all args combinations. - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]"}}, - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6]), 'compress_to_fp16': True}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]"}}, - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]", 'compress_to_fp16': True}}, - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6]), 'compress_to_fp16': True}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]", 'compress_to_fp16': True}}, - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6]), 'compress_to_fp16': False}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]", 'compress_to_fp16': False}}, - - {'params_test': {'input_shape': [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)], 'compress_to_fp16': True}, - 'params_ref': {'input_shape': "[?,1..3,4,..5]"}}, - {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])}, - 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]"}}, - {'params_test': {'input': ("Relu", [3, 2], Type(np.int32))}, - 'params_ref': {'input': "Relu[3 2]{i32}"}}, - {'params_test': {'input': ("Relu", Type(np.int32))}, - 'params_ref': {'input': "Relu{i32}"}}, - {'params_test': {'input': ("Relu", [3, 2])}, - 'params_ref': {'input': "Relu[3 2]"}}, - {'params_test': {'input': ("Relu")}, - 'params_ref': {'input': "Relu"}}, - {'params_test': {'mean_values': [0.5, 1.3, 0.67], 'compress_to_fp16': True}, - 'params_ref': {'mean_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'scale_values': [0.5, 1.3, 0.67], 'compress_to_fp16': True}, - 'params_ref': {'scale_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'source_layout': Layout("nchw"), 'compress_to_fp16': True}, - 'params_ref': {'source_layout': "nchw"}}, - {'params_test': {'target_layout': Layout("nchw"), 'compress_to_fp16': True}, - 'params_ref': {'target_layout': "nchw"}}, - {'params_test': {'layout': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"), 'compress_to_fp16': True}, - 'params_ref': {'layout': "nchw->nhwc"}}, - {'params_test': {'layout': Layout("nchw"), 'compress_to_fp16': True}, - 'params_ref': {'layout': "nchw"}}, - {'params_test': {'input': [3, 2]}, - 'params_ref': {'input': "Input:0[3 2]"}}, - {'params_test': {'input': [Dimension(3,10), 2]}, - 'params_ref': {'input': "Input:0[3..10 2]"}}, - {'params_test': {'input': (-1, 10)}, - 'params_ref': {'input': "Input:0[?,10]"}}, - {'params_test': {'input': PartialShape([-1, 10])}, - 'params_ref': {'input': "Input:0[?,10]"}}, - {'params_test': {'input': np.int32}, - 'params_ref': 
{'input': "Input:0{i32}"}}, - {'params_test': {'input': InputCutInfo(shape=[1], type=np.int32, value=[10])}, - 'params_ref': {'input': "Input:0[1]{i32}->[10]"}}, - {'params_test': {'input': (np.int32, [1, 2, 3])}, - 'params_ref': {'input': "Input:0[1,2,3]{i32}"}}, - {'params_test': {'input_shape': [Dimension(3, 10), 10, -1], 'compress_to_fp16': True}, - 'params_ref': {'input_shape': '[3..10,10,?]'}}, - {'params_test': {'input': [Dimension(3, 10), 10, -1]}, - 'params_ref': {'input': 'Input:0[3..10,10,?]'}}, - {'params_test': {'input': PartialShape([1, 100, 100, 3]), 'mean_values': [0.5, 1.3, 0.67], 'compress_to_fp16': True}, - 'params_ref': {'input': "Input:0[1,100,100,3]", 'mean_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'input': [1, 100, 100, 3], 'scale_values': [0.5, 1.3, 0.67], 'compress_to_fp16': True}, - 'params_ref': {'input': "Input:0[1,100,100,3]", 'scale_values': "[0.5,1.3,0.67]"}}, - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_mo_convert_tf_model_single_input_output(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - tf_net_path = self.create_tf_model_single_input_output(temp_dir) - - test_params = params['params_test'] - ref_params = params['params_ref'] - test_params.update({'use_convert_model_from_mo': True}) - test_params.update({'input_model': tf_net_path}) - ref_params.update({'input_model': tf_net_path}) - self._test(temp_dir, test_params, ref_params) - - @pytest.mark.nightly - @pytest.mark.precommit - def test_mo_convert_clearing_transformation_registry(self, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - tf_net_path = self.create_tf_model_single_input_output(temp_dir) - from openvino.tools.mo import convert_model - - config_path = os.path.join(os.path.dirname(__file__), "test_transform_config/test_config.json") - test_config_based_transform = os.path.join(os.path.dirname(__file__), "test_legacy_exts/test_config_transform/") - - # apply config based transformation on model - _ = convert_model(input_model=tf_net_path, transformations_config=config_path, - extensions=test_config_based_transform) - - # convert another model which would fail if custom transform from config_path applied - tf_net_path = self.create_tf_model_no_sigmoid(temp_dir) - _ = convert_model(input_model=tf_net_path, extensions=test_config_based_transform) - - # check that CustomReplacementRegistry.registry is cleared - from openvino.tools.mo.front.common.custom_replacement_registry import CustomReplacementRegistry - assert len(CustomReplacementRegistry.registry) == 0 diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py deleted file mode 100644 index d4b798c6d532a4..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import openvino.runtime as ov -import pytest -from common.mo_convert_test_class import CommonMOConvertTest -from common.onnx_layer_test_class import save_to_onnx -from openvino.runtime import PartialShape, Model - - -class TestONNXExtensions(CommonMOConvertTest): - def create_onnx_model(self, tmp_dir): - # - # Create ONNX model - # - - import onnx - from onnx import helper - from onnx import TensorProto - - shape = [2, 3, 4] - - input = helper.make_tensor_value_info( - 'input', TensorProto.FLOAT, 
shape) - output = helper.make_tensor_value_info( - 'output', TensorProto.FLOAT, shape) - - node_def = onnx.helper.make_node( - 'LeakyRelu', - inputs=['input'], - outputs=['LeakyRelu_data'], - alpha=0.1 - ) - node_def2 = onnx.helper.make_node( - 'Elu', - inputs=['LeakyRelu_data'], - outputs=['output'], - alpha=0.1 - ) - - # Create the graph (GraphProto) - graph_def = helper.make_graph( - [node_def, node_def2], - 'test_model', - [input], - [output], - ) - - # Create the model (ModelProto) - onnx_net = helper.make_model(graph_def, producer_name='test_model') - - # save model to .onnx and return path to the model - return save_to_onnx(onnx_net, tmp_dir) - - def create_custom_extension_leaky_relu_to_relu(): - # replaces LeakyRelu with Relu - from openvino.frontend import ConversionExtension - from openvino.frontend import NodeContext - import openvino.runtime.opset14 as ops - - def custom_converter(node: NodeContext): - input = node.get_input(0) - relu = ops.relu(input) - return [relu.output(0)] - - return ConversionExtension("LeakyRelu", custom_converter) - - def create_custom_op_extension_leaky_relu_to_relu(): - # replaces LeakyRelu with Relu - from openvino.frontend import OpExtension - - return OpExtension("Relu", "LeakyRelu") - - def create_custom_extension_elu_to_sigmoid(): - # replaces Elu with Sigmoid - from openvino.frontend import ConversionExtension - from openvino.frontend import NodeContext - import openvino.runtime.opset14 as ops - - def custom_converter(node: NodeContext): - input = node.get_input(0) - sigm = ops.sigmoid(input) - return [sigm.output(0)] - - return ConversionExtension("Elu", custom_converter) - - def create_ref_graph1(): - shape = PartialShape([2, 3, 4]) - param = ov.opset14.parameter(shape, dtype=np.float32) - param.get_output_tensor(0).set_names({"input"}) - relu = ov.opset14.relu(param) - relu.get_output_tensor(0).set_names({"LeakyRelu_data"}) - elu = ov.opset14.elu(relu, alpha=0.1) - elu.get_output_tensor(0).set_names({"output"}) - - return Model([elu], [param], "test") - - def create_ref_graph2(): - shape = PartialShape([2, 3, 4]) - param = ov.opset14.parameter(shape, dtype=np.float32) - param.get_output_tensor(0).set_names({"input"}) - relu = ov.opset14.relu(param) - relu.get_output_tensor(0).set_names({"LeakyRelu_data"}) - sigmoid = ov.opset14.sigmoid(relu) - sigmoid.get_output_tensor(0).set_names({"output"}) - - return Model([sigmoid], [param], "test") - - test_data = [ - {'params_test': {'extensions': create_custom_extension_leaky_relu_to_relu()}, - 'ref_graph': create_ref_graph1()}, - {'params_test': {'extensions': create_custom_op_extension_leaky_relu_to_relu()}, - 'ref_graph': create_ref_graph1()}, - {'params_test': {'extensions': [create_custom_extension_leaky_relu_to_relu(), - create_custom_extension_elu_to_sigmoid()]}, - 'ref_graph': create_ref_graph2()} - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_onnx_mo_convert_extensions(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - onnx_net_path = self.create_onnx_model(temp_dir) - - test_params = params['params_test'] - test_params.update({'input_model': onnx_net_path}) - test_params.update({'use_convert_model_from_mo': True}) - self._test_by_ref_graph(temp_dir, test_params, params['ref_graph']) - - -class TestPyTorchExtensions(CommonMOConvertTest): - def create_model(self, tmp_dir): - import torch - - class CosModel(torch.nn.Module): - def __init__(self): - super(CosModel, self).__init__() - - def 
forward(self, x): - return torch.cos(x.to(torch.float32)) - - return CosModel() - - def create_custom_extension_cos_to_sin(): - from openvino.frontend import ConversionExtension - from openvino.frontend import NodeContext - import openvino.runtime.opset14 as ops - - def custom_converter(node: NodeContext): - input = node.get_input(0) - sin = ops.sin(input) - return sin.outputs() - - return ConversionExtension("aten::cos", custom_converter) - - def create_custom_op_extension_cos_to_sin(): - from openvino.frontend import OpExtension - - return OpExtension("Sin", "aten::cos") - - def create_ref_graph(): - shape = PartialShape.dynamic() - param = ov.opset14.parameter(shape, dtype=ov.Type.dynamic) - param.get_output_tensor(0).set_names({"x"}) - convert = ov.opset14.convert(param, ov.Type.f32) - convert.get_output_tensor(0).set_names({"5"}) - sin = ov.opset14.sin(convert) - - return Model([sin], [param], "test") - - test_data = [ - {'params_test': {'extension': create_custom_extension_cos_to_sin()}, - 'ref_graph': create_ref_graph()}, - {'params_test': {'extension': create_custom_op_extension_cos_to_sin()}, - 'ref_graph': create_ref_graph()}, - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_pt_mo_convert_extensions(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - model = self.create_model(temp_dir) - - test_params = params['params_test'] - test_params.update({'input_model': model}) - self._test_by_ref_graph(temp_dir, test_params, params['ref_graph']) - - -class TestTfExtensions(CommonMOConvertTest): - def create_keras_model(self, temp_dir): - import tensorflow as tf - - tf.keras.backend.clear_session() - tf.compat.v1.reset_default_graph() - - input_name = "Input1" - input_shape = [None, 1, 2, 3] - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, input_shape, input_name) - tf.raw_ops.Cos(x=x, name='res') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - return tf_net - - def create_custom_extension_cos_to_sin(): - from openvino.frontend import ConversionExtension - from openvino.frontend import NodeContext - import openvino.runtime.opset14 as ops - - def custom_converter(node: NodeContext): - input = node.get_input(0) - sin = ops.sin(input) - return sin.outputs() - - return ConversionExtension("Cos", custom_converter) - - def create_custom_op_extension_cos_to_sin(): - from openvino.frontend import OpExtension - - return OpExtension("Sin", "Cos") - - def create_ref_graph(): - shape = PartialShape([-1, 1, 2, 3]) - param = ov.opset14.parameter(shape, dtype=np.float32) - param.get_output_tensor(0).set_names({"Input1:0"}) - y = ov.opset14.sin(param) - - parameter_list = [param] - - return Model([y], parameter_list, "test") - - test_data = [ - {'params_test': {'extension': create_custom_extension_cos_to_sin()}, - 'ref_graph': create_ref_graph()}, - {'params_test': {'extension': create_custom_op_extension_cos_to_sin()}, - 'ref_graph': create_ref_graph()}, - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_tf_mo_convert_extensions(self, params, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - model = self.create_keras_model(temp_dir) - - test_params = params['params_test'] - test_params.update({'input_model': model}) - self._test_by_ref_graph(temp_dir, test_params, params['ref_graph']) diff --git 
a/tests/layer_tests/mo_python_api_tests/test_mo_convert_onnx.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_onnx.py deleted file mode 100644 index ddc636c4efec38..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_onnx.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import io - -import numpy as np -import openvino.runtime as ov -import pytest -from openvino.runtime import Model - -from common.mo_convert_test_class import CommonMOConvertTest - - -def make_graph_proto_model(): - import onnx - from onnx import helper - from onnx import TensorProto - - shape = [2, 3, 4] - - input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape) - output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape) - - node_def = onnx.helper.make_node( - 'LeakyRelu', - inputs=['input'], - outputs=['LeakyRelu_data'], - alpha=0.1 - ) - node_def2 = onnx.helper.make_node( - 'Elu', - inputs=['LeakyRelu_data'], - outputs=['output'], - alpha=0.1 - ) - - # Create the graph (GraphProto) - graph_def = helper.make_graph( - [node_def, node_def2], - 'test_model', - [input], - [output], - ) - - # Create the model (ModelProto) - onnx_net = helper.make_model(graph_def, producer_name='test_model') - - return onnx_net - -def create_ref_model(shape): - param1 = ov.opset8.parameter(shape, dtype=np.float32) - slope_const = ov.opset8.constant([0.1], dtype=np.float32) - prelu = ov.opset8.prelu(param1, slope=slope_const) - relu = ov.opset8.elu(prelu, alpha=np.float32(0.1)) - parameter_list = [param1] - return Model([relu], parameter_list, "test") - -def create_bytes_io(): - import onnx - onnx_model = make_graph_proto_model() - - file_like_object = io.BytesIO() - onnx.save(onnx_model, file_like_object) - - ref_model = create_ref_model([2,3,4]) - return file_like_object, ref_model, {} - - -class TestMoConvertONNX(CommonMOConvertTest): - test_data = ['create_bytes_io'] - @pytest.mark.parametrize("create_model", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_mo_convert_onnx(self, create_model, ie_device, precision, ir_version, - temp_dir): - fw_model, graph_ref, mo_params = eval(create_model)() - - test_params = {'input_model': fw_model} - if mo_params is not None: - test_params.update(mo_params) - test_params.update({'use_convert_model_from_mo': True}) - self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) - diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_paddle.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_paddle.py deleted file mode 100644 index 600646e4589c6f..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_paddle.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest -from common.mo_convert_test_class import CommonMOConvertTest - -import openvino.runtime as ov -from openvino.runtime import PartialShape, Model - -def make_pd_dynamic_graph_model(): - import paddle - paddle.disable_static() - class NeuralNetwork(paddle.nn.Layer): - def __init__(self): - super(NeuralNetwork, self).__init__() - self.relu_sigmoid_stack = paddle.nn.Sequential( - paddle.nn.ReLU(), - paddle.nn.Sigmoid()) - def forward(self, input): - return self.relu_sigmoid_stack(input) - return NeuralNetwork() - -def make_pd_static_graph_model(shape): - import paddle - import paddle.nn - - paddle.enable_static() - - x = 
paddle.static.data(name="x", shape=shape) - y = paddle.static.data(name="y", shape=shape) - relu = paddle.nn.ReLU() - sigmoid = paddle.nn.Sigmoid() - y = sigmoid(relu(x)) - - exe = paddle.static.Executor(paddle.CPUPlace()) - exe.run(paddle.static.default_startup_program()) - return exe, x, y - -def make_pd_hapi_graph_model(shape): - import paddle - paddle.disable_static() - from paddle.static import InputSpec - net = paddle.nn.Sequential( - paddle.nn.ReLU(), - paddle.nn.Sigmoid()) - input = InputSpec(shape, 'float32', 'x') - label = InputSpec(shape, 'float32', 'label') - - model = paddle.Model(net, input, label) - optim = paddle.optimizer.SGD(learning_rate=1e-3, - parameters=model.parameters()) - model.prepare(optim, paddle.nn.CrossEntropyLoss(), paddle.metric.Accuracy()) - return model - -def make_ref_graph_model(shape, dtype=np.float32): - shape = PartialShape(shape) - param = ov.opset8.parameter(shape, name="x", dtype=dtype) - - relu = ov.opset8.relu(param) - sigm = ov.opset8.sigmoid(relu) - - model = Model([sigm], [param], "test") - return model - -def create_paddle_dynamic_module(tmp_dir): - import paddle - shape = [2,3,4] - pd_model = make_pd_dynamic_graph_model() - ref_model = make_ref_graph_model(shape) - - x = paddle.static.InputSpec(shape=shape, dtype='float32', name='x') - return pd_model, ref_model, {"example_input": [x]} - -def create_paddle_static_module(tmp_dir): - shape = [2,3,4] - pd_model, x, y = make_pd_static_graph_model(shape) - ref_model = make_ref_graph_model(shape) - - return pd_model, ref_model, {"example_input": [x], "example_output": [y]} - -def create_paddle_hapi_module(tmp_dir): - shape = [2,3,4] - pd_model = make_pd_hapi_graph_model(shape) - ref_model = make_ref_graph_model(shape) - - return pd_model, ref_model, {} - -class TestMoConvertPaddle(CommonMOConvertTest): - test_data = [ - 'create_paddle_dynamic_module', - 'create_paddle_static_module', - 'create_paddle_hapi_module' - ] - @pytest.mark.skip(reason="Paddlepaddle has incompatible protobuf. 
Ticket: 95904") - @pytest.mark.parametrize("create_model", test_data) - def test_mo_import_from_memory_paddle_fe(self, create_model, ie_device, precision, ir_version, - temp_dir): - fw_model, graph_ref, mo_params = eval(create_model)(temp_dir) - test_params = {'input_model': fw_model} - if mo_params is not None: - test_params.update(mo_params) - test_params.update({'use_convert_model_from_mo': True}) - self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py deleted file mode 100644 index f08bf42822fccf..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py +++ /dev/null @@ -1,1276 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -import platform -from typing import Tuple - -import numpy as np -import openvino.runtime as ov -import pytest -import torch -from openvino.runtime import PartialShape, Dimension, Model, Type, Core, save_model -from openvino.test_utils import compare_functions - -from common.mo_convert_test_class import CommonMOConvertTest -from openvino.tools.mo import InputCutInfo -from openvino.tools.ovc import convert_model - - -class MyTorchOp(torch.autograd.Function): - @staticmethod - def symbolic(g, in_positions): - return g.op("MyTorchOp", in_positions) - - @staticmethod - def forward(self, in_positions): - out_pos = in_positions.reshape(-1) - return out_pos + 0.5 - - -def make_pt_model_one_input(): - from torch import nn - - class NeuralNetwork(nn.Module): - def __init__(self): - super(NeuralNetwork, self).__init__() - self.linear_relu_stack = nn.Sequential( - nn.ReLU(), - nn.Sigmoid(), - ) - - def forward(self, x): - logits = self.linear_relu_stack(x) - return logits - - return NeuralNetwork() - - -def make_pt_model_two_inputs(): - from torch import nn - - class NeuralNetwork(nn.Module): - def __init__(self): - super(NeuralNetwork, self).__init__() - self.linear_relu_stack = nn.Sequential( - nn.ReLU(), - nn.Sigmoid(), - ) - - def forward(self, x, y): - logits = self.linear_relu_stack(x * y) - return logits - - return NeuralNetwork() - - -def make_pt_model_with_optional_input(): - from torch import nn - - class NeuralNetwork(nn.Module): - def __init__(self): - super(NeuralNetwork, self).__init__() - self.linear_relu_stack = nn.Sequential( - nn.ReLU(), - nn.Sigmoid(), - ) - - def forward(self, x, y=None, z=None): - logits = None - if y is None: - logits = self.linear_relu_stack(x + z) - if z is None: - logits = self.linear_relu_stack(x * y) - return logits - - return NeuralNetwork() - - -def make_ref_pt_model_one_input(shape, dtype=np.float32): - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape, name="input_0", dtype=dtype) - relu = ov.opset8.relu(param1) - if dtype not in [np.float32, Type.dynamic]: - relu = ov.opset8.convert(relu, np.float32) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1] - model = Model([sigm], parameter_list, "test") - return model - - -def make_ref_pt_model_two_inputs(shape, dtype=np.float32): - if len(shape) == 2: - param1 = ov.opset8.parameter(PartialShape( - shape[0]), name="input_0", dtype=dtype) - param2 = ov.opset8.parameter(PartialShape( - shape[1]), name="input_1", dtype=dtype) - else: - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape, name="input_0", dtype=dtype) - param2 = ov.opset8.parameter(shape, name="input_1", dtype=dtype) - 
if dtype == Type.dynamic: - cl = ov.opset8.convert_like(param2, param1) - mul = ov.opset8.multiply(param1, cl) - else: - mul = ov.opset8.multiply(param1, param2) - relu = ov.opset8.relu(mul) - if dtype not in [np.float32, Type.dynamic]: - relu = ov.opset8.convert(relu, np.float32) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model = Model([sigm], parameter_list, "test") - return model - - -def make_ref_pt_model_with_optional_inputs(shape, dtype=np.float32, z_exist=False): - if len(shape) == 2: - param1 = ov.opset8.parameter(PartialShape( - shape[0]), name="input_0", dtype=dtype) - param2 = ov.opset8.parameter(PartialShape( - shape[1]), name="input_1", dtype=dtype) - else: - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape, name="input_0", dtype=dtype) - param2 = ov.opset8.parameter(shape, name="input_1", dtype=dtype) - - op = ov.opset8.multiply( - param1, param2) if not z_exist else ov.opset8.add(param1, param2) - relu = ov.opset8.relu(op) - if dtype != np.float32: - relu = ov.opset8.convert(relu, np.float32) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model = Model([sigm], parameter_list, "test") - return model - - -def create_pytorch_nn_module_case1(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([-1, -1, -1, -1]) - - sample_input1 = torch.zeros(1, 3, 10, 10) - sample_input2 = torch.zeros(1, 3, 10, 10) - sample_input = sample_input1, sample_input2 - - return pt_model, ref_model, {'example_input': sample_input} - - -def create_pytorch_nn_module_case2(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([-1, 3, -1, -1]) - - sample_input1 = torch.zeros(1, 3, 10, 10) - sample_input2 = torch.zeros(1, 3, 10, 10) - sample_input = sample_input1, sample_input2 - - return pt_model, ref_model, {'input': [PartialShape("[?,3,?,?]"), PartialShape([-1, 3, -1, -1])], - 'example_input': sample_input} - - -def create_pytorch_nn_module_with_scalar_input(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([[], [-1, 3, -1, -1]]) - - sample_input1 = torch.tensor(0.66) - sample_input2 = torch.zeros(1, 3, 10, 10) - sample_input = sample_input1, sample_input2 - - return pt_model, ref_model, {'input_shape': [PartialShape("[]"), PartialShape([-1, 3, -1, -1])], - 'example_input': sample_input} - - -def create_pytorch_nn_module_case3(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([-1, 3, -1, -1]) - - sample_input1 = torch.zeros(1, 3, 10, 10) - sample_input2 = torch.zeros(1, 3, 10, 10) - sample_input = tuple([sample_input1, sample_input2]) - - return pt_model, ref_model, {'input_shape': "[?,3,?,?],[?,3,?,?]", - 'example_input': sample_input} - - -def create_pytorch_nn_module_case4(tmp_dir): - pt_model = make_pt_model_one_input() - - sample_input = torch.zeros(1, 3, 10, 10) - - ref_model = make_ref_pt_model_one_input(PartialShape([1, 3, 20, 20])) - - return pt_model, ref_model, {'example_input': sample_input, "input": [1, 3, 20, 20]} - - -def create_pytorch_nn_module_case5(tmp_dir): - pt_model = make_pt_model_one_input() - inp_shape = PartialShape([-1, 3, Dimension(2, -1), Dimension(-1, 10)]) - ref_model = make_ref_pt_model_one_input(inp_shape) - - sample_input = torch.zeros(3, 3, 10, 10) - return pt_model, ref_model, {'example_input': sample_input, - 'input': (inp_shape, np.float32)} - - -def create_pytorch_nn_module_case6(tmp_dir): - pt_model = make_pt_model_one_input() - 
shape = PartialShape([1, 3, Dimension(2, -1), Dimension(-1, 10)]) - ref_model = make_ref_pt_model_one_input(shape) - - return pt_model, ref_model, {'input': (shape, np.float32)} - - -def create_pytorch_nn_module_case7(tmp_dir): - pt_model = make_pt_model_one_input() - - sample_input = torch.zeros(1, 3, 10, 10, dtype=torch.int32) - - ref_model = make_ref_pt_model_one_input( - PartialShape([1, 3, 20, 20]), dtype=np.int32) - - return pt_model, ref_model, {'example_input': sample_input, "input": ([1, 3, 20, 20], np.int32)} - - -def create_pytorch_nn_module_torch_size(tmp_dir): - pt_model = make_pt_model_one_input() - ref_model = make_ref_pt_model_one_input([1, 3, 2, 10]) - - return pt_model, ref_model, {'input': (torch.Size([1, 3, 2, 10]), np.float32)} - - -def create_pytorch_nn_module_sample_input_int32(tmp_dir): - pt_model = make_pt_model_one_input() - shape = PartialShape([-1, 3, Dimension(2, -1), Dimension(-1, 10)]) - - sample_input = torch.zeros(1, 3, 10, 10, dtype=torch.int32) - - ref_model = make_ref_pt_model_one_input(shape, dtype=np.int32) - - return pt_model, ref_model, {'example_input': sample_input, - 'input': (shape, np.int32)} - - -def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir): - pt_model = make_pt_model_two_inputs() - inp_shapes = [PartialShape("[?,3,?,?]"), PartialShape([-1, 3, -1, -1])] - - sample_input1 = torch.zeros(1, 3, 10, 10, dtype=torch.int32) - sample_input2 = torch.zeros(1, 3, 10, 10, dtype=torch.int32) - sample_input = sample_input1, sample_input2 - ref_model = make_ref_pt_model_two_inputs( - [PartialShape([-1, 3, -1, -1]), inp_shapes[1]], dtype=np.int32) - - return pt_model, ref_model, {'input': [(np.int32, inp_shapes[0]), (np.int32, inp_shapes[1])], - 'example_input': sample_input} - - -def create_pytorch_jit_script_module(tmp_dir): - import torch - - net = make_pt_model_two_inputs() - scripted_model = torch.jit.script(net) - - model_ref = make_ref_pt_model_two_inputs([1, 3, 5, 5]) - return scripted_model, model_ref, {'input': [([1, 3, 5, 5], np.float32), ([1, 3, 5, 5], np.float32)]} - - -def create_pytorch_jit_script_function(tmp_dir): - import torch - - @torch.jit.script - def scripted_fn(x: torch.Tensor, y: torch.Tensor): - return torch.sigmoid(torch.relu(x * y)) - - inp_shape = PartialShape([Dimension(1, -1), Dimension(-1, 5), 10]) - ref_model = make_ref_pt_model_two_inputs(inp_shape) - return scripted_fn, ref_model, {'input': [(inp_shape, Type.f32), (inp_shape, Type.f32)]} - - -def create_pytorch_nn_module_layout_list(tmp_dir): - from openvino.runtime import Layout - pt_model = make_pt_model_two_inputs() - shape = [1, 3, 10, 10] - - shape = PartialShape(shape) - ref_model = make_ref_pt_model_two_inputs(shape) - ref_model.inputs[0].node.layout = Layout('nchw') - ref_model.inputs[1].node.layout = Layout('nhwc') - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ['nchw', Layout('nhwc')], - 'use_convert_model_from_mo': True - } - - -def create_pytorch_nn_module_layout_list_case2(tmp_dir): - from openvino.runtime import Layout - pt_model = make_pt_model_two_inputs() - shape = [1, 3, 10, 10] - - shape = PartialShape(shape) - ref_model = make_ref_pt_model_two_inputs(shape) - ref_model.inputs[0].node.layout = Layout('nchw') - ref_model.inputs[1].node.layout = Layout('nhwc') - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ('nchw', Layout('nhwc')), - 'use_convert_model_from_mo': True} - - -def 
create_pytorch_nn_module_mean_list_compression_disabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - add1 = ov.opset8.add(param1, const1) - add2 = ov.opset8.add(param2, const2) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'compress_to_fp16': False, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_mean_list_compression_default(tmp_dir): - # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled - # therefore decompression Converts will not be present - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - add1 = ov.opset8.add(param1, const1) - add2 = ov.opset8.add(param2, const2) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) - const1_decompressed = ov.opset8.convert( - const1, destination_type=np.float32) - const2_decompressed = ov.opset8.convert( - const2, destination_type=np.float32) - - add1 = ov.opset8.add(param1, const1_decompressed) - add2 = ov.opset8.add(param2, const2_decompressed) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'compress_to_fp16': True, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_disabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - sub1 = ov.opset8.multiply(param1, const1) - sub2 = ov.opset8.multiply(param2, const2) - mul = ov.opset8.multiply(sub1, sub2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, 
ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'compress_to_fp16': False, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_default(tmp_dir): - # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled - # therefore decompression Converts will not be present - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - sub1 = ov.opset8.multiply(param1, const1) - sub2 = ov.opset8.multiply(param2, const2) - mul = ov.opset8.multiply(sub1, sub2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) - const1_decompressed = ov.opset8.convert( - const1, destination_type=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) - const2_decompressed = ov.opset8.convert( - const2, destination_type=np.float32) - mul1 = ov.opset8.multiply(param1, const1_decompressed) - mul2 = ov.opset8.multiply(param2, const2_decompressed) - mul3 = ov.opset8.multiply(mul1, mul2) - relu = ov.opset8.relu(mul3) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'compress_to_fp16': True, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_shapes_list_static(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([1, 3, 20, 20]) - - return pt_model, ref_model, {'input': [([1, 3, 20, 20], Type.f32), ([1, 3, 20, 20], Type.f32)]} - - -def create_pytorch_nn_module_shapes_list_static_via_input(tmp_dir): - pt_model = make_pt_model_two_inputs() - ref_model = make_ref_pt_model_two_inputs([1, 3, 20, 20]) - - return pt_model, ref_model, {'input': [([1, 3, 20, 20], np.float32), ([1, 3, 20, 20], np.float32)]} - - -def create_pytorch_nn_module_shapes_list_dynamic(tmp_dir): - pt_model = make_pt_model_two_inputs() - inp_shapes = [[Dimension(-1), 3, 20, Dimension(20, -1)], - [-1, 3, 20, Dimension(-1, 20)]] - - param1 = ov.opset8.parameter(PartialShape( - inp_shapes[0]), name="x", dtype=Type.f32) - param2 = ov.opset8.parameter(PartialShape( - inp_shapes[1]), name="y", dtype=Type.f32) - mul = ov.opset8.multiply(param1, param2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input': [(inp_shapes[0], Type.f32), (inp_shapes[1], Type.f32)]} - - -def create_pytorch_nn_module_shapes_list_dynamic_via_input(tmp_dir): - pt_model = make_pt_model_two_inputs() - inp_shapes = [[Dimension(-1), 3, 20, 
Dimension(20, -1)], - [-1, 3, 20, Dimension(-1, 20)]] - - param1 = ov.opset8.parameter(PartialShape( - inp_shapes[0]), name="x", dtype=np.float32) - param2 = ov.opset8.parameter(PartialShape( - inp_shapes[1]), name="y", dtype=np.float32) - mul = ov.opset8.multiply(param1, param2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input': [(inp_shapes[0], Type.f32), (inp_shapes[1], Type.f32)]} - - -def create_pytorch_nn_module_shapes_list_dynamic_single_input(tmp_dir): - pt_model = make_pt_model_one_input() - inp_shapes = [[Dimension(-1), 3, 20, Dimension(20, -1)], Type.f32] - ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input': inp_shapes} - - -def create_pytorch_nn_module_shapes_list_dynamic_single_input_via_input(tmp_dir): - pt_model = make_pt_model_one_input() - inp_shapes = [Dimension(-1), 3, 20, Dimension(20, -1)] - ref_model = make_ref_pt_model_one_input(inp_shapes) - return pt_model, ref_model, {'input': InputCutInfo(shape=inp_shapes, type=np.float32)} - - -def create_pytorch_nn_module_shapes_list_static_single_input(tmp_dir): - pt_model = make_pt_model_one_input() - inp_shapes = [[1, 3, 20, 20], Type.f32] - ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input': inp_shapes} - - -def create_pytorch_nn_module_shapes_list_static_single_input_via_input(tmp_dir): - pt_model = make_pt_model_one_input() - inp_shapes = [1, 3, 20, 20] - ref_model = make_ref_pt_model_one_input(inp_shapes) - return pt_model, ref_model, {'input': (inp_shapes, np.float32)} - - -def create_pytorch_nn_module_convert_pytorch_frontend1(tmp_dir): - pt_model = make_pt_model_one_input() - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param = ov.opset10.parameter(shape) - relu = ov.opset10.relu(param) - sigm = ov.opset10.sigmoid(relu) - - parameter_list = [param] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, { - "example_input": torch.zeros((1, 3, 10, 10)), - 'input': [InputCutInfo(shape=[-1, -1, -1, -1], type="f32")] - } - - -def create_pytorch_nn_module_convert_pytorch_frontend2(tmp_dir): - pt_model = make_pt_model_one_input() - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param = ov.opset10.parameter(shape, Type.i32) - relu = ov.opset10.relu(param) - convt = ov.opset10.convert(relu, "f32") - sigm = ov.opset10.sigmoid(convt) - - parameter_list = [param] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, { - "example_input": torch.zeros((1, 3, 10, 10), dtype=torch.int32), - 'input': [InputCutInfo(shape=[-1, -1, -1, -1], type="i32")] - } - - -def create_pytorch_nn_module_convert_pytorch_frontend3(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - param2 = ov.opset10.parameter(shape, dtype=np.float32) - mul = ov.opset10.multiply(param1, param2) - relu = ov.opset10.relu(mul) - sigm = ov.opset10.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, { - "example_input": [torch.zeros((1, 3, 10, 10)), torch.ones((1, 3, 10, 10))], - 'input': [InputCutInfo(shape=[-1, -1, -1, -1], type="f32"), InputCutInfo(shape=[-1, -1, -1, -1], type="f32")] - } - - -def create_pytorch_nn_module_convert_pytorch_frontend4(tmp_dir): - pt_model = 
make_pt_model_two_inputs() - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - param2 = ov.opset10.parameter(shape, dtype=np.float32) - mul = ov.opset10.multiply(param1, param2) - relu = ov.opset10.relu(mul) - sigm = ov.opset10.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, { - "example_input": {"x": torch.zeros((1, 3, 10, 10), dtype=torch.float32), - "y": torch.ones((1, 3, 10, 10), dtype=torch.float32)}, - 'input': [InputCutInfo(shape=[-1, -1, -1, -1], type="f32"), InputCutInfo(shape=[-1, -1, -1, -1], type="f32")] - } - - -def create_pytorch_jit_script_module_convert_pytorch_frontend(tmp_dir): - import torch - - net = make_pt_model_two_inputs() - scripted_model = torch.jit.script(net) - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - param2 = ov.opset10.parameter(shape, dtype=np.float32) - mul = ov.opset10.multiply(param1, param2) - relu = ov.opset10.relu(mul) - sigm = ov.opset10.sigmoid(relu) - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return scripted_model, ref_model, { - "example_input": [torch.zeros((1, 3, 10, 10)), torch.ones((1, 3, 10, 10))]} - - -def create_pytorch_jit_trace_module_convert_pytorch_frontend(tmp_dir): - import torch - - net = make_pt_model_two_inputs() - example_input = [torch.zeros((1, 3, 10, 10)), torch.ones((1, 3, 10, 10))] - scripted_model = torch.jit.trace(net, example_input) - shape = [-1, -1, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - param2 = ov.opset10.parameter(shape, dtype=np.float32) - mul = ov.opset10.multiply(param1, param2) - relu = ov.opset10.relu(mul) - sigm = ov.opset10.sigmoid(relu) - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - return scripted_model, ref_model, {"example_input": example_input} - - -def create_pytorch_module_convert_pytorch_frontend_oob(tmp_dir): - import torch - import torch.nn.functional as F - - class ConvModel(torch.nn.Module): - def __init__(self): - super(ConvModel, self).__init__() - self.weights = torch.rand([1, 3, 3, 3]) - - def forward(self, x): - return F.conv2d(x, self.weights) - - net = ConvModel() - shape = PartialShape([-1, 3, -1, -1]) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - weights = ov.opset10.constant(net.weights.numpy(force=True), dtype=np.float32) - conv = ov.opset10.convolution(param1, weights, strides=[1, 1], - pads_begin=[0, 0], pads_end=[0, 0], - dilations=[1, 1]) - parameter_list = [param1] - ref_model = Model([conv], parameter_list, "test") - return net, ref_model, {} - - -def create_pytorch_module_with_optional_inputs_case1(tmp_dir): - net = make_pt_model_with_optional_input() - example_input = {"x": torch.zeros( - (1, 3, 10, 10)), "y": torch.ones((1, 3, 10, 10))} - ref_model = make_ref_pt_model_with_optional_inputs([-1, -1, -1, -1]) - return net, ref_model, {"example_input": example_input} - - -def create_pytorch_module_with_optional_inputs_case2(tmp_dir): - net = make_pt_model_with_optional_input() - example_input = {"x": torch.zeros( - (1, 3, 10, 10)), "z": torch.ones((1, 3, 10, 10))} - ref_model = make_ref_pt_model_with_optional_inputs( - [-1, -1, -1, -1], z_exist=True) - return net, ref_model, {"example_input": example_input} - - -def create_pytorch_module_with_optional_inputs_case3(tmp_dir): - net = 
make_pt_model_with_optional_input() - example_input = {"x": torch.zeros( - (1, 3, 10, 10)), "z": torch.ones((1, 3, 10, 10))} - ref_model = make_ref_pt_model_with_optional_inputs( - [3, 3, 3, 3], z_exist=True) - return net, ref_model, {"example_input": example_input, "input": [[3, 3, 3, 3], [3, 3, 3, 3]]} - - -def create_pytorch_module_with_compressed_int8_constant_compress_to_fp16_default(tmp_dir): - import torch - import torch.nn.functional as F - - class Int8Model(torch.nn.Module): - def __init__(self): - super(Int8Model, self).__init__() - self.weights = torch.randint(-127, 128, - [1, 3, 3, 3], dtype=torch.int8) - - def forward(self, x): - cast = self.weights.to(torch.float32) - sub = cast - 0.5 - mul = sub * 0.02 - return F.conv2d(x, mul) - - net = Int8Model() - example_input = (torch.rand((1, 3, 10, 10)),) - traced_model = torch.jit.trace(net, example_input) - shape = [-1, 3, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - weights = ov.opset10.constant(net.weights.numpy(force=True)) - cast1 = ov.opset10.convert(weights, np.float32) - sub1_const = np.float32(0.5).reshape(1, 1, 1, 1) - mul1_const = np.float32(0.02).reshape(1, 1, 1, 1) - sub1 = ov.opset10.subtract(cast1, sub1_const) - mul1 = ov.opset10.multiply(sub1, mul1_const) - conv = ov.opset10.convolution(param1, mul1, strides=[1, 1], - pads_begin=[0, 0], pads_end=[0, 0], - dilations=[1, 1]) - ref_model = Model([conv], [param1], "test") - return traced_model, ref_model, {"example_input": example_input} - - -def create_pytorch_module_with_compressed_int8_constant(tmp_dir): - import torch - import torch.nn.functional as F - - class Int8Model(torch.nn.Module): - def __init__(self): - super(Int8Model, self).__init__() - self.weights = torch.randint(-127, 128, - [1, 3, 3, 3], dtype=torch.int8) - - def forward(self, x): - cast = self.weights.to(torch.float32) - sub = cast - 0.5 - mul = sub * 0.02 - return F.conv2d(x, mul) - - net = Int8Model() - example_input = (torch.rand((1, 3, 10, 10)),) - traced_model = torch.jit.trace(net, example_input) - shape = [-1, 3, -1, -1] - shape = PartialShape(shape) - param1 = ov.opset10.parameter(shape, dtype=np.float32) - weights = ov.opset10.constant(net.weights.numpy(force=True)) - cast1 = ov.opset10.convert(weights, np.float32) - sub1 = ov.opset10.subtract(cast1, np.float32(0.5).reshape(1, 1, 1, 1)) - mul1 = ov.opset10.multiply(sub1, np.float32(0.02).reshape(1, 1, 1, 1)) - conv = ov.opset10.convolution(param1, mul1, strides=[1, 1], - pads_begin=[0, 0], pads_end=[0, 0], - dilations=[1, 1]) - ref_model = Model([conv], [param1], "test") - return traced_model, ref_model, {"example_input": example_input, "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, z: Tuple[torch.Tensor, torch.Tensor]): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1), torch.cat([z2, zeros2], 2) - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - ref_model = Model([concat2, 
concat1], [param1, param2], "test") - return net, ref_model, {"example_input": {"z": (torch.zeros((1, 10)), torch.ones((1, 5, 2)))}, - "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs_compress_to_fp16_default(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, z: Tuple[torch.Tensor, torch.Tensor]): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1), torch.cat([z2, zeros2], 2) - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float16) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float16) - const1_decompress = ov.opset10.convert(constant_zeros1, np.float32) - const2_decompress = ov.opset10.convert(constant_zeros2, np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - concat1 = ov.opset10.concat([param1, const1_decompress], 1) - concat2 = ov.opset10.concat([param2, const2_decompress], 2) - ref_model = Model([concat2, concat1], [param1, param2], "test") - return net, ref_model, {"example_input": {"z": (torch.zeros((1, 10)), torch.ones((1, 5, 2)))}} - - -def create_pytorch_module_with_nested_inputs2(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, x: torch.Tensor, z: Tuple[torch.Tensor, torch.Tensor]): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1) + x, torch.cat([z2, zeros2], 2) - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param0 = ov.opset10.parameter(PartialShape([-1, -1]), dtype=np.float32) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - add = ov.opset10.add(concat1, param0) - ref_model = Model([concat2, add], [param0, param1, param2], "test") - return net, ref_model, { - "example_input": {"x": torch.ones((1, 10)), "z": (torch.zeros((1, 9)), torch.ones((1, 5, 5)))}, - "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs3(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, z: Tuple[torch.Tensor, torch.Tensor], x: torch.Tensor): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1) + x, torch.cat([z2, zeros2], 2) - - net = PTModel() - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - param3 = ov.opset10.parameter(PartialShape([-1, -1]), dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - add = ov.opset10.add(concat1, param3) - ref_model = Model([concat2, add], [param1, param2, param3], "test") - return net, ref_model, { - "example_input": {"x": 
torch.ones((1, 10)), "z": (torch.zeros((1, 9)), torch.ones((1, 5, 3)))}, - "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs4(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, x: torch.Tensor, z: Tuple[torch.Tensor, torch.Tensor], y: torch.Tensor): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1) + x, torch.cat([z2, zeros2], 2) * y - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - param3 = ov.opset10.parameter(PartialShape([-1, -1]), dtype=np.float32) - param4 = ov.opset10.parameter(PartialShape([-1]), dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - add = ov.opset10.add(concat1, param3) - mul = ov.opset10.multiply(concat2, param4) - ref_model = Model([mul, add], [param3, param1, param2, param4], "test") - return net, ref_model, { - "example_input": {"x": torch.ones((1, 10)), "z": (torch.zeros((1, 9)), torch.ones((1, 5, 10))), - "y": torch.ones((1,))}, - "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs5(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, x: torch.Tensor, z: Tuple[torch.Tensor, torch.Tensor], y: torch.Tensor): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - return torch.cat([z1, zeros1], 1) + x, torch.cat([z2, zeros2], 2) * y - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param0 = ov.opset10.parameter(PartialShape([-1, -1]), dtype=np.float32) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - param4 = ov.opset10.parameter(PartialShape([-1]), dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - add = ov.opset10.add(concat1, param0) - mul = ov.opset10.multiply(concat2, param4) - ref_model = Model([mul, add], [param0, param1, param2, param4], "test") - return net, ref_model, { - "example_input": [torch.ones((1, 10)), (torch.zeros((1, 9)), torch.ones((1, 5, 10))), torch.ones((1,))], - "compress_to_fp16": False} - - -def create_pytorch_module_with_nested_inputs6(tmp_dir): - class PTModel(torch.nn.Module): - - def forward(self, x: torch.Tensor, y: torch.Tensor = None, z: Tuple[torch.Tensor, torch.Tensor] = None): - z1, z2 = z - zeros1 = torch.zeros((1, 1)) - zeros2 = torch.zeros((1, 5, 1)) - if y is not None: - return torch.cat([z1, zeros1], 1) * y, torch.cat([z2, zeros2], 2) * y - return torch.cat([z1, zeros1], 1) + x, torch.cat([z2, zeros2], 2) - - net = PTModel() - constant_zeros1 = ov.opset10.constant(np.zeros((1, 1), dtype=np.float32), dtype=np.float32) - constant_zeros2 = ov.opset10.constant(np.zeros((1, 5, 1), dtype=np.float32), dtype=np.float32) - shape1 = PartialShape([1, -1]) - shape2 = PartialShape([1, 5, -1]) - param0 = ov.opset10.parameter(PartialShape([-1, -1]), 
dtype=np.float32) - param1 = ov.opset10.parameter(shape1, dtype=np.float32) - param2 = ov.opset10.parameter(shape2, dtype=np.float32) - concat1 = ov.opset10.concat([param1, constant_zeros1], 1) - concat2 = ov.opset10.concat([param2, constant_zeros2], 2) - add1 = ov.opset10.add(concat1, param0) - ref_model = Model([concat2, add1], [param0, param1, param2], "test") - return net, ref_model, { - "example_input": {"x": torch.ones((1, 11)), "z": (torch.zeros((1, 10)), torch.ones((1, 5, 10)))}, - "compress_to_fp16": False} - - -class TestMoConvertPyTorch(CommonMOConvertTest): - test_data = [ - 'create_pytorch_nn_module_case1', - 'create_pytorch_nn_module_case2', - 'create_pytorch_nn_module_case3', - 'create_pytorch_nn_module_case4', - 'create_pytorch_nn_module_case5', - 'create_pytorch_nn_module_case6', - 'create_pytorch_nn_module_case7', - 'create_pytorch_nn_module_torch_size', - 'create_pytorch_nn_module_sample_input_int32', - 'create_pytorch_nn_module_sample_input_int32_two_inputs', - 'create_pytorch_jit_script_module', - 'create_pytorch_jit_script_function', - 'create_pytorch_nn_module_layout_list', - 'create_pytorch_nn_module_layout_list_case2', - 'create_pytorch_nn_module_mean_list_compression_default', - 'create_pytorch_nn_module_mean_list_compression_disabled', - 'create_pytorch_nn_module_mean_list_compression_enabled', - 'create_pytorch_nn_module_scale_list_compression_default', - 'create_pytorch_nn_module_scale_list_compression_disabled', - 'create_pytorch_nn_module_scale_list_compression_enabled', - 'create_pytorch_nn_module_shapes_list_static', - 'create_pytorch_nn_module_shapes_list_static_via_input', - 'create_pytorch_nn_module_shapes_list_dynamic', - 'create_pytorch_nn_module_shapes_list_dynamic_via_input', - 'create_pytorch_nn_module_shapes_list_dynamic_single_input', - 'create_pytorch_nn_module_shapes_list_static_single_input', - 'create_pytorch_nn_module_shapes_list_dynamic_single_input_via_input', - 'create_pytorch_nn_module_shapes_list_static_single_input_via_input', - 'create_pytorch_nn_module_convert_pytorch_frontend1', - 'create_pytorch_nn_module_convert_pytorch_frontend2', - 'create_pytorch_nn_module_convert_pytorch_frontend3', - 'create_pytorch_nn_module_convert_pytorch_frontend4', - 'create_pytorch_jit_script_module_convert_pytorch_frontend', - 'create_pytorch_jit_trace_module_convert_pytorch_frontend', - 'create_pytorch_module_convert_pytorch_frontend_oob', - 'create_pytorch_module_with_optional_inputs_case1', - 'create_pytorch_module_with_optional_inputs_case2', - 'create_pytorch_module_with_optional_inputs_case3', - 'create_pytorch_nn_module_with_scalar_input', - 'create_pytorch_module_with_compressed_int8_constant', - 'create_pytorch_module_with_compressed_int8_constant_compress_to_fp16_default', - 'create_pytorch_module_with_nested_inputs', - 'create_pytorch_module_with_nested_inputs2', - 'create_pytorch_module_with_nested_inputs3', - 'create_pytorch_module_with_nested_inputs4', - 'create_pytorch_module_with_nested_inputs5', - 'create_pytorch_module_with_nested_inputs6' - ] - - @pytest.mark.parametrize("create_model", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - def test_mo_import_from_memory(self, create_model, ie_device, precision, ir_version, - temp_dir, use_legacy_frontend): - fw_model, graph_ref, mo_params = eval(create_model)(temp_dir) - - test_params = {'input_model': fw_model} - test_params.update({'use_convert_model_from_mo': True}) - if mo_params is not None: - test_params.update(mo_params) - self._test_by_ref_graph(temp_dir, test_params, - 
graph_ref, compare_tensor_names=False) - - @ pytest.mark.precommit - def test_sharing_memory_switched_off(self, ie_device, precision, ir_version, temp_dir): - class DataModel(torch.nn.Module): - def __init__(self): - super(DataModel, self).__init__() - self.data = torch.tensor([1, 2, 3, 4]) - - def forward(self, x): - return self.data, x - - data_model = DataModel() - test_input = np.array([0, 0, 0, 0]) - - # Convert model to OV - ov_model = convert_model(data_model, input=([4], Type.i32), share_weights=False) - - # Change value of variables in original model - data_model.data[0] *= 2 - - # Check model inference - core = Core() - cmp_model = core.compile_model(ov_model, ie_device) - ov_infer1 = cmp_model(test_input) - - assert np.array_equal(ov_infer1[0], [1, 2, 3, 4]) - - @ pytest.mark.precommit - def test_sharing_memory_switched_on(self, ie_device, precision, ir_version, temp_dir): - from openvino.tools.ovc import convert_model - from openvino.runtime import Core - - class DataModel(torch.nn.Module): - def __init__(self): - super(DataModel, self).__init__() - self.data = torch.tensor([1, 2, 3, 4]) - - def forward(self, x): - return self.data, x - - data_model = DataModel() - test_input = np.array([0, 0, 0, 0]) - - # Convert model to OV - ov_model = convert_model(data_model, input=([4], Type.i32), share_weights=True) - - # Change value of variables in original model - data_model.data[0] *= 2 - - # Check model inference - core = Core() - cmp_model = core.compile_model(ov_model, ie_device) - ov_infer1 = cmp_model(test_input) - - assert np.array_equal(ov_infer1[0], [2, 2, 3, 4]) - - -def create_pt_model_with_custom_op(): - # - # Create PyTorch model with custom operation - # - import torch.nn as nn - - class MyModel(nn.Module): - def __init__(self): - super(MyModel, self).__init__() - self.my_op = MyTorchOp() - - def forward(self, x): - return self.my_op.apply(x) - - return MyModel() - - -class ConvertRaises(unittest.TestCase): - def test_example_inputs(self): - from openvino.tools.mo import convert_model - pytorch_model = create_pt_model_with_custom_op() - - # Check that mo raises error message of wrong argument. - with self.assertRaisesRegex(AssertionError, ".*'example_inputs' argument is not recognized.*"): - convert_model(pytorch_model, example_inputs=(torch.tensor(1),)) - - def test_failed_extension(self): - from openvino.tools.mo import convert_model - from openvino.frontend.pytorch import ConversionExtension - - inp_shapes = [1, 3, 20, 20] - pt_model = make_pt_model_one_input() - - def relu_bad(n): - assert False, "Something happened" - - # Check that mo raises error message of wrong argument. 
- with self.assertRaisesRegex(Exception, ".*Conversion is failed for: aten::relu.*"): - convert_model(pt_model, input=(inp_shapes, np.float32), extensions=[ - ConversionExtension("aten::relu", relu_bad)]) - - def test_failed_extension(self): - import tempfile - from openvino.tools.mo import convert_model - - with self.assertRaisesRegex(Exception, ".*PyTorch Frontend doesn't support provided model type.*"): - with tempfile.NamedTemporaryFile(delete=False) as tmpfile: - convert_model(tmpfile.name, framework="pytorch") - os.remove(tmpfile.name) - - -def create_pytorch_layer_norm(tmp_dir): - class aten_layer_norm(torch.nn.Module): - def forward(self, x): - return torch.nn.functional.layer_norm(x, normalized_shape=[3]) - - shape = PartialShape(PartialShape([-1, -1])) - param1 = ov.opset8.parameter(shape, name="input_0", dtype=np.float32) - const1 = ov.opset8.constant([-1], dtype=np.int32) - mvn1 = ov.opset8.mvn(param1, const1, True, 1e-5, "inside_sqrt") - ref_model = Model([mvn1], [param1], "test") - - test_params = {'example_input': 300 + np.random.randn(2, 3).astype(np.float32)} - return aten_layer_norm(), ref_model, test_params - - -def create_pytorch_normalize(tmp_dir): - class aten_normalize(torch.nn.Module): - def forward(self, x): - return torch.nn.functional.normalize(x) - - test_params = {'example_input': 300 + np.random.randn(2, 3).astype(np.float32)} - return aten_normalize(), None, test_params - - -def create_pytorch_precision_sensitive_with_div(tmp_dir): - class precision_sensitive_with_div(torch.nn.Module): - def forward(self, x): - eps = 1.0e-8 - return 2.0 / (torch.sqrt(torch.sum(torch.pow(x + 2, 2.0), 1)) + eps) - test_params = {'example_input': 300 + np.random.randn(2, 3).astype(np.float32)} - return precision_sensitive_with_div(), None, test_params - - -def create_pytorch_precision_sensitive_for_exp_reduce(tmp_dir): - class precision_sensitive_for_exp_reduce(torch.nn.Module): - def forward(self, x): - return torch.sum(torch.exp(x + 10), 1) - - test_params = {'example_input': 300 + np.random.randn(2, 3).astype(np.float32)} - return precision_sensitive_for_exp_reduce(), None, test_params - - -def create_pytorch_precision_sensitive_div_as_pow(tmp_dir): - class precision_sensitive_div_as_pow(torch.nn.Module): - def forward(self, x): - eps = 1.0e-8 - return 2.0 * (torch.sqrt(torch.sum(torch.pow(x + 2, 2.0), 1)) + eps)**(-1) - - test_params = {'example_input': 300 + np.random.randn(2, 3).astype(np.float32)} - return precision_sensitive_div_as_pow(), None, test_params - - -def create_pytorch_precision_sensitive_two_inp_1(tmp_dir): - class precision_sensitive_two_inp_1(torch.nn.Module): - def forward(self, x, y): - eps = 1.0e-8 - return x / (torch.sqrt(torch.sum(torch.pow(y + 2, 2.0), 2)) + eps) - test_params = {'example_input': (10000 + np.ones((2, 10), dtype=np.float32), - 300 + np.ones((2, 10, 3), dtype=np.float32))} - return precision_sensitive_two_inp_1(), None, test_params - - -def create_pytorch_precision_sensitive_two_inp_2(tmp_dir): - class precision_sensitive_two_inp_2(torch.nn.Module): - def forward(self, x, y): - eps = 1.0e-8 - return x * (torch.sqrt(torch.sum(torch.pow(y + 2, 2.0), 2)) + eps)**(-1) - test_params = {'example_input': (10000 + np.ones((2, 10), dtype=np.float32), - 300 + np.ones((2, 10, 3), dtype=np.float32))} - return precision_sensitive_two_inp_2(), None, test_params - - -def create_pytorch_precision_sensitive_with_matmul(tmp_dir): - class precision_sensitive_with_matmul(torch.nn.Module): - def forward(self, x, y): - eps = 1.0e-8 - interm_res = x / 
(torch.sqrt(torch.sum(torch.pow(y + 2, 2.0), 2)) + eps) - print(f"interm_res shpae: {interm_res.shape}") - print(interm_res) - weights = 1024.0 + torch.zeros(10, 2) - return torch.mm(interm_res, weights) - test_params = {'example_input': (10000 + np.ones((2, 10), dtype=np.float32), - 300 + np.ones((2, 10, 3), dtype=np.float32))} - return precision_sensitive_with_matmul(), None, test_params - - -def create_pytorch_not_precision_sensitive(tmp_dir): - class not_precision_sensitive(torch.nn.Module): - def forward(self, x): - return torch.sum(x, 1) - - test_params = 10000.0 + np.zeros((2, 20), dtype=np.float32), # 10 000 * 20 = 200 000 > 65504 (fp16_max) - return not_precision_sensitive(), None, test_params - - -class TestPrecisionSensitive(): - test_data = [ - 'create_pytorch_layer_norm', - 'create_pytorch_normalize', - 'create_pytorch_precision_sensitive_with_div', - 'create_pytorch_precision_sensitive_div_as_pow', - 'create_pytorch_precision_sensitive_for_exp_reduce', - 'create_pytorch_precision_sensitive_two_inp_1', - 'create_pytorch_precision_sensitive_two_inp_2', - ] - - @pytest.mark.parametrize("create_model", test_data) - @pytest.mark.nightly - @pytest.mark.precommit - @pytest.mark.xfail(condition=platform.system() in ('Darwin', 'Linux') and platform.machine() in ('arm', 'armv7l', - 'aarch64', - 'arm64', 'ARM64'), - reason='Ticket - 122714, 122710') - def test_precision_sensitive(self, create_model, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): - import numpy.testing as npt - from pathlib import Path - - fw_model, ref_model, mo_params = eval(create_model)(temp_dir) - - test_params = {'input_model': fw_model} - if mo_params is not None: - test_params.update(mo_params) - - model = convert_model(**test_params) - model_name = 'model_test.xml' - - save_model(model, str(Path(temp_dir, model_name)), True) - - core = Core() - ir_test = core.read_model(Path(temp_dir, model_name)) - if ref_model is not None: - flag, msg = compare_functions(ir_test, ref_model, compare_tensor_names=False) - assert flag, msg - - example_inputs = test_params['example_input'] - torch_inp_tensors = [] - if isinstance(example_inputs, tuple): - for input_arr in example_inputs: - torch_inp_tensors.append(torch.tensor(input_arr)) - else: - torch_inp_tensors.append(torch.tensor(example_inputs)) - - fw_res = fw_model(*torch_inp_tensors) - ov_res = core.compile_model(ir_test)(example_inputs) - - npt.assert_allclose(ov_res[0], fw_res.numpy(), atol=1e-3, rtol=1e-3) diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_subprocess_tests.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_subprocess_tests.py deleted file mode 100644 index 38a848e1f7b0da..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_subprocess_tests.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import subprocess -import sys -import unittest - - -class TestSubprocessMoConvert(unittest.TestCase): - def test_mo_convert(self): - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'mo_convert_legacy_extensions_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode \ No newline at end of file diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py deleted file mode 100644 index 960e8e08457af5..00000000000000 --- 
a/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py +++ /dev/null @@ -1,1087 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import openvino.runtime as ov -import pytest -import tempfile -import unittest -from common import constants -from common.layer_test_class import CommonLayerTest -from common.mo_convert_test_class import CommonMOConvertTest -from common.utils.tf_utils import save_to_pb -from openvino.runtime import PartialShape, Model, Dimension -from pathlib import Path - - -def create_tf_graph_def(tmp_dir): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return tf_net, model_ref, None - - -def create_keras_model(temp_dir): - import tensorflow as tf - - tf.keras.backend.clear_session() - tf.compat.v1.reset_default_graph() - - input_names = ["Input1", "Input2"] - input_shape = [1, 2, 3] - - x1 = tf.keras.Input(shape=input_shape, name=input_names[0]) - x2 = tf.keras.Input(shape=input_shape, name=input_names[1]) - relu = tf.keras.layers.Activation('relu')(x1 + x2) - sigmoid = tf.keras.layers.Activation('sigmoid')(relu) - keras_net = tf.keras.Model(inputs=[x1, x2], outputs=[sigmoid]) - - shape = PartialShape([-1, 1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - tf.keras.backend.clear_session() - - return keras_net, model_ref, None - - -def create_tf1_wrap_function(tmp_dir): - import tensorflow as tf - - def f(x, y): - return tf.nn.sigmoid(tf.nn.relu(x + y)) - - func = tf.compat.v1.wrap_function(f, [tf.TensorSpec((1, 2, 3), tf.float32), - tf.TensorSpec((1, 2, 3), tf.float32)]) - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return func, model_ref, None - - -def create_tf_session(tmp_dir): - import tensorflow as tf - from tensorflow.python.eager.context import graph_mode - - with graph_mode(): - tf.compat.v1.reset_default_graph() - sess = tf.compat.v1.Session() - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input1') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input2') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) 
- add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return sess, model_ref, None - - -def create_tf_module(tmp_dir): - import tensorflow as tf - - class Net(tf.Module): - def __init__(self, name=None): - super(Net, self).__init__(name=name) - - def __call__(self, x, y): - return tf.nn.sigmoid(tf.nn.relu(x + y)) - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - net = Net() - return net, model_ref, {'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32))} - - -def create_tf_module_layout_list(tmp_dir): - from openvino.runtime import Layout - import tensorflow as tf - - class Net(tf.Module): - def __init__(self, name=None): - super(Net, self).__init__(name=name) - - def __call__(self, x, y): - return tf.nn.sigmoid(tf.nn.relu(x + y)) - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - model_ref.inputs[0].node.layout = Layout('NCH') - model_ref.inputs[1].node.layout = Layout('NHC') - - net = Net() - return net, model_ref, {'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32)), 'layout': ["NCH", "NHC"], - 'use_convert_model_from_mo': True} - - -def create_tf_module_dynamic(tmp_dir): - import tensorflow as tf - - class Net(tf.Module): - def __init__(self, name=None): - super(Net, self).__init__(name=name) - - def __call__(self, x, y): - return tf.nn.sigmoid(tf.nn.relu(x + y)) - - input_shapes = [PartialShape([-1, Dimension(3, -1), Dimension(4)]), - PartialShape([-1, Dimension(3), Dimension(4, -1)])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(input_shapes[1], dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - net = Net() - return net, model_ref, {'input': input_shapes, - 'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32)) - } - - -def create_keras_layer(tmp_dir): - import tensorflow as tf - class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, x, y): - return tf.sigmoid(tf.nn.relu(x + y)) - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - net = LayerModel() - return net, model_ref, {'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32)) - } - - -def create_keras_layer_dynamic(tmp_dir): - import tensorflow as tf - 
class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, x, y): - return tf.sigmoid(tf.nn.relu(x + y)) - - input_shapes = [PartialShape([-1, Dimension(3, -1), Dimension(4)]), - PartialShape([-1, Dimension(3), Dimension(4, -1)])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(input_shapes[1], dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - net = LayerModel() - return net, model_ref, {'input': input_shapes, - 'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32)) - } - - -def create_tf_checkpoint(tmp_dir): - import tensorflow as tf - - input_names = ["Input1", "Input2"] - input_shape = [1, 2, 3] - - x1 = tf.keras.Input(shape=input_shape, name=input_names[0]) - x2 = tf.keras.Input(shape=input_shape, name=input_names[1]) - relu = tf.keras.layers.Activation('relu')(x1 + x2) - sigmoid = tf.keras.layers.Activation('sigmoid')(relu) - - model = tf.keras.Model(inputs=[x1, x2], outputs=[sigmoid]) - checkpoint = tf.train.Checkpoint(model) - - shape = PartialShape([-1, 1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return checkpoint, model_ref, None - - -def create_tf_function(temp_dir): - import tensorflow as tf - - @tf.function( - input_signature=[tf.TensorSpec(shape=[1, 2, 3], dtype=tf.float32), - tf.TensorSpec(shape=[1, 2, 3], dtype=tf.float32)]) - def f(x1, x2): - y = tf.nn.sigmoid(tf.nn.relu(x1 + x2)) - return y - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return f, model_ref, None - - -def create_tf_graph(temp_dir): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph - - shape = PartialShape([1, 2, 3]) - param1 = ov.opset8.parameter(shape, dtype=np.float32) - param2 = ov.opset8.parameter(shape, dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return tf_net, model_ref, None - - -def create_tf_saved_model_dir(temp_dir): - import tensorflow as tf - - input_names = ["Input1", "Input2"] - input_shape = [1, 2, 3] - - x1 = tf.keras.Input(shape=input_shape, name=input_names[0]) - x2 = tf.keras.Input(shape=input_shape, name=input_names[1]) - relu = tf.keras.layers.Activation('relu')(x1 + x2) - sigmoid = tf.keras.layers.Activation('sigmoid')(relu) - keras_net = tf.keras.Model(inputs=[x1, x2], outputs=[sigmoid]) - - 
keras_net.export(temp_dir + "/model") - - shape = PartialShape([-1, 1, 2, 3]) - param1 = ov.opset8.parameter(shape, name="Input1:0", dtype=np.float32) - param2 = ov.opset8.parameter(shape, name="Input2:0", dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu = ov.opset8.relu(add) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - model_ref = Model([sigm], parameter_list, "test") - - return temp_dir + "/model", model_ref - - -def create_tf_stateful_partioned_call_net(temp_dir): - import tensorflow as tf - tf.compat.v1.reset_default_graph() - - data_shape = [1, 1, 10, 10] - filters_shape = [3, 3, 1, 1] - - strides = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - dilations = [1, 1] - - @tf.function - def second_func(input, filter): - conv = tf.raw_ops.Conv2D(input=input, filter=filter, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW') - return conv - - @tf.function( - input_signature=[tf.TensorSpec(shape=data_shape, dtype=tf.float32), - tf.TensorSpec(shape=filters_shape, dtype=tf.float32)]) - def first_func(input, filter): - conv = second_func(input, filter) - return conv - - tf_model = first_func - - param1 = ov.opset8.parameter(data_shape, dtype=np.float32) - param2 = ov.opset8.parameter(filters_shape, dtype=np.float32) - transpose2 = ov.opset8.transpose(param2, np.array([3, 2, 0, 1], dtype=np.int64)) - conv = ov.opset11.convolution(param1, transpose2, strides, pads_begin, pads_end, dilations, auto_pad="same_upper") - - parameter_list = [param1, param2] - model_ref = Model([conv], parameter_list, "test") - - return tf_model, model_ref, {} - - -def create_keras_layer_input_list(): - import tensorflow as tf - class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, x, y): - res_list = [tf.sigmoid(tf.nn.relu(x + y)), tf.nn.relu(x), tf.sigmoid(y)] - return res_list - - input_shapes = [PartialShape([1, 2, 3]), - PartialShape([1, 2, 3])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(input_shapes[1], dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu1 = ov.opset8.relu(add) - sigm1 = ov.opset8.sigmoid(relu1) - relu2 = ov.opset8.relu(param1) - sigm2 = ov.opset8.sigmoid(param2) - - parameter_list = [param1, param2] - model_ref = Model([sigm1, relu2, sigm2], parameter_list, "test") - return LayerModel(), model_ref - - -def create_keras_layer_input_list_one_inp(): - import tensorflow as tf - class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, x): - res_list = [tf.sigmoid(tf.nn.relu(x)), tf.nn.relu(x)] - return res_list - - input_shapes = [PartialShape([1, 2, 3])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - relu1 = ov.opset8.relu(param1) - sigm1 = ov.opset8.sigmoid(relu1) - parameter_list = [param1] - model_ref = Model([sigm1, relu1], parameter_list, "test") - - return LayerModel(), model_ref - - -def create_keras_layer_input_dict(): - import tensorflow as tf - class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, args): - res = {} - res['result'] = tf.sigmoid(tf.nn.relu(args['a'] + args['b'])) - return res - - input_shapes = [PartialShape([1, 2, 3]), - PartialShape([1, 2, 3])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(input_shapes[1], dtype=np.float32) - add = ov.opset8.add(param1, param2) - relu1 = 
ov.opset8.relu(add) - sigm1 = ov.opset8.sigmoid(relu1) - - parameter_list = [param1, param2] - model_ref = Model([sigm1], parameter_list, "test") - return LayerModel(), model_ref - - -def create_keras_layer_input_dict_one_inp(): - import tensorflow as tf - class LayerModel(tf.keras.layers.Layer): - - def __init__(self): - super(LayerModel, self).__init__() - - def call(self, args): - res = {} - res['result'] = tf.sigmoid(tf.nn.relu(args['args'])) - return res - - input_shapes = [PartialShape([1, 2, 3]), - PartialShape([1, 2, 3])] - - param1 = ov.opset8.parameter(input_shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(input_shapes[1], dtype=np.float32) - relu1 = ov.opset8.relu(param1) - sigm1 = ov.opset8.sigmoid(relu1) - parameter_list = [param1, param2] - model_ref = Model([sigm1], parameter_list, "test") - return LayerModel(), model_ref - - -def single_param_function_reference(shape, const_value): - param1 = ov.opset8.parameter(shape, dtype=np.float32) - const = ov.opset8.constant(const_value, dtype=np.float32) - sigm = ov.opset8.sigmoid(param1) - mul = ov.opset8.multiply(sigm, const) - parameter_list = [param1] - return Model([mul], parameter_list, "test") - - -def two_params_function_reference(shapes, const_value): - param1 = ov.opset8.parameter(shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(shapes[1], dtype=np.float32) - const = ov.opset8.constant(const_value, dtype=np.float32) - sigm = ov.opset8.sigmoid(param1) - add = ov.opset8.add(sigm, param2) - mul = ov.opset8.multiply(add, const) - parameter_list = [param1, param2] - return Model([mul], parameter_list, "test") - - -def two_params_function_reference_fp16_compressed(shapes, const_value): - param1 = ov.opset8.parameter(shapes[0], dtype=np.float32) - param2 = ov.opset8.parameter(shapes[1], dtype=np.float32) - const_value = ov.opset8.constant(const_value, dtype=np.float16) - const_decompress = ov.opset8.convert(const_value, np.float32) - sigm = ov.opset8.sigmoid(param1) - add = ov.opset8.add(sigm, param2) - mul = ov.opset8.multiply(add, const_decompress) - parameter_list = [param1, param2] - return Model([mul], parameter_list, "test") - - -def create_keras_layer_with_example_input_1(tmp_dir): - model, model_ref = create_keras_layer_input_list() - example_input = (np.random.rand(1, 2, 3).astype(np.float32), np.random.rand(1, 2, 3).astype(np.float32)) - return model, model_ref, {'example_input': example_input} - - -def create_keras_layer_with_example_input_2(tmp_dir): - model, model_ref = create_keras_layer_input_dict() - example_input = {'a': np.random.rand(1, 2, 3).astype(np.float32), 'b': np.random.rand(1, 2, 3).astype(np.float32)} - return model, model_ref, {'example_input': example_input} - - -def create_keras_layer_with_input_shapes_case1(tmp_dir): - model, model_ref = create_keras_layer_input_list() - return model, model_ref, {'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32))} - - -def create_keras_layer_with_input_shapes_case2(tmp_dir): - model, model_ref = create_keras_layer_input_list() - return model, model_ref, {'example_input': (np.random.rand(1, 2, 3).astype(np.float32), - np.random.rand(1, 2, 3).astype(np.float32))} - - -def create_keras_layer_with_input_shapes_case3(tmp_dir): - model, model_ref = create_keras_layer_input_dict_one_inp() - return model, model_ref, {'example_input': {"args": np.random.rand(1, 2, 3).astype(np.float32)}} - - -def create_keras_layer_with_input_shapes_case4(tmp_dir): - model, model_ref = 
create_keras_layer_input_list_one_inp() - return model, model_ref, {'input': [1, 2, 3]} - - -def create_keras_layer_with_tf_function_call(tmp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable(5.0) - - @tf.function(input_signature=[tf.TensorSpec([1, 2], tf.float32), tf.TensorSpec([1, 2], tf.float32)]) - def __call__(self, input1, input2): - sigm = tf.nn.sigmoid(input1) + input2 - return sigm * self.var1 - - model = LayerModel() - model_ref = two_params_function_reference([[1, 2], [1, 2]], [[5.0]]) - return model, model_ref, {'compress_to_fp16': False} - - -def create_keras_layer_with_tf_function_call_default_compressed_to_fp16(tmp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable(5.0) - - @tf.function(input_signature=[tf.TensorSpec([1, 2], tf.float32), tf.TensorSpec([1, 2], tf.float32)]) - def __call__(self, input1, input2): - sigm = tf.nn.sigmoid(input1) + input2 - return sigm * self.var1 - - model = LayerModel() - model_ref = two_params_function_reference([[1, 2], [1, 2]], [[5.0]]) - return model, model_ref, {} - - -def create_keras_layer_with_tf_function_call_no_signature(tmp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable(5.0) - - @tf.function() - def __call__(self, input1, input2): - sigm = tf.nn.sigmoid(input1) + input2 - return sigm * self.var1 - - model = LayerModel() - example_input = [np.random.rand(2, 3).astype(np.float32), np.random.rand(2, 3).astype(np.float32)] - - model_ref = two_params_function_reference([[2, 3], [2, 3]], [[5.0]]) - return model, model_ref, {'example_input': example_input, 'compress_to_fp16': False} - - -def create_keras_layer_with_tf_function_call_no_signature_single_input(tmp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable(5.0) - - @tf.function() - def __call__(self, input1): - sigm = tf.nn.sigmoid(input1) - return sigm * self.var1 - - model = LayerModel() - example_input = np.random.rand(2, 3).astype(np.float32) - - model_ref = single_param_function_reference([2, 3], [[5.0]]) - return model, model_ref, {'example_input': example_input, 'compress_to_fp16': False} - - -def create_keras_layer_with_string_tensor(tmp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var = tf.Variable("Text_1", dtype=tf.string) - self.const = tf.constant("Text_2", dtype=tf.string) - - @tf.function(input_signature=[tf.TensorSpec([1], tf.float32), tf.TensorSpec([1], tf.float32)]) - def __call__(self, input1, input2): - return input1 + input2, self.var, self.const - - model = LayerModel() - - param1 = ov.opset8.parameter([1], dtype=np.float32) - param2 = ov.opset8.parameter([1], dtype=np.float32) - add = ov.opset8.add(param1, param2) - parameter_list = [param1, param2] - model_ref = Model([add], parameter_list, "test") - - return model, model_ref, {} - - -def shape_of_const_fold_test(temp_dir): - import tensorflow as tf - - # TF model - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - x1 = tf.compat.v1.placeholder(tf.float32, [1, 4, 10, 10], 'Input') - shape = tf.shape(x1) - rank = tf.cast(tf.shape(shape), dtype=tf.float32) - reshape = tf.reshape(x1, shape) - mul = rank * reshape - - 
tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # Ref model - param1 = ov.opset8.parameter(PartialShape([1, 4, 10, 10])) - shape_const = ov.opset8.constant([1, 4, 10, 10], dtype=np.int32) - reshape = ov.opset8.reshape(param1, shape_const, False) - mul_const = ov.opset8.constant([[[[4]]]], dtype=np.float32) - mul = ov.opset8.multiply(mul_const, reshape) - - parameter_list = [param1] - model_ref = Model([mul], parameter_list, "test") - - return tf_net, model_ref, {} - - -def static_shape_true(temp_dir): - import tensorflow as tf - - # TF model - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - x1 = tf.compat.v1.placeholder(tf.float32, [1, 4, 10, 10], 'Input') - shape = tf.shape(x1) - rank = tf.cast(tf.shape(shape), dtype=tf.float32) - reshape = tf.reshape(x1, shape) - mul = rank * reshape - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # Ref model - param1 = ov.opset8.parameter(PartialShape([1, 4, 10, 10])) - mul_const = ov.opset8.constant([[[[4]]]], dtype=np.float32) - mul = ov.opset8.multiply(mul_const, param1) - - parameter_list = [param1] - model_ref = Model([mul], parameter_list, "test") - - return tf_net, model_ref, {'use_convert_model_from_mo': True, 'static_shape': True} - - -def static_shape_false(temp_dir): - import tensorflow as tf - - # TF model - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - x1 = tf.compat.v1.placeholder(tf.float32, [1, 4, 10, 10], 'Input') - shape = tf.shape(x1) - rank = tf.cast(tf.shape(shape), dtype=tf.float32) - reshape = tf.reshape(x1, shape) - mul = rank * reshape - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - - # Ref model - param1 = ov.opset8.parameter(PartialShape([1, 4, 10, 10])) - shape_const = ov.opset8.constant([1, 4, 10, 10], dtype=np.int32) - reshape = ov.opset8.reshape(param1, shape_const, False) - mul_const = ov.opset8.constant([[[[4]]]], dtype=np.float32) - mul = ov.opset8.multiply(mul_const, reshape) - - parameter_list = [param1] - model_ref = Model([mul], parameter_list, "test") - - return tf_net, model_ref, {'use_convert_model_from_mo': True, 'static_shape': False} - - -class TestMoConvertTF(CommonMOConvertTest): - test_data = [ - # TF2 - 'create_keras_model', - 'create_keras_layer', - 'create_tf_function', - 'create_tf_module', - 'create_tf_checkpoint', - 'create_keras_layer_dynamic', - 'create_tf_module_dynamic', - 'create_tf_module_layout_list', - 'create_tf_stateful_partioned_call_net', - 'create_keras_layer_with_example_input_1', - 'create_keras_layer_with_example_input_2', - 'create_keras_layer_with_input_shapes_case1', - 'create_keras_layer_with_input_shapes_case2', - 'create_keras_layer_with_input_shapes_case3', - # can skip since this is legacy MO - # create_keras_layer_with_input_shapes_case4, - 'create_keras_layer_with_tf_function_call', - 'create_keras_layer_with_tf_function_call_default_compressed_to_fp16', - 'create_keras_layer_with_tf_function_call_no_signature', - 'create_keras_layer_with_tf_function_call_no_signature_single_input', - 'create_keras_layer_with_string_tensor', - 'shape_of_const_fold_test', - 'static_shape_true', - 'static_shape_false', - - # TF1 - 'create_tf_graph', - 'create_tf_graph_def', - 'create_tf1_wrap_function', - 'create_tf_session', - ] - - @pytest.mark.parametrize("create_model", test_data) - @pytest.mark.nightly - @pytest.mark.precommit_tf_fe - @pytest.mark.precommit - def test_mo_import_from_memory_tf_fe(self, create_model, ie_device, precision, 
ir_version, - temp_dir): - fw_model, graph_ref, mo_params = eval(create_model)(temp_dir) - - test_params = {'input_model': fw_model} - test_params.update({'use_convert_model_from_mo': True}) - if mo_params is not None: - test_params.update(mo_params) - self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) - - @pytest.mark.nightly - @pytest.mark.precommit - def test_unnamed_saved_model_dir(self, ie_device, precision, ir_version, temp_dir): - saved_model_dir, graph_ref = create_tf_saved_model_dir(temp_dir) - - test_params = {'input_model': saved_model_dir} - test_params.update({'use_convert_model_from_mo': True}) - self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) - - test_params = {'input_model': saved_model_dir} - self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) - - def test_zero_copy(self, ie_device, precision, ir_version, temp_dir): - import tensorflow as tf - from openvino.tools.mo import convert_model - from openvino.runtime import compile_model - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable([7., 5., 6.], name='var1') - self.var2 = tf.Variable([5., 7., 3.], name='var2') - - @tf.function - def sub_function(self, input): - return input * self.var1 + self.var2 - - @tf.function() - def __call__(self, input): - return self.sub_function(input) - - # Create TF model with variables - keras_model = LayerModel() - test_input = np.array(7.).astype(np.float32) - - # Convert model to OV - ov_model = convert_model(keras_model, input=[1], share_weights=True) - cmp_model = compile_model(ov_model) - - # Check model inference - ov_infer1 = cmp_model(test_input, ie_device) - fw_infer1 = keras_model(test_input).numpy() - - assert np.array_equal(ov_infer1['Identity:0'], fw_infer1) - assert np.array_equal(ov_infer1['Identity:0'], [54., 42., 45.]) - - # Change value of variables in original model - for val in keras_model.variables: - arr = val.value().__array__() - arr[0] = 0 - arr[1] = 1 - arr[2] = 2 - - # Check model inference - cmp_model = compile_model(ov_model) - ov_infer2 = cmp_model(test_input) - fw_infer2 = keras_model(test_input).numpy() - - assert np.array_equal(ov_infer2['Identity:0'], fw_infer2) - assert np.array_equal(ov_infer2['Identity:0'], [0., 8., 16.]) - - def test_memory_loss(self, ie_device, precision, ir_version, temp_dir): - # This test checks that the memory allocated for constants - # is not lost after returning the model from convert_model() method. 
- import tensorflow as tf - tf.compat.v1.reset_default_graph() - - from openvino.tools.mo import convert_model - from openvino.runtime import compile_model - import gc - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [3], 'Input') - const = tf.constant([0.5, 2.3, 7.8], dtype=tf.float32) - res = inp1 + const - - tf.compat.v1.global_variables_initializer() - tf_graph = sess.graph # tf.Graph - - if precision == 'FP32': - eps = 1e-4 - else: - eps = 5e-2 - - test_input = np.array([2.1, 7.3, 4.6]).astype(np.float32) - - # Convert model to OV - ov_model = convert_model(tf_graph) - cmp_model = compile_model(ov_model) - - # Check model inference - ov_infer1 = cmp_model(test_input, ie_device) - - feed_dict = {"Input:0": test_input} - with tf.compat.v1.Session(graph=tf_graph) as sess: - fw_infer1 = sess.run('add:0', feed_dict=feed_dict) - - assert CommonLayerTest().compare_ie_results_with_framework(ov_infer1, {"add:0": fw_infer1}, eps) - assert CommonLayerTest().compare_ie_results_with_framework(ov_infer1, {"add:0": [2.6, 9.6, 12.4]}, eps) - - # run Garbage collector to ensure, that values from tf.constant are copied to ov.Const and - # we do not lose allocated memory. - gc.collect() - - # Check model inference - cmp_model = compile_model(ov_model) - ov_infer2 = cmp_model(test_input, ie_device) - - feed_dict = {"Input:0": test_input} - with tf.compat.v1.Session(graph=tf_graph) as sess: - fw_infer2 = sess.run('add:0', feed_dict=feed_dict) - - assert CommonLayerTest().compare_ie_results_with_framework(ov_infer2, {"add:0": fw_infer2}, eps) - assert CommonLayerTest().compare_ie_results_with_framework(ov_infer1, {"add:0": [2.6, 9.6, 12.4]}, eps) - - def test_tensor_names(self, ie_device, precision, ir_version, temp_dir): - import tensorflow as tf - class LayerModel(tf.Module): - def __init__(self): - super(LayerModel, self).__init__() - self.var1 = tf.Variable([7., 5., 6.], name='var1') - self.var2 = tf.Variable([5., 7., 3.], name='var2') - self.var3 = tf.Variable([5., 7., 3.], name='var2') - - @tf.function - def sub_function(self, input): - return input + self.var1 + self.var2 + self.var3 - - @tf.function() - def __call__(self, input): - return self.sub_function(input) - - from openvino.tools.ovc import convert_model - model = LayerModel() - ov_model = convert_model(model) - - ov_model.outputs[0].get_tensor().set_names({"name1"}) - assert ov_model.outputs[0].names == {"name1"} - - ov_model.validate_nodes_and_infer_types() - assert ov_model.outputs[0].names == {"name1"} - - ov_model.outputs[0].get_tensor().set_names({"name2"}) - assert ov_model.outputs[0].names == {"name2"} - - -class TFConvertTest(unittest.TestCase): - @pytest.mark.nightly - @pytest.mark.precommit - def test_tf_function_no_signature(self): - import tensorflow as tf - from openvino.tools.mo import convert_model - - @tf.function() - def function(x1, x2): - y = tf.nn.sigmoid(tf.nn.relu(x1 + x2)) - return y - - with self.assertRaisesRegex(Exception, ".*Please provide 'example_input'.*"): - convert_model(function) - - -class TestTFLoadByModel(unittest.TestCase): - def test_load_by_model_tf_graph_iterator(self): - def simple_tf_model(): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], "Input") - _ = tf.nn.sigmoid(inp, name="Sigmoid") - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph - return tf_net - - from openvino.frontend.tensorflow.graph_iterator import 
GraphIteratorTFGraph - from openvino.frontend import FrontEndManager - model = GraphIteratorTFGraph(simple_tf_model(), True) - fem = FrontEndManager() - fe = fem.load_by_model(model) - assert fe is not None - assert fe.get_name() == "tf" - - -class TestInputTensorName(unittest.TestCase): - @pytest.mark.nightly - @pytest.mark.precommit - def test_tf1_from_file_single_input_name(self): - import tensorflow as tf - tf.keras.backend.clear_session() - tf.compat.v1.reset_default_graph() - Path(constants.out_path).mkdir(parents=True, exist_ok=True) - tmp_dir = tempfile.TemporaryDirectory(dir=constants.out_path).name - from openvino.tools.mo import convert_model - - model, _, _ = create_tf_graph_def(None) - path = save_to_pb(model, tmp_dir) - - ov_model = convert_model(path) - - ref_inputs = ["Input:0", "Input_1:0"] - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - ov_model = convert_model(path, input=["Input:0", "Input_1:0"]) - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - ref_inputs = ["Input", "Input_1"] - ov_model = convert_model(path, input=["Input", "Input_1"]) - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - @pytest.mark.nightly - @pytest.mark.precommit - def test_tf1_from_memory_single_input_name(self): - import tensorflow as tf - tf.keras.backend.clear_session() - tf.compat.v1.reset_default_graph() - from openvino.tools.mo import convert_model - - model, _, _ = create_tf_graph_def(None) - - ov_model = convert_model(model) - - ref_inputs = ["Input:0", "Input_1:0"] - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - ov_model = convert_model(model, input=["Input:0", "Input_1:0"]) - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - ref_inputs = ["Input", "Input_1"] - ov_model = convert_model(model, input=["Input", "Input_1"]) - for idx, output in enumerate(ov_model.inputs): - tensors = output.get_names() - - assert len(tensors) == 1 - out_tensor = list(tensors)[0] - assert out_tensor == ref_inputs[idx] - - @pytest.mark.nightly - @pytest.mark.precommit - def test_tf1_input_with_identity(self): - import tensorflow as tf - tf.keras.backend.clear_session() - tf.compat.v1.reset_default_graph() - from openvino.tools.mo import convert_model - - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, [2], 'x') - y = tf.compat.v1.placeholder(tf.float32, [2], 'y') - input1 = tf.identity(x, name="x_identity") - input2 = tf.identity(y, name="y_identity") - add = tf.add(input1, input2, name="add") - - tf.compat.v1.global_variables_initializer() - model = sess.graph_def - - ov_model = convert_model(model) - - assert ov_model.inputs[0].get_names() == {"x:0"} - assert ov_model.inputs[1].get_names() == {"y:0"} - - ov_model = convert_model(model, input=["x", "y"]) - - assert ov_model.inputs[0].get_names() == {"x"} - assert ov_model.inputs[1].get_names() == {"y"} diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_help.py 
b/tests/layer_tests/mo_python_api_tests/test_mo_help.py deleted file mode 100644 index d69a6b60f40791..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_mo_help.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import os -import sys -import unittest -from openvino.tools.mo import mo -from openvino.tools.mo.utils.cli_parser import get_mo_convert_params -from pathlib import Path - -from common.utils.common_utils import shell - - -class TestSubprocessMoConvert(unittest.TestCase): - def test_mo_convert(self): - mo_convert_params = get_mo_convert_params() - - # Test mo tool help - mo_path = Path(mo.__file__).parent - mo_runner = mo_path.joinpath('main.py').as_posix() - params = [sys.executable, mo_runner, "--help"] - _, mo_output, _ = shell(params) - - # We don't expect PyTorch specific parameters to be in help message of the MO tool. - for group in mo_convert_params: - if group == 'Pytorch-specific parameters:' or group == 'PaddlePaddle-specific parameters:': - continue - for param_name in group: - assert param_name in mo_output - - # Test Python API help, applicable for convert_model from tools.mo only - mo_help_file = os.path.join(os.path.dirname(__file__), "mo_convert_help.py") - params = [sys.executable, mo_help_file] - _, mo_output, _ = shell(params) - - legacy_params = get_mo_convert_params() - for group in legacy_params: - for param_name in group: - assert param_name in mo_output diff --git a/tests/layer_tests/mo_python_api_tests/test_transform_config/test_config.json b/tests/layer_tests/mo_python_api_tests/test_transform_config/test_config.json deleted file mode 100644 index a953f82b28c683..00000000000000 --- a/tests/layer_tests/mo_python_api_tests/test_transform_config/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "custom_attributes": { - "masks_node_prefix_name": "node_name" - }, - "id": "ConfigBasedTestReplacement", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tests/layer_tests/ovc_python_api_tests/test_ovc_cli_tool.py b/tests/layer_tests/ovc_python_api_tests/test_ovc_cli_tool.py index 8ca07fd2334b60..d921d1977d915f 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_ovc_cli_tool.py +++ b/tests/layer_tests/ovc_python_api_tests/test_ovc_cli_tool.py @@ -118,7 +118,7 @@ def test_ovc_tool(self): core = Core() - # tests for MO cli tool + # tests for OVC cli tool exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_model": self.tmp_dir + os.sep + "model1"}) assert not exit_code @@ -133,7 +133,7 @@ def test_ovc_tool_output_dir(self): core = Core() - # tests for MO cli tool + # tests for OVC cli tool exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_model": self.tmp_dir}) assert not exit_code diff --git a/tests/layer_tests/ovc_python_api_tests/test_ovc_tool_help.py b/tests/layer_tests/ovc_python_api_tests/test_ovc_tool_help.py index 762552121e7a10..cbbae8775d4803 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_ovc_tool_help.py +++ b/tests/layer_tests/ovc_python_api_tests/test_ovc_tool_help.py @@ -22,7 +22,7 @@ def test_mo_convert(self): params = [sys.executable, mo_runner, "--help"] _, mo_output, _ = shell(params) - # We don't expect PyTorch specific parameters to be in help message of the MO tool. + # We don't expect PyTorch specific parameters to be in help message of the OVC tool. 
for group in mo_convert_params: if group == 'Pytorch-specific parameters:' or group == 'PaddlePaddle-specific parameters:': continue diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py index 02b4d569927909..d8c4ee525fa9c1 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py +++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py @@ -1118,7 +1118,7 @@ def test_example_inputs(self): from openvino.tools.ovc import convert_model pytorch_model = create_pt_model_with_custom_op() - # Check that mo raises error message of wrong argument. + # Check that OVC raises error message of wrong argument. with self.assertRaisesRegex(TypeError, ".*got an unexpected keyword argument 'example_inputs'.*"): convert_model(pytorch_model, example_inputs=(torch.tensor(1),)) @@ -1164,7 +1164,7 @@ def test_failed_extension(self): def relu_bad(n): assert False, "Something happened" - # Check that mo raises error message of wrong argument. + # Check that OVC raises error message of wrong argument. with self.assertRaisesRegex(Exception, ".*Conversion is failed for: aten::relu.*"): convert_model(pt_model, input=(inp_shapes, np.float32), extensions=[ ConversionExtension("aten::relu", relu_bad)]) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py index 76655a0b85476c..36706810025d31 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py @@ -76,7 +76,7 @@ def create_keras_multiheadattention_net(self, marks=pytest.mark.xfail(reason="45432")) ] - @pytest.mark.skip(reason='Einsum is unsupported in MO') + @pytest.mark.skip(reason='Einsum is unsupported in OVC') @pytest.mark.parametrize("params", test_data) @pytest.mark.precommit def test_keras_multiheadattention(self, params, ie_device, precision, ir_version, temp_dir, @@ -104,7 +104,7 @@ def test_keras_multiheadattention(self, params, ie_device, precision, ir_version marks=pytest.mark.xfail(reason="45432")) ] - @pytest.mark.skip(reason='Einsum is unsupported in MO') + @pytest.mark.skip(reason='Einsum is unsupported in OVC') @pytest.mark.parametrize("params", test_data_no_bias) @pytest.mark.nightly def test_keras_multiheadattention_no_bias(self, params, ie_device, precision, ir_version, diff --git a/tools/ovc/.pylintrc b/tools/ovc/.pylintrc index b0667dc23e577a..8072aa5cb4589a 100644 --- a/tools/ovc/.pylintrc +++ b/tools/ovc/.pylintrc @@ -160,7 +160,7 @@ expected-line-ending-format= spelling-dict=en_US # List of comma separated words that should not be checked. -spelling-ignore-words=TF, MO, IR, IE, OVC +spelling-ignore-words=TF, IR, IE, OVC # A path to a file that contains private dictionary; one word per line. 
spelling-private-dict-file=.pylintdict diff --git a/tools/ovc/CMakeLists.txt b/tools/ovc/CMakeLists.txt index 2bdcd36f5b9503..beb201c6a4d3ee 100644 --- a/tools/ovc/CMakeLists.txt +++ b/tools/ovc/CMakeLists.txt @@ -38,6 +38,7 @@ install(DIRECTORY ${OpenVINOConverter_SOURCE_DIR}/openvino # # Tests # + if(ENABLE_TESTS) install(DIRECTORY unit_tests DESTINATION tests/ovc diff --git a/tools/ovc/openvino/tools/ovc/cli_parser.py b/tools/ovc/openvino/tools/ovc/cli_parser.py index ab4099a17f7093..48a396c508bc49 100644 --- a/tools/ovc/openvino/tools/ovc/cli_parser.py +++ b/tools/ovc/openvino/tools/ovc/cli_parser.py @@ -623,7 +623,7 @@ def depersonalize(value: str, key: str): def get_available_front_ends(fem=None): - # Use this function as workaround to avoid IR frontend usage by MO + # Use this function as workaround to avoid IR frontend usage by OVC if fem is None: return [] available_moc_front_ends = fem.get_available_front_ends() diff --git a/tools/ovc/openvino/tools/ovc/convert_impl.py b/tools/ovc/openvino/tools/ovc/convert_impl.py index 152ff03c28e71a..c690df9d4a00f6 100644 --- a/tools/ovc/openvino/tools/ovc/convert_impl.py +++ b/tools/ovc/openvino/tools/ovc/convert_impl.py @@ -323,7 +323,7 @@ def normalize_inputs(argv: argparse.Namespace): argv.placeholder_data_types - dictionary where key is node name, value is node np.type, or list of np.types if node names were not set. - :param argv: MO arguments + :param argv: OVC arguments """ # Parse input to list of InputCutInfo inputs = input_to_input_cut_info(argv.input) @@ -514,7 +514,7 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): if paddle_runtime_converter: paddle_runtime_converter.destroy() - # add MO meta data to model + # add OVC meta data to model ov_model.set_rt_info(get_rt_version(), "Runtime_version") for key, value in non_default_params.items(): ov_model.set_rt_info(str(value), ["conversion_parameters", str(key)]) diff --git a/tools/ovc/unit_tests/ovc/bom_test.py b/tools/ovc/unit_tests/ovc/bom_test.py deleted file mode 100644 index 3b396ac240c5f6..00000000000000 --- a/tools/ovc/unit_tests/ovc/bom_test.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import platform -import re -import unittest -from itertools import islice - -from openvino.tools.ovc.utils import get_mo_root_dir - -dir_patterns_to_skip = ['.*__pycache__.*'] -file_patterns_to_skip = ['.*\\.DS_Store$', - '.*\\.swp', - '.*\\.pyc$', - 'requirements.*\.txt', - 'version.txt'] -full_name_patterns_to_skip = [] -if platform.system() == 'Windows': - full_name_patterns_to_skip = [i.replace('/', '\\\\') for i in full_name_patterns_to_skip] - - -def is_match(name: str, patterns: ()): - return any((re.match(pattern, name) for pattern in patterns)) - - -class TestBOMFile(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.existing_files = [] - cur_path = os.path.join(os.path.realpath(__file__), os.pardir) - mo_path = os.path.abspath(os.path.join(cur_path, os.pardir, os.pardir)) - with open(os.path.join(mo_path, 'unit_tests', 'ovc', 'package_BOM.txt'), 'r') as bom_file: - if platform.system() == 'Windows': - cls.existing_files = [name.rstrip().replace('/', '\\') for name in bom_file.readlines()] - else: - cls.existing_files = [name.rstrip() for name in bom_file.readlines()] - - # dirs_to_search is the root directory where MO is located, 'openvino_project_root/tools/mo/openvino/tools' - cls.dirs_to_search = os.path.normpath(get_mo_root_dir() + '/ovc/') - cls.prefix = 
os.path.normpath(get_mo_root_dir() + '../../../') # prefix which is used in BOM file - cls.expected_header = [re.compile(pattern) for pattern in [ - r'^# Copyright \([cC]\) [0-9\-]+ Intel Corporation$', - r'^# SPDX-License-Identifier: Apache-2.0$', - ]] - - def test_bom_file(self): - missing_files = list() - for src_dir in [self.dirs_to_search]: - if not os.path.isdir(src_dir): - continue - for root, dirs, files in os.walk(src_dir): - if is_match(root, dir_patterns_to_skip): - continue - for f in files: - full_name = os.path.join(root, f) - full_name = full_name[len(self.prefix) + 1:] - if is_match(f, file_patterns_to_skip): - continue - if is_match(full_name, full_name_patterns_to_skip): - continue - if full_name not in self.existing_files: - missing_files.append(full_name) - - if len(missing_files) != 0: - print("Missing files:") - for f in missing_files: - print(f.replace('\\', '/')) - self.assertTrue(not len(missing_files), '{} files missed in BOM'.format(len(missing_files))) - - def test_bom_does_not_contain_unittest_files(self): - for file_name in self.existing_files: - self.assertFalse(file_name.endswith('_test.py'), 'BOM file contains test file {}'.format(file_name)) - - def test_deleted_files_still_stored_in_bom(self): - deleted = list() - for file in self.existing_files: - if not os.path.isfile(os.path.join(self.prefix, file)): - deleted.append(file) - if len(deleted) != 0: - print("Deleted files still stored in BOM file:") - for f in deleted: - print(f) - self.assertTrue(not len(deleted), '{} files deleted but still stored in BOM'.format(len(deleted))) - - def test_alphabetical_order_and_duplicates(self): - sorted_bom = sorted([x for x in self.existing_files if self.existing_files.count(x) == 1], key=str.lower) - if self.existing_files != sorted_bom: - print("Wrong order. 
Alphabetical order of BOM is:") - print(*sorted_bom, sep='\n') - self.assertTrue(False) - - def test_missed_intel_header(self): - missing_files = list() - for src_dir in [self.dirs_to_search]: - if not os.path.isdir(src_dir): - continue - for root, dirs, files in os.walk(src_dir): - if is_match(root, dir_patterns_to_skip): - continue - for f in files: - ignores = [ - '^__init__.py$', - '^caffe_pb2.py$', - '^.*.pyc$', - '^generate_caffe_pb2.py$' - ] - if not is_match(f, ['.*.py$']) or is_match(f, ignores): - continue - full_name = os.path.join(root, f) - with open(full_name, 'r') as source_f: - # read two more lines from the file because it can contain shebang and empty lines - s = [x.strip() for x in islice(source_f, len(self.expected_header) + 2)] - # skip shebang and empty lines in the beginning of the file - try: - while s[0] in ('', '#!/usr/bin/env python3'): - s = s[1:] - for str_ind in range(0, len(self.expected_header)): - if not re.match(self.expected_header[str_ind], s[str_ind]): - missing_files.append(full_name) - break - except: - pass - self.assertTrue(not len(missing_files), - '{} files with missed header: \n{}'.format(len(missing_files), '\n'.join(missing_files))) diff --git a/tools/ovc/unit_tests/ovc/package_BOM.txt b/tools/ovc/unit_tests/ovc/package_BOM.txt deleted file mode 100644 index a0d0d5382e043f..00000000000000 --- a/tools/ovc/unit_tests/ovc/package_BOM.txt +++ /dev/null @@ -1,32 +0,0 @@ -openvino/tools/ovc/__init__.py -openvino/tools/ovc/__main__.py -openvino/tools/ovc/cli_parser.py -openvino/tools/ovc/convert.py -openvino/tools/ovc/convert_data_type.py -openvino/tools/ovc/convert_impl.py -openvino/tools/ovc/environment_setup_utils.py -openvino/tools/ovc/error.py -openvino/tools/ovc/get_ov_update_message.py -openvino/tools/ovc/help.py -openvino/tools/ovc/logger.py -openvino/tools/ovc/main.py -openvino/tools/ovc/moc_frontend/__init__.py -openvino/tools/ovc/moc_frontend/analysis.py -openvino/tools/ovc/moc_frontend/check_config.py -openvino/tools/ovc/moc_frontend/extractor.py -openvino/tools/ovc/moc_frontend/jax_frontend_utils.py -openvino/tools/ovc/moc_frontend/layout_utils.py -openvino/tools/ovc/moc_frontend/moc_emit_ir.py -openvino/tools/ovc/moc_frontend/offline_transformations.py -openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py -openvino/tools/ovc/moc_frontend/pipeline.py -openvino/tools/ovc/moc_frontend/preprocessing.py -openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py -openvino/tools/ovc/moc_frontend/shape_utils.py -openvino/tools/ovc/moc_frontend/type_utils.py -openvino/tools/ovc/ovc.py -openvino/tools/ovc/telemetry_params.py -openvino/tools/ovc/telemetry_stub.py -openvino/tools/ovc/telemetry_utils.py -openvino/tools/ovc/utils.py -openvino/tools/ovc/version.py \ No newline at end of file