From 5c87a54ec45e0b730fd7d13d80f1b3fdfb8776c1 Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Sun, 10 Nov 2024 22:00:57 +0400 Subject: [PATCH 01/26] [GHA][MO][openvino-dev] Remove tests for legacy openvino-dev from GHA We are removing openvino-dev in 2025.0 so all legacy tests are no longer needed in GHA. Signed-off-by: Kazantsev, Roman --- .github/CODEOWNERS | 2 - .github/dependabot.yml | 2 +- .github/labeler.yml | 6 -- .github/workflows/coverage.yml | 9 --- .github/workflows/job_onnx_models_tests.yml | 5 -- .github/workflows/job_python_unit_tests.yml | 52 ------------------ .github/workflows/mo.yml | 58 -------------------- .github/workflows/windows_vs2019_release.yml | 26 --------- 8 files changed, 1 insertion(+), 159 deletions(-) delete mode 100644 .github/workflows/mo.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8e7eb099540439..3598e32166a809 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -108,8 +108,6 @@ /tools/ @openvinotoolkit/openvino-tools-maintainers /tools/benchmark_tool/ @openvinotoolkit/openvino-ie-python-api-maintainers /tools/legacy/ @openvinotoolkit/openvino-samples-maintainers -/tools/openvino_dev/ @openvinotoolkit/openvino-tools-maintainers @openvinotoolkit/openvino-ie-python-api-maintainers -/tools/mo/ @openvinotoolkit/openvino-mo-maintainers /tools/ovc/ @openvinotoolkit/openvino-ovc-maintainers /thirdparty/open_model_zoo/ @openvinotoolkit/omz-maintainers diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 1511e6a2c30170..99338f8500d10d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -41,7 +41,7 @@ updates: - "rkazants" versioning-strategy: increase-if-necessary - # Model Optimizer, openvino_dev and Benchmark tool + # ovc tool - package-ecosystem: pip directory: "/tools" schedule: diff --git a/.github/labeler.yml b/.github/labeler.yml index daa5375b175bd3..e9b2acb26c9072 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -100,10 +100,6 @@ 'category: LP transformations': - 'src/common/low_precision_transformations/**/*' -'category: MO': -- 'tools/mo/**/*' -- 'tests/layer_tests/mo_python_api_tests/**/*' - 'category: OVC': - 'tools/ovc/**/*' - 'tests/layer_tests/ovc_python_api_tests/**/*' @@ -119,7 +115,6 @@ - any: ['src/bindings/js/node/CMakeLists.txt', 'src/bindings/js/node/package.json', 'src/bindings/js/node/package-lock.json'] -- 'tools/openvino_dev/**/*' 'category: PDPD FE': - 'src/frontends/paddle/**/*' @@ -183,7 +178,6 @@ 'category: tools': - any: ['tools/**', - '!tools/mo/**/*', '!tools/ovc/**/*'] 'category: transformations': diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 6cb0b2c5b6233c..cde1b9cf67e2fc 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -50,12 +50,6 @@ jobs: python3 -m pip install -r ${{ github.workspace }}/src/frontends/onnx/tests/requirements.txt # For running TensorFlow frontend unit tests python3 -m pip install -r ${{ github.workspace }}/src/frontends/tensorflow/tests/requirements.txt - # For MO unit tests - python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_caffe.txt - python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_kaldi.txt - python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_onnx.txt - python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_tf2.txt - python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_dev.txt - name: Build OpenVINO with CMake uses: 
ashutoshvarma/action-cmake-build@ade188313bc7eaa6f14349569a64d8bc716342ff # master @@ -84,9 +78,6 @@ jobs: - name: Install wheel packages run: cmake -DCOMPONENT=python_wheels -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/install_pkg -P '${{ github.workspace }}/build/cmake_install.cmake' - - name: Install python wheels - run: python3 -m pip install openvino-dev --find-links=${{ github.workspace }}/install_pkg/tools - - name: List binaries run: ls -la ${{ github.workspace }}/bin/intel64/${{ env.CMAKE_BUILD_TYPE }} diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index c879f0cb6a1efc..47879a9946181c 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -89,11 +89,6 @@ jobs: extras_to_install="onnx" - # Find and install OV dev wheel - ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[$extras_to_install] - working-directory: ${{ env.INSTALL_WHEELS_DIR }} - - name: Install Python tests dependencies run: | # To enable pytest parallel features diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 64be9ef4bbcc44..36c36e86371458 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -102,17 +102,11 @@ jobs: extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch" - # Find and install OV dev wheel - ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[$extras_to_install] - working-directory: ${{ env.INSTALL_WHEELS_DIR }} - - name: Install Python API tests dependencies run: | # To enable pytest parallel features python3 -m pip install pytest-xdist[psutil] python3 -m pip install -r ${INSTALL_TEST_DIR}/bindings/python/requirements_test.txt - python3 -m pip install -r ${INSTALL_TEST_DIR}/mo/requirements_dev.txt # # Tests @@ -127,18 +121,6 @@ jobs: --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py - - name: Model Optimizer unit tests - if: fromJSON(inputs.affected-components).MO.test - run: | - if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" != "ARM64" ]]; then - # required for MxNet - apt-get install -y libgomp1 libquadmath0 - fi - - # Skips under tickets: 133405, 122666 - python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \ - --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml - - name: Python ONNX operators tests if: (fromJSON(inputs.affected-components).Python_API.test || fromJSON(inputs.affected-components).ONNX_FE.test) && @@ -161,24 +143,6 @@ jobs: # layer test requirements python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - - name: MO Python API Tests - if: fromJSON(inputs.affected-components).MO.test - run: | - # Import 'test_utils' installed in '/tests/python/openvino' - export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH - export PYTHONPATH=${INSTALL_TEST_DIR}/python - - if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" == "ARM64" ]]; then - # Find gomp lib - GOMP_LIB=$(find "${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/" -name '*libgomp-*so*') - export LD_PRELOAD=${GOMP_LIB} - fi - - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/mo_python_api_tests -n logical --junitxml=${INSTALL_TEST_DIR}/TEST-test_mo_convert.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - name: OVC Python API Tests if: 
fromJSON(inputs.affected-components).MO.test run: | @@ -230,22 +194,6 @@ jobs: TEST_DEVICE: CPU TEST_PRECISION: FP16 - - name: TensorFlow 1 Layer Tests - Legacy FE - if: fromJSON(inputs.affected-components).TF_FE.test - run: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --use_legacy_frontend --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: TensorFlow 2 Layer Tests - Legacy FE - # no longer workable since TF 2.17 - # will be removed in 2024.5 - if: ${{ 'false' }} - run: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/test_tf2_keras_activation.py --use_legacy_frontend --ir_version=11 -k "sigmoid" --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_Activation.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - name: Clone API snippets if: runner.os != 'macOS' uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.github/workflows/mo.yml b/.github/workflows/mo.yml deleted file mode 100644 index f48986d4a0d304..00000000000000 --- a/.github/workflows/mo.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: MO -on: - push: - paths: - - 'tools/mo/**' - - '.github/workflows/mo.yml' - branches: - - 'master' - - 'releases/**' - pull_request: - paths: - - 'tools/mo/**' - - '.github/workflows/mo.yml' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -permissions: read-all - -jobs: - Pylint-UT: - runs-on: ubuntu-22.04 - if: ${{ github.repository_owner == 'openvinotoolkit' }} - steps: - - name: Clone OpenVINO - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Setup Python - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 - with: - python-version: '3.10' - - - name: Cache pip - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('tools/mo/requirements*.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - ${{ runner.os }}- - - - name: Install dependencies - run: | - python -m pip install --upgrade pip setuptools - # For UT - pip install unittest-xml-reporting==3.0.2 - # MO requirements - pip install -r requirements_caffe.txt - pip install -r requirements_kaldi.txt - pip install -r requirements_onnx.txt - pip install -r requirements_tf2.txt - pip install -r requirements_dev.txt - working-directory: tools/mo - - - name: Pylint-MO - run: pylint -d C,R,W openvino/tools/mo - working-directory: tools/mo diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index b9b8fa76d37c34..04de44df7f8e5c 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -296,11 +296,6 @@ jobs: $ovCoreWheelPath=Get-ChildItem -Path . -Filter openvino-*.whl | % { $_.FullName } python3 -m pip install "$ovCoreWheelPath" - # Find and install the dev OV wheel - $ovDevWheelPath=Get-ChildItem -Path . 
-Filter openvino_dev*.whl | % { $_.FullName } - python3 -m pip install "$ovDevWheelPath[caffe,kaldi,onnx,tensorflow2,pytorch]" - working-directory: ${{ env.INSTALL_WHEELS_DIR }} - - name: Install Python API tests dependencies run: | # To enable pytest parallel features @@ -309,9 +304,6 @@ jobs: # For torchvision to OpenVINO preprocessing converter python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/python/preprocess/torchvision/requirements.txt - # TODO: replace with Python API tests requirements - python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt - # For getting rid of SSL issues during model downloading for unit tests python3 -m pip install certifi @@ -325,12 +317,6 @@ jobs: set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py - - name: Model Optimizer UT - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - shell: cmd - run: | - python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml - - name: Install Python Layer tests dependencies run: | # layer test requirements @@ -366,18 +352,6 @@ jobs: --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx_frontend.xml ^ --ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py - - name: MO Python API Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - shell: cmd - run: | - :: Used for 'test_utils' installed in '\python\openvino\test_utils' - set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\python\openvino\test_utils;${{ env.INSTALL_TEST_DIR }}\python;%PYTHONPATH% - - python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - name: OVC Python API Tests if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test shell: cmd From 60c172097013d3651655f2503e219e594ed11c3b Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Mon, 11 Nov 2024 13:34:06 +0400 Subject: [PATCH 02/26] Remove rest of legacy artifacts Signed-off-by: Kazantsev, Roman --- .github/workflows/job_onnx_models_tests.yml | 2 -- .github/workflows/job_python_unit_tests.yml | 2 -- .github/workflows/windows_vs2019_release.yml | 3 +++ 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index 47879a9946181c..9d1c1902bb2ec0 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -87,8 +87,6 @@ jobs: # Install the core OV wheel python3 -m pip install ./openvino-*.whl - extras_to_install="onnx" - - name: Install Python tests dependencies run: | # To enable pytest parallel features diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 36c36e86371458..ee51bf0f4e1e08 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -100,8 +100,6 @@ jobs: # Install the core OV wheel python3 -m pip install ./openvino-*.whl - extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch" - - name: Install Python API tests dependencies run: | # To enable pytest parallel features diff --git a/.github/workflows/windows_vs2019_release.yml 
b/.github/workflows/windows_vs2019_release.yml index 04de44df7f8e5c..3c6eea7eef2030 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -304,6 +304,9 @@ jobs: # For torchvision to OpenVINO preprocessing converter python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/python/preprocess/torchvision/requirements.txt + # For validation of Python API + python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/bindings/python/requirements_test.txt + # For getting rid of SSL issues during model downloading for unit tests python3 -m pip install certifi From 44c3c88be707c3c1e320357952211f043cf97d97 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 13:49:58 +0400 Subject: [PATCH 03/26] Update .github/workflows/job_python_unit_tests.yml --- .github/workflows/job_python_unit_tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index ee51bf0f4e1e08..3477763871b7c6 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -100,6 +100,7 @@ jobs: # Install the core OV wheel python3 -m pip install ./openvino-*.whl + working-directory: ${{ env.INSTALL_WHEELS_DIR }} - name: Install Python API tests dependencies run: | # To enable pytest parallel features From 083868d02e5f9b4b50f119e39d89c044f9dddfdf Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 13:51:29 +0400 Subject: [PATCH 04/26] Update .github/workflows/windows_vs2019_release.yml --- .github/workflows/windows_vs2019_release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 3c6eea7eef2030..266f351c7227dd 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -296,6 +296,7 @@ jobs: $ovCoreWheelPath=Get-ChildItem -Path . 
-Filter openvino-*.whl | % { $_.FullName } python3 -m pip install "$ovCoreWheelPath" + working-directory: ${{ env.INSTALL_WHEELS_DIR }} - name: Install Python API tests dependencies run: | # To enable pytest parallel features From 17e5278cabdc09fa476efa4f7c3a187bc2f78ffa Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 13:52:03 +0400 Subject: [PATCH 05/26] Update .github/workflows/windows_vs2019_release.yml --- .github/workflows/windows_vs2019_release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 266f351c7227dd..03ee4dc1abd748 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -297,6 +297,7 @@ jobs: python3 -m pip install "$ovCoreWheelPath" working-directory: ${{ env.INSTALL_WHEELS_DIR }} + - name: Install Python API tests dependencies run: | # To enable pytest parallel features From ca167f2bd8fe992e629d26542cdc8816ff7297af Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 13:52:25 +0400 Subject: [PATCH 06/26] Update .github/workflows/job_python_unit_tests.yml --- .github/workflows/job_python_unit_tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 3477763871b7c6..fcbafe959b33d2 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -101,6 +101,7 @@ jobs: python3 -m pip install ./openvino-*.whl working-directory: ${{ env.INSTALL_WHEELS_DIR }} + - name: Install Python API tests dependencies run: | # To enable pytest parallel features From c8c80e1a7a1b57f24110e1059da7c02fee29b842 Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Mon, 11 Nov 2024 14:30:14 +0400 Subject: [PATCH 07/26] Install additional deps for ovc unit tests Signed-off-by: Kazantsev, Roman --- .github/workflows/job_python_unit_tests.yml | 20 ++++++++------------ tests/constraints.txt | 1 + tests/layer_tests/requirements.txt | 1 + 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index fcbafe959b33d2..8f5f53d966ccba 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -91,10 +91,6 @@ jobs: should-setup-pip-paths: ${{ runner.os == 'Linux' }} self-hosted-runner: ${{ runner.os == 'Linux' }} - # - # Tests - # - - name: Install OpenVINO Python wheels run: | # Install the core OV wheel @@ -108,6 +104,14 @@ jobs: python3 -m pip install pytest-xdist[psutil] python3 -m pip install -r ${INSTALL_TEST_DIR}/bindings/python/requirements_test.txt + - name: Install Python Layer tests dependencies and for OVC unit tests + run: | + # For torchvision to OpenVINO preprocessing converter + python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt + + # layer test requirements + python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt + # # Tests # @@ -135,14 +139,6 @@ jobs: if: fromJSON(inputs.affected-components).MO.test run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml - - name: Install Python Layer tests dependencies - run: | - # For torchvision to OpenVINO preprocessing converter - python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt - - # layer test requirements 
- python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - - name: OVC Python API Tests if: fromJSON(inputs.affected-components).MO.test run: | diff --git a/tests/constraints.txt b/tests/constraints.txt index 2272151565ca8a..ddddaebbd4d635 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -13,6 +13,7 @@ defusedxml>=0.7.1 fastjsonschema~=2.17.1 tensorflow>=2.5,<2.19.0 requests>=2.25.1 +onnx>=1.8.1,<=1.17.0 opencv-python>=4.5 paddlepaddle==2.6.1 protobuf>=3.18.1,<4.0.0 diff --git a/tests/layer_tests/requirements.txt b/tests/layer_tests/requirements.txt index 04889ebce10a39..015640b2ff6f10 100644 --- a/tests/layer_tests/requirements.txt +++ b/tests/layer_tests/requirements.txt @@ -1,6 +1,7 @@ -c ../constraints.txt # paddlepaddle # ticket 95904 numpy +onnx onnxruntime>=1.18.0,<=1.19.2; python_version <= '3.9' onnxruntime>=1.18.0; python_version >= '3.10' requests From 46f76d42bfafb37396a55e027d0e8ca31e71f726 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 15:14:29 +0400 Subject: [PATCH 08/26] Update job_python_unit_tests.yml --- .github/workflows/job_python_unit_tests.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 8f5f53d966ccba..3ef95394039d15 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -165,16 +165,6 @@ jobs: export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml - - name: ONNX Layer Tests - if: ${{ fromJSON(inputs.affected-components).ONNX_FE.test }} - run: | - # requires 'unit_tests' from 'tools/mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - name: JAX Layer Tests - JAX FE if: ${{ fromJSON(inputs.affected-components).JAX_FE.test && runner.arch != 'ARM64' && runner.os != 'macOS' }} run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit_jax_fe --junitxml=${INSTALL_TEST_DIR}/TEST-jax_fe.xml From d20bedc9649dfcf2e4dabf4f5de416761ca324dc Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 15:16:58 +0400 Subject: [PATCH 09/26] Update windows_vs2019_release.yml --- .github/workflows/windows_vs2019_release.yml | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 03ee4dc1abd748..4f958b01efb4bb 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -315,28 +315,17 @@ jobs: - name: Set SSL_CERT_FILE for model downloading for unit tests run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV - - name: Python API Tests - #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 - shell: cmd - run: | - set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py - - name: Install Python Layer tests dependencies run: | # layer test 
requirements python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - - name: ONNX Layer Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test + - name: Python API Tests + #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd run: | - :: requires 'unit_tests' from 'tools/mo' - set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH% - python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -n logical -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 + set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% + python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py - name: TensorFlow Lite Layer Tests - TFL FE if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test From 3f8edb0a24444af9a71d5a886cc0564c31908252 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 11 Nov 2024 19:37:30 +0400 Subject: [PATCH 10/26] Update .github/workflows/job_onnx_models_tests.yml --- .github/workflows/job_onnx_models_tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index 9d1c1902bb2ec0..a90cd3465a8f8e 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -86,6 +86,7 @@ jobs: run: | # Install the core OV wheel python3 -m pip install ./openvino-*.whl + working-directory: ${{ env.INSTALL_WHEELS_DIR }} - name: Install Python tests dependencies run: | From 236435748488f1e286a57dae40d4423e21ee5cb4 Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Tue, 12 Nov 2024 13:08:24 +0400 Subject: [PATCH 11/26] Fix issues with ONNX models tests Signed-off-by: Kazantsev, Roman --- .github/workflows/job_onnx_models_tests.yml | 3 + .../layer_tests/common/utils/common_utils.py | 32 +-- .../ovc_python_api_tests/test_pytorch.py | 182 ------------------ tests/requirements_onnx | 3 + 4 files changed, 16 insertions(+), 204 deletions(-) create mode 100644 tests/requirements_onnx diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index 9d1c1902bb2ec0..3f87f72ae56a66 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -87,6 +87,9 @@ jobs: # Install the core OV wheel python3 -m pip install ./openvino-*.whl + - name: Install ONNX Models tests requirements + run: python3 -m pip install -r ${INSTALL_TEST_DIR}/requirements_onnx + - name: Install Python tests dependencies run: | # To enable pytest parallel features diff --git a/tests/layer_tests/common/utils/common_utils.py b/tests/layer_tests/common/utils/common_utils.py index 0c8ad494c5cec2..620f2fee9de260 100644 --- a/tests/layer_tests/common/utils/common_utils.py +++ b/tests/layer_tests/common/utils/common_utils.py @@ -14,36 +14,24 @@ def generate_ir(coverage=False, **kwargs): - from openvino.tools.mo import mo - mo_path = Path(mo.__file__).parent - mo_runner = mo_path.joinpath('main.py').as_posix() + from openvino.tools.ovc import ovc + # Get OVC file directory + ovc_path = Path(ovc.__file__).parent + + ovc_runner = ovc_path.joinpath('main.py').as_posix() if coverage: - params = [sys.executable, '-m', 'coverage', 'run', '-p', 
'--source={}'.format(mo_path.parent), - '--omit=*_test.py', mo_runner] + params = [sys.executable, '-m', 'coverage', 'run', '-p', '--source={}'.format(ovc_runner.parent), + '--omit=*_test.py', ovc_runner] else: - params = [sys.executable, mo_runner] + params = [sys.executable, ovc_runner] for key, value in kwargs.items(): - if key == "batch": - params.extend(("-b", str(value))) - elif key == "k": - params.extend(("-k", str(value))) - # for FP32 set explicitly compress_to_fp16=False, - # if we omit this argument for FP32, it will be set implicitly to True as the default + if key == 'input_model': + params.append((str(value))) elif key == 'compress_to_fp16': params.append("--{}={}".format(key, value)) - elif isinstance(value, bool) and value: - params.append("--{}".format(key)) - elif isinstance(value, bool) and not value: - continue - elif (isinstance(value, tuple) and value) or (isinstance(value, str)): - params.extend(("--{}".format(key), str('"{}"'.format(value)))) - elif key == "mean_values" and (' ' in value or '(' in value): - params.extend(("--{}".format(key), str('"{}"'.format(value)))) else: params.extend(("--{}".format(key), str(value))) exit_code, stdout, stderr = shell(params) - logger.info("Model Optimizer out:\n{}".format(stdout)) - logger.error(stderr) return exit_code, stderr diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py index 1a49a989c11df2..1625db78aa024d 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py +++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py @@ -366,188 +366,6 @@ def scripted_fn(x: torch.Tensor, y: torch.Tensor): return scripted_fn, ref_model, {'input': [(inp_shape, Type.f32), (inp_shape, Type.f32)]} -def create_pytorch_nn_module_layout_list(tmp_dir): - from openvino.runtime import Layout - pt_model = make_pt_model_two_inputs() - shape = [1, 3, 10, 10] - - shape = PartialShape(shape) - ref_model = make_ref_pt_model_two_inputs(shape) - ref_model.inputs[0].node.layout = Layout('nchw') - ref_model.inputs[1].node.layout = Layout('nhwc') - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ['nchw', Layout('nhwc')], - 'use_convert_model_from_mo': True - } - - -def create_pytorch_nn_module_layout_list_case2(tmp_dir): - from openvino.runtime import Layout - pt_model = make_pt_model_two_inputs() - shape = [1, 3, 10, 10] - - shape = PartialShape(shape) - ref_model = make_ref_pt_model_two_inputs(shape) - ref_model.inputs[0].node.layout = Layout('nchw') - ref_model.inputs[1].node.layout = Layout('nhwc') - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ('nchw', Layout('nhwc')), - 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_mean_list_compression_disabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - add1 = ov.opset8.add(param1, const1) - add2 = ov.opset8.add(param2, const2) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'mean_values': [[0, 
0, 0], [0, 0, 0]], - 'compress_to_fp16': False, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_mean_list_compression_default(tmp_dir): - # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled - # therefore decompression Converts will not be present - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) - add1 = ov.opset8.add(param1, const1) - add2 = ov.opset8.add(param2, const2) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) - const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) - const1_decompressed = ov.opset8.convert( - const1, destination_type=np.float32) - const2_decompressed = ov.opset8.convert( - const2, destination_type=np.float32) - - add1 = ov.opset8.add(param1, const1_decompressed) - add2 = ov.opset8.add(param2, const2_decompressed) - mul = ov.opset8.multiply(add1, add2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, { - 'input': [(shape, np.float32), (shape, np.float32)], 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'compress_to_fp16': True, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_disabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - sub1 = ov.opset8.multiply(param1, const1) - sub2 = ov.opset8.multiply(param2, const2) - mul = ov.opset8.multiply(sub1, sub2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'compress_to_fp16': False, 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_default(tmp_dir): - # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled - # therefore decompression Converts will not be present - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) - sub1 = ov.opset8.multiply(param1, const1) - sub2 = 
ov.opset8.multiply(param2, const2) - mul = ov.opset8.multiply(sub1, sub2) - relu = ov.opset8.relu(mul) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'use_convert_model_from_mo': True} - - -def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): - pt_model = make_pt_model_two_inputs() - shape = [1, 10, 10, 3] - - shape = PartialShape(shape) - param1 = ov.opset8.parameter(shape) - param2 = ov.opset8.parameter(shape) - const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) - const1_decompressed = ov.opset8.convert( - const1, destination_type=np.float32) - const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) - const2_decompressed = ov.opset8.convert( - const2, destination_type=np.float32) - mul1 = ov.opset8.multiply(param1, const1_decompressed) - mul2 = ov.opset8.multiply(param2, const2_decompressed) - mul3 = ov.opset8.multiply(mul1, mul2) - relu = ov.opset8.relu(mul3) - sigm = ov.opset8.sigmoid(relu) - - parameter_list = [param1, param2] - ref_model = Model([sigm], parameter_list, "test") - - return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], - 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'compress_to_fp16': True, 'use_convert_model_from_mo': True} - - def create_pytorch_nn_module_with_compressed_constants(tmp_dir): import torch diff --git a/tests/requirements_onnx b/tests/requirements_onnx new file mode 100644 index 00000000000000..1dfc0077b5d075 --- /dev/null +++ b/tests/requirements_onnx @@ -0,0 +1,3 @@ +numpy>=1.16.6,<1.27 +onnx>=1.8.1,<=1.17.0 +protobuf>=3.18.1,<4.0.0 From 74a6fbe8f50a324cfd92f68feae3cf68e0c7503f Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Tue, 12 Nov 2024 13:55:40 +0400 Subject: [PATCH 12/26] Fix ovc param Signed-off-by: Kazantsev, Roman --- tests/layer_tests/common/mo_convert_test_class.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/layer_tests/common/mo_convert_test_class.py b/tests/layer_tests/common/mo_convert_test_class.py index 6a57339cedf111..7eff4f7fee9e8a 100644 --- a/tests/layer_tests/common/mo_convert_test_class.py +++ b/tests/layer_tests/common/mo_convert_test_class.py @@ -63,7 +63,8 @@ def _test(self, temp_dir, test_params, ref_params): core = Core() test_params.update({"model_name": 'model_test', "output_dir": temp_dir}) - ref_params.update({"model_name": 'model_ref', "output_dir": temp_dir}) + ref_output_path = Path(temp_dir, 'model_ref.xml').absolute().as_posix() + ref_params.update({"output_model": ref_output_path}) self.generate_ir_python_api(**test_params) From 7183348464494090601b8fe6cde335396062a86b Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Thu, 14 Nov 2024 10:37:29 +0400 Subject: [PATCH 13/26] Remove invalid test cases using type specification in ovc tool Signed-off-by: Kazantsev, Roman --- .../ovc_python_api_tests/test_complex_params.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py index 3d6df0c95f31ae..06819bd615963e 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py +++ b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py @@ -120,10 +120,6 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, self._test(temp_dir, test_params, 
ref_params) test_data = [ - {'params_test': {'input': {"Input:0": ([3, 2], ov.Type.i32)}}, - 'params_ref': {'input': "Input:0[3,2]{i32}"}}, - {'params_test': {'input': {"Input:0": ov.Type.i32}}, - 'params_ref': {'input': "Input:0{i32}"}}, {'params_test': {'input': {"Input:0": [3, 2]}}, 'params_ref': {'input': "Input:0[3,2]"}}, {'params_test': {'input': (3, 2)}, @@ -138,10 +134,6 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, 'params_ref': {'input': "Input:0[?,10]"}}, {'params_test': {'input': PartialShape([-1, 10])}, 'params_ref': {'input': "Input:0[?,10]"}}, - {'params_test': {'input': np.int32}, - 'params_ref': {'input': "Input:0{i32}"}}, - {'params_test': {'input': (np.int32, [1, 2, 3])}, - 'params_ref': {'input': "Input:0[1,2,3]{i32}"}}, {'params_test': {'input': [Dimension(3, 10), 10, -1]}, 'params_ref': {'input': 'Input:0[3..10,10,?]'}}, ] From 35af2a8b5674ca2b22b8e373f785bdbfb7f7ffaa Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Thu, 14 Nov 2024 10:39:38 +0400 Subject: [PATCH 14/26] Install onnx req file Signed-off-by: Kazantsev, Roman --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e4b5fcd5d1089f..bec2823004c1e3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -9,3 +9,4 @@ add_subdirectory(e2e_tests) install(FILES requirements_pytorch DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) install(FILES requirements_tensorflow DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) +install(FILES requirements_onnx DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) From 89ba377a9ce6823c883d16130170e878ded62a31 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 14 Nov 2024 10:40:54 +0400 Subject: [PATCH 15/26] Update tests/layer_tests/ovc_python_api_tests/test_pytorch.py --- .../ovc_python_api_tests/test_pytorch.py | 179 ++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py index 1625db78aa024d..18fa3b9be86d06 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py +++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py @@ -365,7 +365,186 @@ def scripted_fn(x: torch.Tensor, y: torch.Tensor): ref_model = make_ref_pt_model_two_inputs(inp_shape) return scripted_fn, ref_model, {'input': [(inp_shape, Type.f32), (inp_shape, Type.f32)]} +def create_pytorch_nn_module_layout_list(tmp_dir): + from openvino.runtime import Layout + pt_model = make_pt_model_two_inputs() + shape = [1, 3, 10, 10] + + shape = PartialShape(shape) + ref_model = make_ref_pt_model_two_inputs(shape) + ref_model.inputs[0].node.layout = Layout('nchw') + ref_model.inputs[1].node.layout = Layout('nhwc') + + return pt_model, ref_model, { + 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ['nchw', Layout('nhwc')], + 'use_convert_model_from_mo': True + } + + +def create_pytorch_nn_module_layout_list_case2(tmp_dir): + from openvino.runtime import Layout + pt_model = make_pt_model_two_inputs() + shape = [1, 3, 10, 10] + + shape = PartialShape(shape) + ref_model = make_ref_pt_model_two_inputs(shape) + ref_model.inputs[0].node.layout = Layout('nchw') + ref_model.inputs[1].node.layout = Layout('nhwc') + + return pt_model, ref_model, { + 'input': [(shape, np.float32), (shape, np.float32)], 'layout': ('nchw', Layout('nhwc')), + 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_mean_list_compression_disabled(tmp_dir): + pt_model = 
make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) + const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) + add1 = ov.opset8.add(param1, const1) + add2 = ov.opset8.add(param2, const2) + mul = ov.opset8.multiply(add1, add2) + relu = ov.opset8.relu(mul) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + + return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], + 'mean_values': [[0, 0, 0], [0, 0, 0]], + 'compress_to_fp16': False, 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_mean_list_compression_default(tmp_dir): + # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled + # therefore decompression Converts will not be present + pt_model = make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) + const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32) + add1 = ov.opset8.add(param1, const1) + add2 = ov.opset8.add(param2, const2) + mul = ov.opset8.multiply(add1, add2) + relu = ov.opset8.relu(mul) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + + return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], + 'mean_values': [[0, 0, 0], [0, 0, 0]], + 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir): + pt_model = make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) + const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16) + const1_decompressed = ov.opset8.convert( + const1, destination_type=np.float32) + const2_decompressed = ov.opset8.convert( + const2, destination_type=np.float32) + + add1 = ov.opset8.add(param1, const1_decompressed) + add2 = ov.opset8.add(param2, const2_decompressed) + mul = ov.opset8.multiply(add1, add2) + relu = ov.opset8.relu(mul) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + + return pt_model, ref_model, { + 'input': [(shape, np.float32), (shape, np.float32)], 'mean_values': [[0, 0, 0], [0, 0, 0]], + 'compress_to_fp16': True, 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_scale_list_compression_disabled(tmp_dir): + pt_model = make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) + const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) + sub1 = ov.opset8.multiply(param1, const1) + sub2 = ov.opset8.multiply(param2, const2) + mul = ov.opset8.multiply(sub1, sub2) + relu = ov.opset8.relu(mul) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + + return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], + 
'scale_values': [[1, 1, 1], [1, 1, 1]], + 'compress_to_fp16': False, 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_scale_list_compression_default(tmp_dir): + # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled + # therefore decompression Converts will not be present + pt_model = make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) + const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32) + sub1 = ov.opset8.multiply(param1, const1) + sub2 = ov.opset8.multiply(param2, const2) + mul = ov.opset8.multiply(sub1, sub2) + relu = ov.opset8.relu(mul) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + + return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], + 'scale_values': [[1, 1, 1], [1, 1, 1]], + 'use_convert_model_from_mo': True} + + +def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): + pt_model = make_pt_model_two_inputs() + shape = [1, 10, 10, 3] + + shape = PartialShape(shape) + param1 = ov.opset8.parameter(shape) + param2 = ov.opset8.parameter(shape) + const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) + const1_decompressed = ov.opset8.convert( + const1, destination_type=np.float32) + const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16) + const2_decompressed = ov.opset8.convert( + const2, destination_type=np.float32) + mul1 = ov.opset8.multiply(param1, const1_decompressed) + mul2 = ov.opset8.multiply(param2, const2_decompressed) + mul3 = ov.opset8.multiply(mul1, mul2) + relu = ov.opset8.relu(mul3) + sigm = ov.opset8.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], + 'scale_values': [[1, 1, 1], [1, 1, 1]], + 'compress_to_fp16': True, 'use_convert_model_from_mo': True} def create_pytorch_nn_module_with_compressed_constants(tmp_dir): import torch From 8afbc104266cd378f32385c2968f7a33c3290c92 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 14 Nov 2024 10:41:30 +0400 Subject: [PATCH 16/26] Apply suggestions from code review --- tests/layer_tests/ovc_python_api_tests/test_pytorch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py index 18fa3b9be86d06..1a49a989c11df2 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py +++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py @@ -365,6 +365,7 @@ def scripted_fn(x: torch.Tensor, y: torch.Tensor): ref_model = make_ref_pt_model_two_inputs(inp_shape) return scripted_fn, ref_model, {'input': [(inp_shape, Type.f32), (inp_shape, Type.f32)]} + def create_pytorch_nn_module_layout_list(tmp_dir): from openvino.runtime import Layout pt_model = make_pt_model_two_inputs() @@ -545,6 +546,8 @@ def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'compress_to_fp16': True, 'use_convert_model_from_mo': True} + + def create_pytorch_nn_module_with_compressed_constants(tmp_dir): import torch From 8eebeb121c9c046d1754ef646446e5e623e93bdc Mon Sep 17 00:00:00 2001 From: Roman 
Kazantsev Date: Thu, 14 Nov 2024 10:44:37 +0400 Subject: [PATCH 17/26] Apply suggestions from code review --- tests/constraints.txt | 1 - tests/layer_tests/requirements.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index ddddaebbd4d635..2272151565ca8a 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -13,7 +13,6 @@ defusedxml>=0.7.1 fastjsonschema~=2.17.1 tensorflow>=2.5,<2.19.0 requests>=2.25.1 -onnx>=1.8.1,<=1.17.0 opencv-python>=4.5 paddlepaddle==2.6.1 protobuf>=3.18.1,<4.0.0 diff --git a/tests/layer_tests/requirements.txt b/tests/layer_tests/requirements.txt index 015640b2ff6f10..04889ebce10a39 100644 --- a/tests/layer_tests/requirements.txt +++ b/tests/layer_tests/requirements.txt @@ -1,7 +1,6 @@ -c ../constraints.txt # paddlepaddle # ticket 95904 numpy -onnx onnxruntime>=1.18.0,<=1.19.2; python_version <= '3.9' onnxruntime>=1.18.0; python_version >= '3.10' requests From bf6aae646fc843da170bf55f1200b74e0c459ada Mon Sep 17 00:00:00 2001 From: "Kazantsev, Roman" Date: Thu, 14 Nov 2024 10:47:34 +0400 Subject: [PATCH 18/26] Install ONNX tests reqs from dedicated file Signed-off-by: Kazantsev, Roman --- .github/workflows/job_python_unit_tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 3ef95394039d15..024789fbd1c361 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -112,6 +112,11 @@ jobs: # layer test requirements python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt + - name: Install ONNX tests dependencies + run: | + # ONNX tests requirements + python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements_onnx + # # Tests # From 71569db1c2994a80d2d2f3f9902e10cb6752bae5 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 14 Nov 2024 10:53:28 +0400 Subject: [PATCH 19/26] Update .github/workflows/windows_vs2019_release.yml --- .github/workflows/windows_vs2019_release.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 7a8dc4cb44c255..25062cd5ccf414 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -315,6 +315,13 @@ jobs: - name: Set SSL_CERT_FILE for model downloading for unit tests run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV + - name: Python API Tests + #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 + shell: cmd + run: | + set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% + python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py + - name: Install Python Layer tests dependencies run: | # layer test requirements From 61eb712831f903727881107f7fb38005689e1485 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 14 Nov 2024 10:56:30 +0400 Subject: [PATCH 20/26] Update windows_vs2019_release.yml --- .github/workflows/windows_vs2019_release.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 25062cd5ccf414..7a8dc4cb44c255 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ 
b/.github/workflows/windows_vs2019_release.yml
@@ -315,13 +315,6 @@ jobs:
       - name: Set SSL_CERT_FILE for model downloading for unit tests
         run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV
 
-      - name: Python API Tests
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101
-        shell: cmd
-        run: |
-          set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH%
-          python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py
-
       - name: Install Python Layer tests dependencies
         run: |
           # layer test requirements

From aa1aa5c391d7ec0903758cdb8b97f53cde028e52 Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Thu, 14 Nov 2024 13:08:21 +0400
Subject: [PATCH 21/26] Update .github/workflows/job_python_unit_tests.yml

---
 .github/workflows/job_python_unit_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml
index 024789fbd1c361..8075f3299fe063 100644
--- a/.github/workflows/job_python_unit_tests.yml
+++ b/.github/workflows/job_python_unit_tests.yml
@@ -115,7 +115,7 @@ jobs:
       - name: Install ONNX tests dependencies
         run: |
           # ONNX tests requirements
-          python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements_onnx
+          python3 -m pip install -r ${INSTALL_TEST_DIR}/requirements_onnx
 
   #
   # Tests

From 41f9c8f1a8886c8e93fd55d9a4084116bc78f4df Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Thu, 14 Nov 2024 13:09:39 +0400
Subject: [PATCH 22/26] Update .github/workflows/windows_vs2019_release.yml

---
 .github/workflows/windows_vs2019_release.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml
index 7a8dc4cb44c255..ed3695491af4de 100644
--- a/.github/workflows/windows_vs2019_release.yml
+++ b/.github/workflows/windows_vs2019_release.yml
@@ -309,6 +309,9 @@ jobs:
           # For validation of Python API
           python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/bindings/python/requirements_test.txt
 
+          # ONNX tests requirements
+          python3 -m pip install -r ${INSTALL_TEST_DIR}/requirements_onnx
+
           # For getting rid of SSL issues during model downloading for unit tests
           python3 -m pip install certifi

From 1695f3d706de03428df4f5159569b6db51be1bf6 Mon Sep 17 00:00:00 2001
From: "Kazantsev, Roman"
Date: Mon, 18 Nov 2024 21:34:44 +0400
Subject: [PATCH 23/26] Remove legacy cases

Signed-off-by: Kazantsev, Roman
---
 .../test_complex_params.py                   |  53 -----
 .../ovc_python_api_tests/test_pytorch.py     | 190 ------------------
 2 files changed, 243 deletions(-)

diff --git a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py
index 06819bd615963e..57c98db8c45e61 100644
--- a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py
+++ b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py
@@ -70,55 +70,6 @@ def create_tf_model_single_input_output(tmp_dir):
         # save model to .pb and return path to the model
         return save_to_pb(tf_net, tmp_dir)
 
-    test_data = [
-        {'params_test': {'output': ["Sigmoid_0:0", "Sigmoid_2:0"]},
-         'params_ref': {'output': "Sigmoid_0,Sigmoid_2"}},
-        {'params_test': {'output': ["Sigmoid_0:0"]},
-         'params_ref': {'output': "Sigmoid_0"}},
-        {'params_test': {'input': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]]},
-         'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}},
-        {'params_test': {'input': [PartialShape([1, 3, -1, -1]), [1, 3, -1, -1]]},
-         'params_ref': {'input_shape': "[1,3,?,?],[1,3,?,?]", 'input': 'Input1:0,Input2:0'}},
-        {'params_test': {'input': [(2, 3, 4), [2, 3, 4], (Dimension(2), Dimension(3), Dimension(4))]},
-         'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}},
-        {'params_test': {'input': {"Input1:0": PartialShape([2, 3, 4]), "Input2:0": [2, 3, 4],
-                                   "Input3:0": [Dimension(2), Dimension(3), Dimension(4)]}},
-         'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}},
-        {'params_test': {'input': {"Input2:0": [1, -1, -1, -1],
-                                   "Input3:0": [Dimension(1), Dimension(-1), Dimension(-1), Dimension(-1)]}},
-         'params_ref': {'input_shape': "[1,?,?,?],[1,?,?,?]", 'input': 'Input2:0,Input3:0'}},
-        {'params_test': {'input': [np.int32, Type(np.int32), np.int32]},
-         'params_ref': {'input': 'Input1:0{i32},Input2:0{i32},Input3:0{i32}'}},
-        {'params_test': {'input': [ov.Type.f32, ov.Type.f32]},
-         'params_ref': {'input': 'Input1:0{f32},Input2:0{f32}'}},
-        {'params_test': {'input': [([1, 3, -1, -1], ov.Type.i32), ov.Type.i32, ov.Type.i32]},
-         'params_ref': {'input': 'Input1:0[1,3,?,?]{i32},Input2:0{i32},Input3:0{i32}'}},
-        {'params_test': {'input': (PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)])},
-         'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}},
-        {'params_test': {'input': (PartialShape([1, 3, -1, -1]), [1, 3, -1, -1])},
-         'params_ref': {'input_shape': "[1,3,?,?],[1,3,?,?]", 'input': 'Input1:0,Input2:0'}},
-        {'params_test': {'input': ((2, 3, 4), [2, 3, 4], (Dimension(2), Dimension(3), Dimension(4)))},
-         'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1:0,Input2:0,Input3:0'}},
-        {'params_test': {'input': (np.int32, Type(np.int32), np.int32)},
-         'params_ref': {'input': 'Input1:0{i32},Input2:0{i32},Input3:0{i32}'}},
-        {'params_test': {'input': (ov.Type.f32, ov.Type.f32)},
-         'params_ref': {'input': 'Input1:0{f32},Input2:0{f32}'}},
-        {'params_test': {'input': (([1, 3, -1, -1], ov.Type.i32), ov.Type.i32, ov.Type.i32)},
-         'params_ref': {'input': 'Input1:0[1,3,?,?]{i32},Input2:0{i32},Input3:0{i32}'}}
-    ]
-
-    @pytest.mark.parametrize("params", test_data)
-    @pytest.mark.nightly
-    def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version,
-                                 temp_dir, use_legacy_frontend):
-        tf_net_path = self.create_tf_model(temp_dir)
-
-        test_params = params['params_test']
-        ref_params = params['params_ref']
-        test_params.update({'input_model': tf_net_path})
-        ref_params.update({'input_model': tf_net_path})
-        self._test(temp_dir, test_params, ref_params)
-
     test_data = [
         {'params_test': {'input': {"Input:0": [3, 2]}},
          'params_ref': {'input': "Input:0[3,2]"}},
@@ -126,10 +77,6 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version,
          'params_ref': {'input': "Input:0[3,2]"}},
         {'params_test': {'input': (3, Dimension(2))},
          'params_ref': {'input': "Input:0[3,2]"}},
-        {'params_test': {'input': [3, 2]},
-         'params_ref': {'input': "Input:0[3 2]"}},
-        {'params_test': {'input': [Dimension(3, 10), 2]},
-         'params_ref': {'input': "Input:0[3..10 2]"}},
         {'params_test': {'input': (-1, 10)},
          'params_ref': {'input': "Input:0[?,10]"}},
         {'params_test': {'input': PartialShape([-1, 10])},
diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
index 1a49a989c11df2..02b4d569927909 100644
--- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
+++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
@@ -366,188 +366,6 @@ def scripted_fn(x: torch.Tensor, y: torch.Tensor):
     return scripted_fn, ref_model, {'input': [(inp_shape, Type.f32), (inp_shape, Type.f32)]}
 
 
-def create_pytorch_nn_module_layout_list(tmp_dir):
-    from openvino.runtime import Layout
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 3, 10, 10]
-
-    shape = PartialShape(shape)
-    ref_model = make_ref_pt_model_two_inputs(shape)
-    ref_model.inputs[0].node.layout = Layout('nchw')
-    ref_model.inputs[1].node.layout = Layout('nhwc')
-
-    return pt_model, ref_model, {
-        'input': [(shape, np.float32), (shape, np.float32)], 'layout': ['nchw', Layout('nhwc')],
-        'use_convert_model_from_mo': True
-    }
-
-
-def create_pytorch_nn_module_layout_list_case2(tmp_dir):
-    from openvino.runtime import Layout
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 3, 10, 10]
-
-    shape = PartialShape(shape)
-    ref_model = make_ref_pt_model_two_inputs(shape)
-    ref_model.inputs[0].node.layout = Layout('nchw')
-    ref_model.inputs[1].node.layout = Layout('nhwc')
-
-    return pt_model, ref_model, {
-        'input': [(shape, np.float32), (shape, np.float32)], 'layout': ('nchw', Layout('nhwc')),
-        'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_mean_list_compression_disabled(tmp_dir):
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32)
-    const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32)
-    add1 = ov.opset8.add(param1, const1)
-    add2 = ov.opset8.add(param2, const2)
-    mul = ov.opset8.multiply(add1, add2)
-    relu = ov.opset8.relu(mul)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)],
-                                 'mean_values': [[0, 0, 0], [0, 0, 0]],
-                                 'compress_to_fp16': False, 'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_mean_list_compression_default(tmp_dir):
-    # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled
-    # therefore decompression Converts will not be present
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32)
-    const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float32)
-    add1 = ov.opset8.add(param1, const1)
-    add2 = ov.opset8.add(param2, const2)
-    mul = ov.opset8.multiply(add1, add2)
-    relu = ov.opset8.relu(mul)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)],
-                                 'mean_values': [[0, 0, 0], [0, 0, 0]],
-                                 'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir):
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16)
-    const2 = ov.opset8.constant([[[[-0.0, -0.0, -0.0]]]], dtype=np.float16)
-    const1_decompressed = ov.opset8.convert(
-        const1, destination_type=np.float32)
-    const2_decompressed = ov.opset8.convert(
-        const2, destination_type=np.float32)
-
-    add1 = ov.opset8.add(param1, const1_decompressed)
-    add2 = ov.opset8.add(param2, const2_decompressed)
-    mul = ov.opset8.multiply(add1, add2)
-    relu = ov.opset8.relu(mul)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {
-        'input': [(shape, np.float32), (shape, np.float32)], 'mean_values': [[0, 0, 0], [0, 0, 0]],
-        'compress_to_fp16': True, 'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_scale_list_compression_disabled(tmp_dir):
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32)
-    const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32)
-    sub1 = ov.opset8.multiply(param1, const1)
-    sub2 = ov.opset8.multiply(param2, const2)
-    mul = ov.opset8.multiply(sub1, sub2)
-    relu = ov.opset8.relu(mul)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)],
-                                 'scale_values': [[1, 1, 1], [1, 1, 1]],
-                                 'compress_to_fp16': False, 'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_scale_list_compression_default(tmp_dir):
-    # when 'use_convert_model_from_mo': True by default compression in convert_model is disabled
-    # therefore decompression Converts will not be present
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32)
-    const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float32)
-    sub1 = ov.opset8.multiply(param1, const1)
-    sub2 = ov.opset8.multiply(param2, const2)
-    mul = ov.opset8.multiply(sub1, sub2)
-    relu = ov.opset8.relu(mul)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)],
-                                 'scale_values': [[1, 1, 1], [1, 1, 1]],
-                                 'use_convert_model_from_mo': True}
-
-
-def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir):
-    pt_model = make_pt_model_two_inputs()
-    shape = [1, 10, 10, 3]
-
-    shape = PartialShape(shape)
-    param1 = ov.opset8.parameter(shape)
-    param2 = ov.opset8.parameter(shape)
-    const1 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16)
-    const1_decompressed = ov.opset8.convert(
-        const1, destination_type=np.float32)
-    const2 = ov.opset8.constant([[[[1, 1, 1]]]], dtype=np.float16)
-    const2_decompressed = ov.opset8.convert(
-        const2, destination_type=np.float32)
-    mul1 = ov.opset8.multiply(param1, const1_decompressed)
-    mul2 = ov.opset8.multiply(param2, const2_decompressed)
-    mul3 = ov.opset8.multiply(mul1, mul2)
-    relu = ov.opset8.relu(mul3)
-    sigm = ov.opset8.sigmoid(relu)
-
-    parameter_list = [param1, param2]
-    ref_model = Model([sigm], parameter_list, "test")
-
-    return pt_model, ref_model, {'input': [(shape, np.float32), (shape, np.float32)],
-                                 'scale_values': [[1, 1, 1], [1, 1, 1]],
-                                 'compress_to_fp16': True, 'use_convert_model_from_mo': True}
-
-
 def create_pytorch_nn_module_with_compressed_constants(tmp_dir):
     import torch
 
@@ -1208,14 +1026,6 @@ class TestMoConvertPyTorch(CommonMOConvertTest):
         'create_pytorch_nn_module_sample_input_int32_two_inputs',
         'create_pytorch_jit_script_module',
         'create_pytorch_jit_script_function',
-        'create_pytorch_nn_module_layout_list',
-        'create_pytorch_nn_module_layout_list_case2',
-        'create_pytorch_nn_module_mean_list_compression_default',
-        'create_pytorch_nn_module_mean_list_compression_disabled',
-        'create_pytorch_nn_module_mean_list_compression_enabled',
-        'create_pytorch_nn_module_scale_list_compression_default',
-        'create_pytorch_nn_module_scale_list_compression_disabled',
-        'create_pytorch_nn_module_scale_list_compression_enabled',
         'create_pytorch_nn_module_with_compressed_constants',
         'create_pytorch_nn_module_shapes_list_static',
         'create_pytorch_nn_module_shapes_list_static_via_input',

From 1196026b01653013850312353e30ad7d07cd41cc Mon Sep 17 00:00:00 2001
From: "Kazantsev, Roman"
Date: Mon, 18 Nov 2024 22:41:39 +0400
Subject: [PATCH 24/26] Install ONNX deps for tests

Signed-off-by: Kazantsev, Roman
---
 .github/workflows/windows_vs2019_release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml
index ed3695491af4de..1b218cdf7d430b 100644
--- a/.github/workflows/windows_vs2019_release.yml
+++ b/.github/workflows/windows_vs2019_release.yml
@@ -310,7 +310,7 @@ jobs:
           python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/bindings/python/requirements_test.txt
 
           # ONNX tests requirements
-          python3 -m pip install -r ${INSTALL_TEST_DIR}/requirements_onnx
+          python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/requirements_onnx
 
           # For getting rid of SSL issues during model downloading for unit tests
           python3 -m pip install certifi

From 586c7e20856e16db65b8490aa397510ab63ec983 Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Tue, 19 Nov 2024 10:59:42 +0400
Subject: [PATCH 25/26] Update CMakeLists.txt

---
 tests/CMakeLists.txt | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index bec2823004c1e3..08b4308479ef03 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -7,6 +7,5 @@ add_subdirectory(model_hub_tests)
 add_subdirectory(samples_tests)
 add_subdirectory(e2e_tests)
 
-install(FILES requirements_pytorch DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
-install(FILES requirements_tensorflow DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
-install(FILES requirements_onnx DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
+install(FILES requirements_pytorch requirements_tensorflow requirements_onnx
+        DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)

From c5808760eb60aa01dbb7d641664f69382db37065 Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Tue, 19 Nov 2024 11:00:11 +0400
Subject: [PATCH 26/26] Update .github/dependabot.yml

Co-authored-by: Anastasia Kuporosova
---
 .github/dependabot.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 99338f8500d10d..359ff683c9b22a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -41,7 +41,7 @@ updates:
     - "rkazants"
     versioning-strategy: increase-if-necessary
 
-  # ovc tool
+  # ovc and Benchmark tools
   - package-ecosystem: pip
     directory: "/tools"
     schedule: