diff --git a/.github/actions/smart-ci/action.yml b/.github/actions/smart-ci/action.yml
index 23717c9dde50ce..ebe420d4ef301a 100644
--- a/.github/actions/smart-ci/action.yml
+++ b/.github/actions/smart-ci/action.yml
@@ -13,6 +13,9 @@ inputs:
   commit_sha:
     description: "GitHub commit hash. Used if no PR number is set"
     required: false
+  ref_name:
+    description: "GitHub ref name"
+    required: false
   component_pattern:
     description: "Pattern to extract component name from PR label. If not set, any label is considered a component name"
     required: false
@@ -88,6 +91,7 @@ runs:
        python ${{ github.action_path }}/smart_ci.py \
        $([[ -n "${{ inputs.pr }}" ]] && echo '--pr ${{ inputs.pr }}' || echo '-s ${{ inputs.commit_sha }}') \
        -r ${{ inputs.repository }} \
+       -f "${{ inputs.ref_name }}" \
        -p "${{ inputs.component_pattern }}" \
        -c "${{ inputs.components_config }}" \
        -m "${{ inputs.components_config_schema }}" \
diff --git a/.github/actions/smart-ci/smart_ci.py b/.github/actions/smart-ci/smart_ci.py
index fc88294247c221..ae6786d9882bad 100644
--- a/.github/actions/smart-ci/smart_ci.py
+++ b/.github/actions/smart-ci/smart_ci.py
@@ -109,11 +109,26 @@ def get_changed_component_names(pr, all_possible_components: set, component_patt
     return components
 
 
+def get_changeset(gh_api, pr, target_branch, commit_sha):
+    """Returns the changeset either from a PR or from a commit"""
+    if pr:
+        return gh_api.pulls.list_files(pr)
+    if target_branch:
+        target_branch_head_commit = gh_api.repos.get_branch(target_branch).commit.sha
+        # In a merge-queue branch, all commits between the head of the target branch and the head of the
+        # current branch (commit_sha) contain changes added to the queue earlier, to be validated together.
+        # Get all of them plus the changes from commit_sha below
+        changed_files = gh_api.repos.compare_commits(f'{target_branch_head_commit}...{commit_sha}').get('files', [])
+        return changed_files
+    raise ValueError('Either "pr" or "target_branch" parameter must be non-empty')
+
+
 def parse_args():
     parser = argparse.ArgumentParser(description='Returns product components changed in a given PR or commit')
     parser.add_argument('--pr', type=int, required=False, help='PR number. If not set, --commit is used')
     parser.add_argument('-s', '--commit-sha', required=False, help='Commit SHA. If not set, --pr is used')
     parser.add_argument('-r', '--repo', help='GitHub repository')
+    parser.add_argument('-f', '--ref_name', required=False, help='GitHub ref name')
     parser.add_argument('-p', '--pattern', default=None, help='Pattern to extract component name from PR label. '
                                                               'If not set, any label is considered a component name')
     parser.add_argument('-c', '--components-config', default='.github/components.yml',
@@ -172,18 +187,27 @@ def main():
             component_name = component_name_from_label(label, args.pattern)
             all_possible_components.add(component_name if component_name else label)
 
-    no_match_files_changed = False
+    run_full_scope = False
     # For now, we don't want to apply smart ci rules for post-commits
     is_postcommit = not pr
-    if is_postcommit:
+
+    merge_queue_prefix = 'gh-readonly-queue/'
+    is_merge_queue = args.ref_name.startswith(merge_queue_prefix)
+    merge_queue_target_branch = re.findall(f'^{merge_queue_prefix}(.*)/', args.ref_name)[0] if is_merge_queue else None
+
+    if is_merge_queue:
+        logger.info("The run is a merge-queue run, executing full validation scope for all components, unless "
+                    "all queued changes match patterns in 'skip-when-only-listed-files-changed'")
+        run_full_scope = True
+    elif is_postcommit:
         logger.info(f"The run is a post-commit run, executing full validation scope for all components")
+        run_full_scope = True
     else:
         no_match_files_changed = 'no-match-files' in [label.name for label in pr.labels]
         if no_match_files_changed:
             logger.info(f"There are changed files that don't match any pattern in labeler config, "
                         f"executing full validation scope for all components")
-
-    run_full_scope = is_postcommit or no_match_files_changed
+            run_full_scope = True
 
     # In post-commits - validate all components regardless of changeset
     # In pre-commits - validate only changed components with their dependencies
@@ -197,7 +221,7 @@ def main():
     affected_components = cfg.get_affected_components(changed_component_names)
 
     skip_workflow = False
-    if args.pr and not run_full_scope:
+    if is_merge_queue or (args.pr and not run_full_scope):
         if args.skip_when_only_listed_labels_set:
             excepted_labels = set(args.skip_when_only_listed_labels_set.split(','))
             excepted_labels_only = changed_component_names - excepted_labels == set()
@@ -205,7 +229,7 @@ def main():
 
         if not skip_workflow and args.skip_when_only_listed_files_changed:
             # To avoid spending extra API requests running step below only if necessary
-            changed_files = gh_api.pulls.list_files(args.pr)
+            changed_files = get_changeset(gh_api, args.pr, merge_queue_target_branch, args.commit_sha)
             patterns = set(args.skip_when_only_listed_files_changed.split(','))
             matched_files_only = all(any(fnmatch(f.filename, pattern) for pattern in patterns)
                                      for f in changed_files)
diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml
index a8deb0e3d476e1..2405d2a84d71af 100644
--- a/.github/workflows/android_arm64.yml
+++ b/.github/workflows/android_arm64.yml
@@ -31,6 +31,7 @@ jobs:
       repository: ${{ github.repository }}
       pr: ${{ github.event.number }}
       commit_sha: ${{ github.sha }}
+      ref_name: ${{ github.ref_name }}
       component_pattern: "category: (.*)"
       repo_token: ${{ secrets.GITHUB_TOKEN }}
       skip_when_only_listed_labels_set: 'docs'
@@ -125,7 +126,7 @@ jobs:
       - name: Install sccache
         uses: mozilla-actions/sccache-action@v0.0.3
         with:
-          version: "v0.5.4"
+          version: "v0.7.5"
 
 #
 # Build
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index 206d80cd02f0af..3ac8019fba8c66 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -77,7 +77,7 @@ jobs:
       - name: Install sccache
         uses: mozilla-actions/sccache-action@v0.0.3
         with:
-          version: "v0.5.4"
+          version: "v0.7.5"
 
       - name: Setup Python ${{ env.PYTHON_VERSION }}
         uses: ./openvino/.github/actions/setup_python
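Two details make the merge-queue handling above work: GitHub's merge queue runs on synthetic `gh-readonly-queue/<target-branch>/...` refs, and the set of queued changes is exactly the three-dot comparison between the head of the target branch and the head of the queue branch (`commit_sha`). A minimal standalone sketch of the same logic against the GitHub REST API (the repository name and token are placeholders, and `requests` stands in for the API client the script uses):

```python
import re
import requests

API = "https://api.github.com"
REPO = "openvinotoolkit/openvino"  # placeholder repository
MERGE_QUEUE_PREFIX = "gh-readonly-queue/"

def queue_target_branch(ref_name: str):
    """Extract the target branch from a merge-queue ref such as
    'gh-readonly-queue/master/pr-12345-abc123'; returns None for ordinary refs."""
    match = re.match(f"^{MERGE_QUEUE_PREFIX}(.*)/", ref_name)
    return match.group(1) if match else None

def queued_changed_files(target_branch: str, commit_sha: str, token: str):
    """List files changed between the target branch head and the queue head,
    i.e. everything the merge queue validates together."""
    headers = {"Authorization": f"Bearer {token}"}
    branch = requests.get(f"{API}/repos/{REPO}/branches/{target_branch}", headers=headers).json()
    base_sha = branch["commit"]["sha"]
    compare = requests.get(f"{API}/repos/{REPO}/compare/{base_sha}...{commit_sha}", headers=headers).json()
    return [f["filename"] for f in compare.get("files", [])]
```

The greedy `(.*)/` capture swallows everything up to the last slash of the ref, so target branches that themselves contain slashes (for example `releases/2023/3`) are still extracted whole.
diff --git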
a/.github/workflows/dependency_review.yml b/.github/workflows/dependency_review.yml index cadf46ab3b9d1c..777198358c78f1 100644 --- a/.github/workflows/dependency_review.yml +++ b/.github/workflows/dependency_review.yml @@ -12,6 +12,6 @@ jobs: uses: actions/checkout@v4 - name: Dependency Review - uses: actions/dependency-review-action@v3 + uses: actions/dependency-review-action@v4 with: config-file: './.github/dependency_review.yml' diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 84434981be989d..e7bb07af01707b 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -31,6 +31,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -87,7 +88,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install python dependencies run: | diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml index b1f2e6bbf08b59..7c1b29e085331f 100644 --- a/.github/workflows/job_cpu_functional_tests.yml +++ b/.github/workflows/job_cpu_functional_tests.yml @@ -98,7 +98,7 @@ jobs: fi python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke* - timeout-minutes: 20 + timeout-minutes: 25 - name: Save tests execution time uses: actions/cache/save@v3 diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml index 1fbc0c11fe960c..07d8d12d48a386 100644 --- a/.github/workflows/job_onnx_models_tests.yml +++ b/.github/workflows/job_onnx_models_tests.yml @@ -103,4 +103,4 @@ jobs: python3 -m pip install pytest-xdist[psutil] pytest-forked - name: ONNX Models Tests - run: python3 -m pytest --backend="CPU" --model_zoo_dir="${MODELS_SHARE_PATH}" ${INSTALL_TEST_DIR}/onnx/tests/tests_python/test_zoo_models.py -v -n 12 --forked -k 'not _cuda' --model_zoo_xfail + run: python3 -m pytest --backend="CPU" --model_zoo_dir="${MODELS_SHARE_PATH}" ${INSTALL_TEST_DIR}/onnx/tests/tests_python/test_zoo_models.py -v -n auto --forked -k 'not _cuda' --model_zoo_xfail diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml index 1bf4d4aa96eea3..e4dc420ea2fbf1 100644 --- a/.github/workflows/job_onnx_runtime.yml +++ b/.github/workflows/job_onnx_runtime.yml @@ -80,7 +80,10 @@ jobs: popd - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + run: | + ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + # since we are on Ubuntu 22.04, but compiled OpenVINO on Ubuntu 20.04, we need to install `libtbb2` + apt-get install --assume-yes --no-install-recommends libtbb2 - name: Clone ONNX Runtime run: | @@ -97,7 +100,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Build Lin ONNX Runtime run: | @@ -139,13 +142,6 @@ jobs: ./onnxruntime_global_thread_pools_test working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo -# Test removed in onnxruntime 1.16.2 -# - name: Run onnxruntime_api_tests_without_env -# run: | -# source ${INSTALL_DIR}/setupvars.sh -# 
./onnxruntime_api_tests_without_env -# working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - name: Run pytorch-converted tests run: | source ${INSTALL_DIR}/setupvars.sh diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 274aaaacea3223..95edce67c2652d 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -249,7 +249,7 @@ jobs: run: | # requires 'unit_tests' from 'mo' export PYTHONPATH=${INSTALL_TEST_DIR}/mo - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe -n logical --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 319abaa44d564a..01512464638de2 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -38,6 +38,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -113,7 +114,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python @@ -406,14 +407,16 @@ jobs: ONNX_Runtime: name: ONNX Runtime Integration - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || - fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE + # Enable back once https://github.com/microsoft/onnxruntime/pull/19184 is merged + if: ${{ 'false' }} + # if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || + # fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE needs: [ Build, Smart_CI ] uses: ./.github/workflows/job_onnx_runtime.yml with: runner: 'aks-linux-16-cores-32gb' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' - sccache-azure-key-prefix: 'ubuntu20_x86_64_onnxruntime' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' + sccache-azure-key-prefix: 'ubuntu22_x86_64_onnxruntime' ONNX_Models: name: ONNX Models Tests @@ -562,7 +565,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install CUDA run: | diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index fd8403e0de6c53..89c8fd8b1f1e40 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -35,6 +35,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -113,7 +114,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: 
./openvino/.github/actions/setup_python @@ -307,14 +308,16 @@ jobs: ONNX_Runtime: name: ONNX Runtime Integration - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || - fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE + # Enable back once https://github.com/microsoft/onnxruntime/pull/19184 is merged + if: ${{ 'false' }} + # if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT || + # fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE needs: [ Build, Smart_CI ] uses: ./.github/workflows/job_onnx_runtime.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' - sccache-azure-key-prefix: 'ubuntu20_aarch64_onnxruntime' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' + sccache-azure-key-prefix: 'ubuntu22_aarch64_onnxruntime' CXX_Unit_Tests: name: C++ unit tests diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index 79ac560f84b88a..b537810ff5c414 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -35,6 +35,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -111,7 +112,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Setup Python ${{ env.PYTHON_VERSION }} uses: ./openvino/.github/actions/setup_python @@ -150,6 +151,7 @@ jobs: -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DENABLE_PROFILING_ITT=ON \ -DSELECTIVE_BUILD=COLLECT \ + -DENABLE_DEBUG_CAPS=ON \ -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ -S ${OPENVINO_REPO} \ @@ -200,7 +202,7 @@ jobs: pushd ${INSTALL_TEST_DIR} tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz \ tests/ov_cpu_func_tests \ - tests/libtemplate_extension.so \ + tests/libopenvino_template_extension.so \ tests/functional_test_utils/layer_tests_summary/* popd @@ -289,7 +291,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" # # Build # diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 088fddccf1b210..ff8fa44c0c7ab0 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -34,6 +34,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 0165980d1b2f57..c645781522039e 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -52,6 +52,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} 
skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 64873a9b104138..4e7ed1c2a24d49 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -51,6 +51,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml index c5d94f267e4298..89d447bea40b53 100644 --- a/.github/workflows/webassembly.yml +++ b/.github/workflows/webassembly.yml @@ -31,6 +31,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -68,7 +69,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: emcmake cmake - configure run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e25ff48ca31128..daf14e16fd4185 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -34,6 +34,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -119,7 +120,7 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.3 with: - version: "v0.5.4" + version: "v0.7.5" - name: Install build dependencies run: choco install --no-progress ninja @@ -279,20 +280,22 @@ jobs: should-setup-pip-paths: 'false' self-hosted-runner: 'true' + # Test with the short names of the arguments - name: Build cpp samples run: | - & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/cpp_samples + & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples.ps1 -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/cpp_samples env: CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + # Test with the full names of the arguments - name: Build c samples run: | - & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples + & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples.ps1 -InstallDirectory ${{ env.INSTALL_DIR }} -BuildDirectory ${{ env.BUILD_DIR }}/c_samples - name: Samples tests run: | python3 -m pip install --ignore-installed PyYAML -r ${{ env.INSTALL_TEST_DIR }}/smoke_tests/requirements.txt - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/smoke_tests --env_conf ${{ env.INSTALL_TEST_DIR }}/smoke_tests/env_config.yml --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-SamplesSmokeTests.xml env: IE_APP_PATH: ${{ env.INSTALL_DIR }}/samples_bin @@ -300,6 +303,17 @@ jobs: SHARE: ${{ env.INSTALL_TEST_DIR }}/smoke_tests/samples_smoke_tests_data WORKSPACE: ${{ env.INSTALL_DIR }} + # Test .bat scripts for samples building + - name: Build cpp samples (bat) + run: | + & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }}/samples_bat -b ${{ env.BUILD_DIR }}/cpp_samples_bat + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + - name: Build c samples (bat) + run: | + & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }}/samples_bat -b ${{ env.BUILD_DIR }}/c_samples_bat + - name: Upload Test Results uses: actions/upload-artifact@v3 if: ${{ !cancelled() }} @@ -428,6 +442,12 @@ jobs: # TODO: replace with Python API tests requirements python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt + # For getting rid of SSL issues during model downloading for unit tests + python3 -m pip install certifi + + - name: Set SSL_CERT_FILE for model downloading for unit tests + run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV + - name: Python API Tests #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd @@ -472,7 +492,7 @@ jobs: run: | :: requires 'unit_tests' from 'tools/mo' set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH% - python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -n logical -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 @@ -551,7 +571,7 @@ jobs: if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml - name: OVC unit tests @@ -604,156 +624,156 @@ jobs: - name: OpenVINO Core unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml - name: OpenVINO Inference functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml - name: OpenVINO Inference unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml - name: Low Precision Transformations Tests if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml - name: OpenVINO Conditional compilation tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml - name: IR frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml - name: PaddlePaddle frontend tests # Disabled because of CVS-95904 if: ${{ 'false' }} run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - name: ONNX frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: TensorFlow Common frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml - name: TensorFlow frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml - name: TensorFlow Lite frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test run: | # Skip ticket: 126320 - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml - name: Transformations func tests if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml - name: Common test utils tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml - name: Snippets func tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml - name: CPU plugin unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml - name: ov_subgraphs_dumper_tests tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 --device=TEMPLATE --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SubgraphsDumperTests.xml - name: Template OpImpl tests run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml - name: AUTO unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml - name: AUTO func Tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml - name: Template plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - name: OpenVINO C API tests if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml - name: AutoBatch unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml - name: AutoBatch func tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . 
"${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml - name: Proxy Plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml - name: Hetero Unit Tests if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml - name: Hetero Func Tests if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml - name: Upload Test Results @@ -829,7 +849,7 @@ jobs: - name: Intel CPU plugin func tests (parallel) run: | - & "${{ env.INSTALL_DIR }}/setupvars.ps1" + . "${{ env.INSTALL_DIR }}/setupvars.ps1" python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke* timeout-minutes: 60 diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 7780e50eedc894..3e038d9def9d0b 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -37,6 +37,7 @@ jobs: repository: ${{ github.repository }} pr: ${{ github.event.number }} commit_sha: ${{ github.sha }} + ref_name: ${{ github.ref_name }} component_pattern: "category: (.*)" repo_token: ${{ secrets.GITHUB_TOKEN }} skip_when_only_listed_labels_set: 'docs' @@ -181,7 +182,7 @@ jobs: - name: Build C samples - OpenVINO install tree run: | - & ${{ env.INSTALL_DIR }}/samples/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples + & ${{ env.INSTALL_DIR }}/samples/c/build_samples.ps1 -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples - name: Ctest - OpenVINO unit tests shell: cmd @@ -221,7 +222,7 @@ jobs: Compress-Archive @compress $compress = @{ - Path = "${{ env.OPENVINO_REPO }}/bin/intel64/${{ env.CMAKE_BUILD_TYPE }}/ov_cpu_func_tests.exe", "${{ env.OPENVINO_REPO }}/bin/intel64/${{ env.CMAKE_BUILD_TYPE }}/template_extension.dll", "${{ env.OPENVINO_REPO }}/src/tests/test_utils/functional_test_utils/layer_tests_summary", "${{ env.INSTALL_DIR }}/runtime/3rdparty/tbb" + Path = "${{ env.OPENVINO_REPO }}/bin/intel64/${{ env.CMAKE_BUILD_TYPE }}/ov_cpu_func_tests.exe", "${{ env.OPENVINO_REPO }}/bin/intel64/${{ env.CMAKE_BUILD_TYPE }}/openvino_template_extension.dll", "${{ env.OPENVINO_REPO }}/src/tests/test_utils/functional_test_utils/layer_tests_summary", "${{ env.INSTALL_DIR }}/runtime/3rdparty/tbb" CompressionLevel = "Optimal" DestinationPath = "${{ env.BUILD_DIR }}/openvino_tests.zip" } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f91846d8f0c78a..7e2636f9097cf8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,7 +61,7 @@ product better. 
[./docs/dev](https://github.com/openvinotoolkit/openvino/tree/master/docs/dev) folder. * **User documentation** is built from several sources and published at - [docs.openvino.ai](docs.openvino.ai), which is the recommended place for reading + [docs.openvino.ai](https://docs.openvino.ai/), which is the recommended place for reading these documents. Use the files maintained in this repository only for editing purposes. * The easiest way to help with documentation is to review it and provide feedback on the @@ -69,7 +69,7 @@ product better. or think more information should be added, you can reach out to any of the documentation contributors to discuss the potential changes. - You can also create a Pull Request directly, following the [editor's guide](./docs/CONTRIBUTING_DOCS.md). + You can also create a Pull Request directly, following the [editor's guide](./CONTRIBUTING_DOCS.md). ### Promote and Support OpenVINO @@ -152,4 +152,4 @@ We'll make sure to review your Pull Request as soon as possible and provide you ## License By contributing to the OpenVINO project, you agree that your contributions will be -licensed under the terms stated in the [LICENSE](./LICENSE.md) file. +licensed under the terms stated in the [LICENSE](./LICENSE) file. diff --git a/CONTRIBUTING_PR.md b/CONTRIBUTING_PR.md index df0d4ec87bd248..a6717b5336e60d 100644 --- a/CONTRIBUTING_PR.md +++ b/CONTRIBUTING_PR.md @@ -56,7 +56,7 @@ Regardless of the automated tests, you should ensure the quality of your changes ## Need Additional Help? Check these Articles -* [How to create a fork](https://help.github.com/articles/fork-a-rep) +* [How to create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) * [Install Git](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) * If you want to add a new sample, please have a look at the Guide for contributing to C++/C/Python IE samples and add the license statement at the top of new files for diff --git a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake index 6a92e0a69f420e..da543c910736dd 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake @@ -37,7 +37,7 @@ function(_generate_dispatcher) // !! do not modify it !!! // #include \"${XARCH_API_HEADER}\" -#include \"ie_system_conf.h\" +#include \"openvino/runtime/system_conf.hpp\" ") diff --git a/cmake/developer_package/cross_compile/cross_compiled_func.cmake b/cmake/developer_package/cross_compile/cross_compiled_func.cmake index d82d6a73098b6c..c36cbe6762d9a0 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_func.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_func.cmake @@ -42,7 +42,7 @@ set(DISPATCHER_GEN_OPTIONS_HOLDER ${CMAKE_CURRENT_LIST_DIR}/cross_compiled_disp_ # # Allow to enable multiple cross compilation of source file inside one module # with keeping requirements on minimal instruction set. The CPU check performed -# in runtime via common utils declared in "ie_system_conf.h". +# in runtime via common utils declared in "system_conf.h". 
# # Usage example: # cross_compiled_file( diff --git a/cmake/developer_package/ncc_naming_style/openvino.style b/cmake/developer_package/ncc_naming_style/openvino.style index ebf9ef078d4ba7..6608795381e4a1 100644 --- a/cmake/developer_package/ncc_naming_style/openvino.style +++ b/cmake/developer_package/ncc_naming_style/openvino.style @@ -1,6 +1,6 @@ # custom OpenVINO values CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN|OPENVINO_OP)$' -ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$' +ClassName: '^([A-Z][\w]+|b?float16|float8_e4m3|float8_e5m2|numeric_limits|ngraph_error|stopwatch|unsupported_op)$' StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair|stat)$' FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$' Namespace: '^([a-z\d_]*|InferenceEngine)$' @@ -18,7 +18,7 @@ VariableReference: '^\w+$' EnumName: '^[A-Z][\w]+$' # excepts element_type -EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$' +EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|f8e4m3|f8e5m2|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$' # TODO: align UsingDeclaration: '^.*$' TypedefName: '^.*$' diff --git a/cmake/templates/InferenceEngineConfig-version.cmake.in b/cmake/templates/InferenceEngineConfig-version.cmake.in deleted file mode 100644 index 2da3f42e1c6a54..00000000000000 --- a/cmake/templates/InferenceEngineConfig-version.cmake.in +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(PACKAGE_VERSION_MAJOR @OpenVINO_VERSION_MAJOR@) -set(PACKAGE_VERSION_MINOR @OpenVINO_VERSION_MINOR@) -set(PACKAGE_VERSION_PATCH @OpenVINO_VERSION_PATCH@) -set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}") - -set(PACKAGE_VERSION_EXACT False) -set(PACKAGE_VERSION_COMPATIBLE False) - -# Compatibility with old versioning for 2.x -if(PACKAGE_FIND_VERSION_MAJOR VERSION_EQUAL 2) - set(PACKAGE_VERSION_COMPATIBLE True) - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) - message(WARNING "Inference Engine versioning has changed. 
Use ${PACKAGE_VERSION} instead of ${PACKAGE_FIND_VERSION}") - endif() -endif() - -if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) - set(PACKAGE_VERSION_EXACT True) - set(PACKAGE_VERSION_COMPATIBLE True) -endif() - -if(PACKAGE_FIND_VERSION_MAJOR EQUAL PACKAGE_VERSION_MAJOR AND - PACKAGE_FIND_VERSION VERSION_LESS PACKAGE_VERSION) - set(PACKAGE_VERSION_COMPATIBLE True) -endif() diff --git a/cmake/templates/InferenceEngineConfig.cmake.in b/cmake/templates/InferenceEngineConfig.cmake.in deleted file mode 100644 index f94124a5e88708..00000000000000 --- a/cmake/templates/InferenceEngineConfig.cmake.in +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -# -# Inference Engine cmake config -# ------ -# -# This config defines the following variables: -# -# InferenceEngine_FOUND - True if the system has the Inference Engine library -# InferenceEngine_INCLUDE_DIRS - Inference Engine include directories -# InferenceEngine_LIBRARIES - Inference Engine libraries -# -# and the following imported targets: -# -# IE::inference_engine - The Inference Engine library -# IE::inference_engine_c_api - The Inference Engine C API library -# -# Inference Engine version variables: -# -# InferenceEngine_VERSION_MAJOR - major version component -# InferenceEngine_VERSION_MINOR - minor version component -# InferenceEngine_VERSION_PATCH - patch version component -# - -@PACKAGE_INIT@ - -message(WARNING "find_package(InferenceEngine) is deprecated and will be removed in 2024.0 release. Please, use find_package(OpenVINO)") - -if(NOT DEFINED CMAKE_FIND_PACKAGE_NAME) - set(CMAKE_FIND_PACKAGE_NAME InferenceEngine) - set(_ie_need_package_name_reset ON) -endif() - -# need to store current PACKAGE_PREFIX_DIR, because it's overwritten by sub-package one -set(_ie_package_prefix_dir "${PACKAGE_PREFIX_DIR}") - -include(CMakeFindDependencyMacro) - -find_dependency(OpenVINO - PATHS "${CMAKE_CURRENT_LIST_DIR}" - "${CMAKE_CURRENT_LIST_DIR}/../openvino${InferenceEngine_VERSION}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -# create targets with old names for compatibility -if(TARGET openvino::runtime AND NOT TARGET IE::inference_engine) - add_library(IE::inference_engine INTERFACE IMPORTED) - set_target_properties(IE::inference_engine PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime) -endif() - -if(TARGET openvino::runtime::c AND NOT TARGET IE::inference_engine_c_api) - add_library(IE::inference_engine_c_api INTERFACE IMPORTED) - set_target_properties(IE::inference_engine_c_api PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime::c) -endif() - -# mark components as available -foreach(comp inference_engine inference_engine_c_api) - set(${CMAKE_FIND_PACKAGE_NAME}_${comp}_FOUND ON) -endforeach() - -if(NOT ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) - set(${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS inference_engine inference_engine_c_api) -endif() - -unset(InferenceEngine_LIBRARIES) -foreach(comp IN LISTS ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) - # check if the component is available - if(${CMAKE_FIND_PACKAGE_NAME}_${comp}_FOUND) - set(pcomp IE::${comp}) - - list(APPEND InferenceEngine_LIBRARIES ${pcomp}) - endif() -endforeach() - -# restore PACKAGE_PREFIX_DIR -set(PACKAGE_PREFIX_DIR ${_ie_package_prefix_dir}) -unset(_ie_package_prefix_dir) - -check_required_components(${CMAKE_FIND_PACKAGE_NAME}) - -if(_ie_need_package_name_reset) - unset(CMAKE_FIND_PACKAGE_NAME) - unset(_ie_need_package_name_reset) -endif() diff --git 
a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in deleted file mode 100644 index a0111c2302195f..00000000000000 --- a/cmake/templates/ngraphConfig.cmake.in +++ /dev/null @@ -1,102 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** -# -# -# ngraph config file -# ------ -# -# This script defines the following variables and imported targets: -# -# ngraph::ngraph - nGraph core target -# ngraph_FOUND - True if the system has the nGraph library -# NGRAPH_LIBRARIES - nGraph libraries -# -# Frontends: -# -# ngraph_onnx_frontend_FOUND - True if the system has ngraph::onnx_frontend library -# ngraph::onnx_frontend - ONNX FrontEnd target (optional) -# -# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend -# ngraph::paddle_frontend - nGraph Paddle frontend (optional) -# -# ngraph_ir_frontend_FOUND - True if the system has OpenVINO IR frontend -# -# ngraph_tensorflow_frontend_FOUND - True if the system has TensorFlow frontend -# ngraph::tensorflow_frontend - nGraph TensorFlow frontend (optional) -# - -@PACKAGE_INIT@ - -include(CMakeFindDependencyMacro) - -message(WARNING "find_package(ngraph) is deprecated and will be removed in 2024.0 release. 
Please, use find_package(OpenVINO)") - -find_dependency(OpenVINO - PATHS "${CMAKE_CURRENT_LIST_DIR}" - "${CMAKE_CURRENT_LIST_DIR}/../openvino${ngraph_VERSION}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -# create targets with old names for compatibility -if(TARGET openvino::runtime AND NOT TARGET ngraph::ngraph) - add_library(ngraph::ngraph INTERFACE IMPORTED) - set_target_properties(ngraph::ngraph PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime) -endif() - -if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::onnx_frontend) - add_library(ngraph::onnx_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::onnx_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::onnx) -endif() - -if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::paddle_frontend) - add_library(ngraph::paddle_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::paddle_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::paddle) -endif() - -if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::tensorflow_frontend) - add_library(ngraph::tensorflow_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::tensorflow_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::tensorflow) -endif() - -set(ngraph_ngraph_FOUND ON) -set(NGRAPH_LIBRARIES ngraph::ngraph) - -set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) -set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND}) -set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND}) -set(ngraph_onnx_importer_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) - -if(ngraph_onnx_importer_FOUND) - set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_frontend) - # ngraph::onnx_importer target and variables are deprecated - # but need to create a dummy target for BW compatibility - if(NOT TARGET ngraph::onnx_importer) - add_library(ngraph::onnx_importer INTERFACE IMPORTED) - set_target_properties(ngraph::onnx_importer PROPERTIES - INTERFACE_LINK_LIBRARIES ngraph::onnx_frontend) - endif() -endif() - -set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND}) -set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND}) -set(ngraph_onnx_frontend_FOUND ${OpenVINO_Frontend_ONNX_FOUND}) -set(ngraph_ir_frontend_FOUND ${OpenVINO_Frontend_IR_FOUND}) - -check_required_components(ngraph) diff --git a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst index 54c827ffd5f89a..22a24c8858a992 100644 --- a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst +++ b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.rst @@ -3,58 +3,56 @@ Supported Devices ================= - + .. meta:: - :description: Check the list of officially supported models in Intel® + :description: Check the list of officially supported models in Intel® Distribution of OpenVINO™ toolkit. OpenVINO enables you to implement its inference capabilities in your own software, -utilizing various hardware. It currently supports the following processing units +utilizing various hardware. It currently supports the following processing units (for more details, see :doc:`system requirements `): -* :doc:`CPU ` -* :doc:`GPU ` -* :doc:`GNA ` - +* :doc:`CPU ` +* :doc:`GPU ` + .. note:: - GNA, currently available in the Intel® Distribution of OpenVINO™ toolkit, - will be deprecated together with the hardware being discontinued - in future CPU solutions. 
- + With OpenVINO 2024 release, GNA has been deprecated together + with the hardware being discontinued in future CPU solutions. + With OpenVINO™ 2023.0 release, support has been cancelled for: - Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X - Intel® Vision Accelerator Design with Intel® Movidius™ - + To keep using the MYRIAD and HDDL plugins with your hardware, revert to the OpenVINO 2022.3 LTS release. -Beside running inference with a specific device, +Beside running inference with a specific device, OpenVINO offers automated inference management with the following inference modes: -* :doc:`Automatic Device Selection ` - automatically selects the best device - available for the given task. It offers many additional options and optimizations, including inference on +* :doc:`Automatic Device Selection ` - automatically selects the best device + available for the given task. It offers many additional options and optimizations, including inference on multiple devices at the same time. -* :doc:`Multi-device Inference ` - executes inference on multiple devices. +* :doc:`Multi-device Inference ` - executes inference on multiple devices. Currently, this mode is considered a legacy solution. Using Automatic Device Selection is advised. -* :doc:`Heterogeneous Inference ` - enables splitting inference among several devices +* :doc:`Heterogeneous Inference ` - enables splitting inference among several devices automatically, for example, if one device doesn’t support certain operations. -Devices similar to the ones used for benchmarking can be accessed using `Intel® DevCloud for the Edge `__, -a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution +Devices similar to the ones used for benchmarking can be accessed using `Intel® DevCloud for the Edge `__, +a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution of OpenVINO™ Toolkit. `Learn more `__ or `Register here `__. To learn more about each of the supported devices and modes, refer to the sections of: -* :doc:`Inference Device Support ` +* :doc:`Inference Device Support ` * :doc:`Inference Modes ` For setting up a relevant configuration, refer to the -:doc:`Integrate with Customer Application ` +:doc:`Integrate with Customer Application ` topic (step 3 "Configure input and output"). 
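The AUTO, MULTI, and HETERO modes described above are all requested through the device string passed when compiling a model; no other code changes are needed. A minimal sketch using the OpenVINO Python API (the model path is a placeholder):

```python
import openvino as ov

core = ov.Core()
model = core.read_model("model.xml")  # placeholder model

# Automatic Device Selection: AUTO picks the best device from the
# candidate list and can start inference on CPU while the GPU model
# is still compiling.
compiled_auto = core.compile_model(model, "AUTO:GPU,CPU")

# Heterogeneous inference: split one model across devices, falling back
# to CPU for operations the GPU plugin does not support.
compiled_hetero = core.compile_model(model, "HETERO:GPU,CPU")
```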
diff --git a/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst b/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst index e6a5a3bbd44289..9939c5ffc572f4 100644 --- a/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst +++ b/docs/articles_en/about_openvino/compatibility_and_support/supported_operations_inference_devices.rst @@ -14,154 +14,154 @@ for a more detailed and most recent listing of operations that are implemented a See the full conformance report table -================================= =============== ============== ================ ================== - Operations CPU (x86) GPU GNA CPU (Arm®) -================================= =============== ============== ================ ================== - Abs Supported** Supported Not Supported Supported - Acos Supported** Supported Not Supported Supported**** - Acosh Supported** Supported Not Supported Supported**** - Activation-Clamp Supported*** Supported Supported Supported - Activation-ELU Supported*** Supported Not Supported Supported - Activation-Exp Supported*** Supported Supported Supported - Activation-Leaky ReLU Supported*** Supported Supported Not Supported - Activation-Not Supported*** Supported Not Supported Not Supported - Activation-PReLU Supported*** Supported Not Supported Supported - Activation-ReLU Supported*** Supported Supported Supported - Activation-ReLU6 Supported*** Supported Not Supported Not Supported - Activation-Sigmoid/Logistic Supported*** Supported Supported Supported - Activation-TanH Supported*** Supported Supported Supported - ArgMax Supported** Supported Not Supported Not Supported - Asin Supported** Supported Not Supported Supported**** - Asinh Supported** Supported Not Supported Supported**** - Atan Supported** Supported Not Supported Supported**** - Atanh Supported** Supported Not Supported Supported**** - BatchNormalization Supported Supported Not Supported Supported - BinaryConvolution Supported Supported Not Supported Not Supported - Broadcast Supported** Supported Not Supported Supported - Ceil Supported** Supported Not Supported Supported - Concat Supported*** Supported Supported Supported - Const Supported Supported Supported Supported - Convolution-Dilated Supported Supported Not Supported Supported - Convolution-Dilated 3D Supported Supported Not Supported Not Supported - Convolution-Grouped Supported Supported Not Supported Supported - Convolution-Grouped 3D Supported Supported Not Supported Not Supported - Convolution-Ordinary Supported Supported Supported* Supported - Convolution-Ordinary 3D Supported Supported Not Supported Not Supported - Cos Supported** Supported Not Supported Supported**** - Cosh Supported** Supported Not Supported Supported**** - Crop Supported Supported Supported Not Supported - CTCGreedyDecoder Supported** Supported** Not Supported Supported**** - Deconvolution Supported Supported Not Supported Not Supported - Deconvolution 3D Supported Supported Not Supported Not Supported - DeformableConvolution Supported Supported Not Supported Not Supported - DepthToSpace Supported** Supported Not Supported Supported* - DetectionOutput Supported** Supported Not Supported Supported**** - Eltwise-And Supported*** Supported Not Supported Supported - Eltwise-Add Supported*** Supported Not Supported Supported - Eltwise-Div Supported*** Supported Not Supported Supported - Eltwise-Equal Supported*** Supported Not Supported Supported* - 
Eltwise-FloorMod Supported*** Supported Not Supported Supported**** - Eltwise-Greater Supported*** Supported Not Supported Supported - Eltwise-GreaterEqual Supported*** Supported Not Supported Supported - Eltwise-Less Supported*** Supported Not Supported Supported* - Eltwise-LessEqual Supported*** Supported Not Supported Supported* - Eltwise-LogicalAnd Supported*** Supported Not Supported Supported - Eltwise-LogicalOr Supported*** Supported Not Supported Supported - Eltwise-LogicalXor Supported*** Supported Not Supported Supported - Eltwise-Max Supported*** Supported Not Supported Supported - Eltwise-Min Supported*** Supported Not Supported Supported - Eltwise-Mul Supported*** Supported Supported Supported - Eltwise-NotEqual Supported*** Supported Not Supported Supported* - Eltwise-Pow Supported*** Supported Not Supported Supported - Eltwise-Prod Supported*** Supported Supported Not Supported - Eltwise-SquaredDiff Supported*** Supported Not Supported Supported - Eltwise-Sub Supported*** Supported Supported Supported - Eltwise-Sum Supported*** Supported Supported Supported**** - Erf Supported** Supported Not Supported Supported**** - Exp Supported Supported Supported Supported - FakeQuantize Supported Not Supported Not Supported Supported* - Fill Supported** Not Supported Not Supported Not Supported - Flatten Supported Supported Not Supported Not Supported - Floor Supported** Supported Not Supported Supported - FullyConnected (Inner Product) Supported*** Supported Supported Supported - Gather Supported** Supported Not Supported Supported* - GatherTree Supported** Not Supported Not Supported Supported**** - Gemm Supported Supported Not Supported Not Supported - GRN Supported** Supported** Not Supported Supported - HardSigmoid Supported** Supported Not Supported Supported**** - Interp Supported** Supported** Not Supported Supported* - Log Supported** Supported Supported Supported - LRN (Norm) Supported Supported Not Supported Supported* - LSTMCell Supported Supported Supported Supported - GRUCell Supported Supported Supported Supported - RNNCell Supported Supported Not Supported Supported - LSTMSequence Supported Supported Supported Supported**** - GRUSequence Supported Supported Supported Supported**** - RNNSequence Supported Supported Not Supported Supported**** - LogSoftmax Supported** Supported Not Supported Supported - Memory Supported Not Supported Supported Not Supported - MVN Supported** Supported Not Supported Supported* - Neg Supported** Supported Not Supported Supported - NonMaxSuppression Supported** Not Supported Not Supported Supported**** - Normalize Supported** Supported Not Supported Supported* - OneHot Supported** Supported Not Supported Supported**** - Pad Supported** Supported Not Supported Supported* - Permute Supported Supported Supported* Not Supported - Pooling(AVG,MAX) Supported Supported Supported Supported - Pooling(AVG,MAX) 3D Supported Supported Not Supported Supported* - Power Supported** Supported Supported* Supported - PowerFile Supported** Not Supported Not Supported Not Supported - PriorBox Supported** Supported Not Supported Supported - PriorBoxClustered Supported** Supported** Not Supported Supported - Proposal Supported** Supported Not Supported Supported**** - PSROIPooling Supported** Supported Not Supported Supported**** - Range Supported** Not Supported Not Supported Not Supported - Reciprocal Supported** Supported Not Supported Not Supported - ReduceAnd Supported** Supported Not Supported Supported**** - ReduceL1 Supported** Supported Not Supported 
Supported - ReduceL2 Supported** Supported Not Supported Supported - ReduceLogSum Supported** Supported Not Supported Supported - ReduceLogSumExp Supported** Supported Not Supported Not Supported - ReduceMax Supported** Supported Not Supported Supported - ReduceMean Supported** Supported Not Supported Supported - ReduceMin Supported** Supported Not Supported Supported - ReduceOr Supported** Supported Not Supported Supported**** - ReduceProd Supported** Supported Not Supported Supported - ReduceSum Supported** Supported Not Supported Supported - ReduceSumSquare Supported** Supported Not Supported Not Supported - RegionYolo Supported** Supported Not Supported Supported**** - ReorgYolo Supported** Supported Not Supported Supported - Resample Supported** Supported Not Supported Not Supported - Reshape Supported*** Supported Supported Supported - ReverseSequence Supported** Supported Not Supported Supported**** - RNN Supported Not Supported Not Supported Supported - ROIPooling Supported Supported* Not Supported Supported**** - ScaleShift Supported*** Supported Supported Not Supported - ScatterUpdate Supported** Not Supported Not Supported Not Supported - Select Supported Supported Not Supported Supported - Selu Supported** Supported Not Supported Supported**** - ShuffleChannels Supported** Supported Not Supported Supported - Sign Supported** Supported Not Supported Supported - Sin Supported** Supported Not Supported Supported - Sinh Supported** Supported Not Supported Supported**** - SimplerNMS Supported** Supported Not Supported Not Supported - Slice Supported*** Supported Supported Not Supported - SoftMax Supported*** Supported Not Supported Supported - Softplus Supported** Supported Not Supported Supported - Softsign Supported** Supported Supported Not Supported - SpaceToDepth Supported** Not Supported Not Supported Supported* - SpatialTransformer Supported** Not Supported Not Supported Not Supported - Split Supported*** Supported Supported Supported - Squeeze Supported** Supported Supported Supported - StridedSlice Supported** Supported Not Supported Supported* - Tan Supported** Supported Not Supported Supported**** - TensorIterator Supported Not Supported Supported Supported - Tile Supported*** Supported** Not Supported Supported - TopK Supported** Supported Not Supported Supported**** - Unpooling Not Supported Supported Not Supported Not Supported - Unsqueeze Supported** Supported Supported Supported - Upsampling Not Supported Supported Not Supported Not Supported -================================= =============== ============== ================ ================== +================================= =============== ============== ================== + Operations CPU (x86) GPU CPU (Arm®) +================================= =============== ============== ================== + Abs Supported** Supported Supported + Acos Supported** Supported Supported**** + Acosh Supported** Supported Supported**** + Activation-Clamp Supported*** Supported Supported + Activation-ELU Supported*** Supported Supported + Activation-Exp Supported*** Supported Supported + Activation-Leaky ReLU Supported*** Supported Not Supported + Activation-Not Supported*** Supported Not Supported + Activation-PReLU Supported*** Supported Supported + Activation-ReLU Supported*** Supported Supported + Activation-ReLU6 Supported*** Supported Not Supported + Activation-Sigmoid/Logistic Supported*** Supported Supported + Activation-TanH Supported*** Supported Supported + ArgMax Supported** Supported Not Supported + Asin Supported** 
Supported Supported**** + Asinh Supported** Supported Supported**** + Atan Supported** Supported Supported**** + Atanh Supported** Supported Supported**** + BatchNormalization Supported Supported Supported + BinaryConvolution Supported Supported Not Supported + Broadcast Supported** Supported Supported + Ceil Supported** Supported Supported + Concat Supported*** Supported Supported + Const Supported Supported Supported + Convolution-Dilated Supported Supported Supported + Convolution-Dilated 3D Supported Supported Not Supported + Convolution-Grouped Supported Supported Supported + Convolution-Grouped 3D Supported Supported Not Supported + Convolution-Ordinary Supported Supported Supported + Convolution-Ordinary 3D Supported Supported Not Supported + Cos Supported** Supported Supported**** + Cosh Supported** Supported Supported**** + Crop Supported Supported Not Supported + CTCGreedyDecoder Supported** Supported** Supported**** + Deconvolution Supported Supported Not Supported + Deconvolution 3D Supported Supported Not Supported + DeformableConvolution Supported Supported Not Supported + DepthToSpace Supported** Supported Supported* + DetectionOutput Supported** Supported Supported**** + Eltwise-And Supported*** Supported Supported + Eltwise-Add Supported*** Supported Supported + Eltwise-Div Supported*** Supported Supported + Eltwise-Equal Supported*** Supported Supported* + Eltwise-FloorMod Supported*** Supported Supported**** + Eltwise-Greater Supported*** Supported Supported + Eltwise-GreaterEqual Supported*** Supported Supported + Eltwise-Less Supported*** Supported Supported* + Eltwise-LessEqual Supported*** Supported Supported* + Eltwise-LogicalAnd Supported*** Supported Supported + Eltwise-LogicalOr Supported*** Supported Supported + Eltwise-LogicalXor Supported*** Supported Supported + Eltwise-Max Supported*** Supported Supported + Eltwise-Min Supported*** Supported Supported + Eltwise-Mul Supported*** Supported Supported + Eltwise-NotEqual Supported*** Supported Supported* + Eltwise-Pow Supported*** Supported Supported + Eltwise-Prod Supported*** Supported Not Supported + Eltwise-SquaredDiff Supported*** Supported Supported + Eltwise-Sub Supported*** Supported Supported + Eltwise-Sum Supported*** Supported Supported**** + Erf Supported** Supported Supported**** + Exp Supported Supported Supported + FakeQuantize Supported Not Supported Supported* + Fill Supported** Not Supported Not Supported + Flatten Supported Supported Not Supported + Floor Supported** Supported Supported + FullyConnected (Inner Product) Supported*** Supported Supported + Gather Supported** Supported Supported* + GatherTree Supported** Not Supported Supported**** + Gemm Supported Supported Not Supported + GRN Supported** Supported** Supported + HardSigmoid Supported** Supported Supported**** + Interp Supported** Supported** Supported* + Log Supported** Supported Supported + LRN (Norm) Supported Supported Supported* + LSTMCell Supported Supported Supported + GRUCell Supported Supported Supported + RNNCell Supported Supported Supported + LSTMSequence Supported Supported Supported**** + GRUSequence Supported Supported Supported**** + RNNSequence Supported Supported Supported**** + LogSoftmax Supported** Supported Supported + Memory Supported Not Supported Not Supported + MVN Supported** Supported Supported* + Neg Supported** Supported Supported + NonMaxSuppression Supported** Not Supported Supported**** + Normalize Supported** Supported Supported* + OneHot Supported** Supported Supported**** + Pad Supported** 
Supported       Supported*
+ Permute                           Supported       Supported      Not Supported
+ Pooling(AVG,MAX)                  Supported       Supported      Supported
+ Pooling(AVG,MAX) 3D               Supported       Supported      Supported*
+ Power                             Supported**     Supported      Supported
+ PowerFile                         Supported**     Not Supported  Not Supported
+ PriorBox                          Supported**     Supported      Supported
+ PriorBoxClustered                 Supported**     Supported**    Supported
+ Proposal                          Supported**     Supported      Supported****
+ PSROIPooling                      Supported**     Supported      Supported****
+ Range                             Supported**     Not Supported  Not Supported
+ Reciprocal                        Supported**     Supported      Not Supported
+ ReduceAnd                         Supported**     Supported      Supported****
+ ReduceL1                          Supported**     Supported      Supported
+ ReduceL2                          Supported**     Supported      Supported
+ ReduceLogSum                      Supported**     Supported      Supported
+ ReduceLogSumExp                   Supported**     Supported      Not Supported
+ ReduceMax                         Supported**     Supported      Supported
+ ReduceMean                        Supported**     Supported      Supported
+ ReduceMin                         Supported**     Supported      Supported
+ ReduceOr                          Supported**     Supported      Supported****
+ ReduceProd                        Supported**     Supported      Supported
+ ReduceSum                         Supported**     Supported      Supported
+ ReduceSumSquare                   Supported**     Supported      Not Supported
+ RegionYolo                        Supported**     Supported      Supported****
+ ReorgYolo                         Supported**     Supported      Supported
+ Resample                          Supported**     Supported      Not Supported
+ Reshape                           Supported***    Supported      Supported
+ ReverseSequence                   Supported**     Supported      Supported****
+ RNN                               Supported       Not Supported  Supported
+ ROIPooling                        Supported       Supported*     Supported****
+ ScaleShift                        Supported***    Supported      Not Supported
+ ScatterUpdate                     Supported**     Not Supported  Not Supported
+ Select                            Supported       Supported      Supported
+ Selu                              Supported**     Supported      Supported****
+ ShuffleChannels                   Supported**     Supported      Supported
+ Sign                              Supported**     Supported      Supported
+ Sin                               Supported**     Supported      Supported
+ Sinh                              Supported**     Supported      Supported****
+ SimplerNMS                        Supported**     Supported      Not Supported
+ Slice                             Supported***    Supported      Not Supported
+ SoftMax                           Supported***    Supported      Supported
+ Softplus                          Supported**     Supported      Supported
+ Softsign                          Supported**     Supported      Not Supported
+ SpaceToDepth                      Supported**     Not Supported  Supported*
+ SpatialTransformer                Supported**     Not Supported  Not Supported
+ Split                             Supported***    Supported      Supported
+ Squeeze                           Supported**     Supported      Supported
+ StridedSlice                      Supported**     Supported      Supported*
+ Tan                               Supported**     Supported      Supported****
+ TensorIterator                    Supported       Not Supported  Supported
+ Tile                              Supported***    Supported**    Supported
+ TopK                              Supported**     Supported      Supported****
+ Unpooling                         Not Supported   Supported      Not Supported
+ Unsqueeze                         Supported**     Supported      Supported
+ Upsampling                        Not Supported   Supported      Not Supported
+================================= =============== ============== ==================

| `*` - support is limited to specific parameters. Refer to the "Known Layer Limitations" section for the device :doc:`from the list of supported `.
| `**` - support is implemented via :doc:`Extensibility mechanism `.

diff --git a/docs/articles_en/about_openvino/releasenotes_for_openvino.rst b/docs/articles_en/about_openvino/releasenotes_for_openvino.rst
index 49e4608a0e07e0..5af02c68d44f88 100644
--- a/docs/articles_en/about_openvino/releasenotes_for_openvino.rst
+++ b/docs/articles_en/about_openvino/releasenotes_for_openvino.rst
@@ -12,19 +12,19 @@ anywhere.
We are proud to announce the release of OpenVINO 2023.2, introducing a range of new features, improvements, and deprecations aimed at enhancing the developer experience.
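As a programmatic complement to the operation support tables above, the runtime itself can report which operations of a concrete model a given device supports. Below is a minimal sketch; the ``model.xml`` file name is an assumption for illustration only, not something shipped with OpenVINO:

.. code-block:: py

   import openvino as ov

   core = ov.Core()
   model = core.read_model("model.xml")  # hypothetical local IR file

   # query_model returns a {operation_friendly_name: device_name} map
   # covering only the operations the device can execute
   supported = core.query_model(model, "CPU")
   unsupported = {op.get_friendly_name() for op in model.get_ops()} - set(supported)
   print("Operations not supported on CPU:", unsupported)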
-New and changed in 2023.2
+New and changed in 2023.2
###########################

Summary of major features and improvements
++++++++++++++++++++++++++++++++++++++++++++

-* More Generative AI coverage and framework integrations to minimize code changes.
+* More Generative AI coverage and framework integrations to minimize code changes.

  * **Expanded model support for direct PyTorch model conversion** - automatically convert additional models directly from PyTorch or execute via ``torch.compile`` with OpenVINO as the backend.
  * **New and noteworthy models supported** - we have enabled models used for chatbots,
-    instruction following, code generation, and many more, including prominent models
+    instruction following, code generation, and many more, including prominent models
    like Llava, chatGLM, Bark (text to audio) and LCM (Latent Consistency Models, an optimized version of Stable Diffusion).
  * **Easier optimization and conversion of Hugging Face models** - compress LLM models
@@ -33,33 +33,33 @@ Summary of major features and improvements
  * **OpenVINO is now available on Conan** - a package manager which allows more seamless package management for large-scale projects for C and C++ developers.

-* Broader Large Language Model (LLM) support and more model compression techniques.
+* Broader Large Language Model (LLM) support and more model compression techniques.

  * Accelerate inference for LLMs on Intel® Core™ CPU and iGPU with the
-    use of Int8 model weight compression.
+    use of Int8 model weight compression.
  * Expanded model support for dynamic shapes for improved performance on GPU.
  * Preview support for the Int4 model format is now included. Int4 optimized model weights are now available to try on Intel® Core™ CPU and iGPU, to accelerate models like Llama 2 and chatGLM2.
  * The following Int4 model compression formats are supported for inference in runtime:
-
+
    * Generative Pre-training Transformer Quantization (GPTQ); GPTQ-compressed models can be accessed through the Hugging Face repositories.
    * Native Int4 compression through Neural Network Compression Framework (NNCF).

* More portability and performance to run AI at the edge, in the cloud, or locally.
-
+
  * **In 2023.1 we announced full support for ARM** architecture; now we have improved
-    performance by enabling FP16 model formats for LLMs and integrating additional
+    performance by enabling FP16 model formats for LLMs and integrating additional
    acceleration libraries to improve latency.
-
+
Support Change and Deprecation Notices
++++++++++++++++++++++++++++++++++++++++++

* The OpenVINO™ Development Tools package (pip install openvino-dev) is deprecated
-  and will be removed from installation options and distribution channels with
-  2025.0. To learn more, refer to the
+  and will be removed from installation options and distribution channels with
+  2025.0. To learn more, refer to the
  :doc:`OpenVINO Legacy Features and Components page `. To ensure optimal performance, install the OpenVINO package (pip install openvino), which includes essential components such as OpenVINO Runtime, OpenVINO Converter,
@@ -67,17 +67,16 @@ Support Change and Deprecation Notices

* Tools:

-  * :doc:`Deployment Manager `
-    is deprecated and will be removed in the 2024.0 release.
+  * Deployment Manager is deprecated and will be removed in the 2024.0 release.
  * Accuracy Checker is deprecated and will be discontinued with 2024.0.
-  * Post-Training Optimization Tool (POT) is deprecated and will be
-    discontinued with 2024.0.
+  * Post-Training Optimization Tool (POT) is deprecated and will be
+    discontinued with 2024.0.
  * Model Optimizer is deprecated and will be fully supported up until the 2025.0 release. Model conversion to the OpenVINO format should be performed through
-    OpenVINO Model Converter, which is part of the PyPI package. Follow the
+    OpenVINO Model Converter, which is part of the PyPI package. Follow the
    :doc:`Model Optimizer to OpenVINO Model Converter transition `
-    guide for smoother transition. Known limitations are TensorFlow model with
-    TF1 Control flow and object detection models. These limitations relate to
+    guide for a smoother transition. Known limitations are TensorFlow models with
+    TF1 control flow and object detection models. These limitations relate to
    the gap in TensorFlow direct conversion capabilities which will be addressed in upcoming releases.
  * PyTorch 1.13 support is deprecated in Neural Network Compression Framework (NNCF)
@@ -87,8 +86,8 @@ Support Change and Deprecation Notices
  * Intel® Gaussian & Neural Accelerator (Intel® GNA) will be deprecated in a future release. We encourage developers to use the Neural Processing Unit (NPU) for low-powered systems like Intel® Core™ Ultra or 14th generation and beyond.
-  * OpenVINO C++/C/Python 1.0 APIs will be discontinued with 2024.0.
+  * OpenVINO C++/C/Python 1.0 APIs will be discontinued with 2024.0.
-  * Python 3.7 support has been discontinued.
+  * Python 3.7 support has been discontinued.

OpenVINO™ Development Tools
++++++++++++++++++++++++++++++++++++++++++
@@ -99,63 +98,63 @@ List of components and their changes:

* :doc:`OpenVINO Model Converter tool ` now supports the original framework shape format.
* `Neural Network Compression Framework (NNCF) `__
-
+
  * Added data-free Int4 weight compression support for LLMs in OpenVINO IR with ``nncf.compress_weights()``.
  * Improved quantization time of LLMs with NNCF PTQ API for ``nncf.quantize()`` and ``nncf.quantize_with_accuracy_control()``.
  * Added support for SmoothQuant and ChannelAlignment algorithms in NNCF HyperParameter
-    Tuner for automatic optimization of their hyperparameters during quantization.
+    Tuner for automatic optimization of their hyperparameters during quantization.
  * Added quantization support for the ``IF`` operation of models in OpenVINO format to speed up such models.
  * NNCF Post-training Quantization for PyTorch backend is now supported with
-    ``nncf.quantize()`` and the common implementation of quantization algorithms.
-  * Added support for PyTorch 2.1. PyTorch 1.13 support has been deprecated.
+    ``nncf.quantize()`` and the common implementation of quantization algorithms.
+  * Added support for PyTorch 2.1. PyTorch 1.13 support has been deprecated.

-OpenVINO™ Runtime (previously known as Inference Engine)
+OpenVINO™ Runtime (previously known as Inference Engine)
---------------------------------------------------------

-* OpenVINO Common
+* OpenVINO Common

  * Operations for reference implementations updated from legacy API to API 2.0.
-  * Symbolic transformation introduced the ability to remove Reshape operations
+  * Symbolic transformation introduced the ability to remove Reshape operations
    surrounding MatMul operations.

-* OpenVINO Python API
+* OpenVINO Python API

  * Better support for the ``openvino.properties`` submodule, which now allows the use
-    of properties directly, without additional parenthesis. Example use-case:
+    of properties directly, without additional parentheses. Example use-case:
    ``{openvino.properties.cache_dir: "./some_path/"}``, as shown in the sketch below.
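    A rough sketch of the new property style (the ``model.xml`` path and the ``AUTO`` device choice are assumptions for illustration only):

    .. code-block:: py

       import openvino as ov
       import openvino.properties as props

       core = ov.Core()
       # properties now work directly as dictionary keys,
       # without the trailing parentheses previously required
       core.set_property({props.cache_dir: "./some_path/"})
       compiled_model = core.compile_model("model.xml", "AUTO")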
  * Added missing properties: ``execution_devices`` and ``loaded_from_cache``.
  * Improved error propagation on imports from OpenVINO package.

-* AUTO device plug-in (AUTO)
+* AUTO device plug-in (AUTO)

  * Provided an additional option to improve performance of cumulative throughput
-    (or MULTI), where part of CPU resources can be reserved for GPU inference
+    (or MULTI), where part of the CPU resources can be reserved for GPU inference
    when GPU and CPU are both used for inference (using ``ov::hint::enable_cpu_pinning(true)``). This avoids the performance issue of CPU resource contention where there
-    is not enough CPU resources to schedule tasks for GPU
+    are not enough CPU resources to schedule tasks for the GPU
    (`PR #19214 `__).

* CPU

  * Introduced support of GPTQ quantized Int4 models, with improved performance
-    compared to Int8 weight-compressed or FP16 models. In the CPU plugin,
+    compared to Int8 weight-compressed or FP16 models. In the CPU plugin,
    the gain in performance is achieved by FullyConnected acceleration with 4-bit weight decompression (`PR #20607 `__).
  * Improved performance of Int8 weight-compressed large language models on some platforms, such as 13th Gen Intel Core
-    (`PR #20607 `__).
+    (`PR #20607 `__).
  * Further reduced memory consumption of select large language models on
-    CPU platforms with AMX and AVX512 ISA, by eliminating extra memory copy
-    with a unified weight layout
-    (`PR #19575 `__).
+    CPU platforms with AMX and AVX512 ISA, by eliminating an extra memory copy
+    with a unified weight layout
+    (`PR #19575 `__).
  * Fixed a performance issue observed in the 2023.1 release on select Xeon CPU
-    platform with improved thread workload partitioning matching L2 cache
-    utilization
+    platforms, with improved thread workload partitioning matching L2 cache
+    utilization
    (`PR #20436 `__).
  * Extended support of configuration (enable_cpu_pinning) on Windows platforms to allow fine-grained control of the CPU resources used for inference
@@ -168,34 +167,34 @@ OpenVINO™ Runtime (previously known as Inference Engine)

* GPU

  * Enhanced inference performance for Large Language Models.
-  * Introduced int8 weight compression to boost LLM performance.
+  * Introduced Int8 weight compression to boost LLM performance
    (`PR #19548 `__).
  * Implemented Int4 GPTQ weight compression for improved LLM performance.
  * Optimized constant weights for LLMs, resulting in better memory usage and faster model loading.
  * Optimized gemm (general matrix multiply) and fc (fully connected) for
-    enhanced performance on iGPU.
+    enhanced performance on iGPU
    (`PR #19780 `__).
  * Completed GPU plugin migration to API 2.0.
  * Added support for oneDNN 3.3 version.

* Model Import Updates (see the conversion sketch after this list)

-  * TensorFlow Framework Support
+  * TensorFlow Framework Support

    * Supported conversion of models from memory in keras.Model and tf.function formats. `PR #19903 `__
    * Supported TF 2.14. `PR #20385 `__

-  * PyTorch Framework Support
+  * PyTorch Framework Support

    * Supported Int4 GPTQ models.
-    * New operations supported.
+    * New operations supported.

-  * ONNX Framework Support
+  * ONNX Framework Support

-    * Added support for ONNX version 1.14.1
+    * Added support for ONNX version 1.14.1
      (`PR #18359 `__)
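The in-memory TensorFlow conversion mentioned under "TensorFlow Framework Support" above could look roughly as follows; the toy ``keras.Model`` and the output file name are assumptions for illustration, not models or paths from these notes:

.. code-block:: py

   import tensorflow as tf
   import openvino as ov

   # a toy model, assumed here purely for illustration
   model = tf.keras.Sequential([tf.keras.layers.Dense(8, input_shape=(4,))])

   # convert the live keras.Model object directly - no SavedModel export needed
   ov_model = ov.convert_model(model)
   ov.save_model(ov_model, "model.xml")  # writes model.xml and model.bin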
This enhancement supports stateful graphs, such as tracking algorithms, and -enables the use of source calculators. +enables the use of source calculators. (`see additional documentation `__) * Mediapipe framework has been updated to the version 0.10.3. * model_api used in the openvino inference Mediapipe calculator has been updated - and included with all its features. -* Added a demo showcasing gRPC streaming with Mediapipe graph. + and included with all its features. +* Added a demo showcasing gRPC streaming with Mediapipe graph. (`see here `__) * Added parameters for gRPC quota configuration and changed default gRPC channel arguments to add rate limits. It will minimize the risks of impact of the service - from uncontrolled flow of requests. + from uncontrolled flow of requests. * Updated python clients requirements to match wide range of python versions from 3.6 to 3.11 Learn more about the changes in https://github.com/openvinotoolkit/model_server/releases @@ -233,7 +232,7 @@ Jupyter Notebook Tutorials Cross-lingual Books Alignment With Transformers * `LLM chatbot `__ Create LLM-powered Chatbot - + * Updated to include Int4 weight compression and Zephyr 7B model * `Bark Text-to-Speech `__ @@ -295,18 +294,18 @@ Known issues | **ID - 121959** | *Component* - CPU plugin | *Description:* -| During inference using latency hint on selected hybrid CPU platforms - (such as 12th or 13th Gen Intel CORE), there is a sporadic occurrence of - increased latency caused by the operating system scheduling of P-cores or +| During inference using latency hint on selected hybrid CPU platforms + (such as 12th or 13th Gen Intel CORE), there is a sporadic occurrence of + increased latency caused by the operating system scheduling of P-cores or E-cores during OpenVINO initialization. | *Workaround:* -| This will be fixed in the next OpenVINO release. +| This will be fixed in the next OpenVINO release. | **ID - 123101** -| *Component* - GPU plugin +| *Component* - GPU plugin | *Description:* | Hung up of GPU plugin on A770 Graphics (dGPU) in case of - large batch size (1750). + large batch size (1750). | *Workaround:* | Decrease the batch size, wait for fixed driver released. @@ -320,19 +319,19 @@ three types of operating systems: Windows, Linux, and macOS. 
|| Component || License | Location |
+================================+===================================+=================+=================+=======================+=================================================+
|| OpenVINO (Inference Engine) C++ Runtime || Dual licensing: || /runtime/* |
-|| Unified API to integrate the inference with application logic || Intel® OpenVINO™ Distribution License (Version May 2021) || /runtime/include/* |
-|| OpenVINO (Inference Engine) Headers || Apache 2.0 || |
+|| Unified API to integrate the inference with application logic || Intel® OpenVINO™ Distribution License (Version May 2021) || /runtime/include/* |
+|| OpenVINO (Inference Engine) Headers || Apache 2.0 || |
+--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+
|| OpenVINO (Inference Engine) Python API || Apache 2.0 || /python/* |
+--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+
|| OpenVINO (Inference Engine) Samples || Apache 2.0 || /samples/* |
|| Samples that illustrate OpenVINO C++/Python API usage || || |
+--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+
-|| [Deprecated] Deployment manager || Apache 2.0 || /tools/deployment_manager/* |
-|| The Deployment Manager is a Python* command-line tool that || || |
-|| creates a deployment package by assembling the model, IR files, || || |
-|| your application, and associated dependencies into a runtime || || |
-|| package for your target device. || || |
+|| [Deprecated] Deployment manager || Apache 2.0 || /tools/deployment_manager/* |
+|| The Deployment Manager is a Python* command-line tool that || || |
+|| creates a deployment package by assembling the model, IR files, || || |
+|| your application, and associated dependencies into a runtime || || |
+|| package for your target device. || || |
+--------------------------------------------------------------------+-----------------------------------------------------------+-------------------------------------------------+
@@ -360,7 +359,7 @@ enabled hardware, software or service activation.
Learn more at `http://www.intel.com/ `__ or from the OEM or retailer.
-No computer system can be absolutely secure.
+No computer system can be absolutely secure.

Intel, Atom, Arria, Core, Movidius, Xeon, OpenVINO, and the Intel logo are trademarks of Intel Corporation in the U.S. and/or other countries.
@@ -371,18 +370,18 @@ Other names and brands may be claimed as the property of others.

Copyright © 2023, Intel Corporation. All rights reserved.

-For more complete information about compiler optimizations, see our Optimization Notice.
-
-Performance varies by use, configuration and other factors. Learn more at
+For more complete information about compiler optimizations, see our Optimization Notice.
+
+Performance varies by use, configuration and other factors. Learn more at
`www.Intel.com/PerformanceIndex `__.

Download
+++++++++++++++++++++++++++++++++++++++++++++

`The OpenVINO product selector tool `__
-provides easy access to the right packages that match your desired OS, version,
+provides easy access to the right packages that match your desired OS, version,
- + diff --git a/docs/articles_en/documentation/openvino_ir.rst b/docs/articles_en/documentation/openvino_ir.rst index ab8e8d320e4310..94a11b06e899a0 100644 --- a/docs/articles_en/documentation/openvino_ir.rst +++ b/docs/articles_en/documentation/openvino_ir.rst @@ -5,8 +5,8 @@ OpenVINO IR format .. meta:: - :description: OpenVINO IR, known as Intermediate Representation, is the result - of model conversion in OpenVINO and is represented by two files: + :description: OpenVINO IR, known as Intermediate Representation, is the result + of model conversion in OpenVINO and is represented by two files: an XML and a binary file. .. toctree:: @@ -19,13 +19,21 @@ OpenVINO IR format openvino_docs_ops_broadcast_rules openvino_docs_MO_DG_prepare_model_convert_model_IR_suitable_for_INT8_inference -The models, built and trained using various frameworks, can be large and architecture-dependent. To successfully run inference from any device and maximize the benefits of OpenVINO tools, you can convert the model to the OpenVINO Intermediate Representation (IR) format. -OpenVINO IR is the proprietary model format of OpenVINO. It is produced after converting a model with model conversion API. Model conversion API translates the frequently used deep learning operations to their respective similar representation in OpenVINO and tunes them with the associated weights and biases from the trained model. The resulting IR contains two files: +OpenVINO IR is the proprietary model format of OpenVINO, benefiting from the full extent +of its features. It is obtained by converting a model from one of the +:doc:`supported formats ` +using the model conversion API or OpenVINO Converter. The process translates common +deep learning operations of the original network to their counterpart representations in +OpenVINO and tunes them with the associated weights and biases. +The resulting OpenVINO IR format contains two files: * ``.xml`` - Describes the model topology. * ``.bin`` - Contains the weights and binary data. +:doc:`See why converting to OpenVINO IR is recommended ` + + IR Structure ############ @@ -148,11 +156,10 @@ Here is an example of a small IR XML file that corresponds to a graph from the p -The IR does not use explicit data nodes described in the previous section. In contrast, properties of data such as tensor dimensions and their data types are described as properties of input and output ports of operations. +The IR does not use explicit data nodes described in the previous section. In contrast, properties of data such as tensor dimensions and their data types are described as properties of input and output ports of operations. Additional Resources #################### * :doc:`IR and Operation Sets ` -* :doc:`OpenVINO API 2.0 transition guide ` diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst index f0a306b642617b..08fa0ef7beb814 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.rst @@ -28,7 +28,7 @@ Sign **Outputs** -* **1**: The result of element-wise *Sign* operation. A tensor of type *T* with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive). +* **1**: The result of element-wise *Sign* operation. 
A tensor of type *T* with the elements of the input tensor mapped to -1 (if negative), 0 (if zero), or 1 (if positive); NaN is returned for NaN inputs.

**Types**

diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst
index dfedd4ef01dd7b..311e1bc2529bd9 100644
--- a/docs/articles_en/documentation/openvino_legacy_features.rst
+++ b/docs/articles_en/documentation/openvino_legacy_features.rst
@@ -10,13 +10,10 @@ Legacy Features and Components

  OpenVINO Development Tools package
  Model Optimizer / Conversion API
-  Deploy Application with Deployment Manager
-  OpenVINO API 2.0 transition
  Open Model ZOO
-  Apache MXNet, Caffe, and Kaldi

-Since OpenVINO has grown very rapidly in recent years, some of its features
+Since OpenVINO has grown very rapidly in recent years, a number of its features
and components have been replaced by other solutions. Some of them are still supported to ensure OpenVINO users are given enough time to adjust their projects before the features are fully discontinued.
@@ -49,14 +46,6 @@ offering.

| :doc:`See how to use OVC `
| :doc:`See how to transition from the legacy solution `

-| **OpenVINO Deployment Manager**
-| *New solution:* the tool is no longer needed
-| *Old solution:* discontinuation planned for OpenVINO 2024.0
-|
-| It is recommended to explore alternative deployment solutions available in OpenVINO.
-| :doc:`See how to deploy locally `
-
-
| **Open Model ZOO**
| *New solution:* users are encouraged to use public model repositories
@@ -74,12 +63,9 @@ offering.

| **Apache MXNet, Caffe, and Kaldi model formats**
| *New solution:* conversion to ONNX via external tools
-| *Old solution:* model support will be discontinued with OpenVINO 2024.0
+| *Old solution:* model support discontinued with OpenVINO 2024.0
|
-| Since these three model formats proved to be far less popular among OpenVINO users
-  than the remaining ones, their support has been discontinued. Converting them to the
-  ONNX format is a possible way of retaining them in the OpenVINO-based pipeline.
-| :doc:`See the previous conversion instructions `
+| `See the previous conversion instructions `__
| :doc:`See the currently supported frameworks `
@@ -94,14 +80,11 @@ offering.

-| **Old Inference API 1.0**
+| **Inference API 1.0**
| *New solution:* API 2.0 launched in OpenVINO 2022.1
-| *Old solution:* discontinuation planned for OpenVINO 2024.0
+| *Old solution:* discontinued with OpenVINO 2024.0
|
-| API 1.0 (Inference Engine and nGraph) is now deprecated. It can still be
-  used but is not recommended. Its discontinuation is planned for 2024.
-| :doc:`See how to transition to API 2.0 `
-
+| `The last version supporting API 1.0 `__

| **Compile tool**
| *New solution:* the tool is no longer needed
@@ -110,21 +93,21 @@ offering.

| Compile tool is now deprecated. If you need to compile a model for inference on a specific device, use the following script:

-  .. tab-set::
+  .. tab-set::

-    .. tab-item:: Python
-      :sync: py
+    .. tab-item:: Python
+      :sync: py

-      .. doxygensnippet:: docs/snippets/export_compiled_model.py
-        :language: python
-        :fragment: [export_compiled_model]
+      .. doxygensnippet:: docs/snippets/export_compiled_model.py
+        :language: python
+        :fragment: [export_compiled_model]

-    .. tab-item:: C++
-      :sync: cpp
-      .. doxygensnippet:: docs/snippets/export_compiled_model.cpp
-        :language: cpp
-        :fragment: [export_compiled_model]
+    .. tab-item:: C++
+      :sync: cpp

+      .. doxygensnippet:: docs/snippets/export_compiled_model.cpp
+        :language: cpp
+        :fragment: [export_compiled_model]

| :doc:`see which devices support import / export `
| :doc:`Learn more on preprocessing steps `

diff --git a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst
index 55d3494ae789be..a0028a2d7e51de 100644
--- a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst
+++ b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst
@@ -5,7 +5,7 @@ Install OpenVINO™ Development Tools

.. meta::
-  :description: Learn how to install OpenVINO™ Development Tools on Windows,
+  :description: Learn how to install OpenVINO™ Development Tools on Windows,
    Linux, and macOS operating systems, using a PyPI package.

OpenVINO Development Tools is a set of utilities that make it easy to develop and optimize models and applications for OpenVINO. It provides the following tools:
@@ -20,9 +20,9 @@ The instructions on this page show how to install OpenVINO Development Tools. If

In both cases, Python 3.8 - 3.11 needs to be installed on your machine before starting.

-.. note::
+.. note::

-  From the 2022.1 release, the OpenVINO™ Development Tools can only be installed via PyPI.
+  From the 2022.1 release, the OpenVINO™ Development Tools can only be installed via PyPI.

.. _python_developers:

For Python Developers
#####################

If you are a Python developer, follow the steps in the :ref:`Installing OpenVINO Development Tools ` section on this page to install it. Installing OpenVINO Development Tools will also install OpenVINO Runtime as a dependency, so you don’t need to install OpenVINO Runtime separately. This option is recommended for new users.
-
+
.. _cpp_developers:

For C/C++ Developers
@@ -64,7 +64,7 @@ Installation in a New Environment
+++++++++++++++++++++++++++++++++

If you do not have an environment with a deep learning framework for the input model, or you encounter any compatibility issues between OpenVINO
-and your version of deep learning framework, you may install OpenVINO Development Tools with validated versions of frameworks into a new environment.
+and your version of the deep learning framework, you may install OpenVINO Development Tools with validated versions of frameworks into a new environment.

Step 1. Set Up Python Virtual Environment
-----------------------------------------
@@ -75,19 +75,19 @@ Create a virtual Python environment to avoid dependency conflicts. To create a v

.. tab-set::

  .. tab-item:: Windows
    :sync: windows
-
+
    .. code-block:: sh
-
+
      python -m venv openvino_env

  .. tab-item:: Linux and macOS
    :sync: linux-and-macos
-
+
    .. code-block:: sh
-
+
      python3 -m venv openvino_env
-
-
+
+
Step 2. Activate Virtual Environment
------------------------------------

Activate the newly created Python virtual environment by issuing this command:

.. tab-set::

  .. tab-item:: Windows
    :sync: windows
-
+
    .. code-block:: sh
-
+
      openvino_env\Scripts\activate

  .. tab-item:: Linux and macOS
    :sync: linux-and-macos

    .. code-block:: sh
-
+
      source openvino_env/bin/activate
.. important::

@@ -138,7 +138,7 @@ To install and configure the components of the development package together with

where the ``extras`` parameter specifies the source deep learning framework for the input model
-and is one or more of the following values separated with "," : ``caffe``, ``kaldi``, ``mxnet``, ``onnx``, ``pytorch``, ``tensorflow``, ``tensorflow2``.
+and is one or more of the following values, separated with ",": ``caffe``, ``kaldi``, ``mxnet``, ``onnx``, ``pytorch``, ``tensorflow``, ``tensorflow2``.

For example, to install and configure dependencies required for working with TensorFlow 2.x and ONNX models, use the following command:

  pip install openvino-dev[tensorflow2,onnx]

-.. note::
+.. note::

  Model conversion API support for the TensorFlow 1.x environment has been deprecated. Use the ``tensorflow2`` parameter to install a TensorFlow 2.x environment that can convert both TensorFlow 1.x and 2.x models. If your model isn't compatible with the TensorFlow 2.x environment, use the ``tensorflow`` parameter to install the TensorFlow 1.x environment. The TF 1.x environment is provided only for legacy compatibility reasons.
@@ -197,7 +197,6 @@ Try the :doc:`C++ Quick Start Example ` page for other C++ example applications to get you started with OpenVINO, such as:

* :doc:`Basic object detection with the Hello Reshape SSD C++ sample `
-* :doc:`Automatic speech recognition C++ sample `

Learn OpenVINO Development Tools
++++++++++++++++++++++++++++++++

diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst
deleted file mode 100644
index 56938c54b151e2..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.rst
+++ /dev/null
@@ -1,118 +0,0 @@
-.. {#openvino_2_0_transition_guide}
-
-OpenVINO™ API 2.0 Transition Guide
-====================================
-
-
-.. meta::
-  :description: A detailed information on a new version of OpenVINO™ API 2.0,
-    as well as the new OpenVINO IR model format: IR v11.
-
-
-.. toctree::
-  :maxdepth: 1
-  :hidden:
-
-  openvino_2_0_deployment
-  openvino_2_0_inference_pipeline
-  openvino_2_0_configure_devices
-  openvino_2_0_preprocessing
-  openvino_2_0_model_creation
-
-
-This guide introduces the new OpenVINO™ API: API 2.0, as well as the new OpenVINO IR model format: IR v11. Here, you will find comparisons of their "old" and "new" versions.
-
-Introduction of API 2.0
-#######################
-
-Versions of OpenVINO prior to 2022.1 required changes in the application logic when migrating an app from other frameworks, such as TensorFlow, ONNX Runtime, PyTorch, PaddlePaddle, etc. The changes were required because:
-
-- Model conversion API changed input precisions for some inputs. For example, neural language processing models with ``I64`` inputs were changed to include ``I32`` ones.
-- Model conversion API changed layouts for TensorFlow models (see the :doc:`Layouts in OpenVINO `). It lead to unusual requirement of using the input data with a different layout than that of the framework:
-
-.. image:: _static/images/tf_openvino.svg
-  :alt: tf_openvino
-
-- Inference Engine API (`InferenceEngine::CNNNetwork `__) applied some conversion rules for input and output precisions due to limitations in device plugins.
-- Users needed to specify input shapes during model conversions in model conversion API, and work with static shapes in the application. - -OpenVINO™ 2022.1 has introduced API 2.0 (also called OpenVINO API v2) to align the logic of working with models as it is done in their origin frameworks - no layout and precision changes, operating with tensor names and indices to address inputs and outputs. OpenVINO Runtime has combined Inference Engine API used for inference and nGraph API targeted to work with models and operations. API 2.0 has a common structure, naming convention styles, namespaces, and removes duplicated structures. For more details, see the :doc:`Changes to Inference Pipeline in OpenVINO API v2 `. - -.. note:: - - Your existing applications will continue to work with OpenVINO Runtime 2022.1, as normal. Although, migration to API 2.0 is strongly recommended. This will allow you to use additional features, such as :doc:`Preprocessing ` and :doc:`Dynamic shapes support `. - - -The New OpenVINO IR v11 -####################### - -To support these features, OpenVINO has introduced OpenVINO IR v11, which is now the default version for model conversion API. The model represented in OpenVINO IR v11 fully matches the original model in the original framework format in terms of inputs and outputs. It is also not required to specify input shapes during conversion, which results in OpenVINO IR v11 containing ``-1`` to denote undefined dimensions. For more details on how to fully utilize this feature, see :doc:`Working with dynamic shapes `. For information on how to reshape to static shapes in application, see :doc:`Changing input shapes `. - -OpenVINO IR v11 is fully compatible with applications written with the Inference Engine API used by older versions of OpenVINO. This backward compatibility is allowed thanks to additional runtime information included in OpenVINO IR v11. This means that when OpenVINO IR v11 is read by an application based on Inference Engine, it is internally converted to OpenVINO IR v10. - -OpenVINO IR v11 is supported by all OpenVINO Development tools including Post-Training Optimization Tool, Benchmark app, etc. - -Backward Compatibility for OpenVINO IR v10 -########################################## - -API 2.0 also supports backward compatibility for models of OpenVINO IR v10. If you have OpenVINO IR v10 files, they can also be fed to OpenVINO Runtime. For more details, see the :doc:`migration steps `. - -Some of the OpenVINO Development Tools also support both OpenVINO IR v10 and v11 as an input: - -- Accuracy checker uses API 2.0 for model accuracy measurement by default. It also supports switching to the old API by using the ``--use_new_api False`` command-line parameter. Both launchers accept OpenVINO IR v10 and v11, but in some cases configuration files should be updated. For more details, see the `Accuracy Checker documentation `__. -- :doc:`Compile tool ` compiles the model to be used in API 2.0 by default. To use the resulting compiled blob under the Inference Engine API, the additional ``ov_api_1_0`` option should be passed. - -However, Post-Training Optimization Tool of OpenVINO 2022.1 does not support OpenVINO IR v10. They require the latest version of model conversion API to generate OpenVINO IR v11 files. - -.. note:: - - To quantize your OpenVINO IR v10 models to run with OpenVINO 2022.1, download and use Post-Training Optimization Tool of OpenVINO 2021.4. - - -.. 
_differences_api20_ie: - -Differences in API 2.0 and Inference Engine API Behaviors -######################################################### - -Inference Engine and nGraph APIs do not become deprecated with the introduction of the new API, and they can still be used in applications. However, it is highly recommended to migrate to API 2.0, as it offers more features (further extended in future releases), such as: - -- :doc:`Working with dynamic shapes `, which increases performance when working with compatible models such as NLP (Neural Language Processing) and super-resolution models. -- :doc:`Preprocessing of the model `, which adds preprocessing operations to inference models and fully occupies the accelerator, freeing CPU resources. - -To understand the differences between Inference Engine API and API 2.0, see the definitions of two types of behaviors first: - -- **Old behavior** of OpenVINO assumes that: - - - Model Conversion API can change input element types and order of dimensions (layouts) for the model from the original framework. - - Inference Engine can override input and output element types. - - Inference Engine API uses operation names to address inputs and outputs (e.g. `InferenceEngine::InferRequest::GetBlob `__). - - Inference Engine API does not support compiling of models with dynamic input shapes. - -- **New behavior** implemented in 2022.1 assumes full model alignment with the framework: - - - Model Conversion API preserves input element types and order of dimensions (layouts), and stores tensor names from the original models. - - OpenVINO Runtime 2022.1 reads models in any format (OpenVINO IR v10, OpenVINO IR v11, TensorFlow, ONNX, PaddlePaddle, etc.). - - API 2.0 uses tensor names for addressing, which is the standard approach among the compatible model frameworks. - - API 2.0 can also address input and output tensors by the index. Some model formats like ONNX are sensitive to the input and output order, which is preserved by OpenVINO 2022.1. - -The table below demonstrates which behavior, **old** or **new**, is used for models based on the two APIs. - -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ -| API | OpenVINO IR v10 | OpenVINO IR v11 | ONNX Files | Models Created in Code | -+================================+=================+=================+=================+========================+ -| Inference Engine / nGraph APIs | Old | Old | Old | Old | -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ -| API 2.0 | Old | New | New | New | -+--------------------------------+-----------------+-----------------+-----------------+------------------------+ - -More Information -#################### - -See the following pages to understand how to migrate Inference Engine-based applications to API 2.0: - -- :doc:`Installation & Deployment ` -- :doc:`OpenVINO™ Common Inference pipeline ` -- :doc:`Preprocess your model ` -- :doc:`Configure device ` -- :doc:`OpenVINO™ Model Creation ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst deleted file mode 100644 index f31bcfdd83cea1..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.rst +++ /dev/null @@ -1,683 +0,0 @@ -.. 
{#openvino_2_0_inference_pipeline} - -Inference Pipeline -================== - - -.. meta:: - :description: The inference pipeline is a set of steps to be - performed in a specific order to infer models with OpenVINO™ - Runtime. - - -To infer models with OpenVINO™ Runtime, you usually need to perform the following steps in the application pipeline: - -1. `Create a Core object <#create-a-core-object>`__. - - * 1.1. `(Optional) Load extensions <#optional-load-extensions>`__ - -2. `Read a model from a drive <#read-a-model-from-a-drive>`__. - - * 2.1. `(Optional) Perform model preprocessing <#optional-perform-model-preprocessing>`__. - -3. `Load the model to the device <#load-the-model-to-the-device>`__. -4. `Create an inference request <#create-an-inference-request>`__. -5. `Fill input tensors with data <#fill-input-tensors-with-data>`__. -6. `Start inference <#start-inference>`__. -7. `Process the inference results <#process-the-inference-results>`__. - -Based on the steps, the following code demonstrates how to change the application code to migrate to API 2.0. - -1. Create a Core Object -####################### - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:create_core - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:create_core - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:create_core - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:create_core - - -1.1 (Optional) Load Extensions -++++++++++++++++++++++++++++++ - -To load a model with custom operations, you need to add extensions for these operations. -It is highly recommended to use :doc:`OpenVINO Extensibility API ` -to write extensions. However, you can also load the old extensions to the new OpenVINO™ Runtime: - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:load_old_extension - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:load_old_extension - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:load_old_extension - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:load_old_extension - - -2. Read a Model from a Drive -############################ - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:read_model - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:read_model - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:read_model - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:read_model - - -Reading a model has the same structure as the example in the :doc:`model creation migration guide `. 
- -You can combine reading and compiling a model into a single call ``ov::Core::compile_model(filename, devicename)``. - - -2.1 (Optional) Perform Model Preprocessing -++++++++++++++++++++++++++++++++++++++++++ - -When the application input data does not perfectly match the model input format, -preprocessing may be necessary. See :doc:`preprocessing in API 2.0 ` for more details. - - -3. Load the Model to the Device -############################### - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:compile_model - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:compile_model - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:compile_model - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:compile_model - - -If you need to configure devices with additional parameters for OpenVINO Runtime, refer to :doc:`Configuring Devices `. - - -4. Create an Inference Request -############################## - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:create_infer_request - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:create_infer_request - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:create_infer_request - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:create_infer_request - - -5. Fill Input Tensors with Data -############################### - -**Inference Engine API** - -The Inference Engine API fills inputs with data of the ``I32`` precision (**not** aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - - .. tab-item:: Model created in code - :sync: model - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_input_tensor - - -**API 2.0** - -API 2.0 fills inputs with data of the ``I64`` precision (aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: C - :sync: c - - .. 
doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_v10 - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - - .. tab-item:: Model created in code - :sync: model-created-in-code - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_input_tensor_aligned - - -6. Start Inference -################## - -**Inference Engine API** - -.. tab-set:: - - .. tab-item:: Sync - :sync: sync - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:inference - - .. tab-item:: Async - :sync: async - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:start_async_and_wait - - -**API 2.0** - -.. tab-set:: - - .. tab-item:: Sync - :sync: sync - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:inference - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:inference - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:inference - - .. tab-item:: Async - :sync: async - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:start_async_and_wait - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:start_async_and_wait - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:start_async_and_wait - - -7. Process the Inference Results -################################ - -**Inference Engine API** - -The Inference Engine API processes outputs as they are of the ``I32`` precision (**not** aligned with the original model): - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. 
doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - - .. tab-item:: Model created in code - :sync: model - - .. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ie_common.cpp - :language: cpp - :fragment: ie:get_output_tensor - - -**API 2.0** - -API 2.0 processes outputs as they are of: - -* the ``I32`` precision (**not** aligned with the original model) for OpenVINO IR v10 models, to match the :ref:`old behavior `. -* the ``I64`` precision (aligned with the original model) for OpenVINO IR v11, ONNX, ov::Model, PaddlePaddle and TensorFlow models, to match the :ref:`new behavior `. - -.. tab-set:: - - .. tab-item:: IR v10 - :sync: ir-v10 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_v10 - - .. tab-item:: IR v11 - :sync: ir-v11 - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: ONNX - :sync: onnx - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - - .. tab-item:: Model created in code - :sync: model-created-in-code - - .. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_common.py - :language: python - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_common.cpp - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - .. tab-item:: C - :sync: c - - .. 
doxygensnippet:: docs/snippets/ov_common.c - :language: cpp - :fragment: ov_api_2_0:get_output_tensor_aligned - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst deleted file mode 100644 index 4108c23b03763e..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.rst +++ /dev/null @@ -1,302 +0,0 @@ -.. {#openvino_2_0_configure_devices} - -Configuring Devices -=================== - - -.. meta:: - :description: OpenVINO Runtime API 2.0 has introduced properties that unify - metrics and configuration key concepts, which simplifies the - configuration of inference devices. - - -The Inference Engine API provides the ability to configure devices with configuration keys and obtain device-specific metrics. The values retrieved from `InferenceEngine::Core::GetConfig `__ are requested by the string name, while the return type is `InferenceEngine::Parameter `__, which leaves users unaware of what type is actually stored in the parameter. - -API 2.0 solves these issues by introducing :doc:`properties `, which unify metrics and configuration key concepts. The main advantage is that they have the C++ type: - -.. code-block:: cpp - - static constexpr Property full_name{"FULL_DEVICE_NAME"}; - - -where the property can be requested from an inference device as: - - -.. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_ro_property - - -The snippets in the following sections demonstrate the device configurations for migrating from Inference Engine to API 2.0. - -.. note:: - - The Inference Engine API is a **legacy solution** and it is recommended to use API 2.0. If you want to learn more about Inference Engine API, its configuration and how to obtain device-specific metrics from it, check the following `article `__ from the 2021.4 version of OpenVINO documentation. - -Setting Configuration Values -############################ - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_set_config - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_load_network - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_set_config - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_set_config - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_load_network - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_set_config - - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_set_property - - .. 
tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_compile_model - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_set_property - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Devices - :sync: devices - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_set_property - - .. tab-item:: Model Loading - :sync: model-loading - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_compile_model - - .. tab-item:: Execution - :sync: execution - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_set_property - - -Getting Information -################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_config - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_metric - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_set_config - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: executable_network_get_metric - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_config - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_metric - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_set_config - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: executable_network_get_metric - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_rw_property - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: core_get_ro_property - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_get_rw_property - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.cpp - :language: cpp - :fragment: compiled_model_get_ro_property - - .. tab-item:: C - :sync: c - - .. tab-set:: - - .. tab-item:: Device Configuration - :sync: device-configuration - - .. 
doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_rw_property - - .. tab-item:: Device metrics - :sync: device-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: core_get_ro_property - - .. tab-item:: Execution config - :sync: execution-config - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_get_rw_property - - .. tab-item:: Execution metrics - :sync: execution-metrics - - .. doxygensnippet:: docs/snippets/ov_properties_migration.c - :language: c - :fragment: compiled_model_get_ro_property - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst deleted file mode 100644 index 8801091f9a4b3b..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst +++ /dev/null @@ -1,246 +0,0 @@ -.. {#openvino_2_0_deployment} - -Installation & Deployment -========================= - - -.. meta:: - :description: OpenVINO™ API 2.0 focuses on the use of development tools and - deployment of applications; it also simplifies migration from - different frameworks to OpenVINO. - - -One of the main concepts for OpenVINO™ API 2.0 is being "easy to use", which includes: - -* Simplification of migration from different frameworks to OpenVINO. - -* Organization of OpenVINO. -* Usage of development tools. -* Development and deployment of OpenVINO-based applications. - - -To accomplish that, the OpenVINO 2022.1 release introduced significant changes to the installation -and deployment processes. Further changes were implemented in 2023.1, aiming to make the installation -process even simpler. - -.. tip:: - - These instructions are largely deprecated and should be used for versions prior to 2023.1. - - The OpenVINO Development Tools package is being deprecated and will be discontinued entirely in 2025. - With this change, the OpenVINO Runtime package has become the default choice for installing the - software. It now includes all components necessary to utilize OpenVINO's functionality. - - - -The Installer Package Contains OpenVINO™ Runtime Only -##################################################### - -Since OpenVINO 2022.1, development tools have been distributed only via `PyPI `__, and are no longer included in the OpenVINO installer package. For a list of these components, refer to the :doc:`installation overview ` guide. Benefits of this approach include: - -* simplification of the user experience - in previous versions, installation and usage of OpenVINO Development Tools differed from one distribution type to another (the OpenVINO installer vs. PyPI), -* ensuring that dependencies are handled properly via the PIP package manager, and supporting virtual environments for development tools. - -The structure of the OpenVINO 2022.1 installer package has been organized as follows: - -* The ``runtime`` folder includes headers, libraries and CMake interfaces. -* The ``tools`` folder contains :doc:`the compile tool `, :doc:`deployment manager `, and a set of ``requirements.txt`` files with links to the corresponding versions of the ``openvino-dev`` package. -* The ``python`` folder contains the Python version for OpenVINO Runtime. 
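For instance, with the ``python`` folder added to ``PYTHONPATH`` (or with the ``openvino`` package installed from PyPI), a quick sanity check of the bundled runtime can be run from Python. This is a minimal sketch, not part of the original package documentation; the reported device list depends on the host:

.. code-block:: py

   # Minimal sanity check of the OpenVINO Runtime Python API (2022.1 and later).
   from openvino.runtime import Core, get_version

   print(get_version())           # version string of the runtime build
   core = Core()
   print(core.available_devices)  # e.g. ['CPU', 'GPU'], depending on the host

If the import succeeds and at least one device is listed, the runtime part of the package is functional.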
- -Installing OpenVINO Development Tools via PyPI -############################################## - -Since OpenVINO Development Tools is no longer in the installer package, the installation process has also changed. This section describes it through a comparison with previous versions. - -For Versions Prior to 2022.1 -++++++++++++++++++++++++++++ - -In previous versions, OpenVINO Development Tools was a part of the main package. After the package was installed, to convert models (for example, TensorFlow), you needed to install additional dependencies by using the requirement files, such as ``requirements_tf.txt``, install Post-Training Optimization tool and Accuracy Checker tool via the ``setup.py`` scripts, and then use the ``setupvars`` scripts to make the tools available to the following command: - -.. code-block:: sh - - $ mo.py -h - - -For 2022.1 and After (prior to 2023.1) -++++++++++++++++++++++++++++++++++++++++++ - -In OpenVINO 2022.1 and later, you can install the development tools only from a `PyPI `__ repository, using the following command (taking TensorFlow as an example): - -.. code-block:: sh - - $ python3 -m pip install -r /tools/requirements_tf.txt - - -This will install all the development tools and additional components necessary to work with TensorFlow via the ``openvino-dev`` package (see **Step 4. Install the Package** on the `PyPI page `__ for parameters of other frameworks). - -Then, the tools can be used by commands like: - -.. code-block:: sh - - $ mo -h - $ pot -h - - -Installation of any other dependencies is not required. For more details on the installation steps, see the -`Install OpenVINO Development Tools `__ prior to OpenVINO 2023.1. - -Interface Changes for Building C/C++ Applications -################################################# - -The new OpenVINO Runtime with its API 2.0 has also brought some changes for building C/C++ applications. - -CMake Interface -++++++++++++++++++++ - -The CMake interface has been changed as follows: - -**With Inference Engine of previous versions**: - -.. code-block:: cmake - - find_package(InferenceEngine REQUIRED) - find_package(ngraph REQUIRED) - add_executable(ie_ngraph_app main.cpp) - target_link_libraries(ie_ngraph_app PRIVATE ${InferenceEngine_LIBRARIES} ${NGRAPH_LIBRARIES}) - - -**With OpenVINO Runtime 2022.1 (API 2.0)**: - -.. code-block:: cmake - - find_package(OpenVINO REQUIRED) - add_executable(ov_app main.cpp) - target_link_libraries(ov_app PRIVATE openvino::runtime) - - add_executable(ov_c_app main.c) - target_link_libraries(ov_c_app PRIVATE openvino::runtime::c) - - -Native Interfaces -++++++++++++++++++++ - -It is possible to build applications without the CMake interface by using: MSVC IDE, UNIX makefiles, and any other interface, which has been changed as shown here: - -**With Inference Engine of previous versions**: - -.. tab-set:: - - .. tab-item:: Include dirs - :sync: include-dirs - - .. code-block:: sh - - /deployment_tools/inference_engine/include - /deployment_tools/ngraph/include - - .. tab-item:: Path to libs - :sync: path-libs - - .. code-block:: sh - - /deployment_tools/inference_engine/lib/intel64/Release - /deployment_tools/ngraph/lib/ - - .. tab-item:: Shared libs - :sync: shared-libs - - .. code-block:: sh - - // UNIX systems - inference_engine.so ngraph.so - - // Windows - inference_engine.dll ngraph.dll - - .. tab-item:: (Windows) .lib files - :sync: windows-lib-files - - .. code-block:: sh - - ngraph.lib - inference_engine.lib - -**With OpenVINO Runtime 2022.1 (API 2.0)**: - -.. 
tab-set:: - - .. tab-item:: Include dirs - :sync: include-dirs - - .. code-block:: sh - - /runtime/include - - .. tab-item:: Path to libs - :sync: path-libs - - .. code-block:: sh - - /runtime/lib/intel64/Release - - .. tab-item:: Shared libs - :sync: shared-libs - - .. code-block:: sh - - // UNIX systems - openvino.so - - // Windows - openvino.dll - - .. tab-item:: (Windows) .lib files - :sync: windows-lib-files - - .. code-block:: sh - - openvino.lib - - -Clearer Library Structure for Deployment -######################################## - -OpenVINO 2022.1 introduced a reorganization of the libraries, to make deployment easier. In the previous versions, it was required to use several libraries to perform deployment steps. Now you can just use ``openvino`` or ``openvino_c`` based on your developing language, with the necessary plugins to complete your task. For example, ``openvino_intel_cpu_plugin`` and ``openvino_ir_frontend`` plugins will enable loading OpenVINO IRs and performing inference on the CPU device (for more details, see the :doc:`Local distribution with OpenVINO `). - -Below are detailed comparisons of the library structure between OpenVINO 2022.1 and the previous versions: - -* Starting with 2022.1 release, a single core library with all the functionalities (``openvino`` for C++ Runtime, ``openvino_c`` for Inference Engine API C interface) is used, instead of the previous core libraries which contained ``inference_engine``, ``ngraph``, ``inference_engine_transformations`` and ``inference_engine_lp_transformations``. - -* The libraries of plugins have been renamed as follows: - - * ``openvino_intel_cpu_plugin`` is used for :doc:`CPU ` device instead of ``MKLDNNPlugin``. - * ``openvino_intel_gpu_plugin`` is used for :doc:`GPU ` device instead of ``clDNNPlugin``. - * ``openvino_auto_plugin`` is used for :doc:`Auto-Device Plugin `. - -* The plugins for reading and converting models have been changed as follows: - - * ``openvino_ir_frontend`` is used to read IRs instead of ``inference_engine_ir_reader``. - * ``openvino_onnx_frontend`` is used to read ONNX models instead of ``inference_engine_onnx_reader`` (with its dependencies). - * ``openvino_paddle_frontend`` is added in 2022.1 to read PaddlePaddle models. - - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst deleted file mode 100644 index e25951582bc976..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. {#openvino_2_0_model_creation} - -Model Creation in OpenVINO™ Runtime -===================================== - - -.. meta:: - :description: Model creation in OpenVINO™ Runtime API 2.0 is performed with - nGraph engine that has been preserved in the new API and its - namespace has been changed to 'ov'. - - -OpenVINO™ Runtime with API 2.0 includes the nGraph engine as a common part. The ``ngraph`` namespace has been changed to ``ov``, but all other parts of the ngraph API have been preserved. - - -API 2.0 -#################### - - -.. tab-set:: - - .. tab-item:: Python - :sync: py - - .. doxygensnippet:: docs/snippets/ov_graph.py - :language: Python - :fragment: ov:graph - - .. tab-item:: C++ - :sync: cpp - - .. 
doxygensnippet:: docs/snippets/ov_graph.cpp - :language: cpp - :fragment: ov:graph - - -Additional Resources -#################### - -* :doc:`Hello Model Creation Sample ` diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst deleted file mode 100644 index 263e6bc32b171b..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst +++ /dev/null @@ -1,203 +0,0 @@ -.. {#openvino_2_0_preprocessing} - -Preprocessing -============= - - -.. meta:: - :description: In OpenVINO™ API 2.0 each preprocessing or post-processing - operation is integrated directly into the model and compiled - together with the inference graph. - - -This guide introduces how preprocessing works in API 2.0 by a comparison with preprocessing in the previous Inference Engine API. It also demonstrates how to migrate preprocessing scenarios from Inference Engine to API 2.0 via code samples. - -How Preprocessing Works in API 2.0 -################################## - -Inference Engine API contains preprocessing capabilities in the `InferenceEngine::CNNNetwork `__ class. Such preprocessing information is not a part of the main inference graph executed by :doc:`OpenVINO devices `. Therefore, it is stored and executed separately before the inference stage: - -* Preprocessing operations are executed on the CPU for most OpenVINO inference plugins. Thus, instead of occupying accelerators, they keep the CPU busy with computational tasks. -* Preprocessing information stored in `InferenceEngine::CNNNetwork `__ is lost when saving back to the OpenVINO IR file format. - -API 2.0 introduces a :doc:`new way of adding preprocessing operations to the model ` - each preprocessing or post-processing operation is integrated directly into the model and compiled together with the inference graph: - -* API 2.0 first adds preprocessing operations by using `ov::preprocess::PrePostProcessor `__, -* and then compiles the model on the target by using `ov::Core::compile_model `__. - -Having preprocessing operations as a part of an OpenVINO opset makes it possible to read and serialize a preprocessed model as the OpenVINO™ IR file format. - -More importantly, API 2.0 does not assume any default layouts as Inference Engine did. For example, both ``{ 1, 224, 224, 3 }`` and ``{ 1, 3, 224, 224 }`` shapes are supposed to be in the `NCHW` layout, while only the latter is. Therefore, some preprocessing capabilities in the API require layouts to be set explicitly. To learn how to do it, refer to the :doc:`Layout overview `. For example, to perform image scaling by partial dimensions ``H`` and ``W``, preprocessing needs to know what dimensions ``H`` and ``W`` are. - -.. note:: - - Use model conversion API preprocessing capabilities to insert preprocessing operations in your model for optimization. Thus, the application does not need to read the model and set preprocessing repeatedly. You can use the :doc:`model caching feature ` to improve the time-to-inference. - -The following sections demonstrate how to migrate preprocessing scenarios from Inference Engine API to API 2.0. -The snippets assume that you need to preprocess a model input with the ``tensor_name`` in Inference Engine API, using ``operation_name`` to address the data. 
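As a preview of the migration snippets that follow, a minimal Python sketch of the API 2.0 preprocessing flow might look like this. The ``model.xml`` file and the ``tensor_name`` input are placeholders taken from the text above, and ``CPU`` is an assumed target device:

.. code-block:: py

   from openvino.runtime import Core, Layout, Type
   from openvino.preprocess import PrePostProcessor

   core = Core()
   model = core.read_model("model.xml")  # placeholder IR file

   ppp = PrePostProcessor(model)
   # Describe the tensor the application actually provides...
   ppp.input("tensor_name").tensor().set_element_type(Type.u8).set_layout(Layout("NHWC"))
   # ...declare the layout the model expects (layouts are not assumed implicitly)...
   ppp.input("tensor_name").model().set_layout(Layout("NCHW"))
   # ...and the preprocessing steps needed to bridge the two.
   ppp.input("tensor_name").preprocess().convert_element_type(Type.f32).convert_layout()
   # The preprocessing becomes part of the model and survives serialization.
   model = ppp.build()

   compiled_model = core.compile_model(model, "CPU")

Because the preprocessing steps are compiled together with the inference graph, they can execute on the target device instead of always keeping the CPU busy.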
- -Preparation: Import Preprocessing in Python -########################################### - -| There are two different namespaces: -| * ``runtime``, which contains API 2.0 classes; -| * and ``preprocess``, which provides Preprocessing API. - -Using Mean and Scale Values -########################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: mean_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_mean_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_mean_scale - - -Converting Precision and Layout -############################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: conversions - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_conversions - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_conversions - - -Using Image Scaling -#################### - -**Inference Engine API** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: image_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: c_api_ppp - - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_image_scale - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_image_scale - - -Converting Color Space -++++++++++++++++++++++ - -**API 2.0** - - -.. tab-set:: - - .. tab-item:: C++ - :sync: cpp - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.cpp - :language: cpp - :fragment: ov_color_space - - .. tab-item:: C - :sync: c - - .. doxygensnippet:: docs/snippets/ov_preprocessing_migration.c - :language: c - :fragment: ov_color_space - - -Additional Resources -#################### - -- :doc:`Preprocessing details ` -- :doc:`NV12 classification sample ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst index 0138307a04aec6..030cd6c7dbbdf2 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst @@ -8,8 +8,8 @@ All of the issues below refer to :doc:`legacy functionalities `. 
-If your question is not covered by the topics below, use the -`OpenVINO Support page `__, +If your question is not covered by the topics below, use the +`OpenVINO Support page `__, where you can participate in a free forum discussion. .. warning:: @@ -82,7 +82,7 @@ Q3. What does the message "[ ERROR ]: Unable to create ports for node with id" m **A:** Most likely, Model Optimizer does not know how to infer output shapes of some layers in the given topology. To lessen the scope, compile the list of layers that are custom for Model Optimizer: present in the topology, -absent in the :doc:`list of supported operations ` for the target framework. +absent in the :doc:`list of supported operations ` for the target framework. Then, refer to available options in the corresponding section in the :doc:`[Legacy] Custom Layers in Model Optimizer ` page. .. _question-7: @@ -255,7 +255,7 @@ Q16. What does the message "Input shape is required to convert MXNet model. Plea Q19. What does the message "Both --scale and --scale_values are defined. Specify either scale factor or scale values per input channels" mean? ##################################################################################################################################################### -**A:** The ``--scale`` option sets a scaling factor for all channels, while ``--scale_values`` sets a scaling factor per each channel. Using both of them simultaneously produces ambiguity, so you must use only one of them. For more information, refer to the **Using Framework-Agnostic Conversion Parameters** section: for :doc:`Converting a Caffe Model `, :doc:`Converting a TensorFlow Model `, :doc:`Converting an MXNet Model `. +**A:** The ``--scale`` option sets a scaling factor for all channels, while ``--scale_values`` sets a scaling factor per each channel. Using both of them simultaneously produces ambiguity, so you must use only one of them. For more information, refer to the **Using Framework-Agnostic Conversion Parameters** section: for :doc:`Converting a TensorFlow Model `. .. _question-20: @@ -547,7 +547,7 @@ Keep in mind that there is no space between and inside the brackets for input sh Q58. What does the message "Please provide input layer names for input layer shapes" mean? ##################################################################################################################################################### -**A:** When specifying input shapes for several layers, you must provide names for inputs, whose shapes will be overwritten. For usage examples, see the :doc:`Converting a Caffe Model `. Additional information for ``--input_shape`` is in FAQ :ref:`#56 `. +**A:** When specifying input shapes for several layers, you must provide names for inputs, whose shapes will be overwritten. Additional information for ``--input_shape`` is in FAQ :ref:`#56 `. .. _question-59: @@ -582,14 +582,14 @@ Q62. What does the message "You should specify input for each scale value" mean? Q63. What does the message "Number of inputs and mean values does not match" mean? ##################################################################################################################################################### -**A:** The number of specified mean values and the number of inputs must be equal. For a usage example, refer to the :doc:`Converting a Caffe Model ` guide. +**A:** The number of specified mean values and the number of inputs must be equal. .. _question-64: Q64. 
What does the message "Number of inputs and scale values does not match" mean? ##################################################################################################################################################### -**A:** The number of specified scale values and the number of inputs must be equal. For a usage example, refer to the :doc:`Converting a Caffe Model ` guide. +**A:** The number of specified scale values and the number of inputs must be equal. .. _question-65: @@ -715,7 +715,6 @@ Topology description (``.json`` file) should be prepared (merged) in advance and If you add additional layers and weights that are in ``.nd`` files to your model, Model Optimizer can build a model from one ``.params`` file and two additional ``.nd`` files (``*_args.nd``, ``*_auxs.nd``). To do that, provide both CLI options or do not pass them if you want to convert an MXNet model without additional weights. -For more information, refer to the :doc:`Converting an MXNet Model ` guide. .. _question-82: @@ -739,7 +738,6 @@ Q84. What does the message "Specified input json ... does not exist" mean? ##################################################################################################################################################### **A:** Most likely, ``.json`` file does not exist or has a name that does not match the notation of Apache MXNet. Make sure the file exists and has a correct name. -For more information, refer to the :doc:`Converting an MXNet Model ` guide. .. _question-85: @@ -747,8 +745,6 @@ Q85. What does the message "Unsupported Input model file type ... Model Optimize ##################################################################################################################################################### **A:** Model Optimizer for Apache MXNet supports only ``.params`` and ``.nd`` files formats. Most likely, you specified an unsupported file format in ``--input_model``. -For more information, refer to :doc:`Converting an MXNet Model `. - .. _question-86: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst deleted file mode 100644 index c7e0a215e11160..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. {#mxnet_caffe_kaldi} - -MX Net, Caffe, and Kaldi model formats -====================================== - - - -.. toctree:: - :maxdepth: 1 - :hidden: - - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Caffe - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi - openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_GluonCV_Models - openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet - openvino_docs_MO_DG_prepare_model_convert_model_kaldi_specific_Aspire_Tdnn_Model - - -The following articles present the deprecated conversion method for MX Net, Caffe, -and Kaldi model formats. - -:doc:`Apache MX Net conversion ` -:doc:`Caffe conversion ` -:doc:`Kaldi conversion ` - -Here are three examples of conversion for particular models. 
-:doc:`MXNet GluonCV conversion ` -:doc:`MXNet Style Transfer Model conversion ` -:doc:`Kaldi ASpIRE Chain TDNN Model conversion ` - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst deleted file mode 100644 index 0b55973e6a2b57..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst +++ /dev/null @@ -1,155 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_kaldi_specific_Aspire_Tdnn_Model} - -Converting a Kaldi ASpIRE Chain Time Delay Neural Network (TDNN) Model -====================================================================== - - -.. meta:: - :description: Learn how to convert an ASpIRE Chain TDNN - model from Kaldi to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Kaldi is currently being deprecated and will be removed entirely in the future. - -First, `download a pre-trained model `__ -for the ASpIRE Chain Time Delay Neural Network (TDNN) from the official Kaldi project website. - -Converting an ASpIRE Chain TDNN Model to IR -########################################### - -Generate the Intermediate Representation of the model by running model conversion with the following parameters: - -.. code-block:: sh - - mo --input_model exp/chain/tdnn_7b/final.mdl --output output - - -The IR will have two inputs: ``input`` for data, and ``ivector`` for ivectors. - -Example: Running ASpIRE Chain TDNN Model with the Speech Recognition Sample -########################################################################### - -.. note:: - - Before you continue with this part of the article, get familiar with the - :doc:`Speech Recognition sample `. - -In this example, the input data contains one utterance from one speaker. - -To run the ASpIRE Chain TDNN model with the Speech Recognition sample, you need to prepare the environment first. Follow the steps below: - -1. Download the `Kaldi repository `__. -2. Build it by following the instructions in ``README.md`` from the repository. -3. Download the `model archive `__ from the Kaldi website. -4. Extract the downloaded model archive to the ``egs/aspire/s5`` folder of the Kaldi repository. - -Once everything has been prepared, you can start the actual run: - -1. Prepare the model for decoding. Refer to the ``README.txt`` file from the downloaded model archive for instructions. -2. Convert data and ivectors to ``.ark`` format. Refer to the corresponding sections below for instructions. - -Preparing Data -++++++++++++++++++++ - -If you have a ``.wav`` data file, convert it to the ``.ark`` format using the following command: - -.. code-block:: sh - - /src/featbin/compute-mfcc-feats --config=/egs/aspire/s5/conf/mfcc_hires.conf scp:./wav.scp ark,scp:feats.ark,feats.scp - - -Add the ``feats.ark`` absolute path to ``feats.scp`` to avoid errors in later commands. - -Preparing Ivectors -++++++++++++++++++++ - -Prepare ivectors for the Speech Recognition sample: - -1. Copy the ``feats.scp`` file to the ``egs/aspire/s5/`` directory of the built Kaldi repository and navigate there: - - .. code-block:: sh - - cp feats.scp /egs/aspire/s5/ - cd /egs/aspire/s5/ - - -2. Extract ivectors from the data: - - .. 
code-block:: sh - - ./steps/online/nnet2/extract_ivectors_online.sh --nj 1 --ivector_period exp/tdnn_7b_chain_online/ivector_extractor - - - You can simplify the preparation of ivectors for the Speech Recognition sample. To do it, specify the maximum number of frames in utterances as a parameter for ``--ivector_period`` to get only one ivector per utterance. - - To get the maximum number of frames in utterances, use the following command line: - - .. code-block:: sh - - ../../../src/featbin/feat-to-len scp:feats.scp ark,t: | cut -d' ' -f 2 - | sort -rn | head -1 - - - As a result, you will find the ``ivector_online.1.ark`` file in ````. - -3. Go to the ````: - - .. code-block:: sh - - cd - - -4. Convert the ``ivector_online.1.ark`` file to text format, using the ``copy-feats`` tool. Run the following command: - - .. code-block:: sh - - /src/featbin/copy-feats --binary=False ark:ivector_online.1.ark ark,t:ivector_online.1.ark.txt - - -5. For the Speech Recognition sample, the ``.ark`` file must contain an ivector for each frame. Copy the ivector ``frame_count`` times by running the below script in the Python command prompt: - - .. code-block:: py - :force: - - import subprocess - - subprocess.run(["/src/featbin/feat-to-len", "scp:/egs/aspire/s5/feats.scp", "ark,t:feats_length.txt"]) - - f = open("ivector_online.1.ark.txt", "r") - g = open("ivector_online_ie.ark.txt", "w") - length_file = open("feats_length.txt", "r") - for line in f: - if "[" not in line: - for i in range(frame_count): - line = line.replace("]", " ") - g.write(line) - else: - g.write(line) - frame_count = int(length_file.read().split(" ")[1]) - g.write("]") - f.close() - g.close() - length_file.close() - - -6. Create an ``.ark`` file from ``.txt``: - - .. code-block:: sh - - /src/featbin/copy-feats --binary=True ark,t:ivector_online_ie.ark.txt ark:ivector_online_ie.ark - - -Running the Speech Recognition Sample -+++++++++++++++++++++++++++++++++++++ - -Run the Speech Recognition sample with the created ivector ``.ark`` file: - -.. code-block:: sh - - speech_sample -i feats.ark,ivector_online_ie.ark -m final.xml -d CPU -o prediction.ark -cw_l 17 -cw_r 12 - - -Results can be decoded as described in "Use of Sample in Kaldi Speech Recognition Pipeline" -in the :doc:`Speech Recognition Sample description ` article. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst deleted file mode 100644 index 2a00cc9016b525..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_GluonCV_Models} - -Converting MXNet GluonCV Models -=============================== - - -.. meta:: - :description: Learn how to convert GluonCV models - from MXNet to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future. - -This article provides the instructions and examples on how to convert `GluonCV SSD and YOLO-v3 models `__ to IR. - -1. Choose the topology available from the `GluonCV Model Zoo `__ and export to the MXNet format using the GluonCV API. For example, for the ``ssd_512_mobilenet1.0`` topology: - - .. 
code-block:: py - :force: - - from gluoncv import model_zoo, data, utils - from gluoncv.utils import export_block - net = model_zoo.get_model('ssd_512_mobilenet1.0_voc', pretrained=True) - export_block('ssd_512_mobilenet1.0_voc', net, preprocess=True, layout='HWC') - - As a result, you will get an MXNet model representation in ``ssd_512_mobilenet1.0.params`` and ``ssd_512_mobilenet1.0.json`` files generated in the current directory. - -2. Run model conversion API, specifying the ``enable_ssd_gluoncv`` option. Make sure the ``input_shape`` parameter is set to the input shape layout of your model (NHWC or NCHW). The examples below illustrate running model conversion for the SSD and YOLO-v3 models trained with the NHWC layout and located in the ````: - - * **For GluonCV SSD topologies:** - - .. code-block:: sh - - mo --input_model /ssd_512_mobilenet1.0.params --enable_ssd_gluoncv --input_shape [1,512,512,3] --input data --output_dir - - * **For YOLO-v3 topology:** - - * To convert the model: - - .. code-block:: sh - - mo --input_model /yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --output_dir - - * To convert the model with replacing the subgraph with RegionYolo layers: - - .. code-block:: sh - - mo --input_model /models/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --transformations_config "front/mxnet/ yolo_v3_mobilenet1_voc. json" --output_dir - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst deleted file mode 100644 index 138fe6de0a81f9..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Caffe} - -Converting a Caffe Model -======================== - - -.. meta:: - :description: Learn how to convert a model from the - Caffe format to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Caffe is currently being deprecated and will be removed entirely in the future. - -To convert a Caffe model, run ``mo`` with the path to the input model ``.caffemodel`` file: - -.. code-block:: cpp - - mo --input_model .caffemodel - - -The following list provides the Caffe-specific parameters. - -.. code-block:: cpp - - Caffe-specific parameters: - --input_proto INPUT_PROTO, -d INPUT_PROTO - Deploy-ready prototxt file that contains a topology - structure and layer attributes - --caffe_parser_path CAFFE_PARSER_PATH - Path to python Caffe parser generated from caffe.proto - -k K Path to CustomLayersMapping.xml to register custom - layers - --disable_omitting_optional - Disable omitting optional attributes to be used for - custom layers. Use this option if you want to transfer - all attributes of a custom layer to IR. Default - behavior is to transfer the attributes with default - values and the attributes defined by the user to IR. - --enable_flattening_nested_params - Enable flattening optional params to be used for - custom layers. Use this option if you want to transfer - attributes of a custom layer to IR with flattened - nested parameters. Default behavior is to transfer the - attributes without flattening nested parameters. 
- - -CLI Examples Using Caffe-Specific Parameters -++++++++++++++++++++++++++++++++++++++++++++ - -* Launching model conversion for `bvlc_alexnet.caffemodel `__ with a specified `prototxt` file. This is needed when the name of the Caffe model and the `.prototxt` file are different or are placed in different directories. Otherwise, it is enough to provide only the path to the input `model.caffemodel` file. - - .. code-block:: cpp - - mo --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt - -* Launching model conversion for `bvlc_alexnet.caffemodel `__ with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe system on the computer. Example of ``CustomLayersMapping.xml`` can be found in ``/mo/front/caffe/CustomLayersMapping.xml.example``. The optional parameters without default values and not specified by the user in the ``.prototxt`` file are removed from the Intermediate Representation, and nested parameters are flattened: - - .. code-block:: cpp - - mo --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params - - This example shows a multi-input model with input layers: ``data``, ``rois`` - - .. code-block:: cpp - - layer { - name: "data" - type: "Input" - top: "data" - input_param { - shape { dim: 1 dim: 3 dim: 224 dim: 224 } - } - } - layer { - name: "rois" - type: "Input" - top: "rois" - input_param { - shape { dim: 1 dim: 5 dim: 1 dim: 1 } - } - } - -* Launching model conversion for a multi-input model with two inputs and providing a new shape for each input in the order they are passed to the model conversion API. In particular, for data, set the shape to ``1,3,227,227``. For rois, set the shape to ``1,6,1,1``: - - .. code-block:: cpp - - mo --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),[1,6,1,1] - -Custom Layer Definition -######################## - -For the definition of custom layers, refer to the :doc:`Cutting Off Parts of a Model ` page. - -Supported Caffe Layers -####################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations ` page. - -Frequently Asked Questions (FAQ) -################################ - -Model conversion API provides explanatory messages when it is unable to complete conversions due to typographical errors, incorrectly used options, or other issues. A message describes the potential cause of the problem and gives a link to :doc:`Model Optimizer FAQ ` which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in :doc:`Convert a Model `to help you understand what went wrong. - -Summary -####### - -In this document, you learned: - -* Basic information about how model conversion works with Caffe models. -* Which Caffe models are supported. -* How to convert a trained Caffe model by using model conversion API with both framework-agnostic and Caffe-specific command-line parameters. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific Caffe models. 
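To recap the flow end to end, the conversion and the subsequent loading of the produced IR can be scripted. This is a hedged sketch that assumes the legacy ``mo`` tool is available on ``PATH`` and that the ``bvlc_alexnet`` files from the examples above are present in the working directory:

.. code-block:: py

   import subprocess
   from openvino.runtime import Core

   # Convert the Caffe model to IR, mirroring the first CLI example above.
   subprocess.run(
       ["mo",
        "--input_model", "bvlc_alexnet.caffemodel",
        "--input_proto", "bvlc_alexnet.prototxt"],
       check=True,
   )

   # The generated IR can then be consumed by OpenVINO Runtime.
   core = Core()
   model = core.read_model("bvlc_alexnet.xml")
   compiled_model = core.compile_model(model, "CPU")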
- - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst deleted file mode 100644 index 7f8fc3856d960a..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.rst +++ /dev/null @@ -1,96 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi} - -Converting a Kaldi Model -======================== - - -.. meta:: - :description: Learn how to convert a model from the - Kaldi format to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Kaldi is currently being deprecated and will be removed entirely in the future. - -.. note:: - - Model conversion API supports the `nnet1 `__ and `nnet2 `__ formats of Kaldi models. The support of the `nnet3 `__ format is limited. - -To convert a Kaldi model, run model conversion with the path to the input model ``.nnet`` or ``.mdl`` file: - -.. code-block:: sh - - mo --input_model .nnet - -Using Kaldi-Specific Conversion Parameters -########################################## - -The following list provides the Kaldi-specific parameters. - -.. code-block:: sh - - Kaldi-specific parameters: - --counts COUNTS A file name with full path to the counts file or empty string to utilize count values from the model file - --remove_output_softmax - Removes the Softmax that is the output layer - --remove_memory Remove the Memory layer and add new inputs and outputs instead - -Examples of CLI Commands -######################## - -* To launch model conversion for the ``wsj_dnn5b_smbr`` model with the specified ``.nnet`` file: - - .. code-block:: sh - - mo --input_model wsj_dnn5b_smbr.nnet - -* To launch model conversion for the ``wsj_dnn5b_smbr`` model with the existing file that contains counts for the last layer with biases: - - .. code-block:: sh - - mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts - - - * The model conversion normalizes counts in the following way: - - .. math:: - - S = \frac{1}{\sum_{j = 0}^{|C|}C_{j}} - - .. math:: - - C_{i} = \log(S \cdot C_{i}) - - where :math:`C` is the counts array, :math:`C_{i}` is the :math:`i^{th}` element of the counts array, and :math:`|C|` is the number of elements in the counts array; - - * The normalized counts are then subtracted from the biases of the last layer, or of the next-to-last layer if the last one is SoftMax. A short Python illustration of this normalization is given after this section. - - .. note:: Model conversion API will show a warning if a model contains count values and the ``counts`` option is not used. - -* If you want to remove the last SoftMax layer in the topology, launch the model conversion with the ``remove_output_softmax`` flag: - - .. code-block:: sh - - mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --remove_output_softmax - - Model conversion API finds the last layer of the topology and removes this layer only if it is a SoftMax layer. - - .. note:: Model conversion can remove the SoftMax layer only if the topology has one output. - -* You can use the *OpenVINO Speech Recognition* sample application to run sample inference of Kaldi models. This sample supports models with only one output. If your model has several outputs, specify the desired one with the ``output`` option. - -Supported Kaldi Layers -###################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations ` page. 
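The counts normalization described above is small enough to restate directly in Python. This is only an illustration of the formula, not the actual conversion code:

.. code-block:: py

   import math

   def normalize_counts(counts):
       """Apply S = 1 / sum(C), then C_i <- log(S * C_i), as in the formulas above."""
       s = 1.0 / sum(counts)
       return [math.log(s * c) for c in counts]

   # The resulting values are what gets subtracted from the biases of the last
   # layer (or the next-to-last layer, if the last one is SoftMax).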
- -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific Kaldi models. Here are some examples: - -* :doc:`Convert Kaldi ASpIRE Chain Time Delay Neural Network (TDNN) Model ` - - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst deleted file mode 100644 index c7b0a02d4c0458..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet} - -Converting an MXNet Model -========================= - - -.. meta:: - :description: Learn how to convert a model from the - MXNet format to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future. - -To convert an MXNet model, run Model Optimizer with the path to the ``.params`` file of the input model: - -.. code-block:: sh - - mo --input_model model-file-0000.params - - -Using MXNet-Specific Conversion Parameters -########################################## - -The following list provides the MXNet-specific parameters. - -.. code-block:: sh - - MXNet-specific parameters: - --input_symbol - Symbol file (for example, "model-symbol.json") that contains a topology structure and layer attributes - --nd_prefix_name - Prefix name for args.nd and argx.nd files - --pretrained_model_name - Name of a pre-trained MXNet model without extension and epoch - number. This model will be merged with args.nd and argx.nd - files - --save_params_from_nd - Enable saving built parameters file from .nd files - --legacy_mxnet_model - Enable Apache MXNet loader to make a model compatible with the latest Apache MXNet version. - Use only if your model was trained with Apache MXNet version lower than 1.0.0 - --enable_ssd_gluoncv - Enable transformation for converting the gluoncv ssd topologies. - Use only if your topology is one of ssd gluoncv topologies - - -.. note:: - - By default, model conversion API does not use the Apache MXNet loader. It transforms the topology to another format which is compatible with the latest version of Apache MXNet. However, the Apache MXNet loader is required for models trained with lower version of Apache MXNet. If your model was trained with an Apache MXNet version lower than 1.0.0, specify the ``--legacy_mxnet_model`` key to enable the Apache MXNet loader. Note that the loader does not support models with custom layers. In this case, you must manually recompile Apache MXNet with custom layers and install it in your environment. - -Custom Layer Definition -####################### - -For the definition of custom layers, refer to the :doc:`Cutting Off Parts of a Model ` page. - -Supported MXNet Layers -####################### - -For the list of supported standard layers, refer to the :doc:`Supported Operations ` page. - -Frequently Asked Questions (FAQ) -################################ - -Model conversion API provides explanatory messages when it is unable to complete conversions due to typographical errors, incorrectly used options, or other issues. 
A message describes the potential cause of the problem and gives a link to :doc:`Model Optimizer FAQ ` which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in :doc:`Convert a Model ` to help you understand what went wrong. - -Summary -######## - -In this document, you learned: - -* Basic information about how model conversion API works with MXNet models. -* Which MXNet models are supported. -* How to convert a trained MXNet model by using model conversion API with both framework-agnostic and MXNet-specific command-line parameters. - -Additional Resources -#################### - -See the :doc:`Model Conversion Tutorials ` page for a set of tutorials providing step-by-step instructions for converting specific MXNet models. Here are some examples: - -* :doc:`Convert MXNet GluonCV Model ` -* :doc:`Convert MXNet Style Transfer Model ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst deleted file mode 100644 index 4afe62791563ea..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.rst +++ /dev/null @@ -1,181 +0,0 @@ -.. {#openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet} - -Converting an MXNet Style Transfer Model -======================================== - - -.. meta:: - :description: Learn how to convert a Style Transfer - model from MXNet to the OpenVINO Intermediate Representation. - - -.. warning:: - - Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future. - -This article provides instructions on how to generate a model for style transfer, using the public MXNet neural style transfer sample. - -**Step 1**: Download or clone the repository `Zhaw's Neural Style Transfer repository `__ with an MXNet neural style transfer sample. - -**Step 2**: Prepare the environment required to work with the cloned repository: - -.. note:: - - Python-tk installation is needed only for Linux. Python for Windows includes it by default. - - -1. Install packages dependency. - - .. code-block:: sh - - sudo apt-get install python-tk - - -2. Install Python requirements: - - .. code-block:: sh - - pip3 install --user mxnet - pip3 install --user matplotlib - pip3 install --user scikit-image - - -**Step 3**: Download the pre-trained `VGG19 model `__ and save it to the root directory of the cloned repository. The sample expects the model ``vgg19.params`` file to be in that directory. - -**Step 4**: Modify source code files of style transfer sample from the cloned repository: - -1. Go to the ``fast_mrf_cnn`` subdirectory. - - .. code-block:: sh - - cd ./fast_mrf_cnn - - -2. Open the ``symbol.py`` file and modify the ``decoder_symbol()`` function. You should see the following code there: - - .. code-block:: py - - def decoder_symbol(): - data = mx.sym.Variable('data') - data = mx.sym.Convolution(data=data, num_filter=256, kernel=(3,3), pad=(1,1), stride=(1, 1), name='deco_conv1') - - - Replace the code above with the following: - - .. code-block:: py - - def decoder_symbol_with_vgg(vgg_symbol): - data = mx.sym.Convolution(data=vgg_symbol, num_filter=256, kernel=(3,3), pad=(1,1), stride=(1, 1), name='deco_conv1') - - -3. Save and close the ``symbol.py`` file. - -4. 
Open and edit the ``make_image.py`` file. Go to the ``__init__()`` function in the ``Maker`` class: - - .. code-block:: py - - decoder = symbol.decoder_symbol() - - - Modify it with the following code: - - .. code-block:: py - - decoder = symbol.decoder_symbol_with_vgg(vgg_symbol) - - -5. To join the pre-trained weights with the decoder weights, make the following changes: - After the code lines for loading the decoder weights: - - .. code-block:: py - - args = mx.nd.load('%s_decoder_args.nd'%model_prefix) - auxs = mx.nd.load('%s_decoder_auxs.nd'%model_prefix) - - - Add the following line: - - .. code-block:: py - - arg_dict.update(args) - - -6. Use ``arg_dict`` instead of ``args`` as a parameter of the ``decoder.bind()`` function. Find the line below: - - .. code-block:: py - - self.deco_executor = decoder.bind(ctx=mx.gpu(), args=args, aux_states=auxs) - - - Replace it with the following: - - .. code-block:: py - - self.deco_executor = decoder.bind(ctx=mx.cpu(), args=arg_dict, aux_states=auxs) - - -7. Add the following code to the end of the ``generate()`` function in the ``Maker`` class to save the result model as a ``.json`` file: - - .. code-block:: py - - self.vgg_executor._symbol.save('{}-symbol.json'.format('vgg19')) - self.deco_executor._symbol.save('{}-symbol.json'.format('nst_vgg19')) - - -8. Save and close the ``make_image.py`` file. - -**Step 5**: Follow the instructions from the ``README.md`` file in the ``fast_mrf_cnn`` directory of the cloned repository and run the sample with a decoder model. -For example, use the following code to run the sample with the pre-trained decoder weights from the ``models`` folder and output shape: - -.. code-block:: py - - import make_image - maker = make_image.Maker('models/13', (1024, 768)) - maker.generate('output.jpg', '../images/tubingen.jpg') - - -The ``models/13`` string in the code above is composed of the following substrings: - -* ``models/`` -- path to the folder that contains ``.nd`` files with pre-trained styles weights. -* ``13`` -- prefix pointing to the default decoder for the repository, ``13_decoder``. - -.. note:: - - If an error prompts with "No module named ``cPickle``", try running the script from Step 5 in Python 2. After that return to Python 3 for the remaining steps. - -Any style can be selected from `collection of pre-trained weights `__. On the Chinese-language page, click the down arrow next to a size in megabytes. Then wait for an overlay box to appear, and click the blue button in it to download. The ``generate()`` function generates ``nst_vgg19-symbol.json`` and ``vgg19-symbol.json`` files for the specified shape. In the code, it is ``[1024 x 768]`` for a 4:3 ratio. You can specify another, for example, ``[224,224]`` for a square ratio. - -**Step 6**: Run model conversion to generate an Intermediate Representation (IR): - -1. Create a new directory. For example: - - .. code-block:: sh - - mkdir nst_model - - -2. Copy the initial and generated model files to the created directory. For example, to copy the pre-trained decoder weights from the ``models`` folder to the ``nst_model`` directory, run the following commands: - - .. code-block:: sh - - cp nst_vgg19-symbol.json nst_model - cp vgg19-symbol.json nst_model - cp ../vgg19.params nst_model/vgg19-0000.params - cp models/13_decoder_args.nd nst_model - cp models/13_decoder_auxs.nd nst_model - - - .. note:: - - Make sure that all the ``.params`` and ``.json`` files are in the same directory as the ``.nd`` files. Otherwise, the conversion process fails. - - -3. 
Run model conversion for Apache MXNet. Use the ``--nd_prefix_name`` option to specify the decoder prefix and ``input_shape`` to specify input shapes in ``[N,C,W,H]`` order. For example: - - .. code-block:: sh - - mo --input_symbol /nst_vgg19-symbol.json --framework mxnet --output_dir --input_shape [1,3,224,224] --nd_prefix_name 13_decoder --pretrained_model /vgg19-0000.params - - -4. The IR is generated (``.bin``, ``.xml`` and ``.mapping`` files) in the specified output directory, and ready to be consumed by the OpenVINO Runtime. - diff --git a/docs/articles_en/get_started/configurations-header.rst b/docs/articles_en/get_started/configurations-header.rst index cab6ad46b8d6ac..b92039c2ae6886 100644 --- a/docs/articles_en/get_started/configurations-header.rst +++ b/docs/articles_en/get_started/configurations-header.rst @@ -5,7 +5,7 @@ Additional Configurations For Hardware .. meta:: - :description: Learn how to create additional configurations for your devices + :description: Learn how to create additional configurations for your devices to work with Intel® Distribution of OpenVINO™ toolkit. .. _additional configurations: @@ -13,13 +13,11 @@ Additional Configurations For Hardware .. toctree:: :maxdepth: 2 :hidden: - + For GPU For NPU - For GNA - -For certain use cases, you may need to install additional software, to use the full +For certain use cases, you may need to install additional software, to use the full potential of OpenVINO™. Check the following list for components for elements used in your workflow: @@ -35,15 +33,9 @@ your workflow: See the :doc:`guide on NPU configuration ` for details. -| **GNA drivers** -| If you want to run inference on a GNA (note that it is currently being deprecated and will no longer - be supported beyond 2023.2), make sure your GPU's drivers are properly installed. See the - :doc:`guide on GNA configuration ` - for details. - | **Open Computer Vision Library** | OpenCV is used to extend the capabilities of some models, for example enhance some of - OpenVINO samples, when used as a dependency in compilation. To install OpenCV for OpenVINO, see the + OpenVINO samples, when used as a dependency in compilation. To install OpenCV for OpenVINO, see the `instructions on GtHub `__. diff --git a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst b/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst deleted file mode 100644 index 606572e97284af..00000000000000 --- a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. {#openvino_docs_install_guides_configurations_for_intel_gna} - -Configurations for Intel® Gaussian & Neural Accelerator (GNA) with OpenVINO™ -============================================================================ - - -.. meta:: - :description: Learn how to provide additional configuration for Intel® - Gaussian & Neural Accelerator (GNA) to work with Intel® - Distribution of OpenVINO™ toolkit on your system. - - -.. note:: - - On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only. - - -Drivers and Dependencies -######################## - - -Intel® GNA hardware requires a driver to be installed on the system. - -.. _gna guide: - -Linux -#################### - -Prerequisites -++++++++++++++++++++ - -Ensure that make, gcc, and Linux kernel headers are installed. Use the following command to install required software: - -.. 
code-block:: sh - - sudo apt-get install gcc make linux-headers-generic - - -Configuration steps -++++++++++++++++++++ - -1. Download `Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.4+) `__ -2. Run the sample_install.sh script provided in the installation package: - - .. code-block:: sh - - prompt$ ./scripts/sample_install.sh - - -You can also build and install the driver manually by using the following commands: - -.. code-block:: sh - - prompt$ cd src/ - prompt$ make - prompt$ sudo insmod intel_gna.ko - - -To unload the driver: - -.. code-block:: sh - - prompt$ sudo rmmod intel_gna - - -.. _gna guide windows: - - -Windows -#################### - -Intel® GNA driver for Windows is available through Windows Update. - -What’s Next? -#################### - -Now you are ready to try out OpenVINO™. You can use the following tutorials to write your applications using Python and C/C++. - -* Developing in Python: - - * `Start with tensorflow models with OpenVINO™ `__ - * `Start with ONNX and PyTorch models with OpenVINO™ `__ - * `Start with PaddlePaddle models with OpenVINO™ `__ - -* Developing in C/C++: - - * :doc:`Image Classification Async C++ Sample ` - * :doc:`Hello Classification C++ Sample ` - * :doc:`Hello Reshape SSD C++ Sample ` - - diff --git a/docs/articles_en/get_started/installing-openvino-overview.rst b/docs/articles_en/get_started/installing-openvino-overview.rst index 37e8701b7fd5f6..f4688f9f766f09 100644 --- a/docs/articles_en/get_started/installing-openvino-overview.rst +++ b/docs/articles_en/get_started/installing-openvino-overview.rst @@ -54,7 +54,6 @@ Install OpenVINO™ 2024.0 =============== ========== ====== ========= ======== ============ ========== ========== CPU V V V V V V V GPU V V V V V V V - GNA V n/a n/a n/a n/a n/a n/a NPU V n/a n/a n/a n/a n/a n/a =============== ========== ====== ========= ======== ============ ========== ========== diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst index d5360a92bb7813..056bfed3e861b6 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst @@ -5,40 +5,40 @@ Install Intel® Distribution of OpenVINO™ Toolkit for Linux Using APT Reposito .. meta:: - :description: Learn how to install OpenVINO™ Runtime on the Linux operating + :description: Learn how to install OpenVINO™ Runtime on the Linux operating system, using the APT repository. .. note:: - + Note that the APT distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to Linux users only - * additionally includes code samples + * additionally includes code samples .. tab-set:: .. tab-item:: System Requirements :sync: system-requirements - + | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ - + .. 
tab-item:: Software Requirements :sync: software-requirements - + * `CMake 3.13 or higher, 64-bit `__ * GCC 7.5.0 (for Ubuntu 18.04), GCC 9.3.0 (for Ubuntu 20.04) or GCC 11.3.0 (for Ubuntu 22.04) * `Python 3.8 - 3.11, 64-bit `__ - + Installing OpenVINO Runtime ####################################### @@ -71,30 +71,30 @@ Step 1: Set Up the OpenVINO Toolkit APT Repository sudo apt-get install gnupg 2. Add the repository via the following command: - + .. tab-set:: .. tab-item:: Ubuntu 22 :sync: ubuntu-22 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu22 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + .. tab-item:: Ubuntu 20 :sync: ubuntu-20 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + .. tab-item:: Ubuntu 18 :sync: ubuntu-18 - + .. code-block:: sh - + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu18 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - + 3. Update the list of packages via the update command: @@ -121,38 +121,38 @@ Step 2: Install OpenVINO Runtime Using the APT Package Manager .. tab-item:: The Latest Version :sync: latest-version - + Run the following command: - + .. code-block:: sh - + sudo apt install openvino - - + + .. tab-item:: A Specific Version :sync: specific-version - + #. Get a list of OpenVINO packages available for installation: - + .. code-block:: sh - + sudo apt-cache search openvino - + #. Install a specific version of an OpenVINO package: - + .. code-block:: sh - + sudo apt install openvino-.. - + For example: - + .. code-block:: sh - + sudo apt install openvino-2023.2.0 - + .. note:: - You can use ``--no-install-recommends`` option to install only required packages. + You can use ``--no-install-recommends`` option to install only required packages. Keep in mind that the build tools must be installed **separately** if you want to compile the samples. @@ -165,12 +165,12 @@ Run the following command: apt list --installed | grep openvino -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. -With the APT distribution, you can build OpenVINO sample files, as explained in the +With the APT distribution, you can build OpenVINO sample files, as explained in the :doc:`guide for OpenVINO sample applications `. For C++ and C, just run the ``build_samples.sh`` script: @@ -178,16 +178,16 @@ For C++ and C, just run the ``build_samples.sh`` script: .. tab-item:: C++ :sync: cpp - + .. code-block:: sh - + /usr/share/openvino/samples/cpp/build_samples.sh - + .. tab-item:: C :sync: c - + .. code-block:: sh - + /usr/share/openvino/samples/c/build_samples.sh Python samples can run as following: @@ -205,32 +205,32 @@ To uninstall OpenVINO Runtime via APT, run the following command based on your n .. tab-item:: The Latest Version :sync: latest-version - + .. code-block:: sh - + sudo apt autoremove openvino - + .. tab-item:: A Specific Version :sync: specific-version - + .. code-block:: sh - + sudo apt autoremove openvino-.. - + For example: - + .. code-block:: sh - + sudo apt autoremove openvino-2023.2.0 What's Next? 
####################################### -Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! Learn more about how to integrate a model in OpenVINO applications by trying out the following tutorials: -* Try the `C++ Quick Start Example `_ for step-by-step +* Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg @@ -239,7 +239,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ You can also try the following: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst index 6d304da1350a26..3e39eb243968ce 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst @@ -14,24 +14,24 @@ Install OpenVINO™ Runtime on Linux from an Archive File Note that the Archive distribution: * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to Linux users (archives for other systems are also available) * may offer different hardware support under different operating systems (see the drop-down below for more details). .. dropdown:: Inference Options - =================== ===== ===== ===== ===== - Operating System CPU GPU GNA NPU - =================== ===== ===== ===== ===== - Debian9 armhf V n/a n/a n/a - Ubuntu18 arm64 V n/a n/a n/a - CentOS7 x86_64 V V n/a n/a - Ubuntu18 x86_64 V V V n/a - Ubuntu20 x86_64 V V V V - Ubuntu22 x86_64 V V V V - RHEL8 x86_64 V V V n/a - =================== ===== ===== ===== ===== + =================== ===== ===== ===== + Operating System CPU GPU NPU + =================== ===== ===== ===== + Debian9 armhf V n/a n/a + Ubuntu18 arm64 V n/a n/a + CentOS7 x86_64 V V n/a + Ubuntu18 x86_64 V V n/a + Ubuntu20 x86_64 V V V + Ubuntu22 x86_64 V V V + RHEL8 x86_64 V V n/a + =================== ===== ===== ===== .. tab-set:: @@ -134,66 +134,66 @@ Step 1: Download and Install the OpenVINO Core Components :sync: ubuntu-22 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: Ubuntu 20.04 :sync: ubuntu-20 .. 
code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu20_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu20_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: Ubuntu 18.04 :sync: ubuntu-18 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu18_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu18_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: RHEL 8 :sync: rhel-8 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_rhel8_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_rhel8_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: CentOS 7 :sync: centos-7 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_centos7_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_centos7_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM 64-bit :sync: arm-64 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.0.0-13770-9b52171d290/l_openvino_toolkit_ubuntu18_2024.0.0.dev20231221_arm64.tgz -O openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_ubuntu18_2024.0.0.dev20231221_arm64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM 32-bit :sync: arm-32 .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_debian9_2023.2.0.13089.cfd42bd2cb0_armhf.tgz -O openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv l_openvino_toolkit_debian9_2023.2.0.13089.cfd42bd2cb0_armhf /opt/intel/openvino_2023.2.0 - - + + 5. Install required system dependencies on Linux. To do this, OpenVINO provides a script in the extracted installation directory. Run the following command: .. code-block:: sh @@ -220,7 +220,7 @@ Step 1: Download and Install the OpenVINO Core Components cd /opt/intel sudo ln -s openvino_2023.2.0 openvino_2023 - + .. note:: If you have already installed a previous release of OpenVINO 2023, a symbolic link to the ``openvino_2023`` folder may already exist. Unlink the previous link with ``sudo unlink openvino_2023``, and then re-run the command above. 
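Since the download, relocation, and symlinking steps above are spread across several tabs and hunks, a condensed recap may help. The sketch below simply strings together the Ubuntu 22.04 commands quoted above (the URL and extracted folder name come from the Ubuntu 22 tab; substitute the archive for your own distribution, and note the ``unlink`` guard is an added convenience based on the note about stale links):

.. code-block:: sh

   # Download and unpack the Ubuntu 22 archive (URL as shown in the Ubuntu 22 tab)
   curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz
   tar -xf openvino_2023.2.0.tgz

   # Move the release under /opt/intel and refresh the convenience symlink
   sudo mv l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0
   cd /opt/intel
   sudo unlink openvino_2023 2>/dev/null || true   # drop a stale link from an earlier 2023 release, if present
   sudo ln -s openvino_2023.2.0 openvino_2023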
@@ -300,9 +300,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `__ - * `Automatic speech recognition C++ sample `__ - - Uninstalling the Intel® Distribution of OpenVINO™ Toolkit ########################################################### diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst index 0fb0ebf3551b50..d03895cd1d4932 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst @@ -5,17 +5,17 @@ Install OpenVINO™ Runtime on Linux From YUM Repository .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Linux operating + :description: Learn how to install OpenVINO™ Runtime on Linux operating system, using the YUM repository. .. note:: - + Note that the YUM distribution: - + * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to Linux users only - * additionally includes code samples + * additionally includes code samples .. tab-set:: @@ -75,7 +75,7 @@ Step 1: Set Up the Repository EOF 2. Move the new ``openvino-2023.repo`` file to the YUM configuration directory, i.e. ``/etc/yum.repos.d``: - + .. code-block:: sh sudo mv /tmp/openvino-2023.repo /etc/yum.repos.d @@ -107,26 +107,26 @@ Install OpenVINO Runtime .. tab-item:: The Latest Version :sync: latest-version - + Run the following command: - + .. code-block:: sh - + sudo yum install openvino - + .. tab-item:: A Specific Version :sync: specific-version - + Run the following command: - + .. code-block:: sh - + sudo yum install openvino-.. - + For example: - + .. code-block:: sh - + sudo yum install openvino-2023.2.0 @@ -145,12 +145,12 @@ Run the following command: You can additionally install Python API using one of the alternative methods (:doc:`conda ` or :doc:`pip `). -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. -With the YUM distribution, you can build OpenVINO sample files, as explained in the +With the YUM distribution, you can build OpenVINO sample files, as explained in the :doc:`guide for OpenVINO sample applications `. For C++ and C, just run the ``build_samples.sh`` script: @@ -158,16 +158,16 @@ For C++ and C, just run the ``build_samples.sh`` script: .. tab-item:: C++ :sync: cpp - + .. code-block:: sh - + /usr/share/openvino/samples/cpp/build_samples.sh - + .. tab-item:: C :sync: c - + .. code-block:: sh - + /usr/share/openvino/samples/c/build_samples.sh @@ -181,23 +181,23 @@ To uninstall OpenVINO Runtime via YUM, run the following command based on your n .. tab-item:: The Latest Version :sync: latest-version - + .. code-block:: sh - + sudo yum autoremove openvino - - + + .. 
tab-item:: A Specific Version :sync: specific-version - + .. code-block:: sh - + sudo yum autoremove openvino-.. - + For example: - + .. code-block:: sh - + sudo yum autoremove openvino-2023.2.0 @@ -205,10 +205,10 @@ To uninstall OpenVINO Runtime via YUM, run the following command based on your n What's Next? ############# -Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you're ready to run your own machine learning applications! Learn more about how to integrate a model in OpenVINO applications by trying out the following tutorials: -* Try the `C++ Quick Start Example `_ +* Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg @@ -217,7 +217,6 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ You can also try the following things: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst index 4f198c3c30f3d4..fbd107f2202911 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime on macOS from an Archive File .. meta:: - :description: Learn how to install OpenVINO™ Runtime on macOS operating + :description: Learn how to install OpenVINO™ Runtime on macOS operating system, using an archive file. .. note:: - + Note that the Archive distribution: - + * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to macOS users (archives for other systems are also available) * is only supported for CPU Plugin @@ -23,13 +23,13 @@ Install OpenVINO™ Runtime on macOS from an Archive File .. tab-item:: System Requirements :sync: system-requirements - + | Full requirement listing is available in: | :doc:`System Requirements Page ` .. tab-item:: Software Requirements :sync: software-requirements - + * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default install). * `Python 3.8 - 3.11 `__ (choose 3.8 - 3.11). Install and add to path. * Apple Xcode Command Line Tools. In the terminal, run ``xcode-select --install`` from any directory @@ -69,18 +69,18 @@ Step 1: Install OpenVINO Core Components .. tab-item:: x86, 64-bit :sync: x86-64 - + .. code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/macos/m_openvino_toolkit_macos_10_15_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv m_openvino_toolkit_macos_10_15_2023.2.0.13089.cfd42bd2cb0_x86_64 /opt/intel/openvino_2023.2.0 - + .. tab-item:: ARM, 64-bit :sync: arm-64 - + .. 
code-block:: sh - + curl -L https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/macos/m_openvino_toolkit_macos_11_0_2023.2.0.13089.cfd42bd2cb0_arm64.tgz --output openvino_2023.2.0.tgz tar -xf openvino_2023.2.0.tgz sudo mv m_openvino_toolkit_macos_11_0_2023.2.0.13089.cfd42bd2cb0_arm64 /opt/intel/openvino_2023.2.0 @@ -102,7 +102,7 @@ Step 1: Install OpenVINO Core Components .. code-block:: sh - sudo ln -s /opt/intel/openvino_2023.2.0 /opt/intel/openvino_2023 + sudo ln -s /opt/intel/openvino_2023.2.0 /opt/intel/openvino_2023 .. note:: @@ -110,27 +110,27 @@ Step 1: Install OpenVINO Core Components If you have already installed a previous release of OpenVINO 2023, a symbolic link to the ``openvino_2023`` folder may already exist. Unlink the previous link with ``sudo unlink openvino_2023``, and then re-run the command above. -Congratulations, you have finished the installation! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations, you have finished the installation! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. -The ``/opt/intel/openvino_2023`` folder now contains the core components for OpenVINO. -If you used a different path in Step 2, for example, ``/home//intel/``, -OpenVINO is now in ``/home//intel/openvino_2023``. The path to the ``openvino_2023`` +The ``/opt/intel/openvino_2023`` folder now contains the core components for OpenVINO. +If you used a different path in Step 2, for example, ``/home//intel/``, +OpenVINO is now in ``/home//intel/openvino_2023``. The path to the ``openvino_2023`` directory is also referred as ```` throughout the OpenVINO documentation. Step 2: Configure the Environment +++++++++++++++++++++++++++++++++ -You must update several environment variables before you can compile and run OpenVINO applications. Open a terminal window and run the ``setupvars.sh`` -script as shown below to temporarily set your environment variables. If your ```` (the folder you used to install OpenVINO) is not +You must update several environment variables before you can compile and run OpenVINO applications. Open a terminal window and run the ``setupvars.sh`` +script as shown below to temporarily set your environment variables. If your ```` (the folder you used to install OpenVINO) is not the default ``/opt/intel/openvino_2023``, use the correct one instead. .. code-block:: sh - cd /opt/intel/openvino_2023 + cd /opt/intel/openvino_2023 source /opt/intel/openvino_2023/setupvars.sh @@ -151,45 +151,44 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. tab-item:: Get started with Python :sync: get-started-py - + Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. - + .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 - + Visit the :ref:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: - + * `OpenVINO Python API Tutorial `__ * `Basic image classification program with Hello Image Classification `__ * `Convert a PyTorch model and use it for image background removal `__ - + .. 
tab-item:: Get started with C++ :sync: get-started-cpp - + Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. - + .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - + * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ Uninstalling Intel® Distribution of OpenVINO™ Toolkit ##################################################### If you have installed OpenVINO Runtime from archive files, you can uninstall it by deleting the archive files and the extracted folders. -Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. +Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. If you have created the symbolic link, remove the link first: - + .. code-block:: sh sudo rm /opt/intel/openvino_2023 - + To delete the files: - + .. code-block:: sh rm -r && rm diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst index 6b6b9fcea70066..0940d68969a3c8 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-brew.rst @@ -5,15 +5,15 @@ Install OpenVINO™ Runtime via Homebrew .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Linux and macOS + :description: Learn how to install OpenVINO™ Runtime on Linux and macOS operating systems, using Homebrew. .. note:: - + Note that the `Homebrew `__ distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference + * does not offer support for NPU inference * is dedicated to macOS (both arm64 and x86_64) and Linux (x86_64 only) users. @@ -24,10 +24,10 @@ Install OpenVINO™ Runtime via Homebrew | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -38,7 +38,7 @@ Install OpenVINO™ Runtime via Homebrew .. tab-item:: Linux :sync: linux - + * `Homebrew `_ * `CMake 3.13 or higher, 64-bit `__ * GCC 7.5.0 (for Ubuntu 18.04), GCC 9.3.0 (for Ubuntu 20.04) or GCC 11.3.0 (for Ubuntu 22.04) @@ -46,13 +46,13 @@ Install OpenVINO™ Runtime via Homebrew .. tab-item:: macOS :sync: macos - + * `Homebrew `_ - * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default installation). + * `CMake 3.13 or higher `__ (choose "macOS 10.13 or later"). Add ``/Applications/CMake.app/Contents/bin`` to path (for default installation). * `Python 3.8 - 3.11 `__ . Install and add it to path. * Apple Xcode Command Line Tools. In the terminal, run ``xcode-select --install`` from any directory to install it. 
* (Optional) Apple Xcode IDE (not required for OpenVINO™, but useful for development) - + Installing OpenVINO Runtime ########################### @@ -72,8 +72,8 @@ Installing OpenVINO Runtime brew list -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst index fbc8bfa87aa622..8bb322092f32da 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime from Conan Package Manager .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using Conan Package Manager. .. note:: - + Note that the Conan Package Manager distribution: * offers C/C++ API only - * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * does not offer support for NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) @@ -25,10 +25,10 @@ Install OpenVINO™ Runtime from Conan Package Manager Full requirement listing is available in: :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: `Product Specifications `__ @@ -64,14 +64,14 @@ Installing OpenVINO Runtime with Conan Package Manager .. code-block:: sh conan install conanfile.txt --build=missing - - By default, OpenVINO is statically compiled, together with all available + + By default, OpenVINO is statically compiled, together with all available plugins and frontends. To build a version tailored to your needs, check - what options there are on the `Conan Package Manager page for OpenVINO `__ + what options there are on the `Conan Package Manager page for OpenVINO `__ and extend the command, like so: - + .. code-block:: sh - + conan install conanfile.txt --build=missing -o:h 'openvino/*:enable_intel_gpu=False' -o:h 'openvino/*:enable_onnx_frontend=False' -o:h 'openvino/*:shared=True' 3. Configure and compile your project with OpenVINO: @@ -82,7 +82,7 @@ Installing OpenVINO Runtime with Conan Package Manager cmake --build --parallel .. note:: - + OpenVINO can be used with any build interface, as long as it is supported by Conan 2.0. Read `more `__. 
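Because the article references a ``conanfile.txt`` without showing its contents, the full round trip may be easier to follow with a concrete file in hand. The sketch below is a minimal, hypothetical ``conanfile.txt`` (the ``openvino/2023.2.0`` reference and the generator names are assumptions based on standard Conan 2.0 usage, not quoted from the article) combined with the install command shown above:

.. code-block:: sh

   # Write a minimal conanfile.txt; the package reference and generators
   # below are assumed examples and should match the release you target.
   cat > conanfile.txt <<'EOF'
   [requires]
   openvino/2023.2.0

   [generators]
   CMakeDeps
   CMakeToolchain
   EOF

   # Resolve the dependency, building anything missing from source, and
   # disable the GPU plugin as in the option-override example above
   conan install conanfile.txt --build=missing -o:h 'openvino/*:enable_intel_gpu=False'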
Additional Resources diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst index cb3045da5aaa45..37cca12efa23b0 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst @@ -5,17 +5,17 @@ Install OpenVINO™ Runtime from Conda Forge .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using Conda Forge. .. note:: - + Note that the Conda Forge distribution: * offers both C/C++ and Python APIs - * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * does not offer support for NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) .. tab-set:: @@ -25,11 +25,11 @@ Install OpenVINO™ Runtime from Conda Forge | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -45,7 +45,7 @@ Installing OpenVINO Runtime with Anaconda Package Manager ############################################################ 1. Set up the Anaconda environment (Python 3.10 used as an example): - + .. code-block:: sh conda create --name py310 python=3.10 @@ -55,7 +55,7 @@ Installing OpenVINO Runtime with Anaconda Package Manager conda activate py310 2. Update it to the latest version: - + .. code-block:: sh conda update --all @@ -66,22 +66,22 @@ Installing OpenVINO Runtime with Anaconda Package Manager conda install -c conda-forge openvino=2023.2.0 -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. Compiling with OpenVINO Runtime from Conda-Forge on Linux +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -When linking OpenVINO libraries from Conda on Linux, ensure that you have the necessary Conda compilers installed. +When linking OpenVINO libraries from Conda on Linux, ensure that you have the necessary Conda compilers installed. To do so, run the following command in your Conda environment: .. code-block:: sh conda install cmake c-compiler cxx-compiler make -It is crucial to reactivate your Conda environment after installing the compilers. +It is crucial to reactivate your Conda environment after installing the compilers. This step ensures that all the environment variables are set correctly for successful linkage. 
To reactivate your Conda environment, execute the following command: @@ -90,38 +90,33 @@ To reactivate your Conda environment, execute the following command: conda activate py310 -Once you have reactivated your Conda environment, make sure that all the necessary environment +Once you have reactivated your Conda environment, make sure that all the necessary environment variables are properly set and proceed with linking the OpenVINO libraries. Uninstalling OpenVINO™ Runtime ########################################################### -Once OpenVINO Runtime is installed via Conda, you can remove it using the following command, +Once OpenVINO Runtime is installed via Conda, you can remove it using the following command, with the proper OpenVINO version number: .. code-block:: sh - + conda remove openvino=2023.2.0 What's Next? ############################################################ -Now that you've installed OpenVINO Runtime, you are ready to run your own machine learning applications! +Now that you've installed OpenVINO Runtime, you are ready to run your own machine learning applications! To learn more about how to integrate a model in OpenVINO applications, try out some tutorials and sample applications. -Try the :doc:`C++ Quick Start Example ` for step-by-step instructions +Try the :doc:`C++ Quick Start Example ` for step-by-step instructions on building and running a basic image classification C++ application. .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: * `Basic object detection with the Hello Reshape SSD C++ sample `__ -* `Automatic speech recognition C++ sample `__ - - - - diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst index 5185fc192d8e9b..badaa9fb433222 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst @@ -5,18 +5,18 @@ Install Intel® Distribution of OpenVINO™ Toolkit from PyPI Repository .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using a PyPi package. .. note:: - + Note that the PyPi distribution: - + * offers the Python API only - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) - * Windows and Linux do not offer support for GNA and NPU inference + * Windows and Linux do not offer support for NPU inference * macOS offers support only for CPU inference .. tab-set:: @@ -27,11 +27,11 @@ Install Intel® Distribution of OpenVINO™ Toolkit from PyPI Repository | Full requirement listing is available in: | :doc:`System Requirements Page ` | `PyPi OpenVINO page `__ - - + + .. 
tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -120,8 +120,8 @@ Run the command below: If installation was successful, you will see the list of available devices. -Congratulations! You've just Installed OpenVINO! For some use cases you may still -need to install additional components. Check the +Congratulations! You've just Installed OpenVINO! For some use cases you may still +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst index fbd7035e083354..e7bfba2850eff0 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime via vcpkg .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and macOS operating systems, using vcpkg. .. note:: - + Note that the vcpkg distribution: * offers C/C++ API only - * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSes: Windows, Linux, and macOS + * does not offer support for NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS (all x86_64 / arm64 architectures) .. tab-set:: @@ -24,10 +24,10 @@ Install OpenVINO™ Runtime via vcpkg | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ @@ -41,7 +41,7 @@ Install OpenVINO™ Runtime via vcpkg Installing OpenVINO Runtime ########################### -1. Make sure that you have installed vcpkg on your system. If not, follow the +1. Make sure that you have installed vcpkg on your system. If not, follow the `vcpkg installation instructions `__. @@ -52,7 +52,7 @@ Installing OpenVINO Runtime vcpkg install openvino vcpkg also enables you to install only selected components, by specifying them in the command. - See the list of `available features `__, for example: + See the list of `available features `__, for example: .. code-block:: sh @@ -64,8 +64,8 @@ Installing OpenVINO Runtime vcpkg install 'openvino:x64-windows-static' -Note that the vcpkg installation means building all packages and dependencies from source, -which means the compiler stage will require additional time to complete the process. +Note that the vcpkg installation means building all packages and dependencies from source, +which means the compiler stage will require additional time to complete the process. After installation, you can use OpenVINO in your product's cmake scripts: @@ -80,7 +80,7 @@ And running from terminal: cmake -B -S -DCMAKE_TOOLCHAIN_FILE=/scripts/buildsystems/vcpkg.cmake Congratulations! You've just Installed and used OpenVINO in your project! For some use cases you may still -need to install additional components. 
Check the +need to install additional components. Check the :doc:`list of additional configurations ` to see if your case needs any of them. diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst index 2d164bb09cb56b..42d10c8a94e479 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst @@ -5,16 +5,16 @@ Install OpenVINO™ Runtime on Windows from an Archive File .. meta:: - :description: Learn how to install OpenVINO™ Runtime on Windows operating + :description: Learn how to install OpenVINO™ Runtime on Windows operating system, using an archive file. .. note:: - + Note that the Archive distribution: - + * offers both C/C++ and Python APIs - * additionally includes code samples + * additionally includes code samples * is dedicated to Windows users (archives for other systems are also available) @@ -28,32 +28,32 @@ System Requirements | Full requirement listing is available in: | :doc:`System Requirements Page ` - + .. tab-item:: Processor Notes :sync: processor-notes - + | To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: | `Product Specifications `__ - + .. tab-item:: Software :sync: software - + * `Microsoft Visual Studio 2019 with MSBuild `__ or `Microsoft Visual Studio 2022 `__ * `CMake 3.14 or higher, 64-bit `__ (optional, only required for building sample applications) * `Python 3.8 - 3.11, 64-bit `__ - + .. note:: - + To install Microsoft Visual Studio 2019, follow the `Microsoft Visual Studio installation guide `__. You can choose to download the Community version. During installation in the **Workloads** tab, choose **Desktop development with C++**. - + .. note:: - + You can either use `cmake.msi` which is the installation wizard or `cmake.zip` where you have to go into the `bin` folder and then manually add the path to environmental variables. - + .. important:: - + When installing Python, make sure you click the option **Add Python 3.x to PATH** to `add Python `__ to your `PATH` environment variable. - + Installing OpenVINO Runtime @@ -131,14 +131,14 @@ Step 1: Download and Install OpenVINO Core Components If you have already installed a previous release of OpenVINO 2022, a symbolic link to the ``openvino_2023`` folder may already exist. If you want to override it, navigate to the ``C:\Program Files (x86)\Intel`` folder and delete the existing linked folder before running the ``mklink`` command. -Congratulations, you have finished the installation! For some use cases you may still -need to install additional components. Check the description below, as well as the +Congratulations, you have finished the installation! For some use cases you may still +need to install additional components. Check the description below, as well as the :doc:`list of additional configurations ` to see if your case needs any of them. -The ``C:\Program Files (x86)\Intel\openvino_2023`` folder now contains the core components for OpenVINO. -If you used a different path in Step 1, you will find the ``openvino_2023`` folder there. 
-The path to the ``openvino_2023`` directory is also referred as ```` +The ``C:\Program Files (x86)\Intel\openvino_2023`` folder now contains the core components for OpenVINO. +If you used a different path in Step 1, you will find the ``openvino_2023`` folder there. +The path to the ``openvino_2023`` directory is also referred as ```` throughout the OpenVINO documentation. @@ -162,8 +162,8 @@ You must update several environment variables before you can compile and run Ope .. note:: - If you see an error indicating Python is not installed, Python may not be added to the PATH environment variable - (as described `here `__). + If you see an error indicating Python is not installed, Python may not be added to the PATH environment variable + (as described `here `__). Check your system environment variables, and add Python if necessary. @@ -177,30 +177,29 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. tab-item:: Get started with Python :sync: get-started-py - + Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. - + .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 - + Visit the :ref:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: - - * `OpenVINO Python API Tutorial `__ + + * `OpenVINO Python API Tutorial `__ * `Basic image classification program with Hello Image Classification `__ * `Convert a PyTorch model and use it for image background removal `__ - + .. tab-item:: Get started with C++ :sync: get-started-cpp - + Try the `C++ Quick Start Example `_ for step-by-step instructions on building and running a basic image classification C++ application. - + .. image:: https://user-images.githubusercontent.com/36741649/127170593-86976dc3-e5e4-40be-b0a6-206379cd7df5.jpg :width: 400 - + Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - + * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ .. _uninstall-from-windows: @@ -209,7 +208,7 @@ Uninstalling OpenVINO Runtime ############################# If you have installed OpenVINO Runtime from archive files, you can uninstall it by deleting the archive files and the extracted folders. -Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. +Uninstallation removes all Intel® Distribution of OpenVINO™ Toolkit component files but does not affect user files in the installation directory. If you have created the symbolic link, remove the link first. 
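For the uninstall step just described, a short Command Prompt sketch may help. Both the default ``C:\Program Files (x86)\Intel`` location and the ``openvino_2023.2.0`` folder name are assumptions carried over from the installation steps of this article:

.. code-block:: bat

   :: Remove the symbolic link first; rmdir deletes the link itself
   :: without touching the folder it points to
   rmdir "C:\Program Files (x86)\Intel\openvino_2023"

   :: Then delete the extracted release folder (folder name assumed from Step 1)
   rmdir /S /Q "C:\Program Files (x86)\Intel\openvino_2023.2.0"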
@@ -239,7 +238,7 @@ Additional Resources * IoT libraries and code samples in the GitHUB repository: `Intel® IoT Developer Kit `__ Inner Loop - const auto current_expr_loops = expr->get_loop_ids(); + const auto& current_expr_loops = expr->get_loop_ids(); const auto current_loop_depth = current_expr_loops.size(); for (size_t i = 0; i < current_loop_depth; ++i) { const auto current_loop_id = current_expr_loops[i]; @@ -235,7 +235,7 @@ bool FuseLoops::run(LinearIR& linear_ir) { continue; } - const auto upper_loop_ids = parent_expr->get_loop_ids(); + const auto& upper_loop_ids = parent_expr->get_loop_ids(); if (upper_loop_ids.empty()) continue; @@ -279,7 +279,7 @@ bool FuseLoops::run(LinearIR& linear_ir) { continue; } - const auto lower_loop_ids = consumer_expr->get_loop_ids(); + const auto& lower_loop_ids = consumer_expr->get_loop_ids(); if (lower_loop_ids.empty()) continue; diff --git a/src/common/snippets/src/lowered/pass/identify_buffers.cpp b/src/common/snippets/src/lowered/pass/identify_buffers.cpp index 5b0dcee5221b6c..6b04701ff155d5 100644 --- a/src/common/snippets/src/lowered/pass/identify_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/identify_buffers.cpp @@ -43,8 +43,8 @@ bool IdentifyBuffers::can_reuse_id(const ShiftPtrParams& lhs, const ShiftPtrPara bool IdentifyBuffers::are_adjacent(const std::pair& lhs, const std::pair& rhs) { - const auto lhs_ids = lhs.first->get_loop_ids(); - const auto rhs_ids = rhs.first->get_loop_ids(); + const auto& lhs_ids = lhs.first->get_loop_ids(); + const auto& rhs_ids = rhs.first->get_loop_ids(); const auto equal_loop_ids = lhs_ids == rhs_ids; if (equal_loop_ids) { // Buffers are connected to the same Loop and have the same outer Loops return !can_reuse_id(lhs.second, rhs.second); diff --git a/src/common/snippets/src/lowered/pass/init_loops.cpp b/src/common/snippets/src/lowered/pass/init_loops.cpp index 68e8cc7757e13f..8272b7c3de2a81 100644 --- a/src/common/snippets/src/lowered/pass/init_loops.cpp +++ b/src/common/snippets/src/lowered/pass/init_loops.cpp @@ -6,6 +6,7 @@ #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/loop_manager.hpp" +#include "snippets/op/memory_access.hpp" #include "snippets/itt.hpp" namespace ov { @@ -37,6 +38,22 @@ int64_t get_output_stride(size_t dim, const VectorDims& shape) { InitLoops::InitLoops() : Pass() {} +void InitLoops::init_is_incremented(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = loop_info->get_exit_points(); + auto update = [](std::vector& ports) { + for (auto& port : ports) { + if (!ov::is_type(port.expr_port->get_expr()->get_node())) { + port.is_incremented = false; + } + } + }; + update(loop_entries); + update(loop_exits); + loop_info->set_entry_points(loop_entries); + loop_info->set_exit_points(loop_exits); +} + void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { const auto work_amount = loop_info->get_work_amount(); auto loop_entries = loop_info->get_entry_points(); @@ -47,7 +64,6 @@ void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& lo if (loop_entry.is_incremented) { const auto& port = loop_entry.expr_port; const auto source = *port->get_connected_ports().begin(); - const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const auto& shape = port->get_descriptor_ptr()->get_shape(); const auto& dim = *(layout.rbegin() + loop_entry.dim_idx); @@ -63,7 +79,6 @@ void 
InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& lo loop_exit.ptr_increment = 0; if (loop_exit.is_incremented) { const auto& port = loop_exit.expr_port; - const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const auto& shape = port->get_descriptor_ptr()->get_shape(); const auto original_dim = layout.size() - 1 - loop_exit.dim_idx; @@ -117,6 +132,7 @@ bool InitLoops::run(LinearIR& linear_ir) { const auto& loops = loop_manager->get_map(); for (const auto& loop : loops) { const auto loop_info = loop.second; + init_is_incremented(loop_info); init_ptr_increments(loop_info); init_finalization_offsets(loop_info); init_element_type_sizes(loop_info); diff --git a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp index 723b97b5a25788..d76a2b1af35147 100644 --- a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp +++ b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp @@ -17,7 +17,6 @@ namespace pass { bool InsertBroadcastMove::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InsertBroadcastMove") bool modified = false; - const auto& loop_manager = linear_ir.get_loop_manager(); auto supports_broadcasting = [](const std::shared_ptr& n) { return ov::op::util::supports_auto_broadcast(n) || @@ -39,6 +38,7 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir) { const auto& descriptors = expr->get_input_port_descriptors(); if (!supports_broadcasting(node) || descriptors.size() < 2) continue; + const auto& loop_ids = expr->get_loop_ids(); const auto& connectors = expr->get_input_port_connectors(); OPENVINO_ASSERT(connectors.size() == descriptors.size(), "Invalid expression configuration: connectors and descriptors size mismatch"); @@ -51,26 +51,18 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir) { const auto broadcasted_dim = *std::max_element(last_dims.begin(), last_dims.end()); for (size_t i = 0; i < last_dims.size(); i++) { const auto& parent_port = connectors[i]->get_source(); - if (last_dims[i] != broadcasted_dim && - !dont_need_broadcasting(parent_port.get_expr()->get_node())) { + const auto& parent_node = parent_port.get_expr()->get_node(); + if (last_dims[i] != broadcasted_dim && !dont_need_broadcasting(parent_node)) { OPENVINO_ASSERT(last_dims[i] == 1, "Attempt to broadcast non-1 dimension. 
Target dim: ", broadcasted_dim, " This dim: ", last_dims[i]); - const auto broadcast = std::make_shared(node->get_input_source_output(i), broadcasted_dim); - - PortDescriptorUtils::set_port_descriptor_ptr(broadcast->output(0), connectors[i]->get_source().get_descriptor_ptr()->clone()); - const auto broadcast_expr = linear_ir.create_expression(broadcast, {connectors[i]}); - linear_ir.insert(expr_it, broadcast_expr); - linear_ir.replace_input(expr->get_input_port(i), broadcast_expr->get_output_port_connector(0)); + const auto broadcast = std::make_shared(parent_node, broadcasted_dim); + const auto broadcast_expr = *linear_ir.insert_node(broadcast, std::vector{ connectors[i] }, + loop_ids, true, expr_it, { expr->get_input_port(i) }); // Note that BroadcastMove modified the next expr input shape, so we need to set update // expr's input port descriptor to reflect the changes expr->get_input_port_descriptor(i)->set_shape(broadcast_expr->get_output_port_descriptor(0)->get_shape()); - // Copy Loop identifies - const auto& loop_ids = expr->get_loop_ids(); - broadcast_expr->set_loop_ids(loop_ids); - loop_manager->update_loops_port(loop_ids, expr->get_input_port(0), {broadcast_expr->get_input_port(0)}, true); - modified = true; } } diff --git a/src/common/snippets/src/lowered/pass/insert_buffers.cpp b/src/common/snippets/src/lowered/pass/insert_buffers.cpp index 81835a4ca390ae..0653d0589f3075 100644 --- a/src/common/snippets/src/lowered/pass/insert_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/insert_buffers.cpp @@ -105,8 +105,8 @@ InsertBuffers::InsertBuffers(int32_t buffer_allocation_rank) LinearIR::constExprIt InsertBuffers::insertion_position(const LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, const ExpressionPtr& up_expr, const ExpressionPtr& down_expr) { - const auto up_loops = up_expr->get_loop_ids(); - const auto down_loops = down_expr->get_loop_ids(); + const auto& up_loops = up_expr->get_loop_ids(); + const auto& down_loops = down_expr->get_loop_ids(); // If upper expression is out of Loop, we can insert Buffer implicitly after him if (up_loops.empty()) { return std::next(linear_ir.find(up_expr)); @@ -150,8 +150,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto& expr = entry_port->get_expr(); const auto port_idx = entry_port->get_index(); const auto node = expr->get_node(); - const auto& input_connector = expr->get_input_port_connector(port_idx); - const auto& parent_expr_output = input_connector->get_source(); + const auto& parent_expr_output = expr->get_input_port_connector(port_idx)->get_source(); const auto& parent_expr = parent_expr_output.get_expr(); const auto parent_port = parent_expr_output.get_index(); const auto parent = parent_expr->get_node(); @@ -166,8 +165,8 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto node_ma = ov::as_type_ptr(node); bool is_buffer_needed = (parent_ma && parent_ma->is_memory_access_output_port(parent_port)) || (node_ma && node_ma->is_memory_access_input_port(port_idx)); - const auto current_loops = expr->get_loop_ids(); - const auto parent_loops = parent_expr->get_loop_ids(); + const auto& current_loops = expr->get_loop_ids(); + const auto& parent_loops = parent_expr->get_loop_ids(); const auto buffer_loop_ids = get_buffer_loop_ids(current_loops, parent_loops, is_buffer_needed); if (is_buffer_needed) { @@ -182,12 +181,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& parent_expr_output, 
m_buffer_allocation_rank); const auto buffer = std::make_shared(parent->output(parent_port), allocation_shape); - PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), parent_expr_output.get_descriptor_ptr()->clone()); - // Output connector is automatically filled from PortDescriptor - const auto buffer_expr = linear_ir.create_expression(buffer, {input_connector}); - linear_ir.insert(pos, buffer_expr); - linear_ir.replace_input(*entry_port.get(), buffer_expr->get_output_port_connector(0)); - buffer_expr->set_loop_ids(buffer_loop_ids); + linear_ir.insert_node(buffer, std::vector{ parent_expr_output }, buffer_loop_ids, false, pos, { *entry_port }); } } @@ -198,8 +192,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& const auto node = expr->get_node(); const auto output_connector = exit_port->get_port_connector_ptr(); const auto child_exprs_inputs = output_connector->get_consumers(); - const auto current_loops = expr->get_loop_ids(); - const std::vector node_outs = {output_connector}; + const auto& current_loops = expr->get_loop_ids(); std::set potential_consumers; std::set buffers; @@ -241,7 +234,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& for (const auto& buffer : buffers) { const auto& buffer_out = buffer->get_output_port_connector(0); const auto buffer_consumers_inputs = buffer_out->get_consumers(); - linear_ir.replace_input(buffer_consumers_inputs, output_connector); + replace_input_port_connectors(buffer_consumers_inputs, output_connector); potential_consumers.insert(buffer_consumers_inputs.begin(), buffer_consumers_inputs.end()); linear_ir.erase(linear_ir.find_after(expr_it, buffer)); } @@ -275,7 +268,6 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& *exit_port, m_buffer_allocation_rank); auto buffer = std::make_shared(node->output(port_idx), allocation_shape); - PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), exit_port->get_descriptor_ptr()->clone()); // We cannot insert Node output connector on Buffer output because not all consumers of Node needs Buffer // Example: // Add @@ -284,10 +276,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& // | <- It should be new PortConnector // Relu // Output port connector is automatically filled from PortDescriptor - const auto buffer_expr = linear_ir.create_expression(buffer, node_outs); - linear_ir.insert(pos, buffer_expr); - linear_ir.replace_input(potential_consumers, buffer_expr->get_output_port_connector(0)); - buffer_expr->set_loop_ids(buffer_loop_ids); + linear_ir.insert_node(buffer, std::vector{ *exit_port }, buffer_loop_ids, false, pos, { potential_consumers }); } } } diff --git a/src/common/snippets/src/lowered/pass/insert_load_store.cpp b/src/common/snippets/src/lowered/pass/insert_load_store.cpp index 75e70c9c553c88..eb70e3d26042b8 100644 --- a/src/common/snippets/src/lowered/pass/insert_load_store.cpp +++ b/src/common/snippets/src/lowered/pass/insert_load_store.cpp @@ -37,31 +37,18 @@ bool InsertLoadStore::insert_load(LinearIR& linear_ir, const LinearIR::constExpr OPENVINO_ASSERT(consumer_inputs.size() == 1, "RankNormalization is supposed to be the only consumer"); data_expr = first_consumer; } - const auto& loop_manager = linear_ir.get_loop_manager(); const auto& data_ngraph_output = data_expr->get_node()->output(0); - const auto& output_connector = data_expr->get_output_port_connector(0); bool was_inserted = false; - for (const auto& consumer_input : 
output_connector->get_consumers()) { + const auto& data_out = data_expr->get_output_port_connector(0); + for (const auto& consumer_input : data_out->get_consumers()) { const auto& consumer_expr = consumer_input.get_expr(); - const auto port = consumer_input.get_index(); - const auto& consumer = consumer_expr->get_node(); - const auto ma = ov::as_type_ptr(consumer); - if (ma && ma->is_memory_access_input_port(port)) + const auto ma = ov::as_type_ptr(consumer_expr->get_node()); + if (ma && ma->is_memory_access_input_port(consumer_input.get_index())) return false; - const auto loop_ids = consumer_expr->get_loop_ids(); const auto load = std::make_shared(data_ngraph_output, get_count(data_expr->get_output_port_descriptor(0))); - PortDescriptorUtils::set_port_descriptor_ptr(load->output(0), consumer_input.get_descriptor_ptr()->clone()); - const auto load_expr = linear_ir.create_expression(load, {output_connector}); - linear_ir.insert(linear_ir.find_after(data_expr_it, consumer_expr), load_expr); - linear_ir.replace_input(consumer_input, load_expr->get_output_port_connector(0)); - // Copy Loop identifies - load_expr->set_loop_ids(loop_ids); - - // Need to update all the corresponding Loops with the same Entry Point - const auto& prev_entry_point = consumer_input; - const auto new_entry_point = load_expr->get_input_port(0); - loop_manager->update_loops_port(loop_ids, prev_entry_point, {new_entry_point}, true); + linear_ir.insert_node(load, std::vector{ data_out }, consumer_expr->get_loop_ids(), + true, linear_ir.find_after(data_expr_it, consumer_expr), { consumer_input }); was_inserted = true; } @@ -69,10 +56,8 @@ bool InsertLoadStore::insert_load(LinearIR& linear_ir, const LinearIR::constExpr } bool InsertLoadStore::insert_store(LinearIR& linear_ir, const LinearIR::constExprIt& data_expr_it) { - const auto& loop_manager = linear_ir.get_loop_manager(); const auto& data_expr = *data_expr_it; - const auto& input_connector = data_expr->get_input_port_connector(0); - const auto& parent_output = input_connector->get_source(); + const auto& parent_output = data_expr->get_input_port_connector(0)->get_source(); const auto& parent_expr = parent_output.get_expr(); const auto port = parent_output.get_index(); const auto& parent = parent_expr->get_node(); @@ -80,34 +65,10 @@ bool InsertLoadStore::insert_store(LinearIR& linear_ir, const LinearIR::constExp if (ma && ma->is_memory_access_output_port(port)) return false; - const auto loop_ids = parent_expr->get_loop_ids(); + const auto& loop_ids = parent_expr->get_loop_ids(); const auto store = std::make_shared(parent->output(port), get_count(data_expr->get_input_port_descriptor(0))); - PortDescriptorUtils::set_port_descriptor_ptr(store->output(0), parent_output.get_descriptor_ptr()->clone()); - const auto store_expr = linear_ir.create_expression(store, {input_connector}); const auto& insertion_pos = linear_ir.find_after(std::reverse_iterator(data_expr_it), parent_expr).base(); - linear_ir.insert(insertion_pos, store_expr); - linear_ir.replace_input(data_expr->get_input_port(0), store_expr->get_output_port_connector(0)); - // Copy Loop identifies - store_expr->set_loop_ids(loop_ids); - - // Need to update all the corresponding Loops with the same Exit Point - const auto prev_exit_point = parent_output; - // The previous exit point but one output port can have several consumers that can be potential exit points - // So we should verify on the possible future exit points - const auto consumer_inputs = input_connector->get_consumers(); - const auto should_be_saved = 
std::any_of(consumer_inputs.begin(), consumer_inputs.end(), - [&data_expr](const ExpressionPort& input_port) { - const auto expr = input_port.get_expr(); - // Skip the current data expr since the input of the expr is changed to Store expr - if (expr == data_expr) - return false; - const auto& node = expr->get_node(); - return ov::is_type(node) || ov::is_type(node); - }); - const auto new_exit_point = store_expr->get_output_port(0); - const auto new_exit_points = should_be_saved ? std::vector{prev_exit_point, new_exit_point} - : std::vector{new_exit_point}; - loop_manager->update_loops_port(loop_ids, prev_exit_point, new_exit_points, false); + linear_ir.insert_node(store, std::vector{ parent_output }, loop_ids, true, insertion_pos, { data_expr->get_input_port(0) }); return true; } diff --git a/src/common/snippets/src/lowered/pass/insert_loops.cpp b/src/common/snippets/src/lowered/pass/insert_loops.cpp index 3eab6e97df33fb..58ecf6310e2de1 100644 --- a/src/common/snippets/src/lowered/pass/insert_loops.cpp +++ b/src/common/snippets/src/lowered/pass/insert_loops.cpp @@ -27,32 +27,6 @@ std::vector get_outer_loop_ids(const ExpressionPtr& expr, size_t loop_id InsertLoops::InsertLoops() : Pass() {} -void InsertLoops::filter_ports(std::vector& loop_entries, std::vector& loop_exits) { - std::vector new_loop_entries; - std::vector new_loop_exits; - new_loop_entries.reserve(loop_entries.size()); - new_loop_exits.reserve(loop_exits.size()); - - for (const auto& loop_entry_point : loop_entries) { - const auto& expr = loop_entry_point.expr_port->get_expr(); - const auto ma = ov::as_type_ptr(expr->get_node()); - if (ma && ma->is_memory_access_input_port(loop_entry_point.expr_port->get_index())) { - new_loop_entries.push_back(loop_entry_point); - } - } - - for (const auto& loop_exit_point : loop_exits) { - const auto& expr = loop_exit_point.expr_port->get_expr(); - const auto ma = ov::as_type_ptr(expr->get_node()); - if (ma && ma->is_memory_access_output_port(loop_exit_point.expr_port->get_index())) { - new_loop_exits.push_back(loop_exit_point); - } - } - - loop_entries = new_loop_entries; - loop_exits = new_loop_exits; -} - void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, size_t loop_id, bool has_outer_loop) { const auto loop_info = loop_manager->get_loop_info(loop_id); auto loop_entries = loop_info->get_entry_points(); @@ -63,9 +37,6 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& LinearIR::constExprIt loop_begin_pos, loop_end_pos; loop_manager->get_loop_bounds(linear_ir, loop_id, loop_begin_pos, loop_end_pos); - // Remove non MemoryAccess ports since Loop can have only GPR inputs - filter_ports(loop_entries, loop_exits); - const auto in_out_num = loop_entries.size() + loop_exits.size(); std::vector is_incremented; std::vector ptr_increments, finalization_offsets, io_data_sizes; @@ -88,24 +59,18 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& init_params(loop_entries); init_params(loop_exits); + const auto outer_loop_ids = get_outer_loop_ids(*loop_begin_pos, loop_id); + const auto& loop_begin = std::make_shared(); - const auto& loop_begin_expr = linear_ir.create_expression(loop_begin, std::vector{}); - linear_ir.insert(loop_begin_pos, loop_begin_expr); + const auto loop_begin_expr = *linear_ir.insert_node(loop_begin, std::vector{}, outer_loop_ids, false, loop_begin_pos); const auto& loop_end = std::make_shared( loop_begin->output(0), work_amount, work_amount_increment, is_incremented, 
ptr_increments, finalization_offsets, io_data_sizes, loop_entries.size(), loop_exits.size(), loop_id); loop_end->has_outer_loop = has_outer_loop; - // Add LoopBegin port connector loop_end_inputs.push_back(loop_begin_expr->get_output_port_connector(0)); - - const auto& loop_end_expr = linear_ir.create_expression(loop_end, loop_end_inputs); - const auto& it = linear_ir.insert(loop_end_pos, loop_end_expr); - - const auto outer_loop_ids = get_outer_loop_ids(*std::prev(it), loop_id); - loop_begin_expr->set_loop_ids(outer_loop_ids); - loop_end_expr->set_loop_ids(outer_loop_ids); + linear_ir.insert_node(loop_end, loop_end_inputs, outer_loop_ids, false, loop_end_pos); } bool InsertLoops::run(LinearIR& linear_ir) { @@ -125,7 +90,7 @@ bool InsertLoops::run(LinearIR& linear_ir) { continue; // Outer Loop ----> Inner Loop - const auto expr_loops = expr->get_loop_ids(); + const auto& expr_loops = expr->get_loop_ids(); const auto loop_depth = expr_loops.size(); for (size_t i = 0; i < loop_depth; ++i) { const auto loop_id = expr_loops[i]; diff --git a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp index d49ee0b1794204..6ccfbf1094cdc3 100644 --- a/src/common/snippets/src/lowered/pass/insert_perf_count.cpp +++ b/src/common/snippets/src/lowered/pass/insert_perf_count.cpp @@ -5,7 +5,6 @@ #include "snippets/lowered/pass/insert_perf_count.hpp" #include "snippets/lowered/linear_ir.hpp" -#include "snippets/snippets_isa.hpp" #include "snippets/itt.hpp" namespace ov { @@ -13,47 +12,44 @@ namespace snippets { namespace lowered { namespace pass { +InsertPerfCount::InsertPerfCount(std::map boundary_op_names) + : Pass(), m_boundary_op_names(std::move(boundary_op_names)) { +} + bool InsertPerfCount::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InsertPerfCount") if (linear_ir.empty()) return false; + if (m_boundary_op_names.empty()) { + const auto& first_op_name = linear_ir.begin()->get()->get_node()->get_friendly_name(); + const auto& last_op_name = linear_ir.rbegin()->get()->get_node()->get_friendly_name(); + m_boundary_op_names.insert({first_op_name, last_op_name}); + } - auto is_parameter = [](const std::shared_ptr& node) { - return ov::is_type(node); - }; - auto is_result = [](const std::shared_ptr& node) { - return ov::is_type(node); - }; - - // mark perf_count_begin and perf_count_end position - auto perf_count_begin_pos = linear_ir.cbegin(); - auto perf_count_end_pos = perf_count_begin_pos; - bool first_result_marked = false; + size_t seq_number = 0; for (auto expr_it = linear_ir.cbegin(); expr_it != linear_ir.cend(); expr_it++) { - const auto expr = *expr_it; - const auto& node = expr->get_node(); - if (is_parameter(node)) - perf_count_begin_pos = expr_it; - - if (is_result(node) && !first_result_marked) { - perf_count_end_pos = expr_it; - first_result_marked = true; + const auto& op_name = expr_it->get()->get_node()->get_friendly_name(); + const auto& found = m_boundary_op_names.find(op_name); + if (found != m_boundary_op_names.end()) { + const auto perf_count_begin_pos = expr_it; + auto perf_count_end_pos = expr_it; + while (perf_count_end_pos->get()->get_node()->get_friendly_name() != found->second && + perf_count_end_pos != linear_ir.cend()) { + perf_count_end_pos++; + } + OPENVINO_ASSERT(perf_count_end_pos != linear_ir.cend(), "Failed to find requested op name to insert PerfCountEnd"); + const auto& perf_count_begin = std::make_shared(); + 
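// Suffix both counter names with the running ordinal below so that several measured regions can be told apart in the profiling output. +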
perf_count_begin->set_friendly_name(std::string("PerfCount_Begin_") + std::to_string(seq_number)); + const auto empty_inputs = std::vector{}; + linear_ir.insert_node(perf_count_begin, empty_inputs, perf_count_begin_pos->get()->get_loop_ids(), false, perf_count_begin_pos); + + const auto& perf_count_end = std::make_shared(perf_count_begin->output(0)); + perf_count_end->set_friendly_name(std::string("PerfCount_End_") + std::to_string(seq_number)); + // linear_ir.insert has insert before behavior, need to increment perf_count_end_pos + linear_ir.insert_node(perf_count_end, empty_inputs, perf_count_end_pos->get()->get_loop_ids(), false, next(perf_count_end_pos)); + seq_number++; } } - - // insert perf_count_begin after last parameter - // linear_ir.insert has insert before behavior, need move to next. - perf_count_begin_pos = std::next(perf_count_begin_pos); - const auto& perf_count_begin = std::make_shared(); - const auto& perf_count_begin_expr = linear_ir.create_expression(perf_count_begin, std::vector{}); - linear_ir.insert(perf_count_begin_pos, perf_count_begin_expr); - - // insert perf_count_end before first result - const auto& perf_count_end = std::make_shared(perf_count_begin->output(0)); - perf_count_end->set_friendly_name("last_parameter_to_first_result"); - const auto& perf_count_end_expr = linear_ir.create_expression(perf_count_end, std::vector{}); - linear_ir.insert(perf_count_end_pos, perf_count_end_expr); - return true; } diff --git a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp index cc685c1851157a..2727d7efbd8f83 100644 --- a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp +++ b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp @@ -24,7 +24,8 @@ void InsertTailLoop::propagate_updated_subtensor_through_loop(const LinearIR& li // First step: set new dim value to the corresponding entry_points' dimensions if (new_dim_value != existing_subtensor_value) { for (const auto& port : loop_info->get_entry_points()) { - if (port.is_incremented) { + const auto& reg_type = port.expr_port->get_descriptor_ptr()->get_reg().type; + if ((port.is_incremented && reg_type == RegType::gpr) || (reg_type == RegType::vec)) { const auto& expr = port.expr_port->get_expr(); const auto node = expr->get_node(); auto desc = port.expr_port->get_descriptor_ptr(); @@ -48,7 +49,8 @@ void InsertTailLoop::propagate_updated_subtensor_through_loop(const LinearIR& li } auto update_only_dim_idx_with_subtensor_value = [&](const LinearIR::LoopManager::LoopPort& port) { - if (port.is_incremented) { + const auto& reg_type = port.expr_port->get_descriptor_ptr()->get_reg().type; + if ((port.is_incremented && reg_type == RegType::gpr) || (reg_type == RegType::vec)) { auto desc = port.expr_port->get_descriptor_ptr(); const auto expr = port.expr_port->get_expr(); const auto parent_desc = expr->get_input_port_connector(port.expr_port->get_index())->get_source().get_descriptor_ptr(); @@ -251,14 +253,13 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, LinearIR::constExprIt tail_end, const size_t tail_size) { const auto& config = linear_ir.get_config(); - auto insertFill = [tail_size](const ov::Input& input) -> std::shared_ptr { + auto insertFill = [tail_size](const ov::Input& input, const ExpressionPort& source) -> std::shared_ptr { std::shared_ptr fill = nullptr; auto& rt = input.get_rt_info(); auto fill_rt = rt.find("set_fill"); if (fill_rt != rt.end()) { const auto fill_value = fill_rt->second.as(); - fill = 
std::make_shared(input.get_source_output(), tail_size, fill_value); - input.get_node()->set_argument(input.get_index(), fill); + fill = std::make_shared(source.get_expr()->get_node()->output(source.get_index()), tail_size, fill_value); } return fill; }; @@ -279,9 +280,9 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, if (config.m_need_fill_tail_register && (ov::is_type(op) || ov::is_type(op))) { - for (size_t i = 0; i < op->inputs().size(); ++i) { - if (auto fill = insertFill(op->input(i))) { - const auto& input = expr->get_input_port_connector(i); + for (size_t i = 0; i < expr->get_input_count(); ++i) { + const auto& input = expr->get_input_port_connector(i); + if (auto fill = insertFill(op->input(i), input->get_source())) { const auto consumers = input->get_consumers(); // If there are several consumers, fill expression must be inserted before first of them auto fst_consumer = std::min_element(consumers.cbegin(), consumers.cend(), [&](ExpressionPort lhs, ExpressionPort rhs) { @@ -289,15 +290,13 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, auto rhs_it = linear_ir.find(rhs.get_expr()); return std::distance(linear_ir.cbegin(), lhs_it) < std::distance(linear_ir.cbegin(), rhs_it); }); - const auto insert_pos = linear_ir.find(fst_consumer->get_expr()); - auto fill_expr = linear_ir.create_expression(fill, {input}); - linear_ir.insert(insert_pos, fill_expr); - linear_ir.replace_input(consumers, fill_expr->get_output_port_connector(0)); + const auto fill_expr = *linear_ir.insert_node(fill, std::vector{ input->get_source() }, expr->get_loop_ids(), true, + linear_ir.find(fst_consumer->get_expr()), consumers); + // in_reg == out_reg since we want to modify vector reg inplace const auto reg = expr->get_input_port_descriptor(0)->get_reg(); fill_expr->get_input_port_descriptor(0)->set_reg(reg); fill_expr->get_output_port_descriptor(0)->set_reg(reg); - fill_expr->set_loop_ids(expr->get_loop_ids()); } } } else if (const auto memory_access = std::dynamic_pointer_cast(op)) { diff --git a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp index 48f86cb2092972..3f9de12a5a0523 100644 --- a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp +++ b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp @@ -16,7 +16,6 @@ namespace pass { bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::LoadMoveBroadcastToBroadcastLoad") - const auto& loop_manager = linear_ir.get_loop_manager(); bool modified = false; for (auto expr_it = linear_ir.cbegin(); expr_it != linear_ir.cend(); expr_it++) { @@ -30,6 +29,9 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { if (!load) continue; + OPENVINO_ASSERT(expr->get_loop_ids() == parent_expr->get_loop_ids(), + "The pair of Load and MoveBroadcast expressions must be in the same loops!"); + // Cannot rewrite Broadcast + Load if load has more than 1 user // or more than one input, or if Broadcast has several inputs const auto load_consumers_inputs = interm_connector->get_consumers(); @@ -45,20 +47,7 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { const auto& outshape = move_broadcast->get_output_partial_shape(0); const auto broadcastload = std::make_shared(load->input_value(0), *outshape.rbegin(), load->get_offset()); - const auto move_consumers = 
expr->get_output_port_connector(0)->get_consumers(); - PortDescriptorUtils::set_port_descriptor_ptr(broadcastload->output(0), expr->get_output_port(0).get_descriptor_ptr()->clone()); - const auto broadcastload_expr = linear_ir.create_expression(broadcastload, { parent_expr->get_input_port_connector(0) }); - // Copy Loop identifies - broadcastload_expr->set_loop_ids(parent_expr->get_loop_ids()); - // Update the corresponding Loops with - loop_manager->update_loops_port(parent_expr->get_loop_ids(), parent_expr->get_input_port(0), {broadcastload_expr->get_input_port(0)}, true); - - const auto mv_expr_it = expr_it; - const auto insertion_pos = std::next(expr_it); - expr_it = linear_ir.insert(insertion_pos, broadcastload_expr); - linear_ir.erase(linear_ir.find_before(mv_expr_it, parent_expr)); - linear_ir.erase(mv_expr_it); - linear_ir.replace_input(move_consumers, broadcastload_expr->get_output_port_connector(0)); + expr_it = linear_ir.replace_with_node({ parent_expr, expr }, broadcastload); modified |= true; } } diff --git a/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp b/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp index b423eeda46a5cb..e336b62c00238f 100644 --- a/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp +++ b/src/common/snippets/src/lowered/pass/move_result_out_of_loop.cpp @@ -33,7 +33,7 @@ bool MoveResultOutOfLoop::run(LinearIR& linear_ir) { const auto& input_connector = expr->get_input_port_connector(0); const auto& parent_expr = input_connector->get_source().get_expr(); - const auto parent_loop_ids = parent_expr->get_loop_ids(); + const auto& parent_loop_ids = parent_expr->get_loop_ids(); // Parent is out of Loop: just verify that Result is after Parent if (parent_loop_ids.empty()) { diff --git a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp index 4174f928352289..2ec613495e9a13 100644 --- a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp +++ b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp @@ -35,7 +35,7 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { const auto& pm = matcher->get_pattern_map(); const auto softmax = pm.at(match_softmax); const auto softmax_expr = *expr_it; - const auto softmax_loop_ids = softmax_expr->get_loop_ids(); + const auto& softmax_loop_ids = softmax_expr->get_loop_ids(); const auto& input_connector = softmax_expr->get_input_port_connector(0); const auto& output_connector = softmax_expr->get_output_port_connector(0); const auto tensor_out = softmax_expr->get_output_port_descriptor(0)->get_shape(); @@ -95,9 +95,8 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { const auto mul = push_node(std::make_shared(exp.second, broadcast_pow.second)); // Transfer original ExpressionPorts - linear_ir.replace_input((*max.first)->get_input_port(0), input_connector); - linear_ir.replace_input((*sub.first)->get_input_port(0), input_connector); - linear_ir.replace_input(output_connector->get_consumers(), (*mul.first)->get_output_port_connector(0)); + replace_input_port_connectors({ max.first->get()->get_input_port(0), sub.first->get()->get_input_port(0) }, input_connector); + replace_input_port_connectors(output_connector->get_consumers(), (*mul.first)->get_output_port_connector(0)); // Markup of Mul Loop loop_manager->mark_loop(mul.first, expr_it, inner_work_amount, m_vector_size, 0, diff --git a/src/common/snippets/src/lowered/pass/split_loops.cpp 
b/src/common/snippets/src/lowered/pass/split_loops.cpp index ba036eca8011f9..3a1c113152c545 100644 --- a/src/common/snippets/src/lowered/pass/split_loops.cpp +++ b/src/common/snippets/src/lowered/pass/split_loops.cpp @@ -48,7 +48,7 @@ bool SplitLoops::run(LinearIR& linear_ir) { for (const auto& entry_point : loop->get_entry_points()) { const auto& parent_port = entry_point.expr_port->get_port_connector_ptr()->get_source(); const auto& parent_expr = parent_port.get_expr(); - const auto parent_loop_ids = parent_expr->get_loop_ids(); + const auto& parent_loop_ids = parent_expr->get_loop_ids(); if (parent_loop_ids.empty()) continue; diff --git a/src/common/snippets/src/lowered/pass/validate_loops.cpp b/src/common/snippets/src/lowered/pass/validate_loops.cpp index 2377feec95c477..99698a6b4329bd 100644 --- a/src/common/snippets/src/lowered/pass/validate_loops.cpp +++ b/src/common/snippets/src/lowered/pass/validate_loops.cpp @@ -44,7 +44,7 @@ bool ValidateLoops::run(LinearIR& linear_ir) { auto validate_loop_ports = [&loop_manager, &dim_indexes, &validated_nested_loops, &is_already_verified](const std::vector& loop_ports) { for (const auto& loop_port : loop_ports) { const auto expr = loop_port.expr_port->get_expr(); - const auto loop_ids = expr->get_loop_ids(); + const auto& loop_ids = expr->get_loop_ids(); // If loop_ids of the current port is subsequence of already validated IDs, skip if (is_already_verified(loop_ids)) continue; diff --git a/src/common/snippets/src/lowered/port_descriptor.cpp b/src/common/snippets/src/lowered/port_descriptor.cpp index e8c4bdd0626b47..63269fa013b1c8 100644 --- a/src/common/snippets/src/lowered/port_descriptor.cpp +++ b/src/common/snippets/src/lowered/port_descriptor.cpp @@ -43,7 +43,7 @@ PortDescriptorPtr PortDescriptor::clone() const { return desc; } -std::string PortDescriptor::serialize() const { +std::string PortDescriptor::serialize() const { std::stringstream ss; ss << m_tensor_shape.size() << " "; for (auto val : m_tensor_shape) @@ -54,12 +54,14 @@ std::string PortDescriptor::serialize() const { ss << m_layout.size() << " "; for (auto val : m_layout) ss << val << " "; + ss << regTypeToStr(m_reg.type) << "["<< m_reg.idx << "]"; return ss.str(); } bool operator==(const PortDescriptor& lhs, const PortDescriptor& rhs) { return lhs.m_tensor_shape == rhs.m_tensor_shape && lhs.m_layout == rhs.m_layout && - lhs.m_subtensor_shape == rhs.m_subtensor_shape; + lhs.m_subtensor_shape == rhs.m_subtensor_shape && + lhs.m_reg == rhs.m_reg; } void PortDescriptorUtils::init_default(std::vector& in_descs, diff --git a/src/common/snippets/src/op/serialization_node.cpp b/src/common/snippets/src/op/serialization_node.cpp index a91c63beb9402b..dde0f0ae6aa8e9 100644 --- a/src/common/snippets/src/op/serialization_node.cpp +++ b/src/common/snippets/src/op/serialization_node.cpp @@ -40,27 +40,38 @@ std::shared_ptr SerializationNode::clone_with_new_inputs(const OutputVecto } bool SerializationNode::visit_attributes(AttributeVisitor &visitor) { + std::vector in_regs, out_regs; + std::vector in_reg_types, out_reg_types; std::vector>> shapes; for (size_t i = 0; i < m_expr->get_input_count(); i++) { - const auto &shape = m_expr->get_input_port_descriptor(i)->get_shape(); + const auto& desc = m_expr->get_input_port_descriptor(i); + const auto &shape = desc->get_shape(); if (!shape.empty()) shapes.emplace_back("in_shape_" + std::to_string(i), shape); + in_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); + in_regs.emplace_back(desc->get_reg().idx); } for (size_t i = 0; i < 
m_expr->get_output_count(); i++) { - const auto &shape = m_expr->get_output_port_descriptor(i)->get_shape(); + const auto& desc = m_expr->get_output_port_descriptor(i); + const auto &shape = desc->get_shape(); if (!shape.empty()) shapes.emplace_back("out_shape_" + std::to_string(i), shape); + out_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); + out_regs.emplace_back(desc->get_reg().idx); } - auto loop_ids = m_expr->get_loop_ids(); - auto rinfo = m_expr->get_reg_info(); - if (!rinfo.first.empty()) - visitor.on_attribute("in_regs", rinfo.first); - if (!rinfo.second.empty()) - visitor.on_attribute("out_regs", rinfo.second); + if (!in_regs.empty()) { + visitor.on_attribute("in_regs", in_regs); + visitor.on_attribute("in_reg_types", in_reg_types); + } + if (!out_regs.empty()) { + visitor.on_attribute("out_regs", out_regs); + visitor.on_attribute("out_reg_types", out_reg_types); + } for (auto& s : shapes) visitor.on_attribute(s.first, s.second); + auto loop_ids = m_expr->get_loop_ids(); visitor.on_attribute("loop_ids", loop_ids); m_expr->get_node()->visit_attributes(visitor); return true; diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index 7bbbd8af8230b8..6524dcda3d9605 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -343,10 +343,6 @@ std::shared_ptr Subgraph::convert_body_to_linear_ir(size_t min_parallel_work_amount, size_t min_kernel_work_amount, const std::shared_ptr& shape_infer_factory) { lowered::Config lowering_config; - lowering_config.m_save_expressions = config.m_has_domain_sensitive_ops; -#ifdef SNIPPETS_DEBUG_CAPS - lowering_config.m_save_expressions = lowering_config.m_save_expressions || (lowering_config.perf_count_mode != lowered::PerfCountMode::Disabled); -#endif lowering_config.m_need_fill_tail_register = config.m_has_domain_sensitive_ops; lowering_config.m_loop_depth = tileRank; lowering_config.m_enable_domain_optimization = !config.m_has_domain_sensitive_ops; @@ -475,8 +471,8 @@ snippets::Schedule Subgraph::generate_from_linear_ir(const std::shared_ptr& adapter) override { if (const auto& a = ov::as_type>>(&adapter)) { m_hash = hash_combine(hash_combine(m_hash, name), a->get()->get_info().variable_id); - } else if (const auto& a = - ov::as_type>>(&adapter)) { - if (name == "value" && m_node_type_name == "Constant") { - m_hash = hash_combine(m_hash, AttrType::constant); - const int64_t size = a->get()->size(); - m_hash = hash_combine(hash_combine(m_hash, AttrType::size), size); - auto data = static_cast(a->get()->get_ptr()); - for (int64_t i = 0; i < size; i++) { - m_hash = hash_combine(m_hash, data[i]); - } - } } else if (const auto& a = ov::as_type>>(&adapter)) { if (name == "value" && m_node_type_name == "Constant") { diff --git a/src/common/snippets/tests/include/lowering_utils.hpp b/src/common/snippets/tests/include/lowering_utils.hpp index fd9f7932ccb652..2ee6840e78618b 100644 --- a/src/common/snippets/tests/include/lowering_utils.hpp +++ b/src/common/snippets/tests/include/lowering_utils.hpp @@ -46,7 +46,7 @@ class DummyGenerator : public ov::snippets::Generator { std::shared_ptr clone() const override { return std::make_shared(target); } protected: - opRegType get_specific_op_reg_type(const std::shared_ptr& op) const override { return vec2vec; }; + ov::snippets::RegType get_op_out_reg_type(const ov::Output& out) const override { return ov::snippets::RegType::vec; }; }; class LoweringTests : public TransformationTestsF { diff --git 
a/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp index 5931682325639f..a3c65a3d913ab1 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp @@ -53,7 +53,6 @@ TSGeneralForward::TSGeneralForward() { TSGeneralBackward::TSGeneralBackward() { MATCHER_SCOPE(TSGeneralBackward); ADD_MATCHER(this, TSUnaryBackward); - ADD_MATCHER(this, TSUnaryBackward); ADD_MATCHER(this, TSBinaryBackward); ADD_MATCHER(this, TSConcatBackward); ADD_MATCHER(this, TSSplitBackward); diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp index 5814634e7408f5..37f07a9e707669 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp @@ -38,6 +38,8 @@ using NodePair = std::pair; TSUnaryForward::TSUnaryForward() { MATCHER_SCOPE(TSUnaryForward); + // We consider HardSigmoid, Swish, Selu, ConvertLike as unary ops + // and handle only 0th input of these ops. create_pattern(true); + ov::op::v1::ConvertLike>(true, {0}); auto ts_unary_sinking_function = [this](const std::shared_ptr& main_node, const utils::TransposeInputsInfo& transpose_info) -> bool { bool res = utils::sink_forward::UpdateInputTransposes(main_node, transpose_info, {0}); @@ -64,7 +66,7 @@ TSUnaryForward::TSUnaryForward() { } TSUnaryBackward::TSUnaryBackward() { - MATCHER_SCOPE(TSUnaryBackwardMultiConsumers); + MATCHER_SCOPE(TSUnaryBackward); auto unary_restrictions = [](const Output& output) -> bool { return CheckTransposeConsumers(output); diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp index 38073bc8848e17..b07e2805da16dd 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp @@ -147,9 +147,11 @@ bool HasDynamicRankInput(const NodePtr& node) { return false; } -ov::Rank::value_type GetMaxInputRank(const NodePtr& node) { +ov::Rank::value_type GetMaxInputRank(const NodePtr& node, const std::vector& input_indexes) { ov::Rank::value_type max_input_rank = 0; - for (auto& input_node : node->input_values()) { + + for (const auto& idx : input_indexes) { + const auto& input_node = node->get_input_source_output(idx); const ov::Rank output_rank = input_node.get_partial_shape().rank(); if (output_rank.is_dynamic()) return -1; @@ -213,7 +215,7 @@ bool UpdateInputTransposes(const NodePtr& main_node, if (transpose_input_info.isEmpty() || HasDynamicRankInput(main_node)) return false; - const auto max_input_rank = GetMaxInputRank(main_node); + const auto max_input_rank = GetMaxInputRank(main_node, input_indexes); if (max_input_rank < 0) return false; @@ -303,7 +305,7 @@ NodeVector InsertTransposeBeforeNode(const NodePtr& main_node, NodeVector new_nodes; - const auto max_input_rank = GetMaxInputRank(main_node); + const auto max_input_rank = GetMaxInputRank(main_node, input_indexes); if (max_input_rank < 0) return {}; diff --git a/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp b/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp index 
8076edf43b2eb8..ca5db5f332bfa6 100644 --- a/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp +++ b/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp @@ -112,7 +112,7 @@ NodePtr UnaryFactory::create(const OutputVector& inputs) const { template <> NodePtr UnaryFactory::create(const OutputVector& inputs) const { - auto like = std::make_shared(element::f64, Shape{}, 1); + auto like = std::make_shared(element::f64, Shape{1, 2, 3, 2, 1, 1}, 1); return std::make_shared(inputs[0], like); } @@ -160,6 +160,32 @@ std::shared_ptr CreateFunctionTransposeAfter(const FactoryPtr& unary_ return std::make_shared(transpose0, ov::ParameterVector{X}); } +// We consider HardSigmoid, Swish, Selu, ConvertLike as unary ops +// and handle only 0th input of these ops. +// Transpose on 2nd input should be ignored. +namespace ignore_transpose_on_second_input { +std::shared_ptr CreateFunctionTransposeBefore(const FactoryPtr& unary_factory, + size_t num_unary_ops, + const Shape& input_shape, + element::Type input_type) { + auto X = std::make_shared(input_type, input_shape); + + NodePtr in_op = X; + for (size_t i = 0; i < num_unary_ops; ++i) { + in_op = unary_factory->create({in_op}); + + // Connect Transpose to 2nd input of the main node + std::vector order(in_op->input(1).get_shape().size()); + std::iota(order.rbegin(), order.rend(), 0); + auto ng_order0 = std::make_shared(element::u64, Shape{order.size()}, order); + auto transpose0 = std::make_shared(in_op->input_value(1), ng_order0); + in_op->input(1).replace_source_output(transpose0); + } + + return std::make_shared(in_op, ov::ParameterVector{X}); +} +} // namespace ignore_transpose_on_second_input + NodePtr CreateReshape(const NodePtr& parent_node, const Shape& input_shape) { const size_t mul = std::accumulate(input_shape.begin(), input_shape.end(), (size_t)1, std::multiplies()); auto reshape_const = std::make_shared(element::u64, Shape{1}, Shape{mul}); @@ -449,6 +475,21 @@ auto test_forward = []() { return wrapper(test_case); }; +auto test_forward_unary_with_multiple_inputs = []() { + TestCase test_case; + test_case.main_node = std::vector{CREATE_UNARY_FACTORY(HardSigmoid), + CREATE_UNARY_FACTORY(Selu), + CREATE_UNARY_FACTORY(ConvertLike), + CREATE_UNARY_FACTORY(Swish)}; + test_case.transformation = CREATE_PASS_FACTORY(TSUnaryForward); + test_case.num_main_ops = {1, 10}; + test_case.test_model = ignore_transpose_on_second_input::CreateFunctionTransposeBefore; + test_case.ref_model = ignore_transpose_on_second_input::CreateFunctionTransposeBefore; + test_case.input_shape = {1, 96, 55, 55}; + test_case.type = element::f32; + return wrapper(test_case); +}; + auto test_backward = []() { TestCase test_case; test_case.main_node = unary_factories; @@ -551,6 +592,11 @@ INSTANTIATE_TEST_SUITE_P(TSUnaryForwardTestSuite, transpose_sinking::testing::unary::test_forward(), TransposeSinkingUnaryTestFixture::get_test_name); +INSTANTIATE_TEST_SUITE_P(TSUnaryForwardMultipleInputsTestSuite, + TransposeSinkingUnaryTestFixture, + transpose_sinking::testing::unary::test_forward_unary_with_multiple_inputs(), + TransposeSinkingUnaryTestFixture::get_test_name); + INSTANTIATE_TEST_SUITE_P(TSUnaryBackwardTestSuite, TransposeSinkingUnaryTestFixture, transpose_sinking::testing::unary::test_backward(), diff --git a/src/common/transformations/tests/utils/compare_functions_test.cpp b/src/common/transformations/tests/utils/compare_functions_test.cpp index 185fab308d3dc4..d50053a23bb5fe 100644 --- 
a/src/common/transformations/tests/utils/compare_functions_test.cpp +++ b/src/common/transformations/tests/utils/compare_functions_test.cpp @@ -526,12 +526,6 @@ class DummyConstant : public op::Op { return true; } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override { - return true; - } - OPENVINO_SUPPRESS_DEPRECATED_END - // Don't constant fold a constant; it would make a copy bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override { return false; diff --git a/src/common/transformations/tests/utils/compress_quantize_weights.cpp b/src/common/transformations/tests/utils/compress_quantize_weights.cpp index f76b4c901321d6..4c1eef7cea5489 100644 --- a/src/common/transformations/tests/utils/compress_quantize_weights.cpp +++ b/src/common/transformations/tests/utils/compress_quantize_weights.cpp @@ -315,6 +315,33 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedFP16) comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedBF16) { + { + auto data = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {0.2, 1.2, 1.2}); + auto input_low = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {0.60, 1.45, 1.30}); + auto input_high = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {-0.60, -1.45, -1.30}); + auto output_low = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {0.30, 0.75, 0.65}); + auto output_high = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {-0.30, -0.75, -0.65}); + auto fq = std::make_shared(data, input_low, input_high, output_low, output_high, 255); + model = std::make_shared(NodeVector{fq}, ParameterVector{}); + + manager.register_pass(); + } + + { + auto data = opset8::Constant::create(element::i8, Shape{3, 1, 1, 1}, {-42, -105, -118}); + auto convert = std::make_shared(data, element::bf16); + auto scale = opset8::Constant::create(element::bf16, Shape{3, 1, 1, 1}, {-0.002325, -0.00592, -0.00509}); + auto mul = std::make_shared(convert, scale); + model_ref = std::make_shared(NodeVector{mul}, ParameterVector{}); + } + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ACCURACY); + + m_abs_threshold = 4e-2f; + m_rel_threshold = 7e-2f; +} + #ifdef OPENVINO_ARCH_ARM64 // Ticket: 122666 TEST_F(TransformationTestsF, DISABLED_NegativeCompressQuantizeWeights) { diff --git a/src/common/util/include/openvino/util/common_util.hpp b/src/common/util/include/openvino/util/common_util.hpp index b1d731ed318caf..fc1cea05887f39 100644 --- a/src/common/util/include/openvino/util/common_util.hpp +++ b/src/common/util/include/openvino/util/common_util.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -119,6 +120,21 @@ bool contains(const std::vector& vec, const V& v) { }); } +/** + * @brief multiply vector's values + * @param vec - vector with values + * @return result of multiplication + */ +template +T product(std::vector const& vec) { + if (vec.empty()) + return 0; + T ret = vec[0]; + for (size_t i = 1; i < vec.size(); ++i) + ret *= vec[i]; + return ret; +} + /** * @brief Associative containers doesnt work with remove_if algorithm * @tparam ContainerT diff --git a/src/common/util/src/file_util.cpp b/src/common/util/src/file_util.cpp index 9ff7923cfd89b9..7b78461da518fc 100644 --- a/src/common/util/src/file_util.cpp +++ 
b/src/common/util/src/file_util.cpp @@ -148,7 +148,11 @@ std::wstring join_paths(const std::wstring& s1, const std::wstring& s2) { } else if (s1.size() > 0) { rc = s1; if (rc[rc.size() - 1] != '/') { +# ifndef _WIN32 rc += '/'; +# else + rc += '\\'; +# endif } rc += s2; } else { diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 15767de248131d..351978fec651f5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -157,17 +157,3 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ FILES_MATCHING PATTERN "*.hpp" PATTERN "*.h") - -configure_package_config_file(${OpenVINO_SOURCE_DIR}/cmake/templates/ngraphConfig.cmake.in - ${CMAKE_BINARY_DIR}/ngraphConfig.cmake - INSTALL_DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR}) - -write_basic_package_version_file(${CMAKE_BINARY_DIR}/ngraphConfigVersion.cmake - VERSION ${OpenVINO_VERSION_MAJOR}.${OpenVINO_VERSION_MINOR}.${OpenVINO_VERSION_PATCH} - COMPATIBILITY SameMajorVersion) - -install(FILES ${CMAKE_BINARY_DIR}/ngraphConfig.cmake - ${CMAKE_BINARY_DIR}/ngraphConfigVersion.cmake - DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV} - ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) diff --git a/src/core/include/ngraph/descriptor/input.hpp b/src/core/include/ngraph/descriptor/input.hpp deleted file mode 100644 index f4966373085105..00000000000000 --- a/src/core/include/ngraph/descriptor/input.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/descriptor/tensor.hpp" -#include "openvino/core/descriptor/input.hpp" - -namespace ngraph { -using ov::Node; -namespace descriptor { - -// Describes a tensor that is an input to an op, directly or indirectly via a tuple -using ov::descriptor::Input; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/descriptor/output.hpp b/src/core/include/ngraph/descriptor/output.hpp deleted file mode 100644 index 3e26c8941acf94..00000000000000 --- a/src/core/include/ngraph/descriptor/output.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/node_output.hpp" -#include "openvino/core/descriptor/output.hpp" - -namespace ngraph { -using ov::Node; -namespace descriptor { -// Describes an output tensor of an op -using ov::descriptor::Output; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/descriptor/tensor.hpp b/src/core/include/ngraph/descriptor/tensor.hpp deleted file mode 100644 index f26db08e34349c..00000000000000 --- a/src/core/include/ngraph/descriptor/tensor.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ngraph/partial_shape.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" -#include "openvino/core/descriptor/tensor.hpp" - -namespace ngraph { -using ov::TensorLabel; -using ov::TensorLabelVector; -namespace descriptor { -/// \brief Compile-time descriptor of a first-class value that is a tensor. 
-using ov::descriptor::Tensor; -} // namespace descriptor -} // namespace ngraph diff --git a/src/core/include/ngraph/function.hpp b/src/core/include/ngraph/function.hpp index 227518349110f2..7b9a7448a97c53 100644 --- a/src/core/include/ngraph/function.hpp +++ b/src/core/include/ngraph/function.hpp @@ -16,13 +16,9 @@ #include "ngraph/ngraph_visibility.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/assign.hpp" #include "ngraph/op/parameter.hpp" -#include "ngraph/op/read_value.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/op/sink.hpp" -#include "ngraph/op/util/variable.hpp" #include "openvino/core/model.hpp" +#include "openvino/op/util/variable.hpp" namespace ngraph { using Function = ov::Model; diff --git a/src/core/include/ngraph/ngraph.hpp b/src/core/include/ngraph/ngraph.hpp index ad8c918d5229ee..735835feafd182 100644 --- a/src/core/include/ngraph/ngraph.hpp +++ b/src/core/include/ngraph/ngraph.hpp @@ -50,9 +50,6 @@ #include "ngraph/attribute_adapter.hpp" #include "ngraph/attribute_visitor.hpp" -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/evaluator.hpp" #include "ngraph/except.hpp" #include "ngraph/factory.hpp" @@ -65,6 +62,9 @@ #include "ngraph/specialize_function.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/core/descriptor/input.hpp" +#include "openvino/core/descriptor/output.hpp" +#include "openvino/core/descriptor/tensor.hpp" // nGraph opsets #include "ngraph/opsets/opset.hpp" diff --git a/src/core/include/ngraph/node.hpp b/src/core/include/ngraph/node.hpp index 7b5da7e6eb0285..a98e068adf3eb9 100644 --- a/src/core/include/ngraph/node.hpp +++ b/src/core/include/ngraph/node.hpp @@ -32,19 +32,18 @@ #include "ngraph/check.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/deprecated.hpp" -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/node_input.hpp" #include "ngraph/node_output.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/op_annotations.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_value.hpp" #include "ngraph/output_vector.hpp" #include "ngraph/strides.hpp" #include "openvino/core/any.hpp" +#include "openvino/core/descriptor/input.hpp" +#include "openvino/core/descriptor/output.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node.hpp" +#include "openvino/op/util/variable.hpp" +#include "openvino/op/util/variable_value.hpp" namespace ov { namespace op { @@ -57,17 +56,7 @@ namespace ngraph { using ov::Node; -namespace runtime { -class HostTensor; -} -NGRAPH_SUPPRESS_DEPRECATED_START -using HostTensor = runtime::HostTensor; -using HostTensorPtr = std::shared_ptr; -using HostTensorVector = std::vector; -NGRAPH_SUPPRESS_DEPRECATED_END - namespace op { - namespace v0 { using ov::op::v0::Result; } diff --git a/src/core/include/ngraph/node_input.hpp b/src/core/include/ngraph/node_input.hpp index 37c6678de74760..bac74bc0d312be 100644 --- a/src/core/include/ngraph/node_input.hpp +++ b/src/core/include/ngraph/node_input.hpp @@ -17,10 +17,10 @@ #include #include -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node_input.hpp" namespace ngraph { diff --git 
a/src/core/include/ngraph/node_output.hpp b/src/core/include/ngraph/node_output.hpp index 4786b52f34784c..f8f1da44b2eff9 100644 --- a/src/core/include/ngraph/node_output.hpp +++ b/src/core/include/ngraph/node_output.hpp @@ -18,10 +18,10 @@ #include #include -#include "ngraph/descriptor/tensor.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" +#include "openvino/core/descriptor/tensor.hpp" #include "openvino/core/node_output.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/abs.hpp b/src/core/include/ngraph/op/abs.hpp deleted file mode 100644 index ab96e5d413b55e..00000000000000 --- a/src/core/include/ngraph/op/abs.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/abs.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Abs; -} // namespace v0 -using v0::Abs; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/acos.hpp b/src/core/include/ngraph/op/acos.hpp deleted file mode 100644 index f3173555f3a330..00000000000000 --- a/src/core/include/ngraph/op/acos.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/acos.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Acos; -} // namespace v0 -using v0::Acos; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/acosh.hpp b/src/core/include/ngraph/op/acosh.hpp deleted file mode 100644 index 677fab4f318343..00000000000000 --- a/src/core/include/ngraph/op/acosh.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/acosh.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Acosh; -} // namespace v3 -using v3::Acosh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/adaptive_avg_pool.hpp b/src/core/include/ngraph/op/adaptive_avg_pool.hpp deleted file mode 100644 index 25e4a9976eebd9..00000000000000 --- a/src/core/include/ngraph/op/adaptive_avg_pool.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/adaptive_avg_pool.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::AdaptiveAvgPool; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/adaptive_max_pool.hpp b/src/core/include/ngraph/op/adaptive_max_pool.hpp deleted file mode 100644 index 1298dc97a2fc37..00000000000000 --- a/src/core/include/ngraph/op/adaptive_max_pool.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/adaptive_max_pool.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::AdaptiveMaxPool; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/add.hpp b/src/core/include/ngraph/op/add.hpp deleted file mode 100644 index 61584b5138f10f..00000000000000 --- a/src/core/include/ngraph/op/add.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/add.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Add; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/and.hpp b/src/core/include/ngraph/op/and.hpp deleted file mode 100644 index 3ce521f8c6019c..00000000000000 --- a/src/core/include/ngraph/op/and.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "openvino/op/logical_and.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalAnd; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/asin.hpp b/src/core/include/ngraph/op/asin.hpp deleted file mode 100644 index 4ef7fb119d713d..00000000000000 --- a/src/core/include/ngraph/op/asin.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/asin.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Asin; -} // namespace v0 -using v0::Asin; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/asinh.hpp b/src/core/include/ngraph/op/asinh.hpp deleted file mode 100644 index 0f40a770a3f6b4..00000000000000 --- a/src/core/include/ngraph/op/asinh.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/asinh.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Asinh; -} // namespace v3 -using v3::Asinh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/assign.hpp b/src/core/include/ngraph/op/assign.hpp deleted file mode 100644 index af479f8bae8756..00000000000000 --- a/src/core/include/ngraph/op/assign.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/sink.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_extension.hpp" -#include "openvino/op/assign.hpp" - -namespace ngraph { -namespace op { -using ov::op::util::AssignBase; - -namespace v3 { -using ov::op::v3::Assign; -} // namespace v3 -namespace v6 { -using ov::op::v6::Assign; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/atan.hpp b/src/core/include/ngraph/op/atan.hpp deleted file mode 100644 index 4cc94c1709db29..00000000000000 --- a/src/core/include/ngraph/op/atan.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/atan.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Atan; -} // namespace v0 -using v0::Atan; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/atanh.hpp b/src/core/include/ngraph/op/atanh.hpp deleted file mode 100644 index 03b4454b5cf500..00000000000000 --- a/src/core/include/ngraph/op/atanh.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/atanh.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Atanh; -} // namespace v3 -using v3::Atanh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/avg_pool.hpp b/src/core/include/ngraph/op/avg_pool.hpp deleted file mode 100644 index 180787b42d6a3b..00000000000000 --- a/src/core/include/ngraph/op/avg_pool.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/avg_pool.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::AvgPool; -} // namespace v1 - -using v1::AvgPool; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/batch_norm.hpp b/src/core/include/ngraph/op/batch_norm.hpp deleted file mode 100644 index cac36e5b4e2578..00000000000000 --- a/src/core/include/ngraph/op/batch_norm.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/batch_norm.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::BatchNormInference; -} // namespace v0 -namespace v5 { -using ov::op::v5::BatchNormInference; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/batch_to_space.hpp b/src/core/include/ngraph/op/batch_to_space.hpp deleted file mode 100644 index da115a4c0389f3..00000000000000 --- a/src/core/include/ngraph/op/batch_to_space.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/batch_to_space.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::BatchToSpace; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/binary_convolution.hpp b/src/core/include/ngraph/op/binary_convolution.hpp deleted file mode 100644 index a51e91549b6780..00000000000000 --- a/src/core/include/ngraph/op/binary_convolution.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/binary_convolution.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::BinaryConvolution; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/broadcast.hpp b/src/core/include/ngraph/op/broadcast.hpp deleted file mode 100644 index 98c0ac86da51cd..00000000000000 --- a/src/core/include/ngraph/op/broadcast.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/broadcast_base.hpp" -#include "openvino/op/broadcast.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Broadcast; -} // namespace v3 - -namespace v1 { -using ov::op::v1::Broadcast; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/bucketize.hpp b/src/core/include/ngraph/op/bucketize.hpp deleted file mode 100644 index 052d0d13ebc2a3..00000000000000 --- a/src/core/include/ngraph/op/bucketize.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/bucketize.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::Bucketize; -} // namespace v3 -using v3::Bucketize; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/ceiling.hpp b/src/core/include/ngraph/op/ceiling.hpp deleted file mode 100644 index e5c061a4c3e1d7..00000000000000 --- a/src/core/include/ngraph/op/ceiling.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/ceiling.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Ceiling; -} // namespace v0 -using v0::Ceiling; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/clamp.hpp b/src/core/include/ngraph/op/clamp.hpp deleted file mode 100644 index 64a3c2f39f17ac..00000000000000 --- a/src/core/include/ngraph/op/clamp.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/clamp.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Clamp; -} // namespace v0 -using v0::Clamp; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/concat.hpp b/src/core/include/ngraph/op/concat.hpp index 0c509cc057a07c..a7d67014756623 100644 --- a/src/core/include/ngraph/op/concat.hpp +++ b/src/core/include/ngraph/op/concat.hpp @@ -16,7 +16,6 @@ #include -#include "ngraph/op/op.hpp" #include "openvino/op/concat.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/constant.hpp b/src/core/include/ngraph/op/constant.hpp deleted file mode 100644 index e1034db162ba53..00000000000000 --- a/src/core/include/ngraph/op/constant.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/node.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/runtime/shared_buffer.hpp" -#include "ngraph/type/element_type.hpp" -#include "ngraph/type/element_type_traits.hpp" -#include "ngraph/util.hpp" -#include "openvino/op/constant.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Constant; -} // namespace v0 -using v0::Constant; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/convert.hpp b/src/core/include/ngraph/op/convert.hpp deleted file mode 100644 index 8db87dda0129af..00000000000000 --- a/src/core/include/ngraph/op/convert.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/convert.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Convert; -} // namespace v0 -using v0::Convert; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/convert_like.hpp b/src/core/include/ngraph/op/convert_like.hpp deleted file mode 100644 index 60e14bd4a9f2c2..00000000000000 --- a/src/core/include/ngraph/op/convert_like.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/convert_like.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ConvertLike; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/convolution.hpp b/src/core/include/ngraph/op/convolution.hpp deleted file mode 100644 index 3c5c9bab1f9116..00000000000000 --- a/src/core/include/ngraph/op/convolution.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/convolution.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Convolution; -using ov::op::v1::ConvolutionBackpropData; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/cos.hpp b/src/core/include/ngraph/op/cos.hpp deleted file mode 100644 index af59b80a9af6b4..00000000000000 --- a/src/core/include/ngraph/op/cos.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/cos.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Cos; -} // namespace v0 -using v0::Cos; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/cosh.hpp b/src/core/include/ngraph/op/cosh.hpp deleted file mode 100644 index d2c7b8f340a4ea..00000000000000 --- a/src/core/include/ngraph/op/cosh.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/cosh.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Cosh; -} // namespace v0 -using v0::Cosh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/ctc_greedy_decoder.hpp b/src/core/include/ngraph/op/ctc_greedy_decoder.hpp deleted file mode 100644 index 365406cb2a5797..00000000000000 --- a/src/core/include/ngraph/op/ctc_greedy_decoder.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/ctc_greedy_decoder.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::CTCGreedyDecoder; -} // namespace v0 -using v0::CTCGreedyDecoder; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp b/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp deleted file mode 100644 index a2ec282688d451..00000000000000 --- a/src/core/include/ngraph/op/ctc_greedy_decoder_seq_len.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/ctc_greedy_decoder_seq_len.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::CTCGreedyDecoderSeqLen; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/ctc_loss.hpp b/src/core/include/ngraph/op/ctc_loss.hpp deleted file mode 100644 index 36e25d5f7b113b..00000000000000 --- a/src/core/include/ngraph/op/ctc_loss.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/ctc_loss.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::CTCLoss; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/cum_sum.hpp b/src/core/include/ngraph/op/cum_sum.hpp deleted file mode 100644 index 2ecd5bd4f13c47..00000000000000 --- a/src/core/include/ngraph/op/cum_sum.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/cum_sum.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::CumSum; -} // namespace v0 -using v0::CumSum; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/deformable_convolution.hpp b/src/core/include/ngraph/op/deformable_convolution.hpp deleted file mode 100644 index 2ef161637ddb88..00000000000000 --- a/src/core/include/ngraph/op/deformable_convolution.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/deformable_convolution_base.hpp" -#include "openvino/op/deformable_convolution.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::DeformableConvolution; -} // namespace v1 - -namespace v8 { -using ov::op::v8::DeformableConvolution; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/deformable_psroi_pooling.hpp b/src/core/include/ngraph/op/deformable_psroi_pooling.hpp deleted file mode 100644 index 1a561fbb20e08e..00000000000000 --- a/src/core/include/ngraph/op/deformable_psroi_pooling.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/deformable_psroi_pooling.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::DeformablePSROIPooling; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/depth_to_space.hpp b/src/core/include/ngraph/op/depth_to_space.hpp deleted file mode 100644 index 0f2e4341968f93..00000000000000 --- a/src/core/include/ngraph/op/depth_to_space.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/depth_to_space.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::DepthToSpace; -} // namespace v0 -using v0::DepthToSpace; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/detection_output.hpp b/src/core/include/ngraph/op/detection_output.hpp deleted file mode 100644 index 9c60638c8e3a52..00000000000000 --- a/src/core/include/ngraph/op/detection_output.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/detection_output.hpp" - -namespace ngraph { -namespace op { -using DetectionOutputAttrs = ov::op::v0::DetectionOutput::Attributes; - -namespace v0 { -using ov::op::v0::DetectionOutput; -} // namespace v0 -using v0::DetectionOutput; - -namespace v8 { -using ov::op::v8::DetectionOutput; -} // namespace v8 -} // namespace op -} // namespace ngraph
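The detection_output.hpp shim above also carried a convenience alias, using DetectionOutputAttrs = ov::op::v0::DetectionOutput::Attributes, so callers that filled that struct now name the nested type directly. A hedged sketch of the migrated usage (make_detection_output is a hypothetical helper and the attribute values are placeholders, not anything prescribed by this patch):

#include <memory>

#include "openvino/op/detection_output.hpp"

// Was: ngraph::op::DetectionOutputAttrs attrs; the struct itself is unchanged,
// only the spelling of its name moves to the ov:: namespace.
std::shared_ptr<ov::op::v0::DetectionOutput> make_detection_output(const ov::Output<ov::Node>& box_logits,
                                                                   const ov::Output<ov::Node>& class_preds,
                                                                   const ov::Output<ov::Node>& proposals) {
    ov::op::v0::DetectionOutput::Attributes attrs;
    attrs.num_classes = 91;    // v0 keeps num_classes; the v8 op deduces it from its inputs
    attrs.keep_top_k = {200};
    attrs.nms_threshold = 0.45f;
    return std::make_shared<ov::op::v0::DetectionOutput>(box_logits, class_preds, proposals, attrs);
}

diff --git a/src/core/include/ngraph/op/dft.hpp b/src/core/include/ngraph/op/dft.hpp deleted file mode 100644 index fe94ae91fc4e03..00000000000000 --- a/src/core/include/ngraph/op/dft.hpp +++ /dev/null @@ -1,44 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2022 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.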
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/fft_base.hpp" -#include "openvino/op/dft.hpp" - -namespace ngraph { -namespace op { -namespace v7 { -using ov::op::v7::DFT; -} // namespace v7 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/divide.hpp b/src/core/include/ngraph/op/divide.hpp index 8ad23dc0b3ce46..7e23179987e62c 100644 --- a/src/core/include/ngraph/op/divide.hpp +++ b/src/core/include/ngraph/op/divide.hpp @@ -14,7 +14,6 @@ # endif #endif -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" #include "openvino/op/divide.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/einsum.hpp b/src/core/include/ngraph/op/einsum.hpp deleted file mode 100644 index 8dd7eaa14a1bac..00000000000000 --- a/src/core/include/ngraph/op/einsum.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/einsum.hpp" - -namespace ngraph { -namespace op { -namespace v7 { -using ov::op::v7::Einsum; -} // namespace v7 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/elu.hpp b/src/core/include/ngraph/op/elu.hpp deleted file mode 100644 index 501f6689f9c581..00000000000000 --- a/src/core/include/ngraph/op/elu.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/elu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Elu; -} // namespace v0 -using v0::Elu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/embedding_segments_sum.hpp b/src/core/include/ngraph/op/embedding_segments_sum.hpp deleted file mode 100644 index 1eb8282815c0da..00000000000000 --- a/src/core/include/ngraph/op/embedding_segments_sum.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/index_reduction.hpp" -#include "openvino/op/embedding_segments_sum.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::EmbeddingSegmentsSum; -} // namespace v3 -using v3::EmbeddingSegmentsSum; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp b/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp deleted file mode 100644 index 48d4a3c298dd58..00000000000000 --- a/src/core/include/ngraph/op/embeddingbag_offsets_sum.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/embeddingbag_offsets_base.hpp" -#include "ngraph/op/util/index_reduction.hpp" -#include "openvino/op/embeddingbag_offsets_sum.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::EmbeddingBagOffsetsSum; -} // namespace v3 -using v3::EmbeddingBagOffsetsSum; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/embeddingbag_packedsum.hpp b/src/core/include/ngraph/op/embeddingbag_packedsum.hpp deleted file mode 100644 index 2bcbe580129dd3..00000000000000 --- a/src/core/include/ngraph/op/embeddingbag_packedsum.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/embeddingbag_packed_base.hpp" -#include "ngraph/op/util/index_reduction.hpp" -#include "openvino/op/embeddingbag_packedsum.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::EmbeddingBagPackedSum; -} // namespace v3 -using v3::EmbeddingBagPackedSum; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/equal.hpp b/src/core/include/ngraph/op/equal.hpp deleted file mode 100644 index 69ca75b677b265..00000000000000 --- a/src/core/include/ngraph/op/equal.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/equal.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Equal; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/erf.hpp b/src/core/include/ngraph/op/erf.hpp deleted file mode 100644 index 04669af7b7bcb3..00000000000000 --- a/src/core/include/ngraph/op/erf.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/erf.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Erf; -} // namespace v0 -using v0::Erf; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/exp.hpp b/src/core/include/ngraph/op/exp.hpp deleted file mode 100644 index 582fb434af23d5..00000000000000 --- a/src/core/include/ngraph/op/exp.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/exp.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Exp; -} // namespace v0 -using v0::Exp; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp b/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp deleted file mode 100644 index 32396876379a65..00000000000000 --- a/src/core/include/ngraph/op/experimental_detectron_detection_output.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_detection_output.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronDetectionOutput; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp b/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp deleted file mode 100644 index e063f6f9380cb1..00000000000000 --- a/src/core/include/ngraph/op/experimental_detectron_generate_proposals.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_generate_proposals.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp b/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp deleted file mode 100644 index 3a7e86241828cd..00000000000000 --- a/src/core/include/ngraph/op/experimental_detectron_prior_grid_generator.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronPriorGridGenerator; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp b/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp deleted file mode 100644 index 98eeef604d0845..00000000000000 --- a/src/core/include/ngraph/op/experimental_detectron_roi_feature.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_roi_feature.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronROIFeatureExtractor; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp b/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp deleted file mode 100644 index 6b41bbece97cec..00000000000000 --- a/src/core/include/ngraph/op/experimental_detectron_topkrois.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/experimental_detectron_topkrois.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::ExperimentalDetectronTopKROIs; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/extractimagepatches.hpp b/src/core/include/ngraph/op/extractimagepatches.hpp deleted file mode 100644 index f4c33abd174cbf..00000000000000 --- a/src/core/include/ngraph/op/extractimagepatches.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/extractimagepatches.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ExtractImagePatches; -} // namespace v3 -using v3::ExtractImagePatches; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/eye.hpp b/src/core/include/ngraph/op/eye.hpp deleted file mode 100644 index 8bb108edb6401d..00000000000000 --- a/src/core/include/ngraph/op/eye.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/eye.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::Eye; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/fake_quantize.hpp b/src/core/include/ngraph/op/fake_quantize.hpp deleted file mode 100644 index 6a2c34531ea3fc..00000000000000 --- a/src/core/include/ngraph/op/fake_quantize.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/fake_quantize.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::FakeQuantize; -} // namespace v0 -using v0::FakeQuantize; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/floor.hpp b/src/core/include/ngraph/op/floor.hpp deleted file mode 100644 index c0c7d8f3c1651d..00000000000000 --- a/src/core/include/ngraph/op/floor.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/floor.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Floor; -} // namespace v0 -using v0::Floor; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/floor_mod.hpp b/src/core/include/ngraph/op/floor_mod.hpp deleted file mode 100644 index 03d5d4dd70fbf1..00000000000000 --- a/src/core/include/ngraph/op/floor_mod.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/floor_mod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::FloorMod; -} // namespace v1 -using v1::FloorMod; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather.hpp b/src/core/include/ngraph/op/gather.hpp deleted file mode 100644 index 124e77f03ef7d7..00000000000000 --- a/src/core/include/ngraph/op/gather.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/gather_base.hpp" -#include "openvino/op/gather.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Gather; -} // namespace v1 -namespace v7 { -using ov::op::v7::Gather; -} // namespace v7 -namespace v8 { -using ov::op::v8::Gather; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather_elements.hpp b/src/core/include/ngraph/op/gather_elements.hpp deleted file mode 100644 index 77180e143af859..00000000000000 --- a/src/core/include/ngraph/op/gather_elements.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/gather_elements.hpp" - -namespace ngraph { -namespace op { -namespace v6 { -using ov::op::v6::GatherElements; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather_nd.hpp b/src/core/include/ngraph/op/gather_nd.hpp deleted file mode 100644 index fe587ff39e565c..00000000000000 --- a/src/core/include/ngraph/op/gather_nd.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/gather_nd.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::GatherND; -} // namespace v5 -namespace v8 { -using ov::op::v8::GatherND; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gather_tree.hpp b/src/core/include/ngraph/op/gather_tree.hpp deleted file mode 100644 index deead92748b9a3..00000000000000 --- a/src/core/include/ngraph/op/gather_tree.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/gather_tree.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::GatherTree; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gelu.hpp b/src/core/include/ngraph/op/gelu.hpp deleted file mode 100644 index d3f084c88b4d58..00000000000000 --- a/src/core/include/ngraph/op/gelu.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/gelu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Gelu; -} // namespace v0 -using v0::Gelu; - -using ov::op::GeluApproximationMode; - -namespace v7 { -using ov::op::v7::Gelu; -} // namespace v7 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/generate_proposals.hpp b/src/core/include/ngraph/op/generate_proposals.hpp deleted file mode 100644 index 3dc2525b198755..00000000000000 --- a/src/core/include/ngraph/op/generate_proposals.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/generate_proposals.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::GenerateProposals; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/greater.hpp b/src/core/include/ngraph/op/greater.hpp deleted file mode 100644 index 4869b67748e83c..00000000000000 --- a/src/core/include/ngraph/op/greater.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/greater.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Greater; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/greater_eq.hpp b/src/core/include/ngraph/op/greater_eq.hpp deleted file mode 100644 index 130ea97bc87764..00000000000000 --- a/src/core/include/ngraph/op/greater_eq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/greater_eq.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::GreaterEqual; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/grid_sample.hpp b/src/core/include/ngraph/op/grid_sample.hpp deleted file mode 100644 index fe161bb9519008..00000000000000 --- a/src/core/include/ngraph/op/grid_sample.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/grid_sample.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::GridSample; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/grn.hpp b/src/core/include/ngraph/op/grn.hpp deleted file mode 100644 index 5073d8219ffa73..00000000000000 --- a/src/core/include/ngraph/op/grn.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/op.hpp" -#include "openvino/op/grn.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::GRN; -} // namespace v0 -using v0::GRN; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/group_conv.hpp b/src/core/include/ngraph/op/group_conv.hpp deleted file mode 100644 index bde389017e937e..00000000000000 --- a/src/core/include/ngraph/op/group_conv.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/convolution.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/group_conv.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::GroupConvolution; -using ov::op::v1::GroupConvolutionBackpropData; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gru_cell.hpp b/src/core/include/ngraph/op/gru_cell.hpp deleted file mode 100644 index 7eec4f1b285697..00000000000000 --- a/src/core/include/ngraph/op/gru_cell.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/gru_cell.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::GRUCell; -} // namespace v3 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/gru_sequence.hpp b/src/core/include/ngraph/op/gru_sequence.hpp deleted file mode 100644 index 9713ddf7e641ef..00000000000000 --- a/src/core/include/ngraph/op/gru_sequence.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/gru_sequence.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::GRUSequence; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/hard_sigmoid.hpp b/src/core/include/ngraph/op/hard_sigmoid.hpp deleted file mode 100644 index 170a1072461c24..00000000000000 --- a/src/core/include/ngraph/op/hard_sigmoid.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/hard_sigmoid.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::HardSigmoid; -} // namespace v0 -using v0::HardSigmoid; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/hsigmoid.hpp b/src/core/include/ngraph/op/hsigmoid.hpp deleted file mode 100644 index 31ec84fd265df4..00000000000000 --- a/src/core/include/ngraph/op/hsigmoid.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/hsigmoid.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::HSigmoid; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/hswish.hpp b/src/core/include/ngraph/op/hswish.hpp deleted file mode 100644 index 4a75cb32f3637d..00000000000000 --- a/src/core/include/ngraph/op/hswish.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/hswish.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::HSwish; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/i420_to_bgr.hpp b/src/core/include/ngraph/op/i420_to_bgr.hpp deleted file mode 100644 index 7a0e25e615afe9..00000000000000 --- a/src/core/include/ngraph/op/i420_to_bgr.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/i420_to_bgr.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::I420toBGR; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/i420_to_rgb.hpp b/src/core/include/ngraph/op/i420_to_rgb.hpp deleted file mode 100644 index 3f092d63739a28..00000000000000 --- a/src/core/include/ngraph/op/i420_to_rgb.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/i420_to_rgb.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::I420toRGB; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/idft.hpp b/src/core/include/ngraph/op/idft.hpp deleted file mode 100644 index e85d127227de75..00000000000000 --- a/src/core/include/ngraph/op/idft.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/fft_base.hpp" -#include "openvino/op/idft.hpp" - -namespace ngraph { -namespace op { -namespace v7 { -using ov::op::v7::IDFT; -} // namespace v7 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/if.hpp b/src/core/include/ngraph/op/if.hpp deleted file mode 100644 index f5ceab23b72a60..00000000000000 --- a/src/core/include/ngraph/op/if.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/function.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/util/multi_subgraph_base.hpp" -#include "openvino/op/if.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::If; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/interpolate.hpp b/src/core/include/ngraph/op/interpolate.hpp deleted file mode 100644 index f0628802e244d0..00000000000000 --- a/src/core/include/ngraph/op/interpolate.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/attribute_adapter.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/interpolate.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using InterpolateAttrs = ov::op::v0::Interpolate::Attributes; -using ov::op::v0::Interpolate; -} // namespace v0 -namespace v4 { -using ov::op::v4::Interpolate; -} // namespace v4 -namespace v11 { -using ov::op::v11::Interpolate; -} // namespace v11 -using v0::Interpolate; -using v0::InterpolateAttrs; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/irdft.hpp b/src/core/include/ngraph/op/irdft.hpp deleted file mode 100644 index c9b69562824af7..00000000000000 --- a/src/core/include/ngraph/op/irdft.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/irdft.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::IRDFT; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/is_finite.hpp b/src/core/include/ngraph/op/is_finite.hpp deleted file mode 100644 index af3773d6e92364..00000000000000 --- a/src/core/include/ngraph/op/is_finite.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/is_finite.hpp" - -namespace ngraph { -namespace op { -namespace v10 { -using ov::op::v10::IsFinite; -} // namespace v10 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/is_inf.hpp b/src/core/include/ngraph/op/is_inf.hpp deleted file mode 100644 index 8e7b41d2191ac0..00000000000000 --- a/src/core/include/ngraph/op/is_inf.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/is_inf.hpp" - -namespace ngraph { -namespace op { -namespace v10 { -using ov::op::v10::IsInf; -} // namespace v10 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/is_nan.hpp b/src/core/include/ngraph/op/is_nan.hpp deleted file mode 100644 index 61426c448b1229..00000000000000 --- a/src/core/include/ngraph/op/is_nan.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/is_nan.hpp" - -namespace ngraph { -namespace op { -namespace v10 { -using ov::op::v10::IsNaN; -} // namespace v10 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/less.hpp b/src/core/include/ngraph/op/less.hpp deleted file mode 100644 index 9c9772f4533c77..00000000000000 --- a/src/core/include/ngraph/op/less.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/less.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Less; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/less_eq.hpp b/src/core/include/ngraph/op/less_eq.hpp deleted file mode 100644 index 6e2bd02da347c9..00000000000000 --- a/src/core/include/ngraph/op/less_eq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/less_eq.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LessEqual; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/log.hpp b/src/core/include/ngraph/op/log.hpp deleted file mode 100644 index f590366b208064..00000000000000 --- a/src/core/include/ngraph/op/log.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/log.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Log; -} // namespace v0 -using v0::Log; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/log_softmax.hpp b/src/core/include/ngraph/op/log_softmax.hpp deleted file mode 100644 index 09a4ab0fecce75..00000000000000 --- a/src/core/include/ngraph/op/log_softmax.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/log_softmax.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::LogSoftmax; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/loop.hpp b/src/core/include/ngraph/op/loop.hpp deleted file mode 100644 index 9c14626965883d..00000000000000 --- a/src/core/include/ngraph/op/loop.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/factory_adapter.hpp" -#include "ngraph/function.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/tensor_iterator.hpp" -#include "ngraph/op/util/sub_graph_base.hpp" -#include "openvino/op/loop.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::Loop; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/lrn.hpp b/src/core/include/ngraph/op/lrn.hpp deleted file mode 100644 index 533c3fdd8ac6e9..00000000000000 --- a/src/core/include/ngraph/op/lrn.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/lrn.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::LRN; -} // namespace v0 -using v0::LRN; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/lstm_cell.hpp b/src/core/include/ngraph/op/lstm_cell.hpp deleted file mode 100644 index 9714e9bb362ac9..00000000000000 --- a/src/core/include/ngraph/op/lstm_cell.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/lstm_cell.hpp" - -namespace ngraph { -namespace op { -using ov::op::LSTMWeightsFormat; - -namespace v0 { -using ov::op::v0::LSTMCell; -} // namespace v0 - -namespace v4 { -using ov::op::v4::LSTMCell; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/lstm_sequence.hpp b/src/core/include/ngraph/op/lstm_sequence.hpp deleted file mode 100644 index 72a2bfd1aec9e0..00000000000000 --- a/src/core/include/ngraph/op/lstm_sequence.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/lstm_cell.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" -#include "openvino/op/lstm_sequence.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::LSTMSequence; -} // namespace v0 - -namespace v5 { -using ov::op::v5::LSTMSequence; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/matmul.hpp b/src/core/include/ngraph/op/matmul.hpp deleted file mode 100644 index e72c43dc0eb5c9..00000000000000 --- a/src/core/include/ngraph/op/matmul.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/matmul.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::MatMul; -} // namespace v0 -using v0::MatMul; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/matrix_nms.hpp b/src/core/include/ngraph/op/matrix_nms.hpp deleted file mode 100644 index eab69f6cb591f7..00000000000000 --- a/src/core/include/ngraph/op/matrix_nms.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/matrix_nms.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::MatrixNms; -} // namespace v8 -} // namespace op -using ov::operator<<; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/max.hpp b/src/core/include/ngraph/op/max.hpp deleted file mode 100644 index 39dc365f118f5c..00000000000000 --- a/src/core/include/ngraph/op/max.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reduction.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_max.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceMax; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/max_pool.hpp b/src/core/include/ngraph/op/max_pool.hpp deleted file mode 100644 index 43d9ee2569350f..00000000000000 --- a/src/core/include/ngraph/op/max_pool.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/max_pool_base.hpp" -#include "openvino/op/max_pool.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::MaxPool; -} // namespace v1 - -namespace v8 { -using ov::op::v8::MaxPool; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/maximum.hpp b/src/core/include/ngraph/op/maximum.hpp deleted file mode 100644 index 3f881f3d8c2ca4..00000000000000 --- a/src/core/include/ngraph/op/maximum.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/maximum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Maximum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/min.hpp b/src/core/include/ngraph/op/min.hpp deleted file mode 100644 index 46e813f8ce910d..00000000000000 --- a/src/core/include/ngraph/op/min.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reduction.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_min.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceMin; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/minimum.hpp b/src/core/include/ngraph/op/minimum.hpp deleted file mode 100644 index 4d515d017e1432..00000000000000 --- a/src/core/include/ngraph/op/minimum.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/minimum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Minimum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mish.hpp b/src/core/include/ngraph/op/mish.hpp deleted file mode 100644 index d310c5370a7415..00000000000000 --- a/src/core/include/ngraph/op/mish.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/mish.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::Mish; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mod.hpp b/src/core/include/ngraph/op/mod.hpp deleted file mode 100644 index 92081e8fe8d267..00000000000000 --- a/src/core/include/ngraph/op/mod.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/mod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Mod; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/multiclass_nms.hpp b/src/core/include/ngraph/op/multiclass_nms.hpp deleted file mode 100644 index 19ae119a2214e6..00000000000000 --- a/src/core/include/ngraph/op/multiclass_nms.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/multiclass_nms_base.hpp" -#include "openvino/op/multiclass_nms.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::MulticlassNms; -} // namespace v8 -namespace v9 { -using ov::op::v9::MulticlassNms; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/multiply.hpp b/src/core/include/ngraph/op/multiply.hpp deleted file mode 100644 index 9e935312462785..00000000000000 --- a/src/core/include/ngraph/op/multiply.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/multiply.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Multiply; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/mvn.hpp b/src/core/include/ngraph/op/mvn.hpp deleted file mode 100644 index 3c7a4f2a4867ab..00000000000000 --- a/src/core/include/ngraph/op/mvn.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/mvn.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::MVN; -} // namespace v0 -using v0::MVN; - -using ov::op::MVNEpsMode; - -namespace v6 { -using ov::op::v6::MVN; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/negative.hpp b/src/core/include/ngraph/op/negative.hpp deleted file mode 100644 index a872049551816d..00000000000000 --- a/src/core/include/ngraph/op/negative.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
-#include "openvino/op/negative.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Negative;
-}  // namespace v0
-using v0::Negative;
-}  // namespace op
-NGRAPH_API
-std::shared_ptr<Node> operator-(const Output<Node>& arg0);
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/non_max_suppression.hpp b/src/core/include/ngraph/op/non_max_suppression.hpp
deleted file mode 100644
index 0797e792fdd7e6..00000000000000
--- a/src/core/include/ngraph/op/non_max_suppression.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/non_max_suppression.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::NonMaxSuppression;
-}  // namespace v1
-
-namespace v3 {
-using ov::op::v3::NonMaxSuppression;
-}  // namespace v3
-
-namespace v4 {
-using ov::op::v4::NonMaxSuppression;
-}  // namespace v4
-
-namespace v5 {
-using ov::op::v5::NonMaxSuppression;
-}  // namespace v5
-
-namespace v9 {
-using ov::op::v9::NonMaxSuppression;
-}  // namespace v9
-}  // namespace op
-using ov::operator<<;
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/non_zero.hpp b/src/core/include/ngraph/op/non_zero.hpp
deleted file mode 100644
index 75a48ba7ebd57a..00000000000000
--- a/src/core/include/ngraph/op/non_zero.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/non_zero.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::NonZero; -} // namespace v3 -using v3::NonZero; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/normalize_l2.hpp b/src/core/include/ngraph/op/normalize_l2.hpp deleted file mode 100644 index 9e9ab00a9a62b8..00000000000000 --- a/src/core/include/ngraph/op/normalize_l2.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/normalize_l2.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::NormalizeL2; -} // namespace v0 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/not.hpp b/src/core/include/ngraph/op/not.hpp deleted file mode 100644 index 64f5e492c240ea..00000000000000 --- a/src/core/include/ngraph/op/not.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/logical_not.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalNot; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/not_equal.hpp b/src/core/include/ngraph/op/not_equal.hpp deleted file mode 100644 index 883d48c63a94ad..00000000000000 --- a/src/core/include/ngraph/op/not_equal.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" -#include "openvino/op/not_equal.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::NotEqual; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/nv12_to_bgr.hpp b/src/core/include/ngraph/op/nv12_to_bgr.hpp deleted file mode 100644 index 0044d2049b0d09..00000000000000 --- a/src/core/include/ngraph/op/nv12_to_bgr.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/nv12_to_bgr.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::NV12toBGR; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/nv12_to_rgb.hpp b/src/core/include/ngraph/op/nv12_to_rgb.hpp deleted file mode 100644 index bd3bdb710ae333..00000000000000 --- a/src/core/include/ngraph/op/nv12_to_rgb.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/nv12_to_rgb.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::NV12toRGB; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/one_hot.hpp b/src/core/include/ngraph/op/one_hot.hpp deleted file mode 100644 index 52618026eaf4d4..00000000000000 --- a/src/core/include/ngraph/op/one_hot.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/one_hot.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::OneHot; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/op.hpp b/src/core/include/ngraph/op/op.hpp deleted file mode 100644 index 0fe6936ebf25a6..00000000000000 --- a/src/core/include/ngraph/op/op.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "openvino/op/op.hpp" - -namespace ngraph { -namespace op { -using ov::op::Op; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/or.hpp b/src/core/include/ngraph/op/or.hpp deleted file mode 100644 index cefc9f23e593da..00000000000000 --- a/src/core/include/ngraph/op/or.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "openvino/op/logical_or.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalOr; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/pad.hpp b/src/core/include/ngraph/op/pad.hpp deleted file mode 100644 index 502d6a2c1fcebc..00000000000000 --- a/src/core/include/ngraph/op/pad.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/pad.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Pad; -} // namespace v1 -namespace v12 { -using ov::op::v12::Pad; -} // namespace v12 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/parameter.hpp b/src/core/include/ngraph/op/parameter.hpp index 50b432f9662304..4442673f078e92 100644 --- a/src/core/include/ngraph/op/parameter.hpp +++ b/src/core/include/ngraph/op/parameter.hpp @@ -14,7 +14,6 @@ # endif #endif -#include "ngraph/op/op.hpp" #include "openvino/op/parameter.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/power.hpp b/src/core/include/ngraph/op/power.hpp deleted file mode 100644 index 75bd94576875b0..00000000000000 --- a/src/core/include/ngraph/op/power.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/power.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Power; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prelu.hpp b/src/core/include/ngraph/op/prelu.hpp deleted file mode 100644 index a88fec42c7ba5f..00000000000000 --- a/src/core/include/ngraph/op/prelu.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/prelu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::PRelu; -} // namespace v0 -using v0::PRelu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prior_box.hpp b/src/core/include/ngraph/op/prior_box.hpp deleted file mode 100644 index e8ed48bbe74bb3..00000000000000 --- a/src/core/include/ngraph/op/prior_box.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/prior_box.hpp" - -namespace ngraph { -namespace op { -using PriorBoxAttrs = ov::op::v0::PriorBox::Attributes; -namespace v0 { -using ov::op::v0::PriorBox; -} // namespace v0 -namespace v8 { -using ov::op::v8::PriorBox; -} // namespace v8 -using v0::PriorBox; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/prior_box_clustered.hpp b/src/core/include/ngraph/op/prior_box_clustered.hpp deleted file mode 100644 index 17dceb8b453172..00000000000000 --- a/src/core/include/ngraph/op/prior_box_clustered.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/prior_box_clustered.hpp" - -namespace ngraph { -namespace op { -using PriorBoxClusteredAttrs = ov::op::v0::PriorBoxClustered::Attributes; - -namespace v0 { -using ov::op::v0::PriorBoxClustered; -} // namespace v0 -using v0::PriorBoxClustered; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/proposal.hpp b/src/core/include/ngraph/op/proposal.hpp deleted file mode 100644 index 65c5481fa125d5..00000000000000 --- a/src/core/include/ngraph/op/proposal.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/proposal.hpp" - -namespace ngraph { -namespace op { -using ProposalAttrs = ov::op::v0::Proposal::Attributes; - -namespace v0 { -using ov::op::v0::Proposal; -} // namespace v0 - -namespace v4 { -using ov::op::v4::Proposal; -} // namespace v4 -using v0::Proposal; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/psroi_pooling.hpp b/src/core/include/ngraph/op/psroi_pooling.hpp deleted file mode 100644 index a7d4e11f0442e1..00000000000000 --- a/src/core/include/ngraph/op/psroi_pooling.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/psroi_pooling.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::PSROIPooling; -} // namespace v0 -using v0::PSROIPooling; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/random_uniform.hpp b/src/core/include/ngraph/op/random_uniform.hpp deleted file mode 100644 index 1c55a2a3b82aa3..00000000000000 --- a/src/core/include/ngraph/op/random_uniform.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/random_uniform.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::RandomUniform; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/range.hpp b/src/core/include/ngraph/op/range.hpp deleted file mode 100644 index 274a8ca5bbce06..00000000000000 --- a/src/core/include/ngraph/op/range.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/range.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::Range; -} // namespace v4 -namespace v0 { -using ov::op::v0::Range; -} // namespace v0 -using v0::Range; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/rdft.hpp b/src/core/include/ngraph/op/rdft.hpp deleted file mode 100644 index 3f4cc966415aa1..00000000000000 --- a/src/core/include/ngraph/op/rdft.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/rdft.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::RDFT; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/read_value.hpp b/src/core/include/ngraph/op/read_value.hpp deleted file mode 100644 index 82eefe8e6633ba..00000000000000 --- a/src/core/include/ngraph/op/read_value.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_extension.hpp" -#include "openvino/op/read_value.hpp" - -namespace ngraph { -namespace op { -using ov::op::util::ReadValueBase; - -namespace v3 { -using ov::op::v3::ReadValue; -} // namespace v3 - -namespace v6 { -using ov::op::v6::ReadValue; -} // namespace v6 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_l1.hpp b/src/core/include/ngraph/op/reduce_l1.hpp deleted file mode 100644 index da3964cef7494b..00000000000000 --- a/src/core/include/ngraph/op/reduce_l1.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_l1.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::ReduceL1; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_l2.hpp b/src/core/include/ngraph/op/reduce_l2.hpp deleted file mode 100644 index 0a4667c03abaaa..00000000000000 --- a/src/core/include/ngraph/op/reduce_l2.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_l2.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::ReduceL2; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_logical_and.hpp b/src/core/include/ngraph/op/reduce_logical_and.hpp deleted file mode 100644 index cb6e06f8e426e7..00000000000000 --- a/src/core/include/ngraph/op/reduce_logical_and.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/logical_reduction_keep_dims.hpp" -#include "openvino/op/reduce_logical_and.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceLogicalAnd; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_logical_or.hpp b/src/core/include/ngraph/op/reduce_logical_or.hpp deleted file mode 100644 index e2a1b8782a08cc..00000000000000 --- a/src/core/include/ngraph/op/reduce_logical_or.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/logical_reduction_keep_dims.hpp" -#include "openvino/op/reduce_logical_or.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceLogicalOr; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_mean.hpp b/src/core/include/ngraph/op/reduce_mean.hpp deleted file mode 100644 index 4092f908367750..00000000000000 --- a/src/core/include/ngraph/op/reduce_mean.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_mean.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceMean; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_prod.hpp b/src/core/include/ngraph/op/reduce_prod.hpp deleted file mode 100644 index 4f657399f3edfb..00000000000000 --- a/src/core/include/ngraph/op/reduce_prod.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_prod.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceProd; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reduce_sum.hpp b/src/core/include/ngraph/op/reduce_sum.hpp deleted file mode 100644 index b2022c59d0f6c7..00000000000000 --- a/src/core/include/ngraph/op/reduce_sum.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" -#include "openvino/op/reduce_sum.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::ReduceSum; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/region_yolo.hpp b/src/core/include/ngraph/op/region_yolo.hpp deleted file mode 100644 index ea93351b0371a4..00000000000000 --- a/src/core/include/ngraph/op/region_yolo.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/region_yolo.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::RegionYolo; -} // namespace v0 -using v0::RegionYolo; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/relu.hpp b/src/core/include/ngraph/op/relu.hpp deleted file mode 100644 index 237bdc113c1f14..00000000000000 --- a/src/core/include/ngraph/op/relu.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/relu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Relu; -} // namespace v0 -using v0::Relu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reorg_yolo.hpp b/src/core/include/ngraph/op/reorg_yolo.hpp deleted file mode 100644 index 1359e97a5182dc..00000000000000 --- a/src/core/include/ngraph/op/reorg_yolo.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/reorg_yolo.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::ReorgYolo; -} // namespace v0 -using v0::ReorgYolo; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/reshape.hpp b/src/core/include/ngraph/op/reshape.hpp deleted file mode 100644 index 8220d2f9566ac7..00000000000000 --- a/src/core/include/ngraph/op/reshape.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/axis_vector.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
-#include "openvino/op/reshape.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Reshape;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/result.hpp b/src/core/include/ngraph/op/result.hpp
deleted file mode 100644
index a3429b0b696ded..00000000000000
--- a/src/core/include/ngraph/op/result.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/result.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::Result;
-}  // namespace v0
-using v0::Result;
-}  // namespace op
-using ResultVector = std::vector<std::shared_ptr<op::v0::Result>>;
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/reverse.hpp b/src/core/include/ngraph/op/reverse.hpp
deleted file mode 100644
index f7b5569812e01a..00000000000000
--- a/src/core/include/ngraph/op/reverse.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/reverse.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v1 {
-using ov::op::v1::Reverse;
-}  // namespace v1
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/reverse_sequence.hpp b/src/core/include/ngraph/op/reverse_sequence.hpp
deleted file mode 100644
index 4ce7389f7b2c78..00000000000000
--- a/src/core/include/ngraph/op/reverse_sequence.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/reverse_sequence.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::ReverseSequence;
-}  // namespace v0
-using v0::ReverseSequence;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/rnn_cell.hpp b/src/core/include/ngraph/op/rnn_cell.hpp
deleted file mode 100644
index 5111833cd82882..00000000000000
--- a/src/core/include/ngraph/op/rnn_cell.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <cstddef>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/activation_functions.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/rnn_cell.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::RNNCell;
-}  // namespace v0
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/rnn_sequence.hpp b/src/core/include/ngraph/op/rnn_sequence.hpp
deleted file mode 100644
index 08397b75215e06..00000000000000
--- a/src/core/include/ngraph/op/rnn_sequence.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-#include "openvino/op/rnn_sequence.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v5 {
-using ov::op::v5::RNNSequence;
-}  // namespace v5
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/roi_align.hpp b/src/core/include/ngraph/op/roi_align.hpp
deleted file mode 100644
index e8b645a827d985..00000000000000
--- a/src/core/include/ngraph/op/roi_align.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/roi_align.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v3 {
-using ov::op::v3::ROIAlign;
-}  // namespace v3
-namespace v9 {
-using ov::op::v9::ROIAlign;
-}  // namespace v9
-using v3::ROIAlign;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/roi_pooling.hpp b/src/core/include/ngraph/op/roi_pooling.hpp
deleted file mode 100644
index b6d2ee15f40813..00000000000000
--- a/src/core/include/ngraph/op/roi_pooling.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include "ngraph/op/op.hpp"
-#include "openvino/op/roi_pooling.hpp"
-
-namespace ngraph {
-namespace op {
-namespace v0 {
-using ov::op::v0::ROIPooling;
-}  // namespace v0
-using v0::ROIPooling;
-}  // namespace op
-}  // namespace ngraph
diff --git a/src/core/include/ngraph/op/roll.hpp b/src/core/include/ngraph/op/roll.hpp
deleted file mode 100644
index 2f3939bb9a4923..00000000000000
--- a/src/core/include/ngraph/op/roll.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/roll.hpp" - -namespace ngraph { -namespace op { -namespace v7 { -using ov::op::v7::Roll; -} // namespace v7 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/round.hpp b/src/core/include/ngraph/op/round.hpp deleted file mode 100644 index ee3e492b5a670b..00000000000000 --- a/src/core/include/ngraph/op/round.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/round.hpp" - -namespace ngraph { -namespace op { -namespace v5 { -using ov::op::v5::Round; -} // namespace v5 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/scatter_elements_update.hpp b/src/core/include/ngraph/op/scatter_elements_update.hpp deleted file mode 100644 index 056fcf64f380af..00000000000000 --- a/src/core/include/ngraph/op/scatter_elements_update.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/scatter_elements_update.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ScatterElementsUpdate; -} // namespace v3 -using v3::ScatterElementsUpdate; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/scatter_nd_update.hpp b/src/core/include/ngraph/op/scatter_nd_update.hpp deleted file mode 100644 index 844f2b4744bf9a..00000000000000 --- a/src/core/include/ngraph/op/scatter_nd_update.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/scatter_nd_base.hpp" -#include "openvino/op/scatter_nd_update.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ScatterNDUpdate; -} // namespace v3 -using v3::ScatterNDUpdate; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/scatter_update.hpp b/src/core/include/ngraph/op/scatter_update.hpp deleted file mode 100644 index 8d00f786a659eb..00000000000000 --- a/src/core/include/ngraph/op/scatter_update.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/scatter_base.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/scatter_update.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ScatterUpdate; -} // namespace v3 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/select.hpp b/src/core/include/ngraph/op/select.hpp deleted file mode 100644 index b3f06e3d8995c5..00000000000000 --- a/src/core/include/ngraph/op/select.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/select.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Select; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/selu.hpp b/src/core/include/ngraph/op/selu.hpp deleted file mode 100644 index 26e61c6eb743e2..00000000000000 --- a/src/core/include/ngraph/op/selu.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/selu.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Selu; -} // namespace v0 -using v0::Selu; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/shape_of.hpp b/src/core/include/ngraph/op/shape_of.hpp deleted file mode 100644 index 79968883133c5c..00000000000000 --- a/src/core/include/ngraph/op/shape_of.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/shape_of.hpp" - -namespace ngraph { -namespace op { -namespace v3 { -using ov::op::v3::ShapeOf; -} // namespace v3 - -namespace v0 { -using ov::op::v0::ShapeOf; -} // namespace v0 -using v0::ShapeOf; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/shuffle_channels.hpp b/src/core/include/ngraph/op/shuffle_channels.hpp deleted file mode 100644 index adc901855e7325..00000000000000 --- a/src/core/include/ngraph/op/shuffle_channels.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/shuffle_channels.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::ShuffleChannels; -} // namespace v0 -using v0::ShuffleChannels; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/sigmoid.hpp b/src/core/include/ngraph/op/sigmoid.hpp deleted file mode 100644 index 89a2e4a25a0444..00000000000000 --- a/src/core/include/ngraph/op/sigmoid.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "ngraph/util.hpp" -#include "openvino/op/sigmoid.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Sigmoid; -} // namespace v0 -using v0::Sigmoid; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/sign.hpp b/src/core/include/ngraph/op/sign.hpp deleted file mode 100644 index 5cc50db67fec3b..00000000000000 --- a/src/core/include/ngraph/op/sign.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/sign.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Sign; -} // namespace v0 -using v0::Sign; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/sin.hpp b/src/core/include/ngraph/op/sin.hpp deleted file mode 100644 index 45ae7dfd448876..00000000000000 --- a/src/core/include/ngraph/op/sin.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/sin.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Sin; -} // namespace v0 -using v0::Sin; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/sinh.hpp b/src/core/include/ngraph/op/sinh.hpp deleted file mode 100644 index f6b8c99332edf6..00000000000000 --- a/src/core/include/ngraph/op/sinh.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/sinh.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Sinh; -} // namespace v0 -using v0::Sinh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/sink.hpp b/src/core/include/ngraph/op/sink.hpp deleted file mode 100644 index f5fd2e57af6cc7..00000000000000 --- a/src/core/include/ngraph/op/sink.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <vector> - -#include "ngraph/op/op.hpp" -#include "openvino/op/sink.hpp" - -namespace ngraph { -namespace op { -using ov::op::Sink; -} // namespace op -using SinkVector = std::vector<std::shared_ptr<op::Sink>>; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/slice.hpp b/src/core/include/ngraph/op/slice.hpp deleted file mode 100644 index f9dc771b2bc724..00000000000000 --- a/src/core/include/ngraph/op/slice.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/slice.hpp" - -namespace ngraph { -namespace op { -namespace v8 { -using ov::op::v8::Slice; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/softmax.hpp b/src/core/include/ngraph/op/softmax.hpp deleted file mode 100644 index af1b7d617b4b6e..00000000000000 --- a/src/core/include/ngraph/op/softmax.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/softmax.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Softmax; -} // namespace v1 - -namespace v8 { -using ov::op::v8::Softmax; -} // namespace v8 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/softplus.hpp b/src/core/include/ngraph/op/softplus.hpp deleted file mode 100644 index 3bed43efa1aa16..00000000000000 --- a/src/core/include/ngraph/op/softplus.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/softplus.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::SoftPlus; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/softsign.hpp b/src/core/include/ngraph/op/softsign.hpp deleted file mode 100644 index 289e258d3e229a..00000000000000 --- a/src/core/include/ngraph/op/softsign.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/op/softsign.hpp" - -namespace ngraph { -namespace op { -namespace v9 { -using ov::op::v9::SoftSign; -} // namespace v9 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/space_to_batch.hpp b/src/core/include/ngraph/op/space_to_batch.hpp deleted file mode 100644 index b1d433f0d5ec4b..00000000000000 --- a/src/core/include/ngraph/op/space_to_batch.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/space_to_batch.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::SpaceToBatch; -} // namespace v1 -using v1::SpaceToBatch; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/space_to_depth.hpp b/src/core/include/ngraph/op/space_to_depth.hpp deleted file mode 100644 index d6e0eb4024aa7c..00000000000000 --- a/src/core/include/ngraph/op/space_to_depth.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/space_to_depth.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::SpaceToDepth; -} // namespace v0 -using v0::SpaceToDepth; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/split.hpp b/src/core/include/ngraph/op/split.hpp index 00d0b80a82237b..1ab39e2ace354d 100644 --- a/src/core/include/ngraph/op/split.hpp +++ b/src/core/include/ngraph/op/split.hpp @@ -18,7 +18,6 @@ #include #include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" #include "openvino/op/split.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/op/sqrt.hpp b/src/core/include/ngraph/op/sqrt.hpp deleted file mode 100644 index 9db2668b2d81a1..00000000000000 --- a/src/core/include/ngraph/op/sqrt.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/sqrt.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Sqrt; -} // namespace v0 -using v0::Sqrt; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/squared_difference.hpp b/src/core/include/ngraph/op/squared_difference.hpp deleted file mode 100644 index 44ebfb23b1f296..00000000000000 --- a/src/core/include/ngraph/op/squared_difference.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/squared_difference.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::SquaredDifference; -} // namespace v0 -using v0::SquaredDifference; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/squeeze.hpp b/src/core/include/ngraph/op/squeeze.hpp deleted file mode 100644 index 194ae1b9b60ba6..00000000000000 --- a/src/core/include/ngraph/op/squeeze.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/squeeze.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Squeeze; -} // namespace v0 -using v0::Squeeze; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/strided_slice.hpp b/src/core/include/ngraph/op/strided_slice.hpp deleted file mode 100644 index 1e604ca8b9c2ef..00000000000000 --- a/src/core/include/ngraph/op/strided_slice.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/strided_slice.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::StridedSlice; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/subtract.hpp b/src/core/include/ngraph/op/subtract.hpp deleted file mode 100644 index 6bc02261c18979..00000000000000 --- a/src/core/include/ngraph/op/subtract.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "openvino/op/subtract.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Subtract; -} // namespace v1 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/swish.hpp b/src/core/include/ngraph/op/swish.hpp deleted file mode 100644 index d001693e8e2a1c..00000000000000 --- a/src/core/include/ngraph/op/swish.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/swish.hpp" - -namespace ngraph { -namespace op { -namespace v4 { -using ov::op::v4::Swish; -} // namespace v4 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/tan.hpp b/src/core/include/ngraph/op/tan.hpp deleted file mode 100644 index 86fe0ba4a806c3..00000000000000 --- a/src/core/include/ngraph/op/tan.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/tan.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Tan; -} // namespace v0 -using v0::Tan; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/tanh.hpp b/src/core/include/ngraph/op/tanh.hpp deleted file mode 100644 index 4b22ea39cba929..00000000000000 --- a/src/core/include/ngraph/op/tanh.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" -#include "openvino/op/tanh.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Tanh; -} // namespace v0 -using v0::Tanh; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/tensor_iterator.hpp b/src/core/include/ngraph/op/tensor_iterator.hpp deleted file mode 100644 index e2a2e8dcd51520..00000000000000 --- a/src/core/include/ngraph/op/tensor_iterator.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/function.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/util/sub_graph_base.hpp" -#include "openvino/op/tensor_iterator.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::TensorIterator; -} // namespace v0 -using v0::TensorIterator; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/tile.hpp b/src/core/include/ngraph/op/tile.hpp deleted file mode 100644 index ba99b63f440995..00000000000000 --- a/src/core/include/ngraph/op/tile.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/tile.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Tile; -} // namespace v0 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/topk.hpp b/src/core/include/ngraph/op/topk.hpp deleted file mode 100644 index c10368fe2778ab..00000000000000 --- a/src/core/include/ngraph/op/topk.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/topk.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::TopK; -} // namespace v1 - -namespace v3 { -using ov::op::v3::TopK; -} // namespace v3 - -namespace v11 { -using ov::op::v11::TopK; -} // namespace v11 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/transpose.hpp b/src/core/include/ngraph/op/transpose.hpp deleted file mode 100644 index a042ae4eaef2cc..00000000000000 --- a/src/core/include/ngraph/op/transpose.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/transpose.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::Transpose; -} // namespace v1 -using v1::Transpose; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/unique.hpp b/src/core/include/ngraph/op/unique.hpp deleted file mode 100644 index c1439fbf4207ae..00000000000000 --- a/src/core/include/ngraph/op/unique.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/unique.hpp" - -namespace ngraph { -namespace op { -namespace v10 { -using ov::op::v10::Unique; -} // namespace v10 -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/unsqueeze.hpp b/src/core/include/ngraph/op/unsqueeze.hpp deleted file mode 100644 index 2f278db8f53144..00000000000000 --- a/src/core/include/ngraph/op/unsqueeze.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "openvino/op/unsqueeze.hpp" - -namespace ngraph { -namespace op { -namespace v0 { -using ov::op::v0::Unsqueeze; -} // namespace v0 -using v0::Unsqueeze; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/activation_functions.hpp b/src/core/include/ngraph/op/util/activation_functions.hpp deleted file mode 100644 index 0d10003aac57a1..00000000000000 --- a/src/core/include/ngraph/op/util/activation_functions.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "openvino/op/util/activation_functions.hpp" - -namespace ngraph { -namespace op { -namespace util { -namespace error { -using ov::op::util::error::UnknownActivationFunction; -} // namespace error - -namespace detail { -using ov::op::util::detail::hardsigmoid; -using ov::op::util::detail::relu; -using ov::op::util::detail::sigmoid; -using ov::op::util::detail::tanh; -} // namespace detail - -using ov::op::util::ActivationFunction; -using ov::op::util::ActivationFunctionType; -using ov::op::util::get_activation_func_by_name; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/arithmetic_reduction.hpp b/src/core/include/ngraph/op/util/arithmetic_reduction.hpp deleted file mode 100644 index 03ab9058491b5e..00000000000000 --- a/src/core/include/ngraph/op/util/arithmetic_reduction.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/reduction_base.hpp" -#include "openvino/op/util/arithmetic_reduction.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ArithmeticReduction; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp b/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp deleted file mode 100644 index 829bcf5d085138..00000000000000 --- a/src/core/include/ngraph/op/util/arithmetic_reductions_keep_dims.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/arithmetic_reduction.hpp" -#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ArithmeticReductionKeepDims; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp b/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp deleted file mode 100644 index 2e8acdc7462305..00000000000000 --- a/src/core/include/ngraph/op/util/binary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/binary_elementwise_arithmetic.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::BinaryElementwiseArithmetic; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp b/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp deleted file mode 100644 index 6f507e40da67db..00000000000000 --- a/src/core/include/ngraph/op/util/binary_elementwise_comparison.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/binary_elementwise_comparison.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::BinaryElementwiseComparison; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp b/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp deleted file mode 100644 index 46e733b149b871..00000000000000 --- a/src/core/include/ngraph/op/util/binary_elementwise_logical.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/binary_elementwise_logical.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::BinaryElementwiseLogical; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/broadcast_base.hpp b/src/core/include/ngraph/op/util/broadcast_base.hpp deleted file mode 100644 index 297de17107c778..00000000000000 --- a/src/core/include/ngraph/op/util/broadcast_base.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/axis_vector.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/broadcast_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::BroadcastBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/deformable_convolution_base.hpp b/src/core/include/ngraph/op/util/deformable_convolution_base.hpp deleted file mode 100644 index c670f9e7fc0fcc..00000000000000 --- a/src/core/include/ngraph/op/util/deformable_convolution_base.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/deformable_convolution_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::DeformableConvolutionBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/detection_output_base.hpp b/src/core/include/ngraph/op/util/detection_output_base.hpp deleted file mode 100644 index 806ba06a0b7e25..00000000000000 --- a/src/core/include/ngraph/op/util/detection_output_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/detection_output_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::DetectionOutputBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/elementwise_args.hpp b/src/core/include/ngraph/op/util/elementwise_args.hpp deleted file mode 100644 index 6049761ef111b2..00000000000000 --- a/src/core/include/ngraph/op/util/elementwise_args.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "openvino/op/util/elementwise_args.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::validate_and_infer_elementwise_args; -} -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp b/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp deleted file mode 100644 index 943737251e178e..00000000000000 --- a/src/core/include/ngraph/op/util/embeddingbag_offsets_base.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/index_reduction.hpp" -#include "openvino/op/util/embeddingbag_offsets_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::EmbeddingBagOffsetsBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp b/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp deleted file mode 100644 index a66c0b52315bc5..00000000000000 --- a/src/core/include/ngraph/op/util/embeddingbag_packed_base.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/op/util/index_reduction.hpp" -#include "openvino/op/util/embeddingbag_packed_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::EmbeddingBagPackedBase; -} // namespace util -using util::EmbeddingBagPackedBase; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/eval_copy.hpp b/src/core/include/ngraph/op/util/eval_copy.hpp deleted file mode 100644 index b8f66859b2c9ec..00000000000000 --- a/src/core/include/ngraph/op/util/eval_copy.hpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#define COPY_TENSOR(a) \ - case element::Type_t::a: \ - rc = copy_tensor diff --git a/src/core/include/ngraph/op/util/evaluate_helpers.hpp b/src/core/include/ngraph/op/util/evaluate_helpers.hpp deleted file mode 100644 index 02e6730b5fc64b..00000000000000 --- a/src/core/include/ngraph/op/util/evaluate_helpers.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/axis_set.hpp" -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/util.hpp" -#include "ngraph/validation_util.hpp" - -namespace ngraph { -/// \brief Extracts the tensor data and returns a set of normalized axes created out of it. 
-/// -/// \param tensor A pointer to a HostTensor object containing the raw axes data -/// \param rank Rank of an operator's input data tensor (used to normalize the axes) -/// \param node_description An identifier of the operator's node (used to report errors) -/// -/// \return Normalized (positive only) axes as an AxisSet object. -NGRAPH_API_DEPRECATED AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, - const ngraph::Rank& rank, - const std::string& node_description); -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/fft_base.hpp b/src/core/include/ngraph/op/util/fft_base.hpp deleted file mode 100644 index d30134d1a53243..00000000000000 --- a/src/core/include/ngraph/op/util/fft_base.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/fft_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::FFTBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/framework_node.hpp b/src/core/include/ngraph/op/util/framework_node.hpp deleted file mode 100644 index f65b23451b8bbe..00000000000000 --- a/src/core/include/ngraph/op/util/framework_node.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/framework_node.hpp" - -namespace ngraph { -namespace op { -using ov::op::util::FrameworkNode; -using ov::op::util::FrameworkNodeAttrs; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/gather_base.hpp b/src/core/include/ngraph/op/util/gather_base.hpp deleted file mode 100644 index d098fdc272b46d..00000000000000 --- a/src/core/include/ngraph/op/util/gather_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/gather_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::GatherBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/gather_nd_base.hpp b/src/core/include/ngraph/op/util/gather_nd_base.hpp deleted file mode 100644 index be066f26528ffc..00000000000000 --- a/src/core/include/ngraph/op/util/gather_nd_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/gather_nd_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::GatherNDBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/index_reduction.hpp b/src/core/include/ngraph/op/util/index_reduction.hpp deleted file mode 100644 index 1b934cce3f2fbd..00000000000000 --- a/src/core/include/ngraph/op/util/index_reduction.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <cstddef> -#include <memory> -#include <string> -#include <vector> - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/index_reduction.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::IndexReduction; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/logical_reduction.hpp b/src/core/include/ngraph/op/util/logical_reduction.hpp deleted file mode 100644 index d20cf28a8b3b15..00000000000000 --- a/src/core/include/ngraph/op/util/logical_reduction.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/reduction_base.hpp" -#include "openvino/op/util/logical_reduction.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::LogicalReduction; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp b/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp deleted file mode 100644 index 12f440bd3e9b41..00000000000000 --- a/src/core/include/ngraph/op/util/logical_reduction_keep_dims.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/logical_reduction.hpp" -#include "openvino/op/util/logical_reduction_keep_dims.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::LogicalReductionKeepDims; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/max_pool_base.hpp b/src/core/include/ngraph/op/util/max_pool_base.hpp deleted file mode 100644 index 72564c888a92bb..00000000000000 --- a/src/core/include/ngraph/op/util/max_pool_base.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/max_pool_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::MaxPoolBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/multi_subgraph_base.hpp b/src/core/include/ngraph/op/util/multi_subgraph_base.hpp deleted file mode 100644 index 069dfeea568aba..00000000000000 --- a/src/core/include/ngraph/op/util/multi_subgraph_base.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> -#include <vector> - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/multi_subgraph_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::MultiSubGraphOp; -using MultiSubgraphInputDescriptionPtr = ov::op::util::MultiSubGraphOp::InputDescription::Ptr; -using MultiSubgraphOutputDescriptionPtr = ov::op::util::MultiSubGraphOp::OutputDescription::Ptr; -using MultiSubgraphInputDescriptionVector = util::MultiSubGraphOp::MultiSubgraphInputDescriptionVector; -using MultiSubgraphOutputDescriptionVector = util::MultiSubGraphOp::MultiSubgraphOutputDescriptionVector; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/multiclass_nms_base.hpp b/src/core/include/ngraph/op/util/multiclass_nms_base.hpp deleted file mode 100644 index e599f5e0cb0013..00000000000000 --- a/src/core/include/ngraph/op/util/multiclass_nms_base.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "openvino/op/util/multiclass_nms_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::MulticlassNmsBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/op_annotations.hpp b/src/core/include/ngraph/op/util/op_annotations.hpp deleted file mode 100644 index dec2879f9c837f..00000000000000 --- a/src/core/include/ngraph/op/util/op_annotations.hpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <vector> - -#include "ngraph/deprecated.hpp" -#include "ngraph/except.hpp" -#include "ngraph/ngraph_visibility.hpp" - -namespace ngraph { -namespace op { -namespace util { -struct NGRAPH_API_DEPRECATED oi_pair { - size_t output; - size_t input; - bool destructive; -}; - -/// \brief Base class for annotations added to graph ops -class NGRAPH_API_DEPRECATED NGRAPH_API OpAnnotations { - NGRAPH_SUPPRESS_DEPRECATED_START -public: - virtual ~OpAnnotations() = default; - - void add_in_place_oi_pair(const struct oi_pair& oi) { - for (const auto& e : m_in_place_oi_pairs) { - if (e.input == oi.input || e.output == oi.output) { - OPENVINO_THROW("In_place hint conflicts with an existing entry"); - } - } - m_in_place_oi_pairs.emplace_back(oi); - } - - const std::vector<oi_pair>& get_in_place_oi_pairs() const { - return m_in_place_oi_pairs; - } - bool is_cacheable() const { - return m_cacheable; - } - void set_cacheable(bool val) { - m_cacheable = val; - } - -private: - // map of output-input pairs for which in-place computation is valid - std::vector<oi_pair> m_in_place_oi_pairs; - - bool m_cacheable = false; - NGRAPH_SUPPRESS_DEPRECATED_END -}; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/recurrent_sequence.hpp b/src/core/include/ngraph/op/util/recurrent_sequence.hpp deleted file mode 100644 index f283947fa154b6..00000000000000 --- a/src/core/include/ngraph/op/util/recurrent_sequence.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <vector> - -#include "ngraph/deprecated.hpp" -#include "ngraph/node.hpp" -#include "openvino/op/util/recurrent_sequence.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::validate_seq_input_rank_dimension; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/reduction_base.hpp b/src/core/include/ngraph/op/util/reduction_base.hpp deleted file mode 100644 index 94d653781fc894..00000000000000 --- a/src/core/include/ngraph/op/util/reduction_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/reduction_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ReductionBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/rnn_cell_base.hpp b/src/core/include/ngraph/op/util/rnn_cell_base.hpp deleted file mode 100644 index 1c865cb4eb086f..00000000000000 --- a/src/core/include/ngraph/op/util/rnn_cell_base.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <cstddef> -#include <memory> -#include <string> -#include <vector> - -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "openvino/op/util/rnn_cell_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::convert_lstm_node_format; -using ov::op::util::LSTMWeightsFormat; -using ov::op::util::RNNCellBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/scatter_base.hpp b/src/core/include/ngraph/op/util/scatter_base.hpp deleted file mode 100644 index 73c457b60ba7d4..00000000000000 --- a/src/core/include/ngraph/op/util/scatter_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/scatter_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ScatterBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/scatter_nd_base.hpp b/src/core/include/ngraph/op/util/scatter_nd_base.hpp deleted file mode 100644 index 9a92acb00917b6..00000000000000 --- a/src/core/include/ngraph/op/util/scatter_nd_base.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/scatter_nd_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::ScatterNDBase; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/slice_plan.hpp b/src/core/include/ngraph/op/util/slice_plan.hpp deleted file mode 100644 index e47e4ecd80e4f0..00000000000000 --- a/src/core/include/ngraph/op/util/slice_plan.hpp +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <vector> - -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ngraph { -// -// In various places, like ConstantFolding, it is -// useful to transform DynSlice by converting it to a sequence of ops: -// -// Slice (to do the basic slicing) -// | -// v -// Reshape (non-transposing, to handle shrinks) -// | -// v -// Reverse (to emulate backwards stride) -// -// (The Reshape, Reverse, or both may be omitted if they would just be -// identities.) -// -// A SlicePlan is used to collect parameters for these ops. -// -// This class is moved to dev API -struct NGRAPH_API_DEPRECATED NGRAPH_API SlicePlan { - // Parameters for the Slice - std::vector<int64_t> begins; - std::vector<int64_t> ends; - std::vector<int64_t> strides; - - // Shapes coming into, and going out of, the Reshape. 
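- // (Illustrative example, values assumed: shrinking a {2,1,3} result down to {2,3} would use reshape_in_shape = {2,1,3} and reshape_out_shape = {2,3}.)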
- Shape reshape_in_shape; - Shape reshape_out_shape; - - // Parameters for the Reverse - AxisSet reverse_axes; - - bool operator==(const SlicePlan& other) const; - bool operator!=(const SlicePlan& other) const; -}; - -NGRAPH_API_DEPRECATED SlicePlan NGRAPH_API make_slice_plan(const Shape& input_shape, - const std::vector<int64_t>& begins, - const std::vector<int64_t>& ends, - const std::vector<int64_t>& strides, - const AxisSet& lower_bounds_mask, - const AxisSet& upper_bounds_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask); -} // namespace ngraph - -using ngraph::make_slice_plan; -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/src/core/include/ngraph/op/util/sub_graph_base.hpp b/src/core/include/ngraph/op/util/sub_graph_base.hpp deleted file mode 100644 index 48a3098e16d2b9..00000000000000 --- a/src/core/include/ngraph/op/util/sub_graph_base.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <vector> - -#include "ngraph/op/util/multi_subgraph_base.hpp" -#include "openvino/op/util/sub_graph_base.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::SubGraphOp; -using InputDescriptionPtr = util::SubGraphOp::InputDescription::Ptr; -using OutputDescriptionPtr = util::SubGraphOp::OutputDescription::Ptr; -using InputDescriptionVector = std::vector<InputDescriptionPtr>; -using OutputDescriptionVector = std::vector<OutputDescriptionPtr>; -} // namespace util -} // namespace op - -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp b/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp deleted file mode 100644 index 152467da57fe22..00000000000000 --- a/src/core/include/ngraph/op/util/unary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/util/unary_elementwise_arithmetic.hpp" - -namespace ngraph { -namespace op { -namespace util { -using ov::op::util::UnaryElementwiseArithmetic; -} // namespace util -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable.hpp b/src/core/include/ngraph/op/util/variable.hpp deleted file mode 100644 index 6093d69d247783..00000000000000 --- a/src/core/include/ngraph/op/util/variable.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> -#include <vector> - -#include "ngraph/partial_shape.hpp" -#include "ngraph/type/element_type.hpp" -#include "openvino/op/util/variable.hpp" - -namespace ngraph { -using ov::op::util::Variable; -using ov::op::util::VariableInfo; -using VariablePtr = std::shared_ptr<Variable>; -using VariableVector = std::vector<VariablePtr>; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_context.hpp b/src/core/include/ngraph/op/util/variable_context.hpp deleted file mode 100644 index 680f871f0c9366..00000000000000 --- a/src/core/include/ngraph/op/util/variable_context.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> -#include <unordered_map> - -#include "ngraph/op/util/variable.hpp" -#include "ngraph/op/util/variable_value.hpp" -#include "ngraph/output_vector.hpp" -#include "openvino/op/util/variable_context.hpp" - -namespace ngraph { -using VariableMap = std::unordered_map<VariablePtr, VariableValuePtr>; -using ov::op::util::VariableContext; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_extension.hpp b/src/core/include/ngraph/op/util/variable_extension.hpp deleted file mode 100644 index c93e2c7b7178eb..00000000000000 --- a/src/core/include/ngraph/op/util/variable_extension.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/util/variable_extension.hpp" - -namespace ngraph { -using ov::op::util::VariableExtension; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/util/variable_value.hpp b/src/core/include/ngraph/op/util/variable_value.hpp deleted file mode 100644 index c2b657767bceb0..00000000000000 --- a/src/core/include/ngraph/op/util/variable_value.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/runtime/host_tensor.hpp" -#include "openvino/op/util/variable_value.hpp" - -namespace ngraph { -using ov::op::util::VariableValue; -using VariableValuePtr = std::shared_ptr<VariableValue>; -} // namespace ngraph diff --git a/src/core/include/ngraph/op/variadic_split.hpp b/src/core/include/ngraph/op/variadic_split.hpp deleted file mode 100644 index aea880731dc1c2..00000000000000 --- a/src/core/include/ngraph/op/variadic_split.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/op/op.hpp" -#include "openvino/op/variadic_split.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::VariadicSplit; -} // namespace v1 -using v1::VariadicSplit; -} // namespace op -} // namespace ngraph diff --git a/src/core/include/ngraph/op/xor.hpp b/src/core/include/ngraph/op/xor.hpp deleted file mode 100644 index e25e59d061c7be..00000000000000 --- a/src/core/include/ngraph/op/xor.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <memory> - -#include "ngraph/op/util/binary_elementwise_logical.hpp" -#include "openvino/op/logical_xor.hpp" -#include "openvino/op/xor.hpp" - -namespace ngraph { -namespace op { -namespace v1 { -using ov::op::v1::LogicalXor; -} // namespace v1 -namespace v0 { -using ov::op::v0::Xor; -} // namespace v0 -using v0::Xor; -} // namespace op -} // namespace ngraph
diff --git a/src/core/include/ngraph/ops.hpp b/src/core/include/ngraph/ops.hpp index 79a4d26adaeecd..b3cecc8e036e58 100644 --- a/src/core/include/ngraph/ops.hpp +++ b/src/core/include/ngraph/ops.hpp @@ -16,181 +16,9 @@ # endif #endif -#include "ngraph/op/abs.hpp" -#include "ngraph/op/acos.hpp" -#include "ngraph/op/acosh.hpp" -#include "ngraph/op/adaptive_avg_pool.hpp" -#include "ngraph/op/adaptive_max_pool.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/and.hpp" -#include "ngraph/op/asin.hpp" -#include "ngraph/op/asinh.hpp" -#include "ngraph/op/assign.hpp" -#include "ngraph/op/atan.hpp" -#include "ngraph/op/atanh.hpp" -#include "ngraph/op/avg_pool.hpp" -#include "ngraph/op/batch_norm.hpp" -#include "ngraph/op/batch_to_space.hpp" -#include "ngraph/op/binary_convolution.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/bucketize.hpp" -#include "ngraph/op/ceiling.hpp" -#include "ngraph/op/clamp.hpp" #include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/convert_like.hpp" -#include "ngraph/op/convolution.hpp" -#include "ngraph/op/cos.hpp" -#include "ngraph/op/cosh.hpp" -#include "ngraph/op/ctc_greedy_decoder.hpp" -#include "ngraph/op/ctc_greedy_decoder_seq_len.hpp" -#include "ngraph/op/ctc_loss.hpp" -#include "ngraph/op/cum_sum.hpp" -#include "ngraph/op/deformable_convolution.hpp" -#include "ngraph/op/deformable_psroi_pooling.hpp" -#include "ngraph/op/depth_to_space.hpp" -#include "ngraph/op/detection_output.hpp" -#include "ngraph/op/dft.hpp" #include "ngraph/op/divide.hpp" -#include "ngraph/op/einsum.hpp" -#include "ngraph/op/elu.hpp" -#include "ngraph/op/embedding_segments_sum.hpp" -#include "ngraph/op/embeddingbag_offsets_sum.hpp" -#include "ngraph/op/embeddingbag_packedsum.hpp" -#include "ngraph/op/equal.hpp" -#include "ngraph/op/erf.hpp" -#include "ngraph/op/exp.hpp" -#include "ngraph/op/experimental_detectron_detection_output.hpp" -#include "ngraph/op/experimental_detectron_generate_proposals.hpp" -#include "ngraph/op/experimental_detectron_prior_grid_generator.hpp" -#include "ngraph/op/experimental_detectron_roi_feature.hpp" -#include "ngraph/op/experimental_detectron_topkrois.hpp" -#include "ngraph/op/extractimagepatches.hpp" -#include "ngraph/op/eye.hpp" -#include "ngraph/op/fake_quantize.hpp" -#include "ngraph/op/floor.hpp" -#include "ngraph/op/floor_mod.hpp" -#include "ngraph/op/gather.hpp" -#include "ngraph/op/gather_elements.hpp" -#include "ngraph/op/gather_nd.hpp" -#include "ngraph/op/gather_tree.hpp" -#include "ngraph/op/gelu.hpp" -#include "ngraph/op/generate_proposals.hpp" -#include "ngraph/op/greater.hpp" -#include "ngraph/op/greater_eq.hpp" -#include "ngraph/op/grid_sample.hpp" -#include "ngraph/op/grn.hpp" -#include "ngraph/op/group_conv.hpp" -#include "ngraph/op/gru_cell.hpp" -#include "ngraph/op/gru_sequence.hpp" -#include "ngraph/op/hard_sigmoid.hpp" -#include "ngraph/op/hsigmoid.hpp" -#include "ngraph/op/hswish.hpp" -#include "ngraph/op/i420_to_bgr.hpp" -#include "ngraph/op/i420_to_rgb.hpp" -#include "ngraph/op/idft.hpp" -#include "ngraph/op/if.hpp" -#include "ngraph/op/interpolate.hpp" -#include "ngraph/op/irdft.hpp" -#include "ngraph/op/is_finite.hpp" -#include "ngraph/op/is_inf.hpp" -#include "ngraph/op/is_nan.hpp" -#include "ngraph/op/less.hpp" -#include "ngraph/op/less_eq.hpp" -#include "ngraph/op/log.hpp" -#include "ngraph/op/log_softmax.hpp" -#include "ngraph/op/loop.hpp" -#include "ngraph/op/lrn.hpp" -#include "ngraph/op/lstm_cell.hpp" -#include "ngraph/op/lstm_sequence.hpp" -#include "ngraph/op/matmul.hpp" -#include "ngraph/op/matrix_nms.hpp" -#include "ngraph/op/max.hpp" -#include "ngraph/op/max_pool.hpp" -#include "ngraph/op/maximum.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/mish.hpp" -#include "ngraph/op/mod.hpp" -#include "ngraph/op/multiclass_nms.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/mvn.hpp" -#include "ngraph/op/negative.hpp" -#include "ngraph/op/non_max_suppression.hpp" -#include "ngraph/op/non_zero.hpp" -#include "ngraph/op/normalize_l2.hpp" -#include "ngraph/op/not.hpp" -#include "ngraph/op/not_equal.hpp" -#include "ngraph/op/nv12_to_bgr.hpp" -#include "ngraph/op/nv12_to_rgb.hpp" -#include "ngraph/op/one_hot.hpp" -#include "ngraph/op/or.hpp" -#include "ngraph/op/pad.hpp" #include "ngraph/op/parameter.hpp" -#include "ngraph/op/power.hpp" -#include "ngraph/op/prelu.hpp" -#include "ngraph/op/prior_box.hpp" -#include "ngraph/op/prior_box_clustered.hpp" -#include "ngraph/op/proposal.hpp" -#include "ngraph/op/psroi_pooling.hpp" -#include "ngraph/op/random_uniform.hpp" -#include "ngraph/op/range.hpp" -#include "ngraph/op/rdft.hpp" -#include "ngraph/op/read_value.hpp" -#include "ngraph/op/reduce_l1.hpp" -#include "ngraph/op/reduce_l2.hpp" -#include "ngraph/op/reduce_logical_and.hpp" -#include "ngraph/op/reduce_logical_or.hpp" -#include "ngraph/op/reduce_mean.hpp" -#include "ngraph/op/reduce_prod.hpp" -#include "ngraph/op/reduce_sum.hpp" -#include "ngraph/op/region_yolo.hpp" -#include "ngraph/op/relu.hpp" -#include "ngraph/op/reorg_yolo.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/op/reverse.hpp" -#include "ngraph/op/reverse_sequence.hpp" -#include "ngraph/op/rnn_cell.hpp" -#include "ngraph/op/rnn_sequence.hpp" -#include "ngraph/op/roi_align.hpp" -#include "ngraph/op/roi_pooling.hpp" -#include "ngraph/op/roll.hpp" -#include "ngraph/op/round.hpp" -#include "ngraph/op/scatter_elements_update.hpp" -#include "ngraph/op/scatter_nd_update.hpp" -#include "ngraph/op/scatter_update.hpp" -#include "ngraph/op/select.hpp" -#include "ngraph/op/selu.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/shuffle_channels.hpp" -#include "ngraph/op/sigmoid.hpp" -#include "ngraph/op/sign.hpp" -#include "ngraph/op/sin.hpp" -#include "ngraph/op/sinh.hpp" -#include "ngraph/op/slice.hpp" -#include "ngraph/op/softmax.hpp" -#include "ngraph/op/softplus.hpp" -#include "ngraph/op/softsign.hpp" -#include "ngraph/op/space_to_batch.hpp" -#include "ngraph/op/space_to_depth.hpp" #include "ngraph/op/split.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/squared_difference.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/strided_slice.hpp" -#include "ngraph/op/subtract.hpp" -#include "ngraph/op/swish.hpp" -#include "ngraph/op/tan.hpp" -#include "ngraph/op/tanh.hpp" -#include "ngraph/op/tensor_iterator.hpp" -#include "ngraph/op/tile.hpp" -#include "ngraph/op/topk.hpp" -#include "ngraph/op/transpose.hpp" -#include "ngraph/op/unique.hpp" -#include "ngraph/op/unsqueeze.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/op_types.hpp" -#include "ngraph/op/variadic_split.hpp" -#include "ngraph/op/xor.hpp"
diff --git a/src/core/include/ngraph/opsets/opset.hpp b/src/core/include/ngraph/opsets/opset.hpp index 3f65437c6d3801..de79416b7c2a2d 100644 --- a/src/core/include/ngraph/opsets/opset.hpp +++ b/src/core/include/ngraph/opsets/opset.hpp @@ -54,19 +54,7 @@ class NGRAPH_API OpSet : public ov::OpSet { } }; -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset1(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset2(); NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset3(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset4(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset5(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset6(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset7(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset8(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset9(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset10(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset11(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset12(); -NGRAPH_API_DEPRECATED const NGRAPH_API OpSet& get_opset13(); NGRAPH_API_DEPRECATED const NGRAPH_API std::map<std::string, std::function<const OpSet&()>>& get_available_opsets(); } // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset2.hpp b/src/core/include/ngraph/opsets/opset2.hpp deleted file mode 100644 index d2f09479f941a2..00000000000000 --- a/src/core/include/ngraph/opsets/opset2.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset2 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset2_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset2 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset2_tbl.hpp b/src/core/include/ngraph/opsets/opset2_tbl.hpp deleted file mode 100644 index 2438ff7b341e9d..00000000000000 --- a/src/core/include/ngraph/opsets/opset2_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset2_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/pass/graph_rewrite.hpp b/src/core/include/ngraph/pass/graph_rewrite.hpp index 0931a9c704f7a0..ea11d0dff9351c 100644 --- a/src/core/include/ngraph/pass/graph_rewrite.hpp +++ b/src/core/include/ngraph/pass/graph_rewrite.hpp @@ -19,7 +19,6 @@ #include #include "ngraph/pass/pass.hpp" -#include "ngraph/pattern/matcher.hpp" #include "openvino/pass/graph_rewrite.hpp" namespace ngraph { diff --git a/src/core/include/ngraph/pattern/matcher.hpp b/src/core/include/ngraph/pattern/matcher.hpp deleted file mode 100644 index 947dcfb4d58a9c..00000000000000 --- a/src/core/include/ngraph/pattern/matcher.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/pattern/op/any.hpp" -#include "ngraph/pattern/op/any_of.hpp" -#include "ngraph/pattern/op/any_output.hpp" -#include "ngraph/pattern/op/label.hpp" -#include "ngraph/pattern/op/skip.hpp" -#include "openvino/pass/pattern/matcher.hpp" - -namespace ov { -namespace pass { -class GraphRewrite; -} -} // namespace ov -namespace ngraph { -namespace pass { -using ov::pass::GraphRewrite; -} - -namespace pattern { -using ov::pass::pattern::Matcher; -using ov::pass::pattern::MatcherState; -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any.hpp b/src/core/include/ngraph/pattern/op/any.hpp deleted file mode 100644 index a7a5aaf194ca25..00000000000000 --- a/src/core/include/ngraph/pattern/op/any.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Any; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any_of.hpp b/src/core/include/ngraph/pattern/op/any_of.hpp deleted file mode 100644 index fbf6652f273d90..00000000000000 --- a/src/core/include/ngraph/pattern/op/any_of.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any_of.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::AnyOf; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/any_output.hpp b/src/core/include/ngraph/pattern/op/any_output.hpp deleted file mode 100644 index 4f733d48d4187d..00000000000000 --- a/src/core/include/ngraph/pattern/op/any_output.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/any_output.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::AnyOutput; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/branch.hpp b/src/core/include/ngraph/pattern/op/branch.hpp deleted file mode 100644 index e93b7b1b00976b..00000000000000 --- a/src/core/include/ngraph/pattern/op/branch.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/branch.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Branch; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/capture.hpp b/src/core/include/ngraph/pattern/op/capture.hpp deleted file mode 100644 index 25031c5c3cdc71..00000000000000 --- a/src/core/include/ngraph/pattern/op/capture.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/capture.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Capture; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/label.hpp b/src/core/include/ngraph/pattern/op/label.hpp deleted file mode 100644 index c780effa3130c6..00000000000000 --- a/src/core/include/ngraph/pattern/op/label.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/label.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Label; -} // namespace op - -using ov::pass::pattern::any_input; -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/or.hpp b/src/core/include/ngraph/pattern/op/or.hpp deleted file mode 100644 index 2c6926aa7d99cd..00000000000000 --- a/src/core/include/ngraph/pattern/op/or.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/or.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Or; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/pattern.hpp b/src/core/include/ngraph/pattern/op/pattern.hpp deleted file mode 100644 index 1ff997f306a4a2..00000000000000 --- a/src/core/include/ngraph/pattern/op/pattern.hpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include <functional> - -#include "ngraph/node.hpp" -#include "openvino/pass/pattern/op/pattern.hpp" - -namespace ov { -namespace pass { -namespace pattern { -namespace op { -class Label; -} - -class Matcher; -class MatchState; -} // namespace pattern -} // namespace pass -} // namespace ov -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Label; -} - -using ov::pass::pattern::Matcher; -using ov::pass::pattern::MatcherState; - -using ov::pass::pattern::PatternValueMap; -using ov::pass::pattern::PatternValueMaps; -using ov::pass::pattern::RPatternValueMap; - -using ov::pass::pattern::PatternMap; - -using ov::pass::pattern::as_pattern_map; -using ov::pass::pattern::as_pattern_value_map; -using ov::pass::pattern::consumers_count; -using ov::pass::pattern::has_class; -using ov::pass::pattern::has_static_dim; -using ov::pass::pattern::has_static_dims; -using ov::pass::pattern::has_static_rank; -using ov::pass::pattern::has_static_shape; -using ov::pass::pattern::rank_equals; -using ov::pass::pattern::type_matches; -using ov::pass::pattern::type_matches_any; - -namespace op { -using ov::pass::pattern::op::NodePredicate; -using ov::pass::pattern::op::ValuePredicate; - -using ov::pass::pattern::op::as_value_predicate; -using ov::pass::pattern::op::Pattern; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/skip.hpp b/src/core/include/ngraph/pattern/op/skip.hpp deleted file mode 100644 index 9cfb2f967b0e39..00000000000000 --- a/src/core/include/ngraph/pattern/op/skip.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/skip.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::Skip; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/true.hpp b/src/core/include/ngraph/pattern/op/true.hpp deleted file mode 100644 index 62a564a96f1258..00000000000000 --- a/src/core/include/ngraph/pattern/op/true.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/true.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::True; -} // namespace op -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/pattern/op/wrap_type.hpp b/src/core/include/ngraph/pattern/op/wrap_type.hpp deleted file mode 100644 index 50a3a781bc7745..00000000000000 --- a/src/core/include/ngraph/pattern/op/wrap_type.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/node.hpp" -#include "ngraph/pattern/op/pattern.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -namespace ngraph { -namespace pattern { -namespace op { -using ov::pass::pattern::op::WrapType; -} // namespace op - -using ov::pass::pattern::wrap_type; -} // namespace pattern -} // namespace ngraph diff --git a/src/core/include/ngraph/runtime/aligned_buffer.hpp b/src/core/include/ngraph/runtime/aligned_buffer.hpp deleted file mode 100644 index 4ac11da07b1bc1..00000000000000 --- a/src/core/include/ngraph/runtime/aligned_buffer.hpp +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/util.hpp" -#include "openvino/core/rtti.hpp" - -namespace ngraph { -namespace runtime { -NGRAPH_SUPPRESS_DEPRECATED_START -/// \brief Allocates a block of memory on the specified alignment. The actual size of the -/// allocated memory is larger than the requested size by the alignment, so allocating 1 -/// byte -/// on 64 byte alignment will allocate 65 bytes. -class NGRAPH_API NGRAPH_API_DEPRECATED AlignedBuffer { -public: - // Allocator objects and the allocation interfaces are owned by the - // creators of AlignedBuffers. They need to ensure that the lifetime of - // allocator exceeds the lifetime of this AlignedBuffer. 
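// Illustration: a minimal sketch of the over-allocation scheme the comment
// above describes (allocate size + alignment extra bytes, then round the raw
// pointer up to the next alignment boundary). Hypothetical helper names, not
// the OpenVINO implementation.
#include <cstddef>
#include <cstdint>

struct AlignedBlock {
    char* allocated;  // owning pointer; this is what must eventually be freed
    char* aligned;    // first byte satisfying the requested alignment
};

inline AlignedBlock allocate_aligned(std::size_t byte_size, std::size_t alignment = 64) {
    // Requesting byte_size + alignment bytes is why 1 byte at 64-byte
    // alignment costs 65 bytes, as noted above.
    char* raw = new char[byte_size + alignment];
    const auto address = reinterpret_cast<std::uintptr_t>(raw);
    const std::size_t offset = (alignment - address % alignment) % alignment;
    return AlignedBlock{raw, raw + offset};
}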
- AlignedBuffer(size_t byte_size, size_t alignment = 64); - - AlignedBuffer(); - virtual ~AlignedBuffer(); - - AlignedBuffer(AlignedBuffer&& other); - AlignedBuffer& operator=(AlignedBuffer&& other); - - size_t size() const { - return m_byte_size; - } - void* get_ptr(size_t offset) const { - return m_aligned_buffer + offset; - } - void* get_ptr() { - return m_aligned_buffer; - } - const void* get_ptr() const { - return m_aligned_buffer; - } - template - T* get_ptr() { - return reinterpret_cast(m_aligned_buffer); - } - template - const T* get_ptr() const { - return reinterpret_cast(m_aligned_buffer); - } - - template - explicit operator T*() { - return get_ptr(); - } - -private: - AlignedBuffer(const AlignedBuffer&) = delete; - AlignedBuffer& operator=(const AlignedBuffer&) = delete; - -protected: - char* m_allocated_buffer; - char* m_aligned_buffer; - size_t m_byte_size; -}; -NGRAPH_SUPPRESS_DEPRECATED_END -} // namespace runtime -} // namespace ngraph - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ov { -template <> -class NGRAPH_API AttributeAdapter> - : public DirectValueAccessor> { -public: - AttributeAdapter(std::shared_ptr& value); - - OPENVINO_RTTI("AttributeAdapter"); -}; -NGRAPH_SUPPRESS_DEPRECATED_END - -} // namespace ov diff --git a/src/core/include/ngraph/runtime/host_tensor.hpp b/src/core/include/ngraph/runtime/host_tensor.hpp deleted file mode 100644 index 2f8374a577cf8e..00000000000000 --- a/src/core/include/ngraph/runtime/host_tensor.hpp +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ngraph/descriptor/output.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "ngraph/type/element_type.hpp" -#include "ngraph/type/element_type_traits.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ov { -class Node; -namespace op { -namespace v0 { -class Constant; -} -} // namespace op -} // namespace ov -namespace ngraph { - -namespace runtime { -class HostTensor; -} - -using HostTensorPtr = std::shared_ptr; -namespace op { -namespace v0 { -using ov::op::v0::Constant; -} -} // namespace op -namespace runtime { -class NGRAPH_API NGRAPH_API_DEPRECATED HostTensor : public ngraph::runtime::Tensor { -public: - HostTensor(const element::Type& element_type, const Shape& shape, void* memory_pointer); - HostTensor(const element::Type& element_type, const Shape& shape); - HostTensor(const element::Type& element_type, const PartialShape& partial_shape); - HostTensor(); - explicit HostTensor(const Output&); - explicit HostTensor(const std::shared_ptr& constant); - virtual ~HostTensor() override; - - void initialize(const std::shared_ptr& constant); - - void* get_data_ptr(); - const void* get_data_ptr() const; - - template - T* get_data_ptr() { - return static_cast(get_data_ptr()); - } - - template - const T* get_data_ptr() const { - return static_cast(get_data_ptr()); - } - - template - typename element_type_traits::value_type* get_data_ptr() { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); - return static_cast::value_type*>(get_data_ptr()); - } - - template - const typename element_type_traits::value_type* get_data_ptr() const { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); - return static_cast::value_type>(get_data_ptr()); - } - - /// \brief Write bytes directly into the tensor - /// \param p Pointer to source of data - /// \param n Number of bytes to write, must be integral number of elements. - void write(const void* p, size_t n) override; - - /// \brief Read bytes directly from the tensor - /// \param p Pointer to destination for data - /// \param n Number of bytes to read, must be integral number of elements. - void read(void* p, size_t n) const override; - - bool get_is_allocated() const; - /// \brief Set the element type. Must be compatible with the current element type. - /// \param element_type The element type - void set_element_type(const element::Type& element_type); - /// \brief Set the actual shape of the tensor compatibly with the partial shape. 
-    /// \param shape The shape being set
-    void set_shape(const Shape& shape);
-    /// \brief Set the shape of a node from an input
-    /// \param arg The input argument
-    void set_unary(const HostTensorPtr& arg);
-    /// \brief Set the shape of the tensor using broadcast rules
-    /// \param autob The broadcast mode
-    /// \param arg0 The first argument
-    /// \param arg1 The second argument
-    void set_broadcast(const op::AutoBroadcastSpec& autob, const HostTensorPtr& arg0, const HostTensorPtr& arg1);
-    /// \brief Set the shape of the tensor using broadcast rules
-    /// \param autob The broadcast mode
-    /// \param arg0 The first argument
-    /// \param arg1 The second argument
-    /// \param element_type The output element type
-    void set_broadcast(const op::AutoBroadcastSpec& autob,
-                       const HostTensorPtr& arg0,
-                       const HostTensorPtr& arg1,
-                       const element::Type& element_type);
-
-protected:
-    virtual void allocate_buffer();
-    HostTensor(const HostTensor&) = delete;
-    HostTensor(HostTensor&&) = delete;
-    HostTensor& operator=(const HostTensor&) = delete;
-
-    void* m_memory_pointer{nullptr};
-    void* m_allocated_buffer_pool{nullptr};
-    void* m_aligned_buffer_pool{nullptr};
-    size_t m_buffer_size;
-};
-}  // namespace runtime
-}  // namespace ngraph
-NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/src/core/include/ngraph/runtime/shared_buffer.hpp b/src/core/include/ngraph/runtime/shared_buffer.hpp
deleted file mode 100644
index 576c9888561c6a..00000000000000
--- a/src/core/include/ngraph/runtime/shared_buffer.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED)
-#    define NGRAPH_LEGACY_HEADER_INCLUDED
-#    ifdef _MSC_VER
-#        pragma message( \
-            "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    else
-#        warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-#    endif
-#endif
-
-#include
-
-#include "ngraph/runtime/aligned_buffer.hpp"
-
-namespace ngraph {
-namespace runtime {
-NGRAPH_SUPPRESS_DEPRECATED_START
-/// \brief SharedBuffer class to store pointer to pre-allocated buffer.
-template -class NGRAPH_API_DEPRECATED SharedBuffer : public ngraph::runtime::AlignedBuffer { -public: - SharedBuffer(char* data, size_t size, const T& shared_object) : _shared_object(shared_object) { - m_allocated_buffer = data; - m_aligned_buffer = data; - m_byte_size = size; - } - - virtual ~SharedBuffer() { - m_aligned_buffer = nullptr; - m_allocated_buffer = nullptr; - m_byte_size = 0; - } - -private: - T _shared_object; -}; -NGRAPH_SUPPRESS_DEPRECATED_END -} // namespace runtime -} // namespace ngraph diff --git a/src/core/include/ngraph/runtime/tensor.hpp b/src/core/include/ngraph/runtime/tensor.hpp deleted file mode 100644 index 84cd45268c9bd9..00000000000000 --- a/src/core/include/ngraph/runtime/tensor.hpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ngraph/descriptor/tensor.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/strides.hpp" -#include "ngraph/type/element_type.hpp" - -namespace ngraph { -namespace runtime { -NGRAPH_SUPPRESS_DEPRECATED_START -class NGRAPH_API NGRAPH_API_DEPRECATED Tensor { -protected: - Tensor(const std::shared_ptr& descriptor) : m_descriptor(descriptor), m_stale(true) {} - -public: - virtual ~Tensor() {} - Tensor& operator=(const Tensor&) = default; - - /// \brief Get tensor shape - /// \return const reference to a Shape - virtual const ngraph::Shape& get_shape() const; - - /// \brief Get tensor partial shape - /// \return const reference to a PartialShape - const ngraph::PartialShape& get_partial_shape() const; - - /// \brief Get tensor element type - /// \return element::Type - virtual const element::Type& get_element_type() const; - - /// \brief Get number of elements in the tensor - /// \return number of elements in the tensor - virtual size_t get_element_count() const; - - /// \brief Get the size in bytes of the tensor - /// \return number of bytes in tensor's allocation - virtual size_t get_size_in_bytes() const; - - /// \brief Write bytes directly into the tensor - /// \param p Pointer to source of data - /// \param n Number of bytes to write, must be integral number of elements. - virtual void write(const void* p, size_t n) = 0; - - /// \brief Read bytes directly from the tensor - /// \param p Pointer to destination for data - /// \param n Number of bytes to read, must be integral number of elements. 
- virtual void read(void* p, size_t n) const = 0; - -protected: - std::shared_ptr m_descriptor; - bool m_stale; -}; -NGRAPH_SUPPRESS_DEPRECATED_END -} // namespace runtime -} // namespace ngraph diff --git a/src/core/include/ngraph/type/element_type.hpp b/src/core/include/ngraph/type/element_type.hpp index cd125409db5bc6..3ff94063d82d65 100644 --- a/src/core/include/ngraph/type/element_type.hpp +++ b/src/core/include/ngraph/type/element_type.hpp @@ -35,6 +35,8 @@ using ov::element::dynamic; using ov::element::f16; using ov::element::f32; using ov::element::f64; +using ov::element::f8e4m3; +using ov::element::f8e5m2; using ov::element::i16; using ov::element::i32; using ov::element::i4; diff --git a/src/core/include/ngraph/util.hpp b/src/core/include/ngraph/util.hpp index dbcf324f9f569f..f08b70233a0c2b 100644 --- a/src/core/include/ngraph/util.hpp +++ b/src/core/include/ngraph/util.hpp @@ -32,12 +32,11 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/node.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/runtime/tensor.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type_traits.hpp" #include "openvino/core/enum_mask.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { class Node; @@ -46,10 +45,7 @@ namespace ngraph { using ov::EnumMask; using ov::Node; class stopwatch; - -namespace runtime { class Tensor; -} // namespace runtime NGRAPH_SUPPRESS_DEPRECATED_START template @@ -258,14 +254,14 @@ NGRAPH_API_DEPRECATED T double_to_int(double x, double float_to_int_converter(do } // end namespace ngraph template -NGRAPH_API_DEPRECATED std::vector read_vector(std::shared_ptr tv) { +NGRAPH_API_DEPRECATED std::vector read_vector(std::shared_ptr tv) { if (ngraph::element::from() != tv->get_element_type()) { OPENVINO_THROW("read_vector type must match Tensor type"); } size_t element_count = ngraph::shape_size(tv->get_shape()); size_t size = element_count * sizeof(T); std::vector rc(element_count); - tv->read(rc.data(), size); + std::memcpy(rc.data(), tv->data(), size); return rc; } @@ -278,76 +274,12 @@ NGRAPH_API_DEPRECATED std::vector array_2_vector(typename ngraph::element_typ } return result; } -template -NGRAPH_API_DEPRECATED std::vector host_tensor_2_vector(ngraph::HostTensorPtr tensor) { - NGRAPH_CHECK(tensor != nullptr, "Invalid Tensor received, can't read the data from a null pointer."); - - switch (tensor->get_element_type()) { - case ngraph::element::Type_t::boolean: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::bf16: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::f16: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::f32: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::f64: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::i8: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::i16: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::i32: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, 
tensor->get_element_count()); - } - case ngraph::element::Type_t::i64: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::u1: - NGRAPH_CHECK(false, "u1 element type is unsupported"); - break; - case ngraph::element::Type_t::u8: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::u16: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::u32: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - case ngraph::element::Type_t::u64: { - auto p = tensor->get_data_ptr(); - return array_2_vector(p, tensor->get_element_count()); - } - default: - NGRAPH_UNREACHABLE("unsupported element type"); - } -} NGRAPH_API_DEPRECATED -std::vector NGRAPH_API read_float_vector(std::shared_ptr tv); +std::vector NGRAPH_API read_float_vector(std::shared_ptr tv); NGRAPH_API_DEPRECATED -std::vector NGRAPH_API read_index_vector(std::shared_ptr tv); +std::vector NGRAPH_API read_index_vector(std::shared_ptr tv); NGRAPH_API NGRAPH_API_DEPRECATED diff --git a/src/core/include/ngraph/validation_util.hpp b/src/core/include/ngraph/validation_util.hpp index 1d403eebc19d4a..3535911b1eb9bb 100644 --- a/src/core/include/ngraph/validation_util.hpp +++ b/src/core/include/ngraph/validation_util.hpp @@ -17,11 +17,9 @@ #include #include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/variable_context.hpp" #include "openvino/core/validation_util.hpp" +#include "openvino/op/util/variable_context.hpp" namespace ngraph { using ov::evaluate_as_partial_shape; @@ -31,6 +29,7 @@ using ov::infer_auto_padding; using ov::infer_convolution_forward; using ov::normalize_axes; using ov::normalize_axis; +using ov::op::v0::Constant; NGRAPH_API_DEPRECATED NGRAPH_API @@ -167,36 +166,17 @@ PartialShape infer_slice_shape(const Node* node, NGRAPH_API_DEPRECATED NGRAPH_API std::pair maximum_value(const Output& value); -/// \brief Evaluates outputs, treating values in value_map as already computed. value_map is -/// updated. -/// \param value_map Key is RawNodeOutput in graph, value is the computed value. Updated by the -/// function. -/// \param output_tensor_map Tensors to use for particular outputs -/// \param outputs Root set of values to try to compute -/// \param evaluation_context Storage of additional settings and attributes that can be used -/// when evaluating the function. This additional information can be shared across nodes. 
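// Illustration: the read_vector() change above replaces
// ngraph::runtime::Tensor::read() with a plain memcpy from ov::Tensor::data().
// A self-contained sketch of that idiom (hypothetical helper; assumes a static
// shape and a T that matches the tensor's element type):
#include <cstring>
#include <vector>

#include "openvino/runtime/tensor.hpp"

template <typename T>
std::vector<T> tensor_to_vector(const ov::Tensor& tensor) {
    std::vector<T> result(tensor.get_size());  // get_size() is the element count, not bytes
    std::memcpy(result.data(), tensor.data(), tensor.get_byte_size());
    return result;
}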
-NGRAPH_API_DEPRECATED -NGRAPH_API void evaluate_nodes(std::map& value_map, - std::map& output_tensor_map, - const OutputVector& outputs, - const EvaluationContext& evaluation_context = EvaluationContext()); - /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::max() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_max_of_type(element::Type_t t); +NGRAPH_API std::shared_ptr get_constant_max_of_type(element::Type_t t); /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::min() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_min_of_type(element::Type_t t); +NGRAPH_API std::shared_ptr get_constant_min_of_type(element::Type_t t); /// \brief Returns a Constant storing scalar value equal to std::numeric_limits::lowest() NGRAPH_API_DEPRECATED -NGRAPH_API std::shared_ptr get_constant_lowest_of_type(element::Type_t t); - -/// \brief Checks if size of HostTensorVector is the same as passed size attribute. Then checks -/// that all the HostTensorPtrs are not equal to nullptr -NGRAPH_API_DEPRECATED -NGRAPH_API bool validate_host_tensor_vector(const HostTensorVector& v, const size_t& size); +NGRAPH_API std::shared_ptr get_constant_lowest_of_type(element::Type_t t); namespace opset1 { /// diff --git a/src/core/include/openvino/core/descriptor/tensor.hpp b/src/core/include/openvino/core/descriptor/tensor.hpp index 73b34a32ea53a0..d7be44f6e025b3 100644 --- a/src/core/include/openvino/core/descriptor/tensor.hpp +++ b/src/core/include/openvino/core/descriptor/tensor.hpp @@ -18,12 +18,6 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/tensor.hpp" -namespace ngraph { -namespace runtime { -class HostTensor; -} -} // namespace ngraph - namespace ov { class Node; /// \brief Alias for label tensor. @@ -98,7 +92,7 @@ class OPENVINO_API Tensor { TensorLabel get_value_label() const { return m_value_label; } - /// \brief checks if lower and upper bound are set and point to the same HostTensor + /// \brief checks if lower and upper bound are set and point to the same Tensor bool has_and_set_bound() const { return m_upper_value && m_lower_value && m_upper_value.data() == m_lower_value.data(); } @@ -144,7 +138,6 @@ class OPENVINO_API Tensor { friend OPENVINO_API std::string get_ov_tensor_legacy_name(const Tensor& tensor); friend OPENVINO_API void set_ov_tensor_legacy_name(Tensor& tensor, const std::string& tensor_name); friend class pass::ReverseShapeAndTypeInfer; - friend class ngraph::runtime::HostTensor; }; OPENVINO_API diff --git a/src/core/include/openvino/core/enum_names.hpp b/src/core/include/openvino/core/enum_names.hpp index 60e7f3297b5b4f..7885200645f0ae 100644 --- a/src/core/include/openvino/core/enum_names.hpp +++ b/src/core/include/openvino/core/enum_names.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "openvino/core/except.hpp" diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp index 303c1488739298..2375f8d60c09ee 100644 --- a/src/core/include/openvino/core/model.hpp +++ b/src/core/include/openvino/core/model.hpp @@ -221,26 +221,6 @@ class OPENVINO_API Model : public std::enable_shared_from_this { /// \param value Output containing Node int64_t get_result_index(const ov::Output& value) const; - /// \deprecated Use evaluate with ov::Tensor instead - /// \brief Evaluate the model on inputs, putting results in outputs. - /// \param output_tensors Tensors for the outputs to compute. 
One for each result - /// \param input_tensors Tensors for the inputs. One for each inputs. - /// \param evaluation_context Storage of additional settings and attributes that can be used - /// when evaluating the model. This additional information can be shared across nodes. - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed soon. Please use evaluate with ov::Tensor instead.") - bool evaluate(const ov::HostTensorVector& output_tensors, - const ov::HostTensorVector& input_tensors, - ov::EvaluationContext& evaluation_context) const; - - /// \deprecated Use evaluate with ov::Tensor instead - /// \brief Evaluate the model on inputs, putting results in outputs. - /// \param output_tensors Tensors for the outputs to compute. One for each result - /// \param input_tensors Tensors for the inputs. One for each inputs. - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed soon. Please use evaluate with ov::Tensor instead.") - bool evaluate(const ov::HostTensorVector& output_tensors, const ov::HostTensorVector& input_tensors) const; - /// \brief Evaluate the model on inputs, putting results in outputs. /// \param output_tensors Tensors for the outputs to compute. One for each result /// \param input_tensors Tensors for the inputs. One for each inputs. diff --git a/src/core/include/openvino/core/node.hpp b/src/core/include/openvino/core/node.hpp index 68910bf70b8b91..ff2190ae9ab59a 100644 --- a/src/core/include/openvino/core/node.hpp +++ b/src/core/include/openvino/core/node.hpp @@ -53,9 +53,6 @@ namespace pattern { class Matcher; } // namespace pattern } // namespace pass -OPENVINO_SUPPRESS_DEPRECATED_START -using HostTensorVector = std::vector; -OPENVINO_SUPPRESS_DEPRECATED_END template class Input; @@ -192,26 +189,6 @@ class OPENVINO_API Node : public std::enable_shared_from_this { /// operation // \returns true if evaluate is available virtual bool has_evaluate() const; - /// \deprecated Use evaluate with ov::Tensor instead - /// \brief Evaluates the op on input_values putting results in output_values - /// \param output_values Tensors for the outputs to compute. One for each result - /// \param input_values Tensors for the inputs. One for each inputs. - /// \returns true if successful - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed soon. Please use evaluate with ov::Tensor instead.") - virtual bool evaluate(const ov::HostTensorVector& output_values, const ov::HostTensorVector& input_values) const; - /// \deprecated Use evaluate with ov::Tensor instead - /// \brief Evaluates the op on input_values putting results in output_values - /// \param output_values Tensors for the outputs to compute. One for each result - /// \param input_values Tensors for the inputs. One for each inputs. - /// \param evaluation_context Storage of additional settings and attributes that can be used - /// when evaluating the op. - /// \returns true if successful - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed soon. Please use evaluate with ov::Tensor instead.") - virtual bool evaluate(const ov::HostTensorVector& output_values, - const ov::HostTensorVector& input_values, - const EvaluationContext& evaluationContext) const; /// \brief Evaluates the op on input_values putting results in output_values /// \param output_values Tensors for the outputs to compute. 
One for each result
diff --git a/src/core/include/openvino/core/type/element_type.hpp b/src/core/include/openvino/core/type/element_type.hpp
index 88e79a75d25174..bea57a6ce98479 100644
--- a/src/core/include/openvino/core/type/element_type.hpp
+++ b/src/core/include/openvino/core/type/element_type.hpp
@@ -20,6 +20,8 @@
 #include "openvino/core/rtti.hpp"
 #include "openvino/core/type/bfloat16.hpp"
 #include "openvino/core/type/float16.hpp"
+#include "openvino/core/type/float8_e4m3.hpp"
+#include "openvino/core/type/float8_e5m2.hpp"
 
 /**
  * @defgroup ov_element_cpp_api Element types
@@ -52,6 +54,8 @@ enum class Type_t {
     u32,     //!< u32 element type
     u64,     //!< u64 element type
     nf4,     //!< nf4 element type
+    f8e4m3,  //!< f8e4m3 element type
+    f8e5m2,  //!< f8e5m2 element type
     string   //!< string element type
 };
 
@@ -182,6 +186,12 @@ constexpr Type u64(Type_t::u64);
 /// \brief nf4 element type
 /// \ingroup ov_element_cpp_api
 constexpr Type nf4(Type_t::nf4);
+/// \brief f8e4m3 element type
+/// \ingroup ov_element_cpp_api
+constexpr Type f8e4m3(Type_t::f8e4m3);
+/// \brief f8e5m2 element type
+/// \ingroup ov_element_cpp_api
+constexpr Type f8e5m2(Type_t::f8e5m2);
 /// \brief string element type
 /// \ingroup ov_element_cpp_api
 constexpr Type string(Type_t::string);
@@ -219,6 +229,10 @@ OPENVINO_API Type from();
 template <>
 OPENVINO_API Type from();
 template <>
+OPENVINO_API Type from<ov::float8_e4m3>();
+template <>
+OPENVINO_API Type from<ov::float8_e5m2>();
+template <>
 OPENVINO_API Type from();
 
 OPENVINO_API Type fundamental_type_for(const Type& type);
diff --git a/src/core/include/openvino/core/type/element_type_traits.hpp b/src/core/include/openvino/core/type/element_type_traits.hpp
index 33f0bbd059a99d..fefbac51866417 100644
--- a/src/core/include/openvino/core/type/element_type_traits.hpp
+++ b/src/core/include/openvino/core/type/element_type_traits.hpp
@@ -98,6 +98,16 @@ struct element_type_traits<element::Type_t::i8> {
     using value_type = int8_t;
 };
 
+template <>
+struct element_type_traits<element::Type_t::f8e4m3> {
+    using value_type = ov::float8_e4m3;
+};
+
+template <>
+struct element_type_traits<element::Type_t::f8e5m2> {
+    using value_type = ov::float8_e5m2;
+};
+
 template <>
 struct element_type_traits<element::Type_t::string> {
     using value_type = std::string;
 };
diff --git a/src/core/include/openvino/core/type/float8_e4m3.hpp b/src/core/include/openvino/core/type/float8_e4m3.hpp
new file mode 100644
index 00000000000000..af95d183d69129
--- /dev/null
+++ b/src/core/include/openvino/core/type/float8_e4m3.hpp
@@ -0,0 +1,157 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "openvino/core/core_visibility.hpp"
+
+namespace ov {
+
+/**
+ * @brief Class to represent the f8e4m3 type.
+ */ +class OPENVINO_API float8_e4m3 { +public: + float8_e4m3() = default; + float8_e4m3(uint32_t sign, uint32_t biased_exponent, uint32_t fraction); + float8_e4m3(float value); + + template + explicit float8_e4m3(I value) : m_value{float8_e4m3{static_cast(value)}.m_value} {} + + template + bool operator==(const T& other) const; + template + bool operator!=(const T& other) const { + return !(*this == other); + } + + template + bool operator<(const T& other) const; + template + bool operator<=(const T& other) const; + template + bool operator>(const T& other) const; + template + bool operator>=(const T& other) const; + template + float8_e4m3 operator+(const T& other) const; + template + float8_e4m3 operator+=(const T& other); + template + float8_e4m3 operator-(const T& other) const; + template + float8_e4m3 operator-=(const T& other); + template + float8_e4m3 operator*(const T& other) const; + template + float8_e4m3 operator*=(const T& other); + template + float8_e4m3 operator/(const T& other) const; + template + float8_e4m3 operator/=(const T& other); + + operator float() const; + + static constexpr float8_e4m3 from_bits(uint8_t bits) { + return float8_e4m3(bits, true); + } + uint8_t to_bits() const; + friend std::ostream& operator<<(std::ostream& out, const float8_e4m3& obj) { + out << static_cast(obj); + return out; + } + +private: + constexpr float8_e4m3(uint8_t x, bool) : m_value{x} {} + + uint8_t m_value; +}; + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4756) +#endif +template +bool float8_e4m3::operator==(const T& other) const { +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + return (static_cast(*this) == static_cast(other)); +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif +} + +template +bool float8_e4m3::operator<(const T& other) const { + return (static_cast(*this) < static_cast(other)); +} + +template +bool float8_e4m3::operator<=(const T& other) const { + return (static_cast(*this) <= static_cast(other)); +} + +template +bool float8_e4m3::operator>(const T& other) const { + return (static_cast(*this) > static_cast(other)); +} + +template +bool float8_e4m3::operator>=(const T& other) const { + return (static_cast(*this) >= static_cast(other)); +} + +template +float8_e4m3 float8_e4m3::operator+(const T& other) const { + return {static_cast(*this) + static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator+=(const T& other) { + return *this = *this + other; +} + +template +float8_e4m3 float8_e4m3::operator-(const T& other) const { + return {static_cast(*this) - static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator-=(const T& other) { + return *this = *this - other; +} + +template +float8_e4m3 float8_e4m3::operator*(const T& other) const { + return {static_cast(*this) * static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator*=(const T& other) { + return *this = *this * other; +} + +template +float8_e4m3 float8_e4m3::operator/(const T& other) const { + return {static_cast(*this) / static_cast(other)}; +} + +template +float8_e4m3 float8_e4m3::operator/=(const T& other) { + return *this = *this / other; +} +#if defined(_MSC_VER) +# pragma warning(pop) +#endif +} // namespace ov diff --git a/src/core/include/openvino/core/type/float8_e5m2.hpp b/src/core/include/openvino/core/type/float8_e5m2.hpp new file mode 100644 index 00000000000000..e3990de0c56169 --- /dev/null +++ b/src/core/include/openvino/core/type/float8_e5m2.hpp @@ -0,0 
+1,157 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "openvino/core/core_visibility.hpp" + +namespace ov { + +/** + * @brief Class to represent the f8e5m2 type. + */ +class OPENVINO_API float8_e5m2 { +public: + float8_e5m2() = default; + float8_e5m2(uint32_t sign, uint32_t biased_exponent, uint32_t fraction); + float8_e5m2(float value); + + template + explicit float8_e5m2(I value) : m_value{float8_e5m2{static_cast(value)}.m_value} {} + + template + bool operator==(const T& other) const; + template + bool operator!=(const T& other) const { + return !(*this == other); + } + + template + bool operator<(const T& other) const; + template + bool operator<=(const T& other) const; + template + bool operator>(const T& other) const; + template + bool operator>=(const T& other) const; + template + float8_e5m2 operator+(const T& other) const; + template + float8_e5m2 operator+=(const T& other); + template + float8_e5m2 operator-(const T& other) const; + template + float8_e5m2 operator-=(const T& other); + template + float8_e5m2 operator*(const T& other) const; + template + float8_e5m2 operator*=(const T& other); + template + float8_e5m2 operator/(const T& other) const; + template + float8_e5m2 operator/=(const T& other); + + operator float() const; + + static constexpr float8_e5m2 from_bits(uint8_t bits) { + return float8_e5m2(bits, true); + } + uint8_t to_bits() const; + friend std::ostream& operator<<(std::ostream& out, const float8_e5m2& obj) { + out << static_cast(obj); + return out; + } + +private: + constexpr float8_e5m2(uint8_t x, bool) : m_value{x} {} + + uint8_t m_value; +}; + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4756) +#endif +template +bool float8_e5m2::operator==(const T& other) const { +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + return (static_cast(*this) == static_cast(other)); +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif +} + +template +bool float8_e5m2::operator<(const T& other) const { + return (static_cast(*this) < static_cast(other)); +} + +template +bool float8_e5m2::operator<=(const T& other) const { + return (static_cast(*this) <= static_cast(other)); +} + +template +bool float8_e5m2::operator>(const T& other) const { + return (static_cast(*this) > static_cast(other)); +} + +template +bool float8_e5m2::operator>=(const T& other) const { + return (static_cast(*this) >= static_cast(other)); +} + +template +float8_e5m2 float8_e5m2::operator+(const T& other) const { + return {static_cast(*this) + static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator+=(const T& other) { + return *this = *this + other; +} + +template +float8_e5m2 float8_e5m2::operator-(const T& other) const { + return {static_cast(*this) - static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator-=(const T& other) { + return *this = *this - other; +} + +template +float8_e5m2 float8_e5m2::operator*(const T& other) const { + return {static_cast(*this) * static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator*=(const T& other) { + return *this = *this * other; +} + +template +float8_e5m2 float8_e5m2::operator/(const T& other) const { + return {static_cast(*this) / static_cast(other)}; +} + +template +float8_e5m2 float8_e5m2::operator/=(const T& other) { + return *this = *this / other; +} +#if defined(_MSC_VER) +# pragma 
warning(pop) +#endif +} // namespace ov diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index fe91da44baf6f4..222b006c094ba8 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -12,7 +12,8 @@ # define WAS_OV_LIBRARY_DEFINED_CONSTANT #endif -#include "ngraph/runtime/shared_buffer.hpp" +#include "ngraph/util.hpp" +#include "openvino/core/rtti.hpp" #ifdef WAS_OV_LIBRARY_DEFINED_CONSTANT # undef IN_OV_COMPONENT @@ -36,27 +37,6 @@ class OPENVINO_API Constant : public Op { Constant() = default; - OPENVINO_SUPPRESS_DEPRECATED_START - /// \brief Initialize a constant from tensor - /// \param tensor The tensor with data - OPENVINO_DEPRECATED("This constructor is deprecated and will be removed in 2024.0 release") - Constant(const std::shared_ptr& tensor); - - /// \brief Constructs a tensor constant with the supplied data - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param data A pointer to pre-allocated shared data. - template - OPENVINO_DEPRECATED("This constructor is deprecated and will be removed in 2024.0 release") - Constant(const element::Type& type, const Shape& shape, std::shared_ptr> data) - : m_element_type(type), - m_shape(shape) { - m_data = legacy_to_ov_aligned_buffer(data); - constructor_validate_and_infer_types(); - } - OPENVINO_SUPPRESS_DEPRECATED_END - /// \brief Initialize a constant from ov::Tensor /// \param tensor The ov::Tensor with data Constant(const ov::Tensor& tensor); @@ -164,6 +144,12 @@ class OPENVINO_API Constant : public Op { case Type_t::nf4: fill_data(value); break; + case Type_t::f8e4m3: + fill_data(value); + break; + case Type_t::f8e5m2: + fill_data(value); + break; case Type_t::string: fill_data(value); break; @@ -415,11 +401,6 @@ class OPENVINO_API Constant : public Op { private: Constant(bool memset_allocation, const element::Type& type, const Shape& shape); - OPENVINO_SUPPRESS_DEPRECATED_START - std::shared_ptr legacy_to_ov_aligned_buffer( - const std::shared_ptr& buffer); - OPENVINO_SUPPRESS_DEPRECATED_END - template , typename std::enable_if(source); break; + case Type_t::f8e4m3: + write_buffer(source); + break; + case Type_t::f8e5m2: + write_buffer(source); + break; case Type_t::string: write_buffer(source); break; diff --git a/src/core/include/openvino/op/squared_difference.hpp b/src/core/include/openvino/op/squared_difference.hpp index 3c52268759d02d..845ef75b28b1cb 100644 --- a/src/core/include/openvino/op/squared_difference.hpp +++ b/src/core/include/openvino/op/squared_difference.hpp @@ -29,6 +29,8 @@ class OPENVINO_API SquaredDifference : public util::BinaryElementwiseArithmetic const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; + bool has_evaluate() const override; }; } // namespace v0 } // namespace op diff --git a/src/core/include/openvino/op/util/topk_base.hpp b/src/core/include/openvino/op/util/topk_base.hpp index 7b6f15ac6e73a6..586d495e186055 100644 --- a/src/core/include/openvino/op/util/topk_base.hpp +++ b/src/core/include/openvino/op/util/topk_base.hpp @@ -9,6 +9,10 @@ namespace ov { namespace op { +namespace v0 { +class Constant; +} + namespace util { class OPENVINO_API TopKBase : public Op { public: diff --git 
a/src/core/include/openvino/op/util/variable_value.hpp b/src/core/include/openvino/op/util/variable_value.hpp index 75ee8d524ea967..574a180882949c 100644 --- a/src/core/include/openvino/op/util/variable_value.hpp +++ b/src/core/include/openvino/op/util/variable_value.hpp @@ -7,18 +7,6 @@ #include #include -#ifndef IN_OV_COMPONENT -# define IN_OV_COMPONENT -# define WAS_OV_LIBRARY_DEFINED_VARIABLE_VALUE -#endif - -#include "ngraph/runtime/host_tensor.hpp" - -#ifdef WAS_OV_LIBRARY_DEFINED_VARIABLE_VALUE -# undef IN_OV_COMPONENT -# undef WAS_OV_LIBRARY_DEFINED_VARIABLE_VALUE -#endif - #include "openvino/core/core_visibility.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/runtime/tensor.hpp" @@ -34,37 +22,6 @@ class OPENVINO_API VariableValue { /// \brief Constructs an uninitialized VariableValue. VariableValue(); - /// \brief Constructor for VariableValue. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead - /// \param value The data for Variable. - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor instead.") - explicit VariableValue(ngraph::HostTensorPtr value); - - /// \brief Constructor for VariableValue. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead - /// \param value Data for Variable. - /// \param reset The current state of the reset flag. - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor instead.") - VariableValue(ngraph::HostTensorPtr value, bool reset); - - /// \brief Returns the current stored data. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead - OPENVINO_DEPRECATED("This method is deprecated and will be removed in 2024.0 release. Please get_state() instead.") - ngraph::HostTensorPtr get_value() const; - - /// \brief Sets new values for Variable. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead - /// \param value New data for Variable. - OPENVINO_DEPRECATED( - "This method is deprecated and will be removed in 2024.0 release. Please use set_state() instead.") - void set_value(const ngraph::HostTensorPtr& value); - /// \brief Sets the reset flag to a new state. /// \param reset The new state of the reset flag. 
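// Illustration: migration path for the HostTensorPtr-based VariableValue
// constructors and accessors removed above, using the ov::Tensor-based
// replacements that the deprecation messages name (set_state/get_state).
// Hypothetical usage sketch:
#include "openvino/op/util/variable_value.hpp"
#include "openvino/runtime/tensor.hpp"

void reset_variable_state(ov::op::util::VariableValue& value) {
    ov::Tensor state(ov::element::f32, ov::Shape{1, 3});
    value.set_state(state);  // replaces the removed set_value(ngraph::HostTensorPtr)
    value.set_reset(false);
}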
void set_reset(bool reset); diff --git a/src/core/include/openvino/pass/pattern/op/pattern.hpp b/src/core/include/openvino/pass/pattern/op/pattern.hpp index c44a6bf0bd376d..643a1c935b8927 100644 --- a/src/core/include/openvino/pass/pattern/op/pattern.hpp +++ b/src/core/include/openvino/pass/pattern/op/pattern.hpp @@ -35,6 +35,14 @@ std::function)> has_class() { return pred; } +template +std::function)> class_other_than() { + auto pred = [](std::shared_ptr node) -> bool { + return !ov::is_type(node); + }; + + return pred; +} OPENVINO_API std::function)> consumers_count(size_t n); @@ -63,6 +71,9 @@ std::function)> type_matches(const element::Type& type); OPENVINO_API std::function)> type_matches_any(const std::vector& types); +OPENVINO_API +std::function)> all_of(const std::vector)>>& predicates); + namespace op { using NodePredicate = std::function)>; using ValuePredicate = std::function& value)>; diff --git a/src/core/reference/include/openvino/reference/convert_color_nv12.hpp b/src/core/reference/include/openvino/reference/convert_color_nv12.hpp index 110e1caf411093..d0d9b3ee1cc769 100644 --- a/src/core/reference/include/openvino/reference/convert_color_nv12.hpp +++ b/src/core/reference/include/openvino/reference/convert_color_nv12.hpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/core/type/element_type_traits.hpp" #include "openvino/op/util/convert_color_i420_base.hpp" #include "openvino/op/util/convert_color_nv12_base.hpp" diff --git a/src/core/reference/include/openvino/reference/sign.hpp b/src/core/reference/include/openvino/reference/sign.hpp index 6363a725164dee..fe65c357046091 100644 --- a/src/core/reference/include/openvino/reference/sign.hpp +++ b/src/core/reference/include/openvino/reference/sign.hpp @@ -17,9 +17,21 @@ constexpr T sign(const T v) { return static_cast(static_cast(v)); } -template () || std::is_signed::value>::type* = nullptr> +template ::type>::value || + std::is_signed::value>::type* = nullptr> constexpr T sign(const T v) { - return static_cast((T{0} < v) - (v < T{0})); + return static_cast(std::isnan(static_cast(v)) ? 
v : ((T{0} < v) - (v < T{0}))); +} + +template ::type>::value || + std::is_same::type>::value>::type* = nullptr> +T sign(const T v) { + if (std::isnan(static_cast(v))) + return v; + else + return static_cast((T{0} < v) - (v < T{0})); } } // namespace func diff --git a/src/core/reference/include/openvino/reference/utils/type_util.hpp b/src/core/reference/include/openvino/reference/utils/type_util.hpp index 12291761612340..10d513d2fb8b3a 100644 --- a/src/core/reference/include/openvino/reference/utils/type_util.hpp +++ b/src/core/reference/include/openvino/reference/utils/type_util.hpp @@ -19,6 +19,7 @@ namespace ov { template constexpr bool is_floating_point() { using U = typename std::decay::type; - return std::is_floating_point::value || std::is_same::value || std::is_same::value; + return std::is_floating_point::value || std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value; } } // namespace ov diff --git a/src/core/reference/src/op/einsum.cpp b/src/core/reference/src/op/einsum.cpp index e9f003f0ce748a..113b1c249303a6 100644 --- a/src/core/reference/src/op/einsum.cpp +++ b/src/core/reference/src/op/einsum.cpp @@ -461,7 +461,7 @@ void broadcast_input(ov::TensorVector& inputs, /// template ov::Tensor build_identity(const ov::Tensor& input, const ov::TensorLabel& repeated_label_dims) { - // allocate HostTensor for building identity tensor + // allocate Tensor for building identity tensor OPENVINO_ASSERT(repeated_label_dims.size() > 1); Shape input_shape = input.get_shape(); Shape identity_shape(input_shape.size(), 1); diff --git a/src/core/shape_inference/include/element_visitor.hpp b/src/core/shape_inference/include/element_visitor.hpp index b1d907f1495bba..b09048828702d4 100644 --- a/src/core/shape_inference/include/element_visitor.hpp +++ b/src/core/shape_inference/include/element_visitor.hpp @@ -8,6 +8,7 @@ #include "openvino/core/except.hpp" #include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" #include "openvino/itt.hpp" namespace ov { diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp index 4c6a4d0f33e516..761293f194fd16 100644 --- a/src/core/src/graph_util.cpp +++ b/src/core/src/graph_util.cpp @@ -221,7 +221,7 @@ std::shared_ptr clone_ov_model(const Model& func, std::unordered_map node : func.get_results()) { auto result = ov::as_type_ptr(node_map.at(node.get())); if (!result) { - OPENVINO_THROW("Results should be of type op::Result"); + OPENVINO_THROW("Results should be of type ov::op::v0::Result"); } cloned_results.push_back(result); } @@ -556,7 +556,7 @@ std::pair, std::shared_ptr res_node = std::make_shared(src_node); + std::shared_ptr res_node = std::make_shared(src_node); return make_pair(res_node, par_node); } @@ -626,7 +626,7 @@ std::shared_ptr make_zero(const element::Type& element_type, const Sha if (shape.size() > 0) { return std::make_shared( zero, - op::v0::Constant::create(element::u64, Shape{shape.size()}, shape)); + ov::op::v0::Constant::create(element::u64, Shape{shape.size()}, shape)); } return zero; } @@ -635,7 +635,7 @@ std::shared_ptr make_constant_from_string(std::string val, const element::Type& element_type, const Shape& shape) { auto cvals = std::vector(shape_size(shape), val); - return std::make_shared(element_type, shape, cvals); + return std::make_shared(element_type, shape, cvals); } bool is_zero(const Output& reduce_constant) { diff --git a/src/core/src/layout.cpp b/src/core/src/layout.cpp index eb90ec191d79e1..14e19ff21f0ed7 100644 --- 
a/src/core/src/layout.cpp +++ b/src/core/src/layout.cpp @@ -640,7 +640,11 @@ void AttributeAdapter::set(const std::string& value) { bool LayoutAttribute::visit_attributes(AttributeVisitor& visitor) { std::string layout_str = value.to_string(); visitor.on_attribute("layout", layout_str); - value = Layout(layout_str); + // some attribute visitor will not change the value + // for example, rt info serializer + // in this case, parallelization can be supported in hash pass + if (layout_str != value.to_string()) + value = Layout(layout_str); return true; } diff --git a/src/core/src/model.cpp b/src/core/src/model.cpp index d723a1b93a1f5b..edaf69edfff6e1 100644 --- a/src/core/src/model.cpp +++ b/src/core/src/model.cpp @@ -486,61 +486,6 @@ int64_t ov::Model::get_result_index(const Output& value) const { return -1; } -OPENVINO_SUPPRESS_DEPRECATED_START -namespace { -ov::Tensor wrap_tensor(const ngraph::HostTensorPtr& t) { - const auto& et = t->get_element_type(); - const auto& p_shape = t->get_partial_shape(); - - if (et.is_dynamic() || et == ov::element::undefined) { - return {}; - } else if (p_shape.is_static()) { - return {et, p_shape.to_shape(), t->get_data_ptr()}; - } else { - return {et, ov::Shape{0}}; - } -} - -ov::TensorVector wrap_tensors(const std::vector& tensors) { - ov::TensorVector out; - out.reserve(tensors.size()); - for (const auto& ht : tensors) { - out.push_back(wrap_tensor(ht)); - } - return out; -} - -void update_output_host_tensors(const std::vector& output_values, - const ov::TensorVector& outputs) { - OPENVINO_ASSERT(output_values.size() == outputs.size()); - for (size_t i = 0; i < output_values.size(); ++i) { - auto& ht = output_values[i]; - auto& t = outputs[i]; - if (ht->get_partial_shape().is_dynamic()) { - ht->set_element_type(t.get_element_type()); - ht->set_shape(t.get_shape()); - std::memcpy(ht->get_data_ptr(), t.data(), t.get_byte_size()); - } - } -} -} // namespace - -bool ov::Model::evaluate(const HostTensorVector& output_tensors, const HostTensorVector& input_tensors) const { - ov::EvaluationContext evaluation_context; - return evaluate(output_tensors, input_tensors, evaluation_context); -} - -bool ov::Model::evaluate(const HostTensorVector& output_tensors, - const HostTensorVector& input_tensors, - EvaluationContext& evaluation_context) const { - auto outputs = wrap_tensors(output_tensors); - auto inputs = wrap_tensors(input_tensors); - bool sts = evaluate(outputs, inputs, evaluation_context); - update_output_host_tensors(output_tensors, outputs); - return sts; -} -OPENVINO_SUPPRESS_DEPRECATED_END - bool ov::Model::evaluate(ov::TensorVector& output_tensors, const ov::TensorVector& input_tensors) const { ov::EvaluationContext evaluation_context; return evaluate(output_tensors, input_tensors, evaluation_context); diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index bbda98877044e4..4fa3899ed4c2ae 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -677,117 +677,16 @@ bool ov::Node::has_evaluate() const { return false; } -OPENVINO_SUPPRESS_DEPRECATED_START -bool ov::Node::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - return false; -} - -bool ov::Node::evaluate(const HostTensorVector& output_values, - const HostTensorVector& input_values, - const EvaluationContext& evaluationContext) const { - return evaluate(output_values, input_values); -} - -namespace { - -class DynamicTensor : public ngraph::runtime::HostTensor { -private: - ov::Tensor tensor; - -public: - DynamicTensor(const 
ov::element::Type& type) : ngraph::runtime::HostTensor(type, ov::PartialShape::dynamic()) {} - - ov::Tensor get_tensor() { - return tensor; - } - -protected: - void allocate_buffer() override { - OPENVINO_ASSERT(get_partial_shape().is_static(), - "Attempt to allocate buffer for tensor with partial shape: ", - get_partial_shape()); - OPENVINO_ASSERT(get_element_type().is_static(), - "Attempt to allocate buffer for tensor with dynamic type: ", - get_element_type()); - m_buffer_size = m_descriptor->size(); - tensor = ov::Tensor(get_element_type(), get_partial_shape().get_shape()); - m_memory_pointer = tensor.data(); - m_aligned_buffer_pool = m_memory_pointer; - } -}; - -inline ngraph::HostTensorPtr make_tmp_host_tensor(const ov::Tensor& t) { - if (!t) { - return std::make_shared(ov::element::dynamic); - } else { - return std::make_shared(t.get_element_type(), t.get_shape(), t.data()); - } -} - -inline ngraph::HostTensorPtr make_tmp_out_host_tensor(const ov::Tensor& t) { - if (!t) { - return std::make_shared(ov::element::dynamic); - } else if (t.get_shape() == ov::Shape{0}) { - return std::make_shared(t.get_element_type()); - } else { - return std::make_shared(t.get_element_type(), t.get_shape(), t.data()); - } -} - -inline ngraph::HostTensorVector create_tmp_tensors(const ov::TensorVector& tensors, const bool is_output) { - const auto make_tmp_ht = is_output ? make_tmp_out_host_tensor : make_tmp_host_tensor; - ngraph::HostTensorVector result; - result.reserve(tensors.size()); - for (const auto& tensor : tensors) { - result.push_back(make_tmp_ht(tensor)); - } - return result; -} - -inline void update_output_tensors(ov::TensorVector& output_values, const ngraph::HostTensorVector& outputs) { - OPENVINO_ASSERT(output_values.size() == outputs.size()); - for (size_t i = 0; i < outputs.size(); i++) { - if (auto dyn_output = std::dynamic_pointer_cast(outputs[i])) { - auto tensor = dyn_output->get_tensor(); - // In some cases (e.g. output with zero dims) we get empty tensor after casting to DynamicTensor. - // However we still can try to extract precision and shape from the corresponding HostTensor - if (!tensor && outputs[i]->get_partial_shape().is_static()) { - tensor = ov::Tensor(outputs[i]->get_element_type(), outputs[i]->get_shape()); - } - if (output_values[i]) { - // Copy value to the original tensor - tensor.copy_to(output_values[i]); - } else { - // Tensor is not initialized, so create the new tensor - output_values[i] = tensor; - } - } - } -} -} // namespace - bool ov::Node::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const { - HostTensorVector output = create_tmp_tensors(output_values, true); - HostTensorVector input = create_tmp_tensors(input_values, false); - bool sts = evaluate(output, input); - if (sts) - update_output_tensors(output_values, output); - return sts; + return false; } bool ov::Node::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values, const ov::EvaluationContext& evaluationContext) const { - // Call evaluate for old implementation with EvaluationContext - HostTensorVector output = create_tmp_tensors(output_values, true); - HostTensorVector input = create_tmp_tensors(input_values, false); - bool sts = evaluate(output, input, evaluationContext); - if (sts) - update_output_tensors(output_values, output); - // Call evaluate for ov::Tensor if op doesn't have evaluate with EvaluationContext - return sts ? 
sts : evaluate(output_values, input_values);
+    // As most nodes implement evaluate without a context, try calling it unless a child class overrides this
+    return evaluate(output_values, input_values);
 }
-OPENVINO_SUPPRESS_DEPRECATED_END
 
 bool ov::Node::evaluate_lower(ov::TensorVector& output_values) const {
     const auto& inputs = input_values();
diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp
index 914324a5dc97c6..98cdfd9cc4104e 100644
--- a/src/core/src/op/constant.cpp
+++ b/src/core/src/op/constant.cpp
@@ -12,13 +12,12 @@
 #include "compare.hpp"
 #include "element_visitor.hpp"
 #include "itt.hpp"
-#include "ngraph/runtime/aligned_buffer.hpp"
-#include "ngraph/runtime/tensor.hpp"
 #include "openvino/core/type/float16.hpp"
 #include "openvino/core/type/nf4.hpp"
 #include "openvino/reference/utils/type_util.hpp"
 #include "openvino/runtime/shared_buffer.hpp"
 #include "openvino/runtime/string_aligned_buffer.hpp"
+#include "openvino/runtime/tensor.hpp"
 
 namespace ov {
 namespace op {
@@ -69,34 +68,6 @@ std::vector<std::string> from_string_vector(const std::vector<std::string>& str_values) {
 }  // namespace
 
 namespace v0 {
-OPENVINO_SUPPRESS_DEPRECATED_START
-std::shared_ptr<ov::AlignedBuffer> Constant::legacy_to_ov_aligned_buffer(
-    const std::shared_ptr<ngraph::runtime::AlignedBuffer>& buffer) {
-    return std::make_shared<ov::SharedBuffer<std::shared_ptr<ngraph::runtime::AlignedBuffer>>>(buffer->get_ptr<char>(),
-                                                                                               buffer->size(),
-                                                                                               buffer);
-}
-
-Constant::Constant(const std::shared_ptr<ngraph::runtime::Tensor>& tensor) {
-    m_element_type = tensor->get_element_type();
-    m_shape = tensor->get_shape();
-    // Share data from HostTensor if we work with it
-    // And copy data in the other case
-    if (auto hostTensor = std::dynamic_pointer_cast<ngraph::runtime::HostTensor>(tensor)) {
-        m_data = std::make_shared<ov::SharedBuffer<std::shared_ptr<ngraph::runtime::Tensor>>>(
-            static_cast<char*>(hostTensor->get_data_ptr()),
-            tensor->get_size_in_bytes(),
-            tensor);
-    } else {
-        OPENVINO_ASSERT(m_element_type != ov::element::string,
-                        "Creation of string constant for ngraph::runtime::Tensor is supported only for HostTensor");
-        constructor_validate_and_infer_types();
-        allocate_buffer(false);
-        tensor->read(get_data_ptr_nc(), tensor->get_size_in_bytes());
-    }
-    constructor_validate_and_infer_types();
-}
-OPENVINO_SUPPRESS_DEPRECATED_END
 
 Constant::Constant(const Tensor& tensor)
     : m_element_type{tensor.get_element_type()},
@@ -221,8 +192,26 @@ struct ValueToString : ov::element::NotSupported<std::string> {
 
 std::string Constant::convert_value_to_string(size_t index) const {
     using namespace ov::element;
-    return IfTypeOf::apply<
-        ValueToString>(get_element_type(), this, index);
+    return IfTypeOf::apply(get_element_type(), this, index);
 }
 
 size_t Constant::get_byte_size() const {
diff --git a/src/core/src/op/convert.cpp b/src/core/src/op/convert.cpp
index b48f2d5e433ba7..a79922dcacd42f 100644
--- a/src/core/src/op/convert.cpp
+++ b/src/core/src/op/convert.cpp
@@ -19,7 +19,8 @@ constexpr bool is_lp_type(const element::Type_t et) {
     return (et == element::i4) || (et == element::u1) || (et == element::u4) || (et == element::nf4);
 }
 
-#define CONVERT_ET_LIST boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4
+#define CONVERT_ET_LIST \
+    boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4, f8e4m3, f8e5m2
 
 struct Evaluate : public element::NoAction<bool> {
     using element::NoAction<bool>::visit;
@@ -173,6 +174,8 @@ bool Convert::has_evaluate() const {
     case element::u32:
     case element::u64:
     case element::nf4:
+    case element::f8e4m3:
+    case element::f8e5m2:
         return true;
     default:
         return false;
diff --git a/src/core/src/op/fake_quantize.cpp b/src/core/src/op/fake_quantize.cpp
index b15137e92038ed..8067acad5dc854 100644
---
a/src/core/src/op/fake_quantize.cpp +++ b/src/core/src/op/fake_quantize.cpp @@ -114,7 +114,7 @@ bool FakeQuantize::evaluate(TensorVector& outputs, const TensorVector& inputs) c using namespace ov::element; return IF_TYPE_OF(v0_FakeQuantize_evaluate, - OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64), fake_quantize::Evaluate, inputs[0].get_element_type(), inputs[0], @@ -135,6 +135,7 @@ bool FakeQuantize::evaluate(TensorVector& outputs, const TensorVector& inputs) c bool FakeQuantize::has_evaluate() const { OV_OP_SCOPE(v0_FakeQuantize_has_evaluate); switch (get_input_element_type(0)) { + case element::bf16: case element::f16: case element::f32: case element::i32: diff --git a/src/core/src/op/mvn.cpp b/src/core/src/op/mvn.cpp index fbffd1231ad4de..5b9805d9051a85 100644 --- a/src/core/src/op/mvn.cpp +++ b/src/core/src/op/mvn.cpp @@ -6,6 +6,7 @@ #include "compare.hpp" #include "itt.hpp" +#include "openvino/core/type/element_type_traits.hpp" #include "openvino/reference/mvn.hpp" // ------------------------------ V0 ------------------------------ diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index d80f040e5ef7ad..f8b1adeb2a77d0 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -7,10 +7,10 @@ #include "bound_evaluate.hpp" #include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/core/shape_util.hpp" #include "openvino/op/util/axes_util.hpp" #include "openvino/reference/reduce_prod.hpp" +#include "validation_util.hpp" namespace ov { namespace op { @@ -99,11 +99,9 @@ bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { // In case dimensions has a zero dimension - it should return 0 in any case if (tensor_has_max_value(get_input_tensor(0).get_upper_value()) && !tensor_has_zero_value(get_input_tensor(0).get_upper_value())) { - OPENVINO_SUPPRESS_DEPRECATED_START - auto max_constant = ngraph::get_constant_max_of_type(get_output_element_type(0)); - OPENVINO_SUPPRESS_DEPRECATED_END - OPENVINO_ASSERT(max_constant->get_byte_size() <= output_values[0].get_byte_size()); - memcpy(output_values[0].data(), max_constant->get_data_ptr(), max_constant->get_byte_size()); + const auto max_constant = ov::util::make_tensor_of_max_value(get_output_element_type(0)); + OPENVINO_ASSERT(max_constant.get_byte_size() <= output_values[0].get_byte_size()); + std::memcpy(output_values[0].data(), max_constant.data(), max_constant.get_byte_size()); return true; } diff --git a/src/core/src/op/squared_difference.cpp b/src/core/src/op/squared_difference.cpp index 700e0a4a809d12..e91c46aace291f 100644 --- a/src/core/src/op/squared_difference.cpp +++ b/src/core/src/op/squared_difference.cpp @@ -5,6 +5,31 @@ #include "openvino/op/squared_difference.hpp" #include "itt.hpp" +#include "openvino/reference/squared_difference.hpp" +#include "utils.hpp" + +namespace squared_difference { +struct Evaluate : ov::element::NoAction { + using ov::element::NoAction::visit; + + template + static result_type visit(const ov::Tensor& in0, + const ov::Tensor& in1, + ov::Tensor& out, + const ov::Shape& shape0, + const ov::Shape& shape1, + const ov::op::AutoBroadcastSpec& broadcast_spec) { + using T = typename ov::element_type_traits::value_type; + ov::reference::squared_difference(in0.data(), + in1.data(), + out.data(), + shape0, + shape1, + broadcast_spec); + return true; + } +}; +} // namespace squared_difference // ------------------------------ v0 
------------------------------------------- @@ -20,3 +45,31 @@ std::shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(c check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } + +bool ov::op::v0::SquaredDifference::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v0_SquaredDifference_evaluate); + OPENVINO_ASSERT(outputs.size() == 1); + + outputs[0].set_shape(infer_broadcast_shape(this, inputs)); + using namespace ov::element; + return IF_TYPE_OF(v0_SquaredDifference_evaluate, + OV_PP_ET_LIST(f32), + squared_difference::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); +} + +bool ov::op::v0::SquaredDifference::has_evaluate() const { + OV_OP_SCOPE(v0_SquaredDifference_has_evaluate); + switch (get_input_element_type(0)) { + case element::f32: + return true; + default: + return false; + } +} diff --git a/src/core/src/op/util/evaluate_helpers.cpp b/src/core/src/op/util/evaluate_helpers.cpp deleted file mode 100644 index cffc57e6fbd87c..00000000000000 --- a/src/core/src/op/util/evaluate_helpers.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/evaluate_helpers.hpp" - -namespace ngraph { -AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, - const ngraph::Rank& rank, - const std::string& node_description) { - OPENVINO_SUPPRESS_DEPRECATED_START - const auto axes_vector = host_tensor_2_vector(tensor); - const auto normalized_axes = ngraph::normalize_axes(node_description, axes_vector, rank); - OPENVINO_SUPPRESS_DEPRECATED_END - return AxisSet{normalized_axes}; -} -} // namespace ngraph diff --git a/src/core/src/op/util/slice_plan.cpp b/src/core/src/op/util/slice_plan.cpp index 2025900745ec95..434f855490615e 100644 --- a/src/core/src/op/util/slice_plan.cpp +++ b/src/core/src/op/util/slice_plan.cpp @@ -6,7 +6,6 @@ #include -#include "ngraph/op/util/slice_plan.hpp" #include "openvino/core/except.hpp" namespace ov { @@ -217,45 +216,3 @@ bool SlicePlan::operator!=(const SlicePlan& other) const { } // namespace util } // namespace op } // namespace ov - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ngraph { - -SlicePlan make_slice_plan(const Shape& input_shape, - const std::vector& begins, - const std::vector& ends, - const std::vector& strides, - const AxisSet& lower_bounds_mask, - const AxisSet& upper_bounds_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask) { - const auto sp = ov::op::util::make_slice_plan(input_shape, - begins, - ends, - strides, - lower_bounds_mask, - upper_bounds_mask, - new_axis_mask, - shrink_axis_mask, - ellipsis_mask); - return SlicePlan{sp.begins, sp.ends, sp.strides, sp.reshape_in_shape, sp.reshape_out_shape, sp.reverse_axes}; -} - -bool SlicePlan::operator==(const SlicePlan& other) const { - bool equal = true; - equal &= begins == other.begins; - equal &= ends == other.ends; - equal &= strides == other.strides; - equal &= reshape_in_shape == other.reshape_in_shape; - equal &= reshape_out_shape == other.reshape_out_shape; - equal &= reverse_axes == other.reverse_axes; - - return equal; -} - -bool SlicePlan::operator!=(const SlicePlan& other) const { - return !(*this == other); -} -} // namespace ngraph -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/src/core/src/op/util/variable_value.cpp 
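// Worked example for the new SquaredDifference evaluator above (illustrative, not part
// of the patch): with NumPy-style autobroadcast, x = {1, 2, 3} and y = {3} give
// out[i] = (x[i] - y[0])^2 = {4, 1, 0}; has_evaluate() restricts this path to f32 inputs.
//
//     ov::TensorVector outs{ov::Tensor(ov::element::f32, ov::Shape{3})};
//     sq_diff->evaluate(outs, {x_tensor, y_tensor});  // sq_diff, x_tensor, y_tensor are hypothetical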
b/src/core/src/op/util/variable_value.cpp index 9de53857859f3b..86ea10a87e9021 100644 --- a/src/core/src/op/util/variable_value.cpp +++ b/src/core/src/op/util/variable_value.cpp @@ -6,7 +6,6 @@ #include -#include "ngraph/runtime/host_tensor.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/shape_util.hpp" @@ -14,108 +13,6 @@ #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/tensor.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace { - -class TensorWrapper : public ngraph::runtime::HostTensor { -public: - TensorWrapper(const ov::Tensor& tensor) - : ngraph::runtime::HostTensor(tensor.get_element_type(), tensor.get_shape(), tensor.data()), - tensor(tensor) {} - - ov::Tensor tensor; -}; - -/** - * @brief Tensor what contains HostTensorPtr inside - */ -class HostTensorWrapper : public ov::ITensor { -public: - ngraph::HostTensorPtr tensor; - - HostTensorWrapper(const ngraph::HostTensorPtr& tensor) : tensor{tensor}, m_type(tensor->get_element_type()) { - const auto& p_shape = tensor->get_partial_shape(); - m_shape = p_shape.is_static() ? p_shape.to_shape() : ov::Shape{0}; - update_strides(); - } - - const ov::element::Type& get_element_type() const override { - return m_type; - } - - void set_shape(ov::Shape shape) override { - tensor->set_shape(shape); - m_shape = shape; - update_strides(); - } - - const ov::Shape& get_shape() const override { - return m_shape; - } - - const ov::Strides& get_strides() const override { - OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, - "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ", - get_element_type()); - return m_strides; - } - - size_t get_size() const override { - return ov::shape_size(m_shape); - } - - size_t get_byte_size() const override { - return get_size() * m_type.size(); - } - - void* data(const ov::element::Type& element_type) const override { - return tensor->get_data_ptr(); - } - -private: - ov::element::Type m_type; - ov::Shape m_shape; - ov::Strides m_strides; - - void update_strides() { - if (m_type.bitwidth() >= 8) { - m_strides.clear(); - m_strides.resize(m_shape.size()); - auto size = m_strides.size(); - for (size_t i = 0; i < size; i++) { - size_t value(m_type.size()); - size_t dim(m_shape[size - 1 - i]); - if (i) { - value = m_strides[size - i] * dim; - } - m_strides[size - i - 1] = value; - } - } - } -}; -} // namespace - -ov::op::util::VariableValue::VariableValue() = default; - -OPENVINO_SUPPRESS_DEPRECATED_START -ov::op::util::VariableValue::VariableValue(ngraph::HostTensorPtr value) - : m_value(ov::Tensor{std::make_shared(value), {}}) {} - -ov::op::util::VariableValue::VariableValue(ngraph::HostTensorPtr value, bool reset) - : m_reset(reset), - m_value(ov::Tensor{std::make_shared(value), {}}) {} - -ngraph::HostTensorPtr ov::op::util::VariableValue::get_value() const { - if (auto wrapper = std::dynamic_pointer_cast(m_value._impl)) - return wrapper->tensor; - return std::make_shared(m_value); -} - -void ov::op::util::VariableValue::set_value(const ngraph::HostTensorPtr& value) { - m_value = ov::Tensor{std::make_shared(value), {}}; -} -OPENVINO_SUPPRESS_DEPRECATED_END - void ov::op::util::VariableValue::set_reset(bool reset) { m_reset = reset; } diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index 3f9104a1011fcb..a92f0d972e49d4 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -217,86 +217,14 @@ OpSet::OpSet(const OpSet& opset) : ov::OpSet(opset) {} const std::map>& 
get_available_opsets() { #define _REG_OPSET(OPSET) \ { #OPSET, get_##OPSET } - const static std::map> opset_map = {_REG_OPSET(opset1), - _REG_OPSET(opset2), - _REG_OPSET(opset3), - _REG_OPSET(opset4), - _REG_OPSET(opset5), - _REG_OPSET(opset6), - _REG_OPSET(opset7), - _REG_OPSET(opset8), - _REG_OPSET(opset9), - _REG_OPSET(opset10), - _REG_OPSET(opset11), - _REG_OPSET(opset12), - _REG_OPSET(opset13)}; + const static std::map> opset_map = {_REG_OPSET(opset3)}; #undef _REG_OPSET return opset_map; } -const OpSet& get_opset1() { - static OpSet opset(ov::get_opset1()); - return opset; -} - -const OpSet& get_opset2() { - static OpSet opset(ov::get_opset2()); - return opset; -} - const OpSet& get_opset3() { static OpSet opset(ov::get_opset3()); return opset; } -const OpSet& get_opset4() { - static OpSet opset(ov::get_opset4()); - return opset; -} - -const OpSet& get_opset5() { - static OpSet opset(ov::get_opset5()); - return opset; -} - -const OpSet& get_opset6() { - static OpSet opset(ov::get_opset6()); - return opset; -} - -const OpSet& get_opset7() { - static OpSet opset(ov::get_opset7()); - return opset; -} - -const OpSet& get_opset8() { - static OpSet opset(ov::get_opset8()); - return opset; -} - -const OpSet& get_opset9() { - static OpSet opset(ov::get_opset9()); - return opset; -} - -const OpSet& get_opset10() { - static OpSet opset(ov::get_opset10()); - return opset; -} - -const OpSet& get_opset11() { - static OpSet opset(ov::get_opset11()); - return opset; -} - -const OpSet& get_opset12() { - static OpSet opset(ov::get_opset12()); - return opset; -} - -const OpSet& get_opset13() { - static OpSet opset(ov::get_opset13()); - return opset; -} - } // namespace ngraph diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index 142fb71b345fdd..6745d09ccc5e3c 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -516,20 +516,6 @@ class XmlSerializer : public ov::AttributeVisitor { } else if (const auto& a = ov::as_type>>(&adapter)) { m_xml_node.append_attribute(name.c_str()).set_value(a->get()->get_info().variable_id.c_str()); - } else if (const auto& a = - ov::as_type>>(&adapter)) { - if (name == "value" && translate_type_name(m_node_type_name) == "Const") { - const int64_t size = a->get()->size(); - size_t new_size; - int64_t offset = m_constant_write_handler.write(static_cast(a->get()->get_ptr()), - size, - &new_size, - m_compress_to_fp16, - m_output_element_type); - - m_xml_node.append_attribute("offset").set_value(static_cast(offset)); - m_xml_node.append_attribute("size").set_value(static_cast(new_size)); - } } else if (const auto& a = ov::as_type>>(&adapter)) { if (name == "value" && translate_type_name(m_node_type_name) == "Const") { const int64_t size = a->get()->size(); diff --git a/src/core/src/pass/visualize_tree.cpp b/src/core/src/pass/visualize_tree.cpp index e981f7c4c95911..bcd3bcd1713390 100644 --- a/src/core/src/pass/visualize_tree.cpp +++ b/src/core/src/pass/visualize_tree.cpp @@ -376,6 +376,8 @@ static std::string get_value(const std::shared_ptr& consta case ov::element::Type_t::u4: case ov::element::Type_t::nf4: case ov::element::Type_t::i4: + case ov::element::Type_t::f8e4m3: + case ov::element::Type_t::f8e5m2: ss << constant->get_output_element_type(0).get_type_name() << " value"; break; case ov::element::Type_t::bf16: diff --git a/src/core/src/pattern/op/pattern.cpp b/src/core/src/pattern/op/pattern.cpp index 22156d08c39a2d..f3c95fea3c1291 100644 --- a/src/core/src/pattern/op/pattern.cpp +++ 
b/src/core/src/pattern/op/pattern.cpp @@ -107,6 +107,16 @@ std::function)> type_matches_any(const std::vector)> all_of(const std::vector)>>& predicates) { + return [=](Output output) -> bool { + for (auto& p : predicates) { + if (!p(output)) + return false; + } + return true; + }; +} } // namespace pattern } // namespace pass } // namespace ov diff --git a/src/core/src/runtime/aligned_buffer.cpp b/src/core/src/runtime/aligned_buffer.cpp index 4207eefe5db9b7..993a3928e79c93 100644 --- a/src/core/src/runtime/aligned_buffer.cpp +++ b/src/core/src/runtime/aligned_buffer.cpp @@ -2,71 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/runtime/aligned_buffer.hpp" +#include "openvino/runtime/aligned_buffer.hpp" #include #include -#include "ngraph/util.hpp" -#include "openvino/runtime/aligned_buffer.hpp" -#include "openvino/util/log.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -namespace ngraph { - -runtime::AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {} - -runtime::AlignedBuffer::AlignedBuffer(size_t byte_size, size_t alignment) : m_byte_size(byte_size) { - m_byte_size = std::max(1, byte_size); - size_t allocation_size = m_byte_size + alignment; - m_allocated_buffer = new char[allocation_size]; - m_aligned_buffer = m_allocated_buffer; - size_t mod = (alignment != 0) ? size_t(m_aligned_buffer) % alignment : 0; - - if (mod != 0) { - m_aligned_buffer += (alignment - mod); - } -} - -runtime::AlignedBuffer::AlignedBuffer(AlignedBuffer&& other) - : m_allocated_buffer(other.m_allocated_buffer), - m_aligned_buffer(other.m_aligned_buffer), - m_byte_size(other.m_byte_size) { - other.m_allocated_buffer = nullptr; - other.m_aligned_buffer = nullptr; - other.m_byte_size = 0; -} - -runtime::AlignedBuffer::~AlignedBuffer() { - if (m_allocated_buffer != nullptr) { - delete[] m_allocated_buffer; - } -} - -runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other) { - if (this != &other) { - if (m_allocated_buffer != nullptr) { - delete[] m_allocated_buffer; - } - m_allocated_buffer = other.m_allocated_buffer; - m_aligned_buffer = other.m_aligned_buffer; - m_byte_size = other.m_byte_size; - other.m_allocated_buffer = nullptr; - other.m_aligned_buffer = nullptr; - other.m_byte_size = 0; - } - return *this; -} -} // namespace ngraph - -namespace ov { -AttributeAdapter>::AttributeAdapter( - std::shared_ptr& value) - : DirectValueAccessor>(value) {} -} // namespace ov -NGRAPH_SUPPRESS_DEPRECATED_END - namespace ov { AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {} diff --git a/src/core/src/runtime/host_tensor.cpp b/src/core/src/runtime/host_tensor.cpp deleted file mode 100644 index 000e8de2f645d3..00000000000000 --- a/src/core/src/runtime/host_tensor.cpp +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/runtime/host_tensor.hpp" - -#include -#include - -#include "ngraph/op/constant.hpp" -#include "ngraph/util.hpp" - -using namespace ngraph; -using namespace std; -OPENVINO_SUPPRESS_DEPRECATED_START - -static const size_t alignment = 64; - -runtime::HostTensor::HostTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer) - : runtime::Tensor(std::make_shared(element_type, shape)), - m_memory_pointer(memory_pointer) { - if (get_partial_shape().is_static() && get_element_type().is_static()) { - allocate_buffer(); - } else { - m_buffer_size = 0; - 
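// Usage sketch for the new all_of combinator above (illustrative; type_matches and
// consumers_count are existing predicates from the same pattern namespace):
//
//     using namespace ov::pass::pattern;
//     auto relu = wrap_type<ov::op::v0::Relu>(
//         all_of({type_matches(ov::element::f32), consumers_count(1)}));
//     // relu now matches only f32 Relu outputs that feed exactly one consumer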
} -} - -runtime::HostTensor::HostTensor(const element::Type& element_type, const Shape& shape) - : HostTensor(element_type, shape, nullptr) {} - -runtime::HostTensor::HostTensor(const element::Type& element_type, const PartialShape& partial_shape) - : runtime::Tensor(std::make_shared(element_type, partial_shape)), - m_buffer_size(0) { - // Defer allocation until ptr is requested -} - -runtime::HostTensor::HostTensor() : HostTensor(element::dynamic, PartialShape::dynamic()) {} - -NGRAPH_SUPPRESS_DEPRECATED_START -runtime::HostTensor::HostTensor(const Output& value) - : HostTensor(value.get_element_type(), value.get_partial_shape()) {} -NGRAPH_SUPPRESS_DEPRECATED_END - -void runtime::HostTensor::allocate_buffer() { - NGRAPH_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(get_partial_shape().is_static(), - "Attempt to allocate buffer for tensor with partial shape: ", - get_partial_shape()); - NGRAPH_CHECK(get_element_type().is_static(), - "Attempt to allocate buffer for tensor with dynamic type: ", - get_element_type()); - m_buffer_size = m_descriptor->size(); - if (m_memory_pointer != nullptr) { - m_aligned_buffer_pool = m_memory_pointer; - } else { - // Add 1 so that even for zero-sized tensor we get at least 1 byte - size_t allocation_size = m_buffer_size + alignment + 1; - uint8_t* allocated_buffer_pool = static_cast(ngraph_malloc(allocation_size)); - m_allocated_buffer_pool = allocated_buffer_pool; - size_t mod = size_t(allocated_buffer_pool) % alignment; - if (mod == 0) { - m_aligned_buffer_pool = allocated_buffer_pool; - } else { - m_aligned_buffer_pool = (allocated_buffer_pool + alignment - mod); - } - } - NGRAPH_SUPPRESS_DEPRECATED_END -} - -NGRAPH_SUPPRESS_DEPRECATED_START -runtime::HostTensor::HostTensor(const std::shared_ptr& constant) : HostTensor() { - initialize(constant); -} -NGRAPH_SUPPRESS_DEPRECATED_END - -void runtime::HostTensor::initialize(const std::shared_ptr& constant) { - set_element_type(constant->get_output_element_type(0)); - set_shape(constant->get_output_shape(0)); - memcpy(get_data_ptr(), constant->get_data_ptr(), get_size_in_bytes()); -} - -runtime::HostTensor::~HostTensor() { - NGRAPH_SUPPRESS_DEPRECATED_START - if (m_allocated_buffer_pool != nullptr) { - ngraph_free(m_allocated_buffer_pool); - } - NGRAPH_SUPPRESS_DEPRECATED_END -} - -void* runtime::HostTensor::get_data_ptr() { - if (!m_aligned_buffer_pool) { - allocate_buffer(); - } - return m_aligned_buffer_pool; -} - -const void* runtime::HostTensor::get_data_ptr() const { - NGRAPH_CHECK(m_aligned_buffer_pool, "Buffer not initialized"); - return m_aligned_buffer_pool; -} - -void runtime::HostTensor::write(const void* source, size_t n) { - void* target = get_data_ptr(); - if (n != m_buffer_size) { - throw out_of_range("partial tensor write not supported"); - } - if (n > 0) { - if (!source) { - throw runtime_error("nullptr passed to HostTensor::write"); - } - memcpy(target, source, n); - } -} - -void runtime::HostTensor::read(void* target, size_t n) const { - const void* source = get_data_ptr(); - if (n != m_buffer_size) { - throw out_of_range("partial tensor read access not supported"); - } - if (n > 0) { - if (!target) { - throw runtime_error("nullptr passed to HostTensor::read"); - } - memcpy(target, source, n); - } -} - -bool runtime::HostTensor::get_is_allocated() const { - return m_aligned_buffer_pool != nullptr; -} - -void runtime::HostTensor::set_element_type(const element::Type& element_type) { - OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(get_element_type().is_dynamic() || get_element_type() == 
element_type, - "Can not change a static element type"); - m_descriptor->set_element_type(element_type); - OPENVINO_SUPPRESS_DEPRECATED_END -} - -void runtime::HostTensor::set_shape(const Shape& shape) { - NGRAPH_CHECK(PartialShape(shape).refines(get_partial_shape()) || - (m_descriptor->get_partial_shape().is_static() && - m_descriptor->get_partial_shape().to_shape() == ov::Shape{0}), - "Allocation shape ", - shape, - " must be compatible with the partial shape: ", - get_partial_shape()); - m_descriptor->m_partial_shape = shape; - m_descriptor->m_shape_changed = true; -} - -void runtime::HostTensor::set_unary(const HostTensorPtr& arg) { - set_element_type(arg->get_element_type()); - set_shape(arg->get_partial_shape().get_shape()); -} - -void runtime::HostTensor::set_broadcast(const op::AutoBroadcastSpec& autob, - const HostTensorPtr& arg0, - const HostTensorPtr& arg1) { - element::Type element_type = arg0->get_element_type(); - NGRAPH_CHECK(element::Type::merge(element_type, element_type, arg1->get_element_type()), - "Argument element types are inconsistent."); - set_broadcast(autob, arg0, arg1, element_type); -} - -void runtime::HostTensor::set_broadcast(const op::AutoBroadcastSpec& autob, - const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const element::Type& element_type) { - set_element_type(element_type); - - PartialShape pshape = arg0->get_partial_shape(); - if (autob.m_type == op::AutoBroadcastType::NONE) { - NGRAPH_CHECK(PartialShape::merge_into(pshape, arg1->get_partial_shape()), "Argument shapes are inconsistent."); - } else if (autob.m_type == op::AutoBroadcastType::NUMPY || autob.m_type == op::AutoBroadcastType::PDPD) { - NGRAPH_CHECK(PartialShape::broadcast_merge_into(pshape, arg1->get_partial_shape(), autob), - "Argument shapes are inconsistent."); - } else { - NGRAPH_CHECK(false, "Unsupported auto broadcast specification"); - } - set_shape(pshape.get_shape()); -} diff --git a/src/core/src/runtime/tensor.cpp b/src/core/src/runtime/tensor.cpp deleted file mode 100644 index f7f587d1a95e9d..00000000000000 --- a/src/core/src/runtime/tensor.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/runtime/tensor.hpp" - -using namespace ngraph; -using namespace std; - -OPENVINO_SUPPRESS_DEPRECATED_START - -const Shape& runtime::Tensor::get_shape() const { - return m_descriptor->get_shape(); -} - -const PartialShape& runtime::Tensor::get_partial_shape() const { - return m_descriptor->get_partial_shape(); -} - -const element::Type& runtime::Tensor::get_element_type() const { - return m_descriptor->get_element_type(); -} - -size_t runtime::Tensor::get_element_count() const { - return shape_size(m_descriptor->get_shape()); -} - -size_t runtime::Tensor::get_size_in_bytes() const { - return m_descriptor->size(); -} diff --git a/src/core/src/specialize_function.cpp b/src/core/src/specialize_function.cpp index 1cbb7c1116b7b6..08401fde6fc389 100644 --- a/src/core/src/specialize_function.cpp +++ b/src/core/src/specialize_function.cpp @@ -5,13 +5,14 @@ #include "ngraph/specialize_function.hpp" #include "itt.hpp" -#include "ngraph/op/assign.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/util/op_types.hpp" +#include "openvino/op/constant.hpp" using namespace ngraph; NGRAPH_SUPPRESS_DEPRECATED_START; +using ov::op::v0::Constant; + std::shared_ptr ngraph::specialize_function(std::shared_ptr f, const std::vector& parameter_element_types, const std::vector& parameter_shapes, @@ -20,21 +21,21 
@@ std::shared_ptr ngraph::specialize_function(std::shared_ptr { OV_ITT_SCOPED_TASK(ov::itt::domains::core, "specialize_function"); - NGRAPH_CHECK(f->get_parameters().size() == parameter_shapes.size()); - NGRAPH_CHECK(f->get_parameters().size() == parameter_element_types.size()); - NGRAPH_CHECK(f->get_parameters().size() == parameter_values.size()); + OPENVINO_ASSERT(f->get_parameters().size() == parameter_shapes.size()); + OPENVINO_ASSERT(f->get_parameters().size() == parameter_element_types.size()); + OPENVINO_ASSERT(f->get_parameters().size() == parameter_values.size()); NodeMap m; for (size_t i = 0; i < parameter_shapes.size(); i++) { - NGRAPH_CHECK(f->get_parameters()[i]->get_element_type().is_dynamic() || - parameter_element_types[i] == f->get_parameters()[i]->get_element_type()); + OPENVINO_ASSERT(f->get_parameters()[i]->get_element_type().is_dynamic() || + parameter_element_types[i] == f->get_parameters()[i]->get_element_type()); if (parameter_values[i] != nullptr && parameter_shapes[i].is_static() && parameter_element_types[i].is_static()) { - m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], - parameter_shapes[i].to_shape(), - parameter_values[i]); + m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], + parameter_shapes[i].to_shape(), + parameter_values[i]); } else { m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], parameter_shapes[i]); @@ -86,12 +87,12 @@ std::shared_ptr ngraph::specialize_function(std::shared_ptr ResultVector new_results = f->get_results(); for (size_t i = 0; i < new_results.size(); i++) { auto name = new_results[i]->get_friendly_name(); - new_results[i] = std::static_pointer_cast(m[new_results[i].get()]); + new_results[i] = std::static_pointer_cast(m[new_results[i].get()]); new_results[i]->set_friendly_name(name); } - SinkVector new_sinks = f->get_sinks(); + auto new_sinks = f->get_sinks(); for (size_t i = 0; i < new_sinks.size(); i++) { - new_sinks[i] = std::static_pointer_cast(m[new_sinks[i].get()]); + new_sinks[i] = std::static_pointer_cast(m[new_sinks[i].get()]); } return std::make_shared(new_results, new_sinks, new_parameters); diff --git a/src/core/src/type/element_type.cpp b/src/core/src/type/element_type.cpp index e5607e6a60a4de..a49312b6530378 100644 --- a/src/core/src/type/element_type.cpp +++ b/src/core/src/type/element_type.cpp @@ -71,6 +71,10 @@ inline TypeInfo get_type_info(ov::element::Type_t type) { return {64, false, false, false, "uint64_t", "u64"}; case ov::element::Type_t::nf4: return {4, false, false, true, "nfloat4", "nf4"}; + case ov::element::Type_t::f8e4m3: + return {8, true, true, true, "f8e4m3", "f8e4m3"}; + case ov::element::Type_t::f8e5m2: + return {8, true, true, true, "f8e5m2", "f8e5m2"}; case ov::element::Type_t::string: return {8 * sizeof(std::string), false, false, false, "string", "string"}; default: @@ -119,6 +123,10 @@ ov::element::Type type_from_string(const std::string& type) { return ::ov::element::Type(::ov::element::Type_t::dynamic); } else if (type == "nf4" || type == "NF4") { return ::ov::element::Type(::ov::element::Type_t::nf4); + } else if (type == "f8e4m3" || type == "F8E4M3") { + return ::ov::element::Type(::ov::element::Type_t::f8e4m3); + } else if (type == "f8e5m2" || type == "F8E5M2") { + return ::ov::element::Type(::ov::element::Type_t::f8e5m2); } else { OPENVINO_THROW("Incorrect type: ", type); } @@ -126,24 +134,12 @@ ov::element::Type type_from_string(const std::string& type) { } // namespace std::vector 
ov::element::Type::get_known_types() { - std::vector rc = {&ov::element::dynamic, - &ov::element::boolean, - &ov::element::bf16, - &ov::element::f16, - &ov::element::f32, - &ov::element::f64, - &ov::element::i4, - &ov::element::i8, - &ov::element::i16, - &ov::element::i32, - &ov::element::i64, - &ov::element::u1, - &ov::element::u4, - &ov::element::u8, - &ov::element::u16, - &ov::element::u32, - &ov::element::u64, - &ov::element::string}; + std::vector rc = { + &ov::element::dynamic, &ov::element::boolean, &ov::element::bf16, &ov::element::f16, &ov::element::f32, + &ov::element::f64, &ov::element::i4, &ov::element::i8, &ov::element::i16, &ov::element::i32, + &ov::element::i64, &ov::element::u1, &ov::element::u4, &ov::element::u8, &ov::element::u16, + &ov::element::u32, &ov::element::u64, &ov::element::nf4, &ov::element::f8e4m3, &ov::element::f8e5m2, + &ov::element::string}; return rc; } @@ -172,7 +168,9 @@ ov::element::Type::Type(size_t bitwidth, {ov::element::Type_t::u16, {16, false, false, false, "uint16_t", "u16"}}, {ov::element::Type_t::u32, {32, false, false, false, "uint32_t", "u32"}}, {ov::element::Type_t::u64, {64, false, false, false, "uint64_t", "u64"}}, - {ov::element::Type_t::u4, {4, false, false, false, "uint4_t", "nf4"}}, + {ov::element::Type_t::nf4, {4, false, false, true, "nfloat4", "nf4"}}, + {ov::element::Type_t::f8e4m3, {8, true, true, true, "f8e4m3", "f8e4m3"}}, + {ov::element::Type_t::f8e5m2, {8, true, true, true, "f8e5m2", "f8e5m2"}}, {ov::element::Type_t::string, {8 * sizeof(std::string), false, false, false, "string", "string"}}, }; for (const auto& t : elements_map) { @@ -266,6 +264,14 @@ Type from() { return Type_t::bf16; } template <> +Type from() { + return Type_t::f8e4m3; +} +template <> +Type from() { + return Type_t::f8e5m2; +} +template <> Type from() { return Type_t::string; } @@ -282,6 +288,10 @@ Type fundamental_type_for(const Type& type) { return from::value_type>(); case Type_t::f64: return from::value_type>(); + case Type_t::f8e4m3: + return from::value_type>(); + case Type_t::f8e5m2: + return from::value_type>(); case Type_t::i4: return from::value_type>(); case Type_t::i8: @@ -304,6 +314,8 @@ Type fundamental_type_for(const Type& type) { return from::value_type>(); case Type_t::u64: return from::value_type>(); + case Type_t::nf4: + return from::value_type>(); case Type_t::string: return from::value_type>(); default: @@ -320,24 +332,13 @@ std::ostream& ov::element::operator<<(std::ostream& out, const ov::element::Type std::istream& ov::element::operator>>(std::istream& in, ov::element::Type& obj) { const std::unordered_map legacy = { - {"BOOL", ov::element::boolean}, - {"BF16", ov::element::bf16}, - {"I4", ov::element::i4}, - {"I8", ov::element::i8}, - {"I16", ov::element::i16}, - {"I32", ov::element::i32}, - {"I64", ov::element::i64}, - {"U4", ov::element::u4}, - {"U8", ov::element::u8}, - {"U16", ov::element::u16}, - {"U32", ov::element::u32}, - {"U64", ov::element::u64}, - {"FP32", ov::element::f32}, - {"FP64", ov::element::f64}, - {"FP16", ov::element::f16}, - {"BIN", ov::element::u1}, - {"NF4", ov::element::nf4}, - {"STRING", ov::element::string}, + {"BOOL", ov::element::boolean}, {"BF16", ov::element::bf16}, {"I4", ov::element::i4}, + {"I8", ov::element::i8}, {"I16", ov::element::i16}, {"I32", ov::element::i32}, + {"I64", ov::element::i64}, {"U4", ov::element::u4}, {"U8", ov::element::u8}, + {"U16", ov::element::u16}, {"U32", ov::element::u32}, {"U64", ov::element::u64}, + {"FP32", ov::element::f32}, {"FP64", ov::element::f64}, {"FP16", 
ov::element::f16}, + {"BIN", ov::element::u1}, {"NF4", ov::element::nf4}, {"F8E4M3", ov::element::f8e4m3}, + {"F8E5M2", ov::element::f8e5m2}, {"STRING", ov::element::string}, }; std::string str; in >> str; @@ -420,6 +421,8 @@ inline size_t compiler_byte_size(ov::element::Type_t et) { ET_CASE(u32); ET_CASE(u64); ET_CASE(nf4); + ET_CASE(f8e4m3); + ET_CASE(f8e5m2); ET_CASE(string); #undef ET_CASE case ov::element::Type_t::undefined: @@ -454,6 +457,8 @@ OPENVINO_API EnumNames& EnumNames::get() { {"u32", element::Type_t::u32}, {"u64", element::Type_t::u64}, {"nf4", element::Type_t::nf4}, + {"f8e4m3", element::Type_t::f8e4m3}, + {"f8e5m2", element::Type_t::f8e5m2}, {"string", element::Type_t::string}}); return enum_names; } diff --git a/src/core/src/type/float8_e4m3.cpp b/src/core/src/type/float8_e4m3.cpp new file mode 100644 index 00000000000000..9041b8a0070497 --- /dev/null +++ b/src/core/src/type/float8_e4m3.cpp @@ -0,0 +1,135 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/type/float8_e4m3.hpp" + +#include +#include +#include + +namespace ov { + +static_assert(sizeof(float8_e4m3) == 1, "class f8e4m3 must be exactly 1 byte"); +static_assert(std::is_trivially_constructible::value, "should be trivially constructible"); +static_assert(std::is_trivially_copyable::value, "must be trivially copyable"); +static_assert(std::is_trivially_destructible::value, "must be trivially destructible"); + +namespace { +constexpr auto float_nan = std::numeric_limits::quiet_NaN(); +// Lookup table for conversion f8 -> float. The f8 bit value without sign bit (masked 0x7f) is LUT offset. +static constexpr std::array f8_to_float_lut{ + 0.0f, 0.001953125f, 0.00390625f, 0.005859375f, 0.0078125f, 0.009765625f, 0.01171875f, 0.013671875f, + 0.015625f, 0.017578125f, 0.01953125f, 0.021484375f, 0.0234375f, 0.025390625f, 0.02734375f, 0.029296875f, + 0.03125f, 0.03515625f, 0.0390625f, 0.04296875f, 0.046875f, 0.05078125f, 0.0546875f, 0.05859375f, + 0.0625f, 0.0703125f, 0.078125f, 0.0859375f, 0.09375f, 0.1015625f, 0.109375f, 0.1171875f, + 0.125f, 0.140625f, 0.15625f, 0.171875f, 0.1875f, 0.203125f, 0.21875f, 0.234375f, + 0.25f, 0.28125f, 0.3125f, 0.34375f, 0.375f, 0.40625f, 0.4375f, 0.46875f, + 0.5f, 0.5625f, 0.625f, 0.6875f, 0.75f, 0.8125f, 0.875f, 0.9375f, + 1.0f, 1.125f, 1.25f, 1.375f, 1.5f, 1.625f, 1.75f, 1.875f, + 2.0f, 2.25f, 2.5f, 2.75f, 3.0f, 3.25f, 3.5f, 3.75f, + 4.0f, 4.5f, 5.0f, 5.5f, 6.0f, 6.5f, 7.0f, 7.5f, + 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f, + 32.0f, 36.0f, 40.0f, 44.0f, 48.0f, 52.0f, 56.0f, 60.0f, + 64.0f, 72.0f, 80.0f, 88.0f, 96.0f, 104.0f, 112.0f, 120.0f, + 128.0f, 144.0f, 160.0f, 176.0f, 192.0f, 208.0f, 224.0f, 240.0f, + 256.0f, 288.0f, 320.0f, 352.0f, 384.0f, 416.0f, 448.0f, float_nan}; + +constexpr uint32_t three_bytes_shift = 24; + +constexpr uint8_t f8e4m3_s_mask = 0x80; // f8e4m3 sign bit mask +constexpr uint8_t f8e4m3_e_size = 4; // f8e4m3 exponent bit size +constexpr uint8_t f8e4m3_e_mask = 0x78; // f8e4m3 exponent bit mask +constexpr uint8_t f8e4m3_e_bias = 7; // f8e4m3 exponent bias +constexpr uint8_t f8e4m3_e_max = 0x0f; // f8e4m3 exponent max value +constexpr uint8_t f8e4m3_m_size = 3; // f8e4m3 mantissa bits size +constexpr uint8_t f8e4m3_m_mask = 0x07; // f8e4m3 mantissa bit mask + +union f32_t { + float value; + uint32_t bits; +}; + +uint8_t f32_to_f8e4m3_bits(const float value) { + constexpr uint32_t f32_s_mask = 0x80000000; // f32 sign bit mask + 
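// Summary of the lookup table and constants above (annotation, not patch content):
// f8e4m3 is 1 sign / 4 exponent / 3 mantissa bits with bias 7 and no infinities; the
// last LUT entry shows the largest finite magnitude is 448.0f, and the all-ones
// exponent/mantissa pattern is NaN, which is why overflow saturates to NaN below.
//
//     ov::float8_e4m3 x{448.0f};           // exactly representable, round-trips
//     ov::float8_e4m3 y{1e6f};             // overflows and saturates to the NaN pattern
//     float back = static_cast<float>(x);  // decoded via f8_to_float_lut: back == 448.0f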
constexpr uint32_t f32_e_mask = 0x7F800000;  // f32 exponent bits mask
+    constexpr uint32_t f32_e_bias = 127;         // f32 exponent bias
+    constexpr uint32_t f32_e_size = 8;           // f32 exponent bits size
+    constexpr uint32_t f32_m_mask = 0x007fffff;  // f32 mantissa bits mask
+    constexpr uint32_t f32_m_size = 23;          // f32 mantissa bits size
+
+    constexpr uint32_t f8_e_mask = f8e4m3_e_mask << three_bytes_shift;  // f8 exponent bits mask (on u32)
+    constexpr uint32_t f8_m_mask = f8e4m3_m_mask << three_bytes_shift;  // f8 mantissa bits mask (on u32)
+    constexpr uint32_t f8_m_hidden_one_mask = 0x08000000;               // f8 mantissa hidden one bits mask (on u32)
+
+    constexpr uint32_t round_half = 0x01ffffff;  // mask of the f8 LSB, guard and sticky bits, used for the tie check
+    constexpr uint32_t round_norm = 0x007fffff;  // mask of the sticky bits below the guard bit
+    constexpr uint32_t round_even = 0x00800000;  // guard-bit value added when rounding up
+    constexpr uint32_t round_odd = 0x01800000;   // LSB-plus-guard pattern: a tie on an odd mantissa, which rounds up
+
+    const auto input = f32_t{value};
+    auto f8_bits = static_cast<uint8_t>((input.bits & f32_s_mask) >> three_bytes_shift);
+
+    uint32_t f32_e_field = input.bits & f32_e_mask;
+
+    if (f32_e_field == f32_e_mask) {
+        f8_bits |= (f8e4m3_e_mask | f8e4m3_m_mask);
+    } else if (f32_e_field != 0) {
+        int32_t f8_biased_exp = (f32_e_field >> f32_m_size) - (f32_e_bias - f8e4m3_e_bias);
+        uint32_t fractional = (input.bits & f32_m_mask) << (f32_e_size - f8e4m3_e_size);
+
+        // for normalized values, apply rounding and update the f8 fraction and biased exponent
+        if ((fractional & round_half) == round_odd || (fractional & round_norm) != 0) {
+            fractional += round_even;
+            if (0 != (fractional & f8_e_mask)) {
+                fractional &= f8_e_mask;
+                ++f8_biased_exp;
+            }
+        }
+        fractional &= f8_m_mask;
+
+        // set exponent and mantissa on f8 bits
+        if (f8_biased_exp > f8e4m3_e_max) {
+            // Use NaN as this type has no infinity
+            f8_bits |= (f8e4m3_e_mask | f8e4m3_m_mask);
+        } else if (f8_biased_exp > 0) {
+            f8_bits |= (f8_biased_exp << f8e4m3_m_size) | (fractional >> three_bytes_shift);
+        } else {
+            // Restore the hidden 1 in f8 mantissa for subnormal calculation
+            fractional = f8_m_hidden_one_mask | (input.bits & f32_m_mask) << (f32_e_size - f8e4m3_e_size);
+            // Will any bits be shifted off?
+            int32_t shift = f8_biased_exp < -(f8e4m3_e_max) ? 0 : (1U << (1 - f8_biased_exp));
+            uint32_t sticky = (fractional & (shift - 1)) ? 1 : 0;
+
+            fractional = ((1 + f8_biased_exp) > f8e4m3_e_max) ?
0 : fractional >> (1 - f8_biased_exp);
+            fractional |= sticky;
+            // apply rounding
+            if (((fractional & round_half) == round_odd) || ((fractional & round_norm) != 0)) {
+                fractional += round_even;
+            }
+
+            f8_bits |= fractional >> three_bytes_shift;
+        }
+    }
+
+    return f8_bits;
+}
+} // namespace
+
+float8_e4m3::float8_e4m3(const uint32_t sign, const uint32_t biased_exponent, const uint32_t fraction)
+    : m_value(((sign & 0x01U) << (f8e4m3_e_size + f8e4m3_m_size)) |
+              (biased_exponent & (f8e4m3_e_mask >> f8e4m3_m_size)) << f8e4m3_m_size | (fraction & f8e4m3_m_mask)) {}
+
+float8_e4m3::float8_e4m3(const float value) : m_value{f32_to_f8e4m3_bits(value)} {}
+
+float8_e4m3::operator float() const {
+    auto converted = f32_t{f8_to_float_lut[m_value & (f8e4m3_e_mask | f8e4m3_m_mask)]};
+    converted.bits |= (m_value & f8e4m3_s_mask) << three_bytes_shift;
+    return converted.value;
+}
+
+uint8_t float8_e4m3::to_bits() const {
+    return m_value;
+}
+} // namespace ov
diff --git a/src/core/src/type/float8_e5m2.cpp b/src/core/src/type/float8_e5m2.cpp
new file mode 100644
index 00000000000000..b44a0f75b21948
--- /dev/null
+++ b/src/core/src/type/float8_e5m2.cpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/core/type/float8_e5m2.hpp"
+
+#include
+#include
+
+#include "openvino/reference/fake_convert.hpp"
+
+namespace ov {
+static_assert(sizeof(float8_e5m2) == 1, "class f8e5m2 must be exactly 1 byte");
+static_assert(std::is_trivially_constructible<float8_e5m2>::value, "should be trivially constructible");
+static_assert(std::is_trivially_copyable<float8_e5m2>::value, "must be trivially copyable");
+static_assert(std::is_trivially_destructible<float8_e5m2>::value, "must be trivially destructible");
+
+namespace {
+
+constexpr uint8_t byte_shift = 8;
+
+constexpr uint8_t f8e5m2_e_size = 5;     // f8e5m2 exponent bit size
+constexpr uint8_t f8e5m2_e_mask = 0x7c;  // f8e5m2 exponent bit mask
+constexpr uint8_t f8e5m2_m_size = 2;     // f8e5m2 mantissa bits size
+constexpr uint8_t f8e5m2_m_mask = 0x03;  // f8e5m2 mantissa bit mask
+
+uint8_t f32_to_f8e5m2_bits(const float value) {
+    auto f16 = static_cast<float16>(value);
+    reference::func::emulate_f8e5m2_on_fp16(&f16, &f16, 1);
+    return static_cast<uint8_t>(f16.to_bits() >> byte_shift);
+}
+} // namespace
+
+float8_e5m2::float8_e5m2(uint32_t sign, uint32_t biased_exponent, uint32_t fraction)
+    : m_value((sign & 0x01) << (f8e5m2_e_size + f8e5m2_m_size) |
+              (biased_exponent & (f8e5m2_e_mask >> f8e5m2_m_size)) << f8e5m2_m_size | (fraction & f8e5m2_m_mask)) {}
+
+float8_e5m2::float8_e5m2(const float value) : m_value(f32_to_f8e5m2_bits(value)) {}
+
+float8_e5m2::operator float() const {
+    return static_cast<float>(float16::from_bits(static_cast<uint16_t>(m_value) << byte_shift));
+}
+
+uint8_t float8_e5m2::to_bits() const {
+    return m_value;
+}
+} // namespace ov
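// A note on the e5m2 path above (annotation, not patch content): f8e5m2 shares fp16's
// 5-bit exponent, so the conversion rounds to fp16 via emulate_f8e5m2_on_fp16() and
// keeps the upper byte of the fp16 bit pattern; decoding widens that byte back to fp16.
//
//     ov::float8_e5m2 x{3.1f};             // nearest representable value is 3.0f (1.5 * 2^1)
//     float back = static_cast<float>(x);  // back == 3.0f
diff --git a/src/core/src/util.cpp b/src/core/src/util.cpp
index 49ae1575101e7b..cf94286b04116d 100644
--- a/src/core/src/util.cpp
+++ b/src/core/src/util.cpp
@@ -262,7 +262,7 @@ void parse_version_string(std::string version, size_t& major, size_t& minor, size_t& patch) {
 }
 } // namespace ngraph

-std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv) {
+std::vector<float> read_float_vector(std::shared_ptr<ov::Tensor> tv) {
     std::vector<float> float_vec;
     ov::element::Type element_type = tv->get_element_type();

@@ -338,7 +338,7 @@ std::vector<float> read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv
     return float_vec;
 }

-std::vector<int64_t> read_index_vector(std::shared_ptr<ngraph::runtime::Tensor> tv) {
+std::vector<int64_t> read_index_vector(std::shared_ptr<ov::Tensor> tv) {
     std::vector<int64_t> index_vec;
     ov::element::Type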
element_type = tv->get_element_type(); diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index aa71e2122ede58..a4597f7aac6a76 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -10,10 +10,10 @@ #include "bound_evaluate.hpp" #include "compare.hpp" #include "ngraph/evaluator.hpp" -#include "ngraph/op/negative.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/gather.hpp" +#include "openvino/op/negative.hpp" #include "openvino/op/ops.hpp" #include "sequnce_generator.hpp" #include "validation_util.hpp" @@ -22,6 +22,12 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { using ov::Dimension; +namespace op { +namespace v0 { +using ov::op::v0::Constant; +using ov::op::v0::Negative; +} // namespace v0 +} // namespace op Strides conv_default_strides(const Node* /* node */, const PartialShape& data_batch_shape, @@ -882,35 +888,6 @@ std::pair maximum_value(const Output& value) { return std::pair(val.m_value < std::numeric_limits::max(), val.m_value); } -void evaluate_nodes(std::map& value_map, - std::map& output_tensor_map, - const OutputVector& outputs, - const EvaluationContext& evaluation_context) { - Evaluator evaluator({}, value_map); - evaluator.set_universal_handler( - [&output_tensor_map, &evaluation_context](Node* node, - const HostTensorVector& input_tensors) -> HostTensorVector { - HostTensorVector output_tensors; - for (const auto& v : node->outputs()) { - auto it = output_tensor_map.find(v); - if (it == output_tensor_map.end()) { - auto c = std::make_shared(v); - output_tensors.push_back(c); - } else { - output_tensors.push_back(it->second); - } - } - if (node->evaluate(output_tensors, input_tensors, evaluation_context)) { - return output_tensors; - } else { - OPENVINO_THROW("Evaluation failed on ", node); - } - }); - for (const auto& value : outputs) { - evaluator.evaluate(value); - } -} - std::shared_ptr get_constant_max_of_type(element::Type_t t) { auto tensor = ov::util::make_tensor_of_max_value(t); return tensor ? 
std::make_shared(tensor) : nullptr; @@ -951,15 +928,6 @@ std::shared_ptr get_constant_lowest_of_type(element::Type_t t) return nullptr; } } - -bool validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { - return (tensor_vector.size() == size) && - std::none_of(tensor_vector.cbegin(), tensor_vector.cend(), ov::cmp::Equal(nullptr)); -} - -std::shared_ptr operator-(const Output& arg0) { - return std::make_shared(arg0); -} } // namespace ngraph void ov::infer_auto_padding(const Shape& image_shape, diff --git a/src/core/template_extension/CMakeLists.txt b/src/core/template_extension/CMakeLists.txt index 6d759918ded42c..cff16cefc7dcca 100644 --- a/src/core/template_extension/CMakeLists.txt +++ b/src/core/template_extension/CMakeLists.txt @@ -2,10 +2,24 @@ # SPDX-License-Identifier: Apache-2.0 # -add_subdirectory(old) -add_subdirectory(new) +# [cmake:extension] +set(CMAKE_CXX_STANDARD 11) -# Enable code style check +set(TARGET_NAME "openvino_template_extension") + +find_package(OpenVINO REQUIRED) + +set(SRC identity.cpp ov_extension.cpp) + +add_library(${TARGET_NAME} MODULE ${SRC}) -file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/new/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/new/*.hpp") +target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_OPENVINO_EXTENSION_API) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) + +# [cmake:extension] +install(TARGETS ${TARGET_NAME} + LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) + +# Enable code style check +file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp") ov_add_clang_format_target(openvino_template_extension_clang FOR_SOURCES ${template_extension_src}) diff --git a/src/core/template_extension/new/identity.cpp b/src/core/template_extension/identity.cpp similarity index 100% rename from src/core/template_extension/new/identity.cpp rename to src/core/template_extension/identity.cpp diff --git a/src/core/template_extension/new/identity.hpp b/src/core/template_extension/identity.hpp similarity index 100% rename from src/core/template_extension/new/identity.hpp rename to src/core/template_extension/identity.hpp diff --git a/src/core/template_extension/new/CMakeLists.txt b/src/core/template_extension/new/CMakeLists.txt deleted file mode 100644 index 10860e773bbcc6..00000000000000 --- a/src/core/template_extension/new/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# [cmake:extension] -set(CMAKE_CXX_STANDARD 11) - -set(TARGET_NAME "openvino_template_extension") - -find_package(OpenVINO REQUIRED) - -set(SRC identity.cpp ov_extension.cpp) - -add_library(${TARGET_NAME} MODULE ${SRC}) - -target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_OPENVINO_EXTENSION_API) -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) -# [cmake:extension] -install(TARGETS ${TARGET_NAME} - LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/core/template_extension/old/CMakeLists.txt b/src/core/template_extension/old/CMakeLists.txt deleted file mode 100644 index e93b371f7b762e..00000000000000 --- a/src/core/template_extension/old/CMakeLists.txt +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# [cmake:extension] -set(CMAKE_CXX_STANDARD 11) - -set(TARGET_NAME "template_extension") - -find_package(OpenVINO REQUIRED COMPONENTS Runtime 
OPTIONAL_COMPONENTS ONNX) -find_package(OpenCV QUIET COMPONENTS core) -if(OpenCV_VERSION VERSION_LESS 3) - set(OpenCV_FOUND OFF) -endif() - -set(SRC cpu_kernel.cpp extension.cpp op.cpp) - -if(OpenCV_FOUND) - list(APPEND SRC fft_kernel.cpp fft_op.cpp) -endif() - -add_library(${TARGET_NAME} MODULE ${SRC}) - -if(OpenCV_FOUND) - target_compile_definitions(${TARGET_NAME} PRIVATE OPENCV_IMPORT_ENABLED) - target_link_libraries(${TARGET_NAME} PRIVATE opencv_core) -endif() - -target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API) -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) - -if(OpenVINO_Frontend_ONNX_FOUND) - target_link_libraries(${TARGET_NAME} PRIVATE openvino::frontend::onnx) - target_compile_definitions(${TARGET_NAME} PRIVATE OPENVINO_ONNX_FRONTEND_ENABLED) -endif() -# [cmake:extension] - -# Enable code style check -file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp") -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src}) - -install(TARGETS ${TARGET_NAME} - LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/core/template_extension/old/cpu_kernel.cpp b/src/core/template_extension/old/cpu_kernel.cpp deleted file mode 100644 index f754cb8f90e01b..00000000000000 --- a/src/core/template_extension/old/cpu_kernel.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "cpu_kernel.hpp" - -#include - -#include "op.hpp" - -using namespace TemplateExtension; - -//! [cpu_implementation:ctor] -OpImplementation::OpImplementation(const std::shared_ptr& node) { - try { - auto castedNode = std::dynamic_pointer_cast(node); - if (!castedNode) - IE_THROW() << "Cannot create implementation for unknown operation!"; - if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1) - IE_THROW() << "Cannot create implementation for operation with incorrect number of inputs or outputs!"; - if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic()) - IE_THROW() << "Cannot create implementation for op with dynamic shapes!"; - if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4) - IE_THROW() << "Operation supports only 4d tensors for input and output."; - if (castedNode->get_input_element_type(0) != ngraph::element::f32 || - castedNode->get_output_element_type(0) != ngraph::element::f32) - IE_THROW() << "Operation supports only FP32 tensors."; - add = castedNode->getAddAttr(); - inShape = castedNode->get_input_shape(0); - outShape = castedNode->get_output_shape(0); - } catch (InferenceEngine::Exception& ex) { - error = ex.what(); - } -} -//! [cpu_implementation:ctor] - -//! 
[cpu_implementation:getSupportedConfigurations] -InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations( - std::vector& conf, - InferenceEngine::ResponseDesc* resp) noexcept { - auto createConfig = [](const InferenceEngine::SizeVector inShape, - const InferenceEngine::SizeVector& outShape, - bool planar) { - InferenceEngine::LayerConfig config; - config.dynBatchSupport = false; - InferenceEngine::DataConfig inData; - InferenceEngine::DataConfig outData; - InferenceEngine::SizeVector order = {0, 1, 2, 3}; - // Allow any offset before data - size_t offset((std::numeric_limits::max)()); - if (planar) { - inData.desc = - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset}); - config.inConfs.push_back(inData); - outData.desc = - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset}); - config.outConfs.push_back(outData); - } else { - // Add blocked (nChw8c) format - auto div_up = [](const int a, const int b) -> int { - if (!b) - return 0; - return (a + b - 1) / b; - }; - - order.push_back(1); - InferenceEngine::SizeVector inBlkDims = inShape; - inBlkDims[1] = div_up(static_cast(inBlkDims[1]), 8); - inBlkDims.push_back(8); - InferenceEngine::SizeVector outBlkDims = outShape; - outBlkDims[1] = div_up(static_cast(outBlkDims[1]), 8); - outBlkDims.push_back(8); - inData.desc = - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset}); - config.inConfs.push_back(inData); - outData.desc = - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset}); - config.outConfs.push_back(outData); - } - return config; - }; - if (!error.empty()) { - if (resp) { - strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg) - 1] = 0; - } - return InferenceEngine::GENERAL_ERROR; - } - // Add planar format - conf.emplace_back(createConfig(inShape, outShape, true)); - // Add blocked format nChw8c - conf.emplace_back(createConfig(inShape, outShape, false)); - return InferenceEngine::OK; -} -//! [cpu_implementation:getSupportedConfigurations] - -//! [cpu_implementation:init] -InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, - InferenceEngine::ResponseDesc* resp) noexcept { - try { - if (config.inConfs.size() != 1 || config.outConfs.size() != 1) { - IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!"; - } - - if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) { - IE_THROW() << "Operation can be initialized only with 4d input/output tensors!"; - } - - if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 || - config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { - IE_THROW() << "Operation supports only FP32 precisions!"; - } - } catch (InferenceEngine::Exception& ex) { - error = ex.what(); - if (resp) { - strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg) - 1] = 0; - } - return InferenceEngine::GENERAL_ERROR; - } - - return InferenceEngine::OK; -} -//! [cpu_implementation:init] - -//! 
[cpu_implementation:execute] -InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, - std::vector& outputs, - InferenceEngine::ResponseDesc* resp) noexcept { - const float* src_data = - inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - float* dst_data = - outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - - for (size_t i = 0; i < inputs[0]->size(); i++) { - dst_data[i] = src_data[i] + add; - } - return InferenceEngine::OK; -} -//! [cpu_implementation:execute] diff --git a/src/core/template_extension/old/cpu_kernel.hpp b/src/core/template_extension/old/cpu_kernel.hpp deleted file mode 100644 index 4dece3ab3417b2..00000000000000 --- a/src/core/template_extension/old/cpu_kernel.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include - -namespace TemplateExtension { - -//! [cpu_implementation:header] -class OpImplementation : public InferenceEngine::ILayerExecImpl { -public: - explicit OpImplementation(const std::shared_ptr& node); - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, - InferenceEngine::ResponseDesc* resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, - InferenceEngine::ResponseDesc* resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector& inputs, - std::vector& outputs, - InferenceEngine::ResponseDesc* resp) noexcept override; - -private: - int64_t add; - ngraph::Shape inShape; - ngraph::Shape outShape; - std::string error; -}; -//! [cpu_implementation:header] - -} // namespace TemplateExtension diff --git a/src/core/template_extension/old/extension.cpp b/src/core/template_extension/old/extension.cpp deleted file mode 100644 index 8e293fd93dd3fd..00000000000000 --- a/src/core/template_extension/old/extension.cpp +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "extension.hpp" - -#include "cpu_kernel.hpp" -#include "op.hpp" -#ifdef OPENCV_IMPORT_ENABLED -# include "fft_kernel.hpp" -# include "fft_op.hpp" -#endif -#include -#ifdef OPENVINO_ONNX_FRONTEND_ENABLED -# include -#endif -#include -#include -#include -#include - -#include "openvino/core/deprecated.hpp" - -using namespace TemplateExtension; - -//! [extension:ctor] -Extension::Extension() { -#ifdef OPENVINO_ONNX_FRONTEND_ENABLED - OPENVINO_SUPPRESS_DEPRECATED_START - ngraph::onnx_import::register_operator(Operation::get_type_info_static().name, - 1, - "custom_domain", - [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - int64_t add = node.get_attribute_value("add"); - return {std::make_shared(ng_inputs.at(0), add)}; - }); -# ifdef OPENCV_IMPORT_ENABLED - ngraph::onnx_import::register_operator(FFTOp::get_type_info_static().name, - 1, - "custom_domain", - [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - bool inverse = node.get_attribute_value("inverse"); - return {std::make_shared(ng_inputs.at(0), inverse)}; - }); -# endif - OPENVINO_SUPPRESS_DEPRECATED_END -#endif -} -//! [extension:ctor] - -//! 
[extension:dtor] -Extension::~Extension() { -#ifdef OPENVINO_ONNX_FRONTEND_ENABLED - OPENVINO_SUPPRESS_DEPRECATED_START - ngraph::onnx_import::unregister_operator(Operation::get_type_info_static().name, 1, "custom_domain"); -# ifdef OPENCV_IMPORT_ENABLED - ngraph::onnx_import::unregister_operator(FFTOp::get_type_info_static().name, 1, "custom_domain"); -# endif // OPENCV_IMPORT_ENABLED - OPENVINO_SUPPRESS_DEPRECATED_END -#endif // OPENVINO_ONNX_FRONTEND_ENABLED -} -//! [extension:dtor] - -//! [extension:GetVersion] -void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept { - static InferenceEngine::Version ExtensionDescription = { - {1, 0}, // extension API version - "1.0", - "template_ext" // extension description message - }; - - versionInfo = &ExtensionDescription; -} -//! [extension:GetVersion] - -//! [extension:getOpSets] -std::map Extension::getOpSets() { - std::map opsets; - ngraph::OpSet opset; - opset.insert(); -#ifdef OPENCV_IMPORT_ENABLED - opset.insert(); -#endif - opsets["custom_opset"] = opset; - return opsets; -} -//! [extension:getOpSets] - -//! [extension:getImplTypes] -std::vector Extension::getImplTypes(const std::shared_ptr& node) { - if (std::dynamic_pointer_cast(node)) { - return {"CPU"}; - } -#ifdef OPENCV_IMPORT_ENABLED - if (std::dynamic_pointer_cast(node)) { - return {"CPU"}; - } -#endif - return {}; -} -//! [extension:getImplTypes] - -//! [extension:getImplementation] -InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr& node, - const std::string& implType) { - if (implType == "CPU") { - if (std::dynamic_pointer_cast(node)) { - return std::make_shared(node); - } -#ifdef OPENCV_IMPORT_ENABLED - if (std::dynamic_pointer_cast(node) && implType == "CPU") { - return std::make_shared(node); - } -#endif - } - return nullptr; -} -//! [extension:getImplementation] - -//! [extension:CreateExtension] -// Generate exported function -IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension) -//! [extension:CreateExtension] - -INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) -InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept { - try { - ext = new Extension(); - return OK; - } catch (std::exception& ex) { - if (resp) { - std::string err = ((std::string) "Couldn't create extension: ") + ex.what(); - err.copy(resp->msg, 255); - } - return InferenceEngine::GENERAL_ERROR; - } -} diff --git a/src/core/template_extension/old/extension.hpp b/src/core/template_extension/old/extension.hpp deleted file mode 100644 index 8c236aa9ed1e81..00000000000000 --- a/src/core/template_extension/old/extension.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include -#include -#include -#include -#include - -//! [extension:header] -namespace TemplateExtension { - -class Extension : public InferenceEngine::IExtension { -public: - Extension(); - ~Extension(); - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override; - void Unload() noexcept override {} - - std::map getOpSets() override; - std::vector getImplTypes(const std::shared_ptr& node) override; - InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, - const std::string& implType) override; -}; - -} // namespace TemplateExtension -//! 
diff --git a/src/core/template_extension/old/fft_kernel.cpp b/src/core/template_extension/old/fft_kernel.cpp
deleted file mode 100644
index 301eb9e3ec0398..00000000000000
--- a/src/core/template_extension/old/fft_kernel.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-//! [fft_kernel:implementation]
-#include "fft_kernel.hpp"
-
-#include <ie_layouts.h>
-
-#include <opencv2/opencv.hpp>
-
-#include "fft_op.hpp"
-
-using namespace TemplateExtension;
-
-FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
-    auto castedNode = std::dynamic_pointer_cast<FFTOp>(node);
-    if (!castedNode)
-        IE_THROW() << "Cannot create implementation for unknown operation!";
-    if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1)
-        IE_THROW() << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
-    if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
-        IE_THROW() << "Cannot create implementation for op with dynamic shapes!";
-    if (castedNode->get_input_element_type(0) != ngraph::element::f32 ||
-        castedNode->get_output_element_type(0) != ngraph::element::f32)
-        IE_THROW() << "Operation supports only FP32 tensors.";
-    inpShape = castedNode->get_input_shape(0);
-    outShape = castedNode->get_output_shape(0);
-    inverse = castedNode->inverse;
-}
-
-InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
-                                                                InferenceEngine::ResponseDesc* resp) noexcept {
-    std::vector<InferenceEngine::DataConfig> inDataConfig;
-    std::vector<InferenceEngine::DataConfig> outDataConfig;
-    InferenceEngine::SizeVector order(inpShape.size());
-    std::iota(order.begin(), order.end(), 0);
-
-    // Allow any offset before data
-    size_t offset((std::numeric_limits<size_t>::max)());
-
-    // Input shape
-    InferenceEngine::DataConfig inpConf;
-    inpConf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inpShape, {inpShape, order, offset});
-    inDataConfig.push_back(inpConf);
-
-    // Output shape
-    InferenceEngine::DataConfig outConf;
-    outConf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
-    outDataConfig.push_back(outConf);
-
-    InferenceEngine::LayerConfig layerConfig;
-    layerConfig.inConfs = inDataConfig;
-    layerConfig.outConfs = outDataConfig;
-
-    conf.push_back(layerConfig);
-    return InferenceEngine::StatusCode::OK;
-}
-
-InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config,
-                                          InferenceEngine::ResponseDesc* resp) noexcept {
-    try {
-        if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
-            IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
-        }
-
-        if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
-            config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
-            IE_THROW() << "Operation supports only FP32 precisions!";
-        }
-    } catch (InferenceEngine::Exception& ex) {
-        error = ex.what();
-        if (resp) {
-            strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
-            resp->msg[sizeof(resp->msg) - 1] = 0;
-        }
-        return InferenceEngine::GENERAL_ERROR;
-    }
-    return InferenceEngine::OK;
-}
-
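The infEngineBlobToMat helper that follows wraps blob memory in a cv::Mat header without copying. The same zero-copy pattern for any raw NCHW float buffer, with illustrative dimensions:

    #include <opencv2/core.hpp>

    cv::Mat wrap_nchw(float* data, int n, int c, int h, int w) {
        const int dims[] = {n, c, h, w};
        return cv::Mat(4, dims, CV_32F, data);  // header only; the buffer is not copied
    }
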
-static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
-    // NOTE: Inference Engine sizes are reversed.
-    std::vector<size_t> dims = blob->getTensorDesc().getDims();
-    std::vector<int> size(dims.size());
-    std::transform(dims.begin(), dims.end(), size.begin(), [](size_t v) {
-        return static_cast<int>(v);
-    });
-    const auto& precision = blob->getTensorDesc().getPrecision();
-    CV_Assert(precision == InferenceEngine::Precision::FP32);
-    return cv::Mat(size, CV_32F, (void*)blob->buffer());
-}
-
-InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
-                                             std::vector<InferenceEngine::Blob::Ptr>& outputs,
-                                             InferenceEngine::ResponseDesc* resp) noexcept {
-    cv::Mat inp = infEngineBlobToMat(inputs[0]);
-    cv::Mat out = infEngineBlobToMat(outputs[0]);
-
-    const int n = inp.size[0];
-    const int h = inp.size[2];
-    const int w = inp.size[3];
-    cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
-    for (int i = 0; i < n; ++i) {
-        std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
-                                           cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
-        cv::merge(components, complex);
-
-        if (!inverse)
-            cv::dft(complex, interleavedOut);
-        else
-            cv::idft(complex, interleavedOut, cv::DFT_SCALE);
-
-        components = {cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))};
-        cv::split(interleavedOut, components);
-    }
-    return InferenceEngine::OK;
-}
-//! [fft_kernel:implementation]
diff --git a/src/core/template_extension/old/fft_kernel.hpp b/src/core/template_extension/old/fft_kernel.hpp
deleted file mode 100644
index 775f94794982bf..00000000000000
--- a/src/core/template_extension/old/fft_kernel.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-//! [fft_kernel:header]
-#pragma once
-
-#include <ie_iextension.h>
-
-#include <ngraph/ngraph.hpp>
-
-namespace TemplateExtension {
-
-class FFTImpl : public InferenceEngine::ILayerExecImpl {
-public:
-    explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
-    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
-                                                           InferenceEngine::ResponseDesc* resp) noexcept override;
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
-                                     InferenceEngine::ResponseDesc* resp) noexcept override;
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
-                                        std::vector<InferenceEngine::Blob::Ptr>& outputs,
-                                        InferenceEngine::ResponseDesc* resp) noexcept override;
-
-private:
-    ngraph::Shape inpShape;
-    ngraph::Shape outShape;
-    bool inverse;
-    std::string error;
-};
-
-}  // namespace TemplateExtension
-//! [fft_kernel:header]
diff --git a/src/core/template_extension/old/fft_op.cpp b/src/core/template_extension/old/fft_op.cpp
deleted file mode 100644
index df33379027abad..00000000000000
--- a/src/core/template_extension/old/fft_op.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-//! [fft_op:implementation]
-#include "fft_op.hpp"
-
-using namespace TemplateExtension;
-
-FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) {
-    constructor_validate_and_infer_types();
-    inverse = _inverse;
-}
-
-void FFTOp::validate_and_infer_types() {
-    auto outShape = get_input_partial_shape(0);
-    set_output_type(0, get_input_element_type(0), outShape);
-}
-
-std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
-    if (new_args.size() != 1) {
-        OPENVINO_THROW("Incorrect number of new arguments");
-    }
-    return std::make_shared<FFTOp>(new_args.at(0), inverse);
-}
-
-bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) {
-    visitor.on_attribute("inverse", inverse);
-    return true;
-}
-//! [fft_op:implementation]
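The deleted execute() merges the two real/imaginary planes into OpenCV's interleaved complex layout before calling cv::dft. In isolation, the conversion is just:

    #include <opencv2/core.hpp>

    #include <vector>

    cv::Mat to_interleaved(const cv::Mat& real, const cv::Mat& imag) {
        std::vector<cv::Mat> planes = {real, imag};
        cv::Mat complex;
        cv::merge(planes, complex);  // h x w, CV_32FC2, ready for cv::dft
        return complex;
    }
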
diff --git a/src/core/template_extension/old/fft_op.hpp b/src/core/template_extension/old/fft_op.hpp
deleted file mode 100644
index b41f880551da3e..00000000000000
--- a/src/core/template_extension/old/fft_op.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-//! [fft_op:header]
-#pragma once
-
-#include <ngraph/ngraph.hpp>
-
-namespace TemplateExtension {
-
-class FFTOp : public ngraph::op::Op {
-public:
-    OPENVINO_OP("FFT", "custom_opset");
-
-    FFTOp() = default;
-    FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
-    void validate_and_infer_types() override;
-    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
-    bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
-
-    bool inverse;
-};
-
-}  // namespace TemplateExtension
-//! [fft_op:header]
diff --git a/src/core/template_extension/old/op.cpp b/src/core/template_extension/old/op.cpp
deleted file mode 100644
index ae0c0b677bb5bc..00000000000000
--- a/src/core/template_extension/old/op.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "op.hpp"
-
-using namespace TemplateExtension;
-
-//! [op:ctor]
-Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add) : Op({arg}), add(add) {
-    constructor_validate_and_infer_types();
-}
-//! [op:ctor]
-
-//! [op:validate]
-void Operation::validate_and_infer_types() {
-    // Operation doesn't change shapes and element type
-    set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
-}
-//! [op:validate]
-
-//! [op:copy]
-std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
-    if (new_args.size() != 1) {
-        OPENVINO_THROW("Incorrect number of new arguments");
-    }
-
-    return std::make_shared<Operation>(new_args.at(0), add);
-}
-//! [op:copy]
-
-//! [op:visit_attributes]
-bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) {
-    visitor.on_attribute("add", add);
-    return true;
-}
-//! [op:visit_attributes]
-
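visit_attributes above is the hook that serialization and graph comparison use to see the "add" scalar without knowing the concrete class. A toy visitor that merely records attribute names; this is an illustration, not an OpenVINO class:

    #include <string>
    #include <vector>

    struct NameRecorder {
        std::vector<std::string> names;
        template <class T>
        void on_attribute(const std::string& name, T&) {
            names.push_back(name);  // a real AttributeVisitor would also read or write the value
        }
    };
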
-//! [op:evaluate]
-namespace {
-
-template <typename T>
-void implementation(const T* input, T* output, int64_t add, size_t size) {
-    for (size_t i = 0; i < size; i++) {
-        output[i] = static_cast<T>(input[i] + add);
-    }
-}
-
-template <typename T>
-bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) {
-    size_t size = ngraph::shape_size(arg0->get_shape());
-    implementation(arg0->get_data_ptr<T>(), out->get_data_ptr<T>(), add, size);
-    return true;
-}
-
-}  // namespace
-
-bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
-    switch (inputs[0]->get_element_type()) {
-    case ngraph::element::Type_t::i8:
-        return evaluate_op<int8_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i16:
-        return evaluate_op<int16_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i32:
-        return evaluate_op<int32_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i64:
-        return evaluate_op<int64_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u8:
-        return evaluate_op<uint8_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u16:
-        return evaluate_op<uint16_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u32:
-        return evaluate_op<uint32_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u64:
-        return evaluate_op<uint64_t>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::bf16:
-        return evaluate_op<ngraph::bfloat16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::f16:
-        return evaluate_op<ngraph::float16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::f32:
-        return evaluate_op<float>(inputs[0], outputs[0], getAddAttr());
-    default:
-        break;
-    }
-    return false;
-}
-
-bool Operation::has_evaluate() const {
-    switch (get_input_element_type(0)) {
-    case ngraph::element::Type_t::i8:
-    case ngraph::element::Type_t::i16:
-    case ngraph::element::Type_t::i32:
-    case ngraph::element::Type_t::i64:
-    case ngraph::element::Type_t::u8:
-    case ngraph::element::Type_t::u16:
-    case ngraph::element::Type_t::u32:
-    case ngraph::element::Type_t::u64:
-    case ngraph::element::Type_t::bf16:
-    case ngraph::element::Type_t::f16:
-    case ngraph::element::Type_t::f32:
-        return true;
-    default:
-        break;
-    }
-    return false;
-}
-//! [op:evaluate]
diff --git a/src/core/template_extension/old/op.hpp b/src/core/template_extension/old/op.hpp
deleted file mode 100644
index 98ada3ae2a8eb3..00000000000000
--- a/src/core/template_extension/old/op.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ngraph/ngraph.hpp>
-
-//! [op:header]
-namespace TemplateExtension {
-
-class Operation : public ngraph::op::Op {
-public:
-    OPENVINO_OP("Template", "custom_opset");
-
-    Operation() = default;
-    Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
-    void validate_and_infer_types() override;
-    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
-    bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
-    int64_t getAddAttr() const {
-        return add;
-    }
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override;
-    OPENVINO_SUPPRESS_DEPRECATED_END
-    bool has_evaluate() const override;
-
-private:
-    int64_t add;
-};
-//! 
[op:header] - -} // namespace TemplateExtension diff --git a/src/core/template_extension/new/ov_extension.cpp b/src/core/template_extension/ov_extension.cpp similarity index 100% rename from src/core/template_extension/new/ov_extension.cpp rename to src/core/template_extension/ov_extension.cpp diff --git a/src/core/tests/CMakeLists.txt b/src/core/tests/CMakeLists.txt index 45751efe2012b2..9866fae18823dd 100644 --- a/src/core/tests/CMakeLists.txt +++ b/src/core/tests/CMakeLists.txt @@ -22,7 +22,6 @@ if(SUGGEST_OVERRIDE_SUPPORTED) endif() list(APPEND UNIT_TESTS_DEPENDENCIES openvino_template_extension) -list(APPEND UNIT_TESTS_DEPENDENCIES template_extension) list(APPEND EXCLUDE_TESTS ${CMAKE_CURRENT_SOURCE_DIR}/dnnl.cpp) diff --git a/src/core/tests/element_type.cpp b/src/core/tests/element_type.cpp index abf8b3f8aa7603..d7bd49c2e2e252 100644 --- a/src/core/tests/element_type.cpp +++ b/src/core/tests/element_type.cpp @@ -67,6 +67,10 @@ TEST(element_type, from_string) { EXPECT_EQ(element::Type("U64"), element::u64); EXPECT_EQ(element::Type("nf4"), element::nf4); EXPECT_EQ(element::Type("NF4"), element::nf4); + EXPECT_EQ(element::Type("f8e4m3"), element::f8e4m3); + EXPECT_EQ(element::Type("F8E4M3"), element::f8e4m3); + EXPECT_EQ(element::Type("f8e5m2"), element::f8e5m2); + EXPECT_EQ(element::Type("F8E5M2"), element::f8e5m2); EXPECT_EQ(element::Type("string"), element::string); EXPECT_EQ(element::Type("STRING"), element::string); diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp index 224272715b4da1..b92adc29b1ac99 100644 --- a/src/core/tests/eval.cpp +++ b/src/core/tests/eval.cpp @@ -1028,6 +1028,24 @@ TEST(eval, evaluate_sign) { ASSERT_EQ(result_val, expec); } +TEST(eval, evaluate_sign_nan) { + auto p = make_shared(element::f16, Shape{2, 3}); + auto sign = make_shared(p); + auto model = make_shared(OutputVector{sign}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{ + make_tensor(Shape{2, 3}, + {std::numeric_limits::quiet_NaN(), -2, 0, -4.8f, 4.8f, -0.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f16); + EXPECT_THAT(read_vector(result), + Pointwise(NanSensitiveFloatEq(), + std::vector{std::numeric_limits::quiet_NaN(), -1, 0, -1, 1, 0})); +} + TEST(eval, evaluate_sin) { auto p = make_shared(element::f32, Shape{11}); auto sin = make_shared(p); @@ -2963,7 +2981,8 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e4m3_no_scale_no_shift) { using namespace testing; constexpr auto et = element::f32; - std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f}; + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; const auto data_shape = Shape{input_data.size()}; auto data = make_shared(et, data_shape); @@ -2983,10 +3002,10 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e4m3_no_scale_no_shift) { EXPECT_EQ(result.get_shape(), data_shape); EXPECT_THAT( read_vector(result), - Pointwise( - FloatEq(), - std::vector< - float>{0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f})); + Pointwise(FloatEq(), std::vector{0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, + 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f, -0.f, + -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f, -0.625f, + -0.6875f, -0.8125f, -0.875f, -1.f})); } TEST(eval, 
evaluate_fake_convert_f32_seq_to_f8e4m3_scale_1) { @@ -3223,7 +3242,8 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e5m2_scale_1) { using namespace testing; constexpr auto et = element::f32; - std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f}; + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; const auto data_shape = Shape{input_data.size()}; @@ -3244,11 +3264,11 @@ TEST(eval, evaluate_fake_convert_f32_to_f8e5m2_scale_1) { EXPECT_EQ(result.get_element_type(), et); EXPECT_EQ(result.get_shape(), data_shape); - EXPECT_THAT( - read_vector(result), - Pointwise( - FloatEq(), - std::vector{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, 0.75f, 0.875f, 1.f})); + EXPECT_THAT(read_vector(result), + Pointwise(FloatEq(), + std::vector{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, + 0.75f, 0.875f, 1.f, -0.f, -0.09375f, -0.1875f, -0.3125f, -0.375f, + -0.5f, -0.625f, -0.75f, -0.75f, -0.875f, -1.f})); } TEST(eval, evaluate_fake_convert_f16_to_f8e5m2_scale_1) { @@ -3707,7 +3727,7 @@ TEST(eval, evaluate_fake_convert_bf16_matching_f8_to_f8e5m2_scale_1) { 4096.f, 5120.f, 6144.f, 7168.f, 8192.f, 10240.f, 12288.f, 14336.f, 16384.f, 20480.f, 24576.f, 28672.f, - 32768.f, 40960.f, 49152.f, 57344.0 + 32768.f, 40960.f, 49152.f, 57344.f }; // clang-format on @@ -3766,6 +3786,92 @@ TEST(eval, evaluate_fake_convert_f32_matching_f8e4m3_to_f8e5m2_scale_1) { EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); } +TEST(eval, evaluate_f8e5m2_const_from_f32) { + using namespace testing; + constexpr auto et = element::f8e5m2; + + std::vector input_data{ + 0.017578125f, 0.021484375f, 0.025390625f, 0.029296875f, 0.03515625f, 0.0703125f, 0.140625f, + 0.28125f, 0.5625f, 1.125f, 1.625f, 1.875f, 2.25f, 3.75f, + 4.5f, 9.f, 18.f, 36.f, 72.f, 144.f, 288.f, + }; + /* Rounded to f8e5m2 vals */ + std::vector output_data{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, 0.125f, + 0.25f, 0.5f, 1.f, 1.5, 2.f, 2.f, 4.f, + 4.f, 8.f, 16.f, 32.f, 64.f, 128.f, 256.f}; + + const auto data_shape = Shape{input_data.size()}; + + auto op = make_shared(et, data_shape, input_data); + auto model = make_shared(OutputVector{op}, ParameterVector{}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), et); + EXPECT_EQ(result.get_shape(), data_shape); + EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), output_data)); +} + +TEST(eval, evaluate_f8e5m2_const_seq_from_f32) { + using namespace testing; + constexpr auto et = element::f8e5m2; + + std::vector input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f, + -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f}; + + /* Rounded to f8e5m2 vals */ + std::vector output_data{0.f, 0.09375f, 0.1875f, 0.3125f, 0.375f, 0.5f, 0.625f, 0.75f, + 0.75f, 0.875f, 1.f, -0.f, -0.09375f, -0.1875f, -0.3125f, -0.375f, + -0.5f, -0.625f, -0.75f, -0.75f, -0.875f, -1.f}; + + const auto data_shape = Shape{input_data.size()}; + + auto op = make_shared(et, data_shape, input_data); + auto model = make_shared(OutputVector{op}, ParameterVector{}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{}; + 
ASSERT_TRUE(model->evaluate(out_vector, in_vector));
+    result = out_vector.at(0);
+
+    EXPECT_EQ(result.get_element_type(), et);
+    EXPECT_EQ(result.get_shape(), data_shape);
+    EXPECT_THAT(read_vector<ov::float8_e5m2>(result), Pointwise(FloatEq(), output_data));
+}
+
+TEST(eval, evaluate_f8e4m3_const_seq_from_f32) {
+    using namespace testing;
+    constexpr auto et = element::f8e4m3;
+
+    std::vector<float> input_data{0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.f,
+                                  -0.0f, -0.1f, -0.2f, -0.3f, -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f};
+
+    /* Rounded to f8e4m3 vals */
+    std::vector<float> output_data{
+        0.f, 0.1015625f, 0.203125f, 0.3125f, 0.40625f, 0.5f, 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f,
+        -0.f, -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f, -0.625f, -0.6875f, -0.8125f, -0.875f, -1.f};
+
+    const auto data_shape = Shape{input_data.size()};
+
+    auto op = make_shared<ov::op::v0::Constant>(et, data_shape, input_data);
+    auto model = make_shared<Model>(OutputVector{op}, ParameterVector{});
+
+    auto result = ov::Tensor();
+    auto out_vector = ov::TensorVector{result};
+    auto in_vector = ov::TensorVector{};
+    ASSERT_TRUE(model->evaluate(out_vector, in_vector));
+    result = out_vector.at(0);
+
+    EXPECT_EQ(result.get_element_type(), et);
+    EXPECT_EQ(result.get_shape(), data_shape);
+    EXPECT_THAT(read_vector<ov::float8_e4m3>(result), Pointwise(FloatEq(), output_data));
+}
+
 TEST(eval, evaluate_fake_convert_f32_seq_to_f8e5m2_scale_shift) {
     using namespace testing;
     constexpr auto et = element::f32;
diff --git a/src/core/tests/float8_e4m3.cpp b/src/core/tests/float8_e4m3.cpp
new file mode 100644
index 00000000000000..6265b530e10105
--- /dev/null
+++ b/src/core/tests/float8_e4m3.cpp
@@ -0,0 +1,175 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/core/type/float8_e4m3.hpp"
+
+#include <gtest/gtest.h>
+
+#include <limits>
+
+#include "common_test_utils/float_util.hpp"
+namespace ov {
+namespace test {
+
+template <class TContainer>
+std::vector<std::tuple<int, typename TContainer::value_type>> enumerate(const TContainer& values) {
+    std::vector<std::tuple<int, typename TContainer::value_type>> enum_values;
+    int i = 0;
+    for (const auto& v : values) {
+        enum_values.emplace_back(i, v);
+        ++i;
+    }
+    return enum_values;
+}
+
+TEST(F8E4M3Test, f32_inf) {
+    const auto f8 = ov::float8_e4m3(std::numeric_limits<float>::infinity());
+
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
+
+TEST(F8E4M3Test, f32_minus_inf) {
+    const auto f8 = ov::float8_e4m3(-std::numeric_limits<float>::infinity());
+    // f8 is NaN as there is no infinity
+    EXPECT_EQ(f8.to_bits(), 0xff);
+}
+
+TEST(F8E4M3Test, f32_nan) {
+    const auto f8 = ov::float8_e4m3(std::numeric_limits<float>::quiet_NaN());
+
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
+
+TEST(F8E4M3Test, f32_gt_zero_le_f8_half_lowest_subnormal) {
+    const auto f8 = ov::float8_e4m3(0.0009765625f);
+
+    EXPECT_EQ(f8.to_bits(), 0x00);
+}
+
+TEST(F8E4M3Test, f32_gt_zero_gt_f8_half_lowest_subnormal) {
+    const auto f8 = ov::float8_e4m3(0.00097656273283064365387f);
+
+    EXPECT_EQ(f8.to_bits(), 0x01);
+}
+
+TEST(F8E4M3Test, f32_normal_fractional_rounding) {
+    const auto f8 = ov::float8_e4m3(0.129f);
+
+    // Rounded to 0.125f -> 0x20
+    EXPECT_EQ(f8.to_bits(), 0x20);
+}
+
+TEST(F8E4M3Test, f32_normal_negative_fractional_rounding) {
+    const auto f8 = ov::float8_e4m3(-0.281f);
+
+    // Rounded to -0.28125f -> 0xa9
+    EXPECT_EQ(f8.to_bits(), 0xa9);
+}
+
+TEST(F8E4M3Test, f32_ge_f8_max_within_round_to_max) {
+    const auto f8 = ov::float8_e4m3(460.0f);
+
+    // Rounded to 448.0f -> 0x7e
+    EXPECT_EQ(f8.to_bits(), 0x7e);
+}
+
+TEST(F8E4M3Test, f32_ge_f8_max_not_within_round_to_max) {
+    const auto f8 = ov::float8_e4m3(560.0f);
+
+    // f8 has no such value (NaN)
+    EXPECT_EQ(f8.to_bits(), 0x7f);
+}
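These boundary cases follow directly from the e4m3 layout: 1 sign bit, 4 exponent bits with bias 7, 3 mantissa bits, no infinities, and the 0x7f/0xff patterns reserved for NaN. A reference decoder sketch under those assumptions:

    #include <cmath>
    #include <cstdint>
    #include <limits>

    float decode_f8e4m3(uint8_t bits) {
        const int sign = bits >> 7;
        const int exponent = (bits >> 3) & 0xF;
        const int mantissa = bits & 0x7;
        float value;
        if (exponent == 0) {
            value = std::ldexp(mantissa / 8.0f, -6);  // subnormal: m/8 * 2^-6
        } else if (exponent == 15 && mantissa == 7) {
            value = std::numeric_limits<float>::quiet_NaN();  // bit patterns 0x7f and 0xff
        } else {
            value = std::ldexp(1.0f + mantissa / 8.0f, exponent - 7);
        }
        return sign ? -value : value;
    }

decode_f8e4m3(0x7e) yields 448.0f, the largest finite value, which is why 460.0f clamps to 0x7e while 560.0f falls through to NaN.
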
+
+TEST(F8E4M3Test, f32_le_f8_lowest_within_round_to_lowest) {
+    const auto f8 = ov::float8_e4m3(-460.0f);
+
+    // Rounded to -448.0f -> 0xfe
+    EXPECT_EQ(f8.to_bits(), 0xfe);
+}
+
+TEST(F8E4M3Test, f32_le_f8_lowest_not_within_round_to_lowest) {
+    const auto f8 = ov::float8_e4m3(-760.0f);
+
+    // f8 has no such value (NaN)
+    EXPECT_EQ(f8.to_bits(), 0xff);
+}
+
+TEST(F8E4M3Test, stream_operator) {
+    std::stringstream s;
+    s << ov::float8_e4m3(2.5f);
+
+    EXPECT_EQ(s.str(), "2.5");
+}
+
+TEST(F8E4M3Test, to_string) {
+    const auto f8 = ov::float8_e4m3::from_bits(0b00111010);
+
+    EXPECT_EQ(std::to_string(f8), "1.250000");
+}
+constexpr auto f32_qnan = std::numeric_limits<float>::quiet_NaN();
+
+const auto exp_floats = std::vector<float>{
+    0.0f, 0.001953125f, 0.00390625f, 0.005859375f, 0.0078125f, 0.009765625f, 0.01171875f, 0.013671875f,
+    0.015625f, 0.017578125f, 0.01953125f, 0.021484375f, 0.0234375f, 0.025390625f, 0.02734375f, 0.029296875f,
+    0.03125f, 0.03515625f, 0.0390625f, 0.04296875f, 0.046875f, 0.05078125f, 0.0546875f, 0.05859375f,
+    0.0625f, 0.0703125f, 0.078125f, 0.0859375f, 0.09375f, 0.1015625f, 0.109375f, 0.1171875f,
+    0.125f, 0.140625f, 0.15625f, 0.171875f, 0.1875f, 0.203125f, 0.21875f, 0.234375f,
+    0.25f, 0.28125f, 0.3125f, 0.34375f, 0.375f, 0.40625f, 0.4375f, 0.46875f,
+    0.5f, 0.5625f, 0.625f, 0.6875f, 0.75f, 0.8125f, 0.875f, 0.9375f,
+    1.0f, 1.125f, 1.25f, 1.375f, 1.5f, 1.625f, 1.75f, 1.875f,
+    2.0f, 2.25f, 2.5f, 2.75f, 3.0f, 3.25f, 3.5f, 3.75f,
+    4.0f, 4.5f, 5.0f, 5.5f, 6.0f, 6.5f, 7.0f, 7.5f,
+    8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+    16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f,
+    32.0f, 36.0f, 40.0f, 44.0f, 48.0f, 52.0f, 56.0f, 60.0f,
+    64.0f, 72.0f, 80.0f, 88.0f, 96.0f, 104.0f, 112.0f, 120.0f,
+    128.0f, 144.0f, 160.0f, 176.0f, 192.0f, 208.0f, 224.0f, 240.0f,
+    256.0f, 288.0f, 320.0f, 352.0f, 384.0f, 416.0f, 448.0f, f32_qnan,
+    -0.0f, -0.001953125f, -0.00390625f, -0.005859375f, -0.0078125f, -0.009765625f, -0.01171875f, -0.013671875f,
+    -0.015625f, -0.017578125f, -0.01953125f, -0.021484375f, -0.0234375f, -0.025390625f, -0.02734375f, -0.029296875f,
+    -0.03125f, -0.03515625f, -0.0390625f, -0.04296875f, -0.046875f, -0.05078125f, -0.0546875f, -0.05859375f,
+    -0.0625f, -0.0703125f, -0.078125f, -0.0859375f, -0.09375f, -0.1015625f, -0.109375f, -0.1171875f,
+    -0.125f, -0.140625f, -0.15625f, -0.171875f, -0.1875f, -0.203125f, -0.21875f, -0.234375f,
+    -0.25f, -0.28125f, -0.3125f, -0.34375f, -0.375f, -0.40625f, -0.4375f, -0.46875f,
+    -0.5f, -0.5625f, -0.625f, -0.6875f, -0.75f, -0.8125f, -0.875f, -0.9375f,
+    -1.0f, -1.125f, -1.25f, -1.375f, -1.5f, -1.625f, -1.75f, -1.875f,
+    -2.0f, -2.25f, -2.5f, -2.75f, -3.0f, -3.25f, -3.5f, -3.75f,
+    -4.0f, -4.5f, -5.0f, -5.5f, -6.0f, -6.5f, -7.0f, -7.5f,
+    -8.0f, -9.0f, -10.0f, -11.0f, -12.0f, -13.0f, -14.0f, -15.0f,
+    -16.0f, -18.0f, -20.0f, -22.0f, -24.0f, -26.0f, -28.0f, -30.0f,
+    -32.0f, -36.0f, -40.0f, -44.0f, -48.0f, -52.0f, -56.0f, -60.0f,
+    -64.0f, -72.0f, -80.0f, -88.0f, -96.0f, -104.0f, -112.0f, -120.0f,
+    -128.0f, -144.0f, -160.0f, -176.0f, -192.0f, -208.0f, -224.0f, -240.0f,
+    -256.0f, -288.0f, -320.0f, -352.0f, -384.0f, -416.0f, -448.0f, -f32_qnan};
+
+using f8m4e3_params = std::tuple<int, float>;
+class F8E4M3PTest : public testing::TestWithParam<f8m4e3_params> {};
+
+INSTANTIATE_TEST_SUITE_P(convert,
+                         F8E4M3PTest,
+                         testing::ValuesIn(enumerate(exp_floats)),
+                         testing::PrintToStringParamName());
+
+TEST_P(F8E4M3PTest, f8_bits_to_f32) {
+    const auto& params = GetParam();
+    const auto& exp_value = std::get<1>(params);
const auto f8 = ov::float8_e4m3::from_bits(std::get<0>(params)); + + if (std::isnan(exp_value)) { + EXPECT_TRUE(std::isnan(static_cast(f8))); + } else { + EXPECT_EQ(static_cast(f8), exp_value); + } +} + +TEST_P(F8E4M3PTest, f32_to_f8_bits) { + const auto& params = GetParam(); + const auto& exp_value = std::get<0>(params); + const auto& value = std::get<1>(params); + const auto f8 = ov::float8_e4m3(value); + + EXPECT_EQ(f8.to_bits(), exp_value); +} +} // namespace test +} // namespace ov diff --git a/src/core/tests/float8_e5m2.cpp b/src/core/tests/float8_e5m2.cpp new file mode 100644 index 00000000000000..38028497341c17 --- /dev/null +++ b/src/core/tests/float8_e5m2.cpp @@ -0,0 +1,176 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/type/float8_e5m2.hpp" + +#include + +#include + +#include "common_test_utils/float_util.hpp" + +namespace ov { +namespace test { + +TEST(F8E5M2Test, stream_operator) { + std::stringstream s; + s << ov::float8_e5m2(2.5f); + + EXPECT_EQ(s.str(), "2.5"); +} + +TEST(F8E5M2Test, to_string) { + const auto f8 = ov::float8_e5m2::from_bits(0b00111010); + + EXPECT_EQ(std::to_string(f8), "0.750000"); +} + +TEST(F8E5M2Test, f32_inf) { + const auto f8 = ov::float8_e5m2(std::numeric_limits::infinity()); + + EXPECT_EQ(f8.to_bits(), 0b01111100); +} + +TEST(F8E5M2Test, f32_minus_inf) { + const auto f8 = ov::float8_e5m2(-std::numeric_limits::infinity()); + + EXPECT_EQ(f8.to_bits(), 0b11111100); +} + +TEST(F8E5M2Test, f32_ge_f8_max_round_to_inf) { + const auto f8 = ov::float8_e5m2(65520.0f); + + EXPECT_EQ(f8.to_bits(), 0b01111100); +} + +TEST(F8E5M2Test, f32_ge_f8_max_round_to_max) { + const auto f8 = ov::float8_e5m2(65519.9f); + + EXPECT_EQ(f8.to_bits(), 0b01111011); +} + +TEST(F8E5M2Test, f32_ge_f8_max_round_to_minus_inf) { + const auto f8 = ov::float8_e5m2(-65520.0f); + + EXPECT_EQ(f8.to_bits(), 0b11111100); +} + +TEST(F8E5M2Test, f32_ge_f8_max_round_to_lowest) { + const auto f8 = ov::float8_e5m2(-65519.9f); + + EXPECT_EQ(f8.to_bits(), 0b11111011); +} + +template +std::vector> enumerate(const TContainer& values) { + std::vector> enum_values; + int i = 0; + for (const auto& v : values) { + enum_values.emplace_back(i, v); + ++i; + } + return enum_values; +} + +constexpr auto f32_qnan = std::numeric_limits::quiet_NaN(); +constexpr auto f32_inf = std::numeric_limits::infinity(); + +// clang-format off +const auto exp_floats = std::vector{ + 0.0f, 1.52587890625e-05f, 3.0517578125e-05f, 4.57763671875e-05f, + 6.103515625e-05f, 7.62939453125e-05f, 9.1552734375e-05f, 0.0001068115234375f, + 0.0001220703125f, 0.000152587890625f, 0.00018310546875f, 0.000213623046875f, + 0.000244140625f, 0.00030517578125f, 0.0003662109375f, 0.00042724609375f, + 0.00048828125f, 0.0006103515625f, 0.000732421875f, 0.0008544921875f, + 0.0009765625f, 0.001220703125f, 0.00146484375f, 0.001708984375f, + 0.001953125f, 0.00244140625f, 0.0029296875f, 0.00341796875f, + 0.00390625f, 0.0048828125f, 0.005859375f, 0.0068359375f, + 0.0078125f, 0.009765625f, 0.01171875f, 0.013671875f, + 0.015625f, 0.01953125f, 0.0234375f, 0.02734375f, + 0.03125f, 0.0390625f, 0.046875f, 0.0546875f, + 0.0625f, 0.078125f, 0.09375f, 0.109375f, + 0.125f, 0.15625f, 0.1875f, 0.21875f, + 0.25f, 0.3125f, 0.375f, 0.4375f, + 0.5f, 0.625f, 0.75f, 0.875f, + 1.0f, 1.25f, 1.5f, 1.75f, + 2.0f, 2.5f, 3.0f, 3.5f, + 4.0f, 5.0f, 6.0f, 7.0f, + 8.0f, 10.0f, 12.0f, 14.0f, + 16.0f, 20.0f, 24.0f, 28.0f, + 32.0f, 40.0f, 48.0f, 56.0f, + 64.0f, 80.0f, 96.0f, 112.0f, + 128.0f, 160.0f, 192.0f, 
224.0f, + 256.0f, 320.0f, 384.0f, 448.0f, + 512.0f, 640.0f, 768.0f, 896.0f, + 1024.0f, 1280.0f, 1536.0f, 1792.0f, + 2048.0f, 2560.0f, 3072.0f, 3584.0f, + 4096.0f, 5120.0f, 6144.0f, 7168.0f, + 8192.0f, 10240.0f, 12288.0f, 14336.0f, + 16384.0f, 20480.0f, 24576.0f, 28672.0f, + 32768.0f, 40960.0f, 49152.0f, 57344.0f, + f32_inf, f32_qnan, f32_qnan, f32_qnan, + -0.0f, -1.52587890625e-05f, -3.0517578125e-05f, -4.57763671875e-05f, + -6.103515625e-05f, -7.62939453125e-05f, -9.1552734375e-05f, -0.0001068115234375f, + -0.0001220703125f, -0.000152587890625f, -0.00018310546875f, -0.000213623046875f, + -0.000244140625f, -0.00030517578125f, -0.0003662109375f, -0.00042724609375f, + -0.00048828125f, -0.0006103515625f, -0.000732421875f, -0.0008544921875f, + -0.0009765625f, -0.001220703125f, -0.00146484375f, -0.001708984375f, + -0.001953125f, -0.00244140625f, -0.0029296875f, -0.00341796875f, + -0.00390625f, -0.0048828125f, -0.005859375f, -0.0068359375f, + -0.0078125f, -0.009765625f, -0.01171875f, -0.013671875f, + -0.015625f, -0.01953125f, -0.0234375f, -0.02734375f, + -0.03125f, -0.0390625f, -0.046875f, -0.0546875f, + -0.0625f, -0.078125f, -0.09375f, -0.109375f, + -0.125f, -0.15625f, -0.1875f, -0.21875f, + -0.25f, -0.3125f, -0.375f, -0.4375f, + -0.5f, -0.625f, -0.75f, -0.875f, + -1.0f, -1.25f, -1.5f, -1.75f, + -2.0f, -2.5f, -3.0f, -3.5f, + -4.0f, -5.0f, -6.0f, -7.0f, + -8.0f, -10.0f, -12.0f, -14.0f, + -16.0f, -20.0f, -24.0f, -28.0f, + -32.0f, -40.0f, -48.0f, -56.0f, + -64.0f, -80.0f, -96.0f, -112.0f, + -128.0f, -160.0f, -192.0f, -224.0f, + -256.0f, -320.0f, -384.0f, -448.0f, + -512.0f, -640.0f, -768.0f, -896.0f, + -1024.0f, -1280.0f, -1536.0f, -1792.0f, + -2048.0f, -2560.0f, -3072.0f, -3584.0f, + -4096.0f, -5120.0f, -6144.0f, -7168.0f, + -8192.0f, -10240.0f, -12288.0f, -14336.0f, + -16384.0f, -20480.0f, -24576.0f, -28672.0f, + -32768.0f, -40960.0f, -49152.0f, -57344.0f, + -f32_inf, -f32_qnan, -f32_qnan, -f32_qnan}; +// clang-format on + +using f8m5e2_params = std::tuple; +class F8E5M2PTest : public testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(convert, + F8E5M2PTest, + testing::ValuesIn(enumerate(exp_floats)), + testing::PrintToStringParamName()); + +TEST_P(F8E5M2PTest, f8_bits_to_f32) { + const auto& params = GetParam(); + const auto& exp_value = std::get<1>(params); + const auto f8 = ov::float8_e5m2::from_bits(std::get<0>(params)); + + if (std::isnan(exp_value)) { + EXPECT_TRUE(std::isnan(static_cast(f8))); + } else { + EXPECT_EQ(static_cast(f8), exp_value); + } +} + +TEST_P(F8E5M2PTest, f32_to_f8_bits) { + const auto& params = GetParam(); + const auto& value = std::get<1>(params); + const auto& exp_value = std::isnan(value) ? (std::signbit(value) ? 
0xfe : 0x7e) : std::get<0>(params);
+    const auto f8 = ov::float8_e5m2(value);
+
+    EXPECT_EQ(f8.to_bits(), exp_value);
+}
+}  // namespace test
+}  // namespace ov
diff --git a/src/core/tests/ov_tensor_test.cpp b/src/core/tests/ov_tensor_test.cpp
index 9dfbe2853bbdaf..2e64323477ab68 100644
--- a/src/core/tests/ov_tensor_test.cpp
+++ b/src/core/tests/ov_tensor_test.cpp
@@ -570,6 +570,50 @@ TEST_F(OVTensorTest, makeRangeRoiStringTensor) {
     ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
 }
 
+TEST_F(OVTensorTest, setSmallerShapeOnRoiTensor) {
+    ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};
+    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 4}};
+    const ov::Shape newShape({1, 1, 3, 2});
+
+    ASSERT_EQ(roi_tensor.get_shape(), ov::Shape({1, 2, 4, 2}));
+
+    roi_tensor.set_shape(newShape);
+    ASSERT_EQ(roi_tensor.get_shape(), newShape);
+}
+
+TEST_F(OVTensorTest, setMaxSizeShapeOnRoiTensor) {
+    ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};
+    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}};
+    const ov::Shape new_shape({1, 2, 1, 1});
+    const ov::Shape roi_capacity({1, 2, 4, 3});
+
+    ASSERT_EQ(roi_tensor.get_shape(), roi_capacity);
+
+    roi_tensor.set_shape(new_shape);
+    ASSERT_EQ(roi_tensor.get_shape(), new_shape);
+
+    roi_tensor.set_shape(roi_capacity);
+    ASSERT_EQ(roi_tensor.get_shape(), roi_capacity);
+}
+
+TEST_F(OVTensorTest, setMinShapeOnRoiTensor) {
+    ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};
+    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}};
+    const ov::Shape newShape({0, 0, 0, 0});
+
+    roi_tensor.set_shape(newShape);
+    ASSERT_EQ(roi_tensor.get_shape(), newShape);
+}
+
+TEST_F(OVTensorTest, setShapeGtMaxOnRoiTensor) {
+    ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};
+    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 2, 5, 5}};
+    const ov::Shape newShape({1, 3, 6, 3});  // ROI coordinate begin + newShape[2] is bigger than t.shape[2]
+
+    ASSERT_EQ(roi_tensor.get_shape(), ov::Shape({1, 2, 4, 3}));
+    ASSERT_THROW(roi_tensor.set_shape(newShape), ov::Exception);
+}
+
 TEST_F(OVTensorTest, cannotSetShapeOnRoiTensor) {
     ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};  // RGBp picture of size (WxH) = 5x6
     ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
diff --git a/src/core/tests/specialize_function.cpp b/src/core/tests/specialize_function.cpp
index 9bbbf83ed591fd..9cd4b3bbc27376 100644
--- a/src/core/tests/specialize_function.cpp
+++ b/src/core/tests/specialize_function.cpp
@@ -5,21 +5,26 @@
 #include "ngraph/specialize_function.hpp"
 
 #include "gtest/gtest.h"
-#include "ngraph/op/add.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/convert.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
 
 using namespace ngraph;
 
 NGRAPH_SUPPRESS_DEPRECATED_START;
 
+using ov::op::v0::Constant;
+using ov::op::v0::Convert;
+using ov::op::v0::Parameter;
+using ov::op::v1::Add;
+
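Every test below drives the same deprecated entry point. Its call shape, shown once as a sketch with the values from the static-shape test; the argument order follows ngraph/specialize_function.hpp:

    auto g = ngraph::specialize_function(f,
                                         {element::f32, element::i32},
                                         {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                         std::vector<void*>{nullptr, nullptr});
    // g is a clone of f with the parameter element types and shapes pinned.
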
 // Simple case: create a function with static parameter shapes and "specialize" them to the same
 // shapes.
 TEST(specialize_function, et_shape_static) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::i32, Shape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::f32, Shape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::i32, Shape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -36,11 +41,11 @@ TEST(specialize_function, et_shape_static) {
 
 // Test specialization of dynamic element types.
 TEST(specialize_function, et_dynamic_shape_static) {
-    auto p0 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -57,11 +62,11 @@ TEST(specialize_function, et_dynamic_shape_static) {
 
 // Test specialization of rank-dynamic shapes.
 TEST(specialize_function, et_static_shape_rank_dynamic) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -78,11 +83,11 @@ TEST(specialize_function, et_static_shape_rank_dynamic) {
 
 // Test specialization of rank-static dynamic shapes.
 TEST(specialize_function, et_static_shape_rank_static_dynamic) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic(3));
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(3));
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic(3));
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
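The subst_val test in the next hunk checks value substitution: a non-null entry in the values vector folds the matching Parameter into a Constant of the original element type. Sketch with placeholder data:

    std::vector<int32_t> values(6, 42);  // 1*2*3 elements for parameter p1
    auto g = ngraph::specialize_function(f,
                                         {element::f32, element::i32},
                                         {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                         std::vector<void*>{nullptr, values.data()});
    // p1 is now a Constant; the as_type_ptr checks below walk Add -> Convert -> Constant.
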
@@ -99,11 +104,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic) {
 
 // Test specialization of values for shape-dynamic parameters.
 TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic(3));
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(3));
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic(3));
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -119,11 +124,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
     ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3}));
     ASSERT_EQ(g->get_output_element_type(0), element::f32);
 
-    auto plus_node = ov::as_type_ptr<op::v1::Add>(g->get_results().at(0)->input_value(0).get_node_shared_ptr());
+    auto plus_node = ov::as_type_ptr<Add>(g->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(plus_node);
-    auto convert_node = ov::as_type_ptr<op::Convert>(plus_node->input_value(1).get_node_shared_ptr());
+    auto convert_node = ov::as_type_ptr<Convert>(plus_node->input_value(1).get_node_shared_ptr());
     ASSERT_TRUE(convert_node);
-    auto const_node = ov::as_type_ptr<op::Constant>(convert_node->input_value(0).get_node_shared_ptr());
+    auto const_node = ov::as_type_ptr<Constant>(convert_node->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(const_node);
 
     ASSERT_EQ(const_node->get_output_element_type(0), element::i32);
@@ -135,11 +140,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) {
 //
 // (The input shapes we provide at specialization time are inconsistent.)
 TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -159,11 +164,11 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) {
 
 //
 // (The input element types we provide at specialization time are inconsistent.)
 TEST(specialize_function, et_dynamic_shape_static_validation_fails) {
-    auto p0 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::dynamic, Shape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
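The failure tests split across two throw sites: inconsistent inputs caught while re-validating ops raise NodeValidationFailure, while the pre-checks named in the comments below raise what OPENVINO_ASSERT throws. Schematically, with placeholder arguments:

    EXPECT_THROW(ngraph::specialize_function(f, bad_types, shapes, values),
                 ngraph::NodeValidationFailure);  // detected during graph re-validation
    EXPECT_THROW(ngraph::specialize_function(f, types, wrong_rank_shapes, values),
                 ov::AssertFailure);  // detected by the OPENVINO_ASSERT pre-check
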
@@ -183,14 +188,14 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) {
 // rank.
 //
 // (Note that we are testing for a different exception class here because the failure is in
-// specialize_shape's pre-checks, which use NGRAPH_CHECK, rather than inside validation as we
+// specialize_shape's pre-checks, which use OPENVINO_ASSERT, rather than inside validation as we
 // reconstruct the graph.)
 TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(3));
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic(3));
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(3));
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape::dynamic(3));
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -210,14 +215,14 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) {
 // dimensions.
 //
 // (Note that we are testing for a different exception class here because the failure is in
-// specialize_shape's pre-checks, which use NGRAPH_CHECK, rather than inside validation as we
+// specialize_shape's pre-checks, which use OPENVINO_ASSERT, rather than inside validation as we
 // reconstruct the graph.)
 TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape{1, ov::Dimension::dynamic(), 3});
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape{1, ov::Dimension::dynamic(), 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -235,11 +240,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) {
 
 // Test for failure when we supply the wrong number of replacement element types.
 TEST(specialize_function, et_count_wrong) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -257,11 +262,11 @@ TEST(specialize_function, et_count_wrong) {
 
 // Test for failure when we supply the wrong number of replacement shapes.
 TEST(specialize_function, shape_count_wrong) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
@@ -279,11 +284,11 @@ TEST(specialize_function, shape_count_wrong) {
 
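specialize_function takes three parallel vectors with one entry per Parameter; each *_count_wrong test shortens one of them. The failing call shape, schematically (f has two parameters, so one value pointer is one too few):

    EXPECT_THROW(ngraph::specialize_function(f,
                                             {element::f32, element::i32},
                                             {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}},
                                             std::vector<void*>{nullptr}),
                 ov::AssertFailure);
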
 // Test for failure when we supply the wrong number of replacement parameter values.
 TEST(specialize_function, value_count_wrong) {
-    auto p0 = std::make_shared<op::Parameter>(element::f32, PartialShape{1, 2, 3});
-    auto p1 = std::make_shared<op::Parameter>(element::i32, PartialShape{1, 2, 3});
+    auto p0 = std::make_shared<Parameter>(element::f32, PartialShape{1, 2, 3});
+    auto p1 = std::make_shared<Parameter>(element::i32, PartialShape{1, 2, 3});
 
-    auto k = std::make_shared<op::Convert>(p1, element::f32);
-    auto a = std::make_shared<op::v1::Add>(p0, k);
+    auto k = std::make_shared<Convert>(p1, element::f32);
+    auto a = std::make_shared<Add>(p0, k);
 
     auto f = std::make_shared<Function>(a, ParameterVector{p0, p1});
 
diff --git a/src/core/tests/tensor.cpp b/src/core/tests/tensor.cpp
index 39b47ad2a86aee..29f7baa5b2c267 100644
--- a/src/core/tests/tensor.cpp
+++ b/src/core/tests/tensor.cpp
@@ -10,7 +10,6 @@
 
 #include "common_test_utils/test_tools.hpp"
 #include "gtest/gtest.h"
-#include "ngraph/runtime/host_tensor.hpp"
 #include "openvino/core/model.hpp"
 #include "openvino/op/parameter.hpp"
 #include "openvino/op/relu.hpp"
diff --git a/src/core/tests/visitors/visitors.hpp b/src/core/tests/visitors/visitors.hpp
index 893d982a59b3eb..51ca244307767e 100644
--- a/src/core/tests/visitors/visitors.hpp
+++ b/src/core/tests/visitors/visitors.hpp
@@ -97,9 +97,6 @@ class ValueHolder {
     virtual operator std::vector<std::string>&() {
         OPENVINO_THROW("Invalid type access");
     }
-    virtual operator ngraph::HostTensorPtr&() {
-        OPENVINO_THROW("Invalid type access");
-    }
     virtual operator std::shared_ptr<ov::Model>&() {
         OPENVINO_THROW("Invalid type access");
     }
@@ -289,10 +286,8 @@ class DeserializeAttributeVisitor : public AttributeVisitor {
         adapter.set(m_values.get<std::vector<std::string>>(name));
     }
     void on_adapter(const std::string& name, ValueAccessor<void*>& adapter) override {
-        OPENVINO_SUPPRESS_DEPRECATED_START
-        ngraph::HostTensorPtr& data = m_values.get<ngraph::HostTensorPtr>(name);
-        data->read(adapter.get_ptr(), adapter.size());
-        OPENVINO_SUPPRESS_DEPRECATED_END
+        auto data = m_values.get<ov::Tensor>(name);
+        std::memcpy(adapter.get_ptr(), data.data(), adapter.size());
     }
 
 protected:
diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp
index 6194fca7583937..22ce0ed4b772d9 100644
--- a/src/frontends/common/src/manager.cpp
+++ b/src/frontends/common/src/manager.cpp
@@ -4,11 +4,9 @@
 
 #include "openvino/frontend/manager.hpp"
 
-#include
-#include
-
 #include 
"openvino/frontend/exception.hpp" -#include "openvino/util/file_util.hpp" -#include "plugin_loader.hpp" - -#ifndef _WIN32 -# include -# include -# include -#else -# if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP -# error "Only WINAPI_PARTITION_DESKTOP is supported, because of GetModuleHandleEx[A|W]" -# endif -# ifndef NOMINMAX -# define NOMINMAX -# endif -# include -#endif - -namespace { - -static std::string _get_frontend_library_path() { -#ifdef _WIN32 -# ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - WCHAR ie_library_path[MAX_PATH]; - HMODULE hm = NULL; - if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(ov::frontend::get_frontend_library_path), - &hm)) { - FRONT_END_INITIALIZATION_CHECK(false, "GetModuleHandle returned ", GetLastError()); - } - GetModuleFileNameW(hm, (LPWSTR)ie_library_path, sizeof(ie_library_path) / sizeof(ie_library_path[0])); - return ov::util::wstring_to_string(ov::util::get_directory(std::wstring(ie_library_path))); -# else - CHAR ie_library_path[MAX_PATH]; - HMODULE hm = NULL; - if (!GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, - reinterpret_cast(ov::frontend::get_frontend_library_path), - &hm)) { - FRONT_END_INITIALIZATION_CHECK(false, "GetModuleHandle returned ", GetLastError()); - } - GetModuleFileNameA(hm, (LPSTR)ie_library_path, sizeof(ie_library_path)); - return ov::util::get_directory(std::string(ie_library_path)); -# endif -#elif defined(__APPLE__) || defined(__linux__) || defined(__EMSCRIPTEN__) - Dl_info info; - dladdr(reinterpret_cast(ov::frontend::get_frontend_library_path), &info); - return ov::util::get_directory(ov::util::get_absolute_file_path(std::string(info.dli_fname))).c_str(); -#else -# error "Unsupported OS" -#endif // _WIN32 -} -} // namespace - -std::string ov::frontend::get_frontend_library_path() { - return _get_frontend_library_path(); -} diff --git a/src/frontends/common/src/utils.hpp b/src/frontends/common/src/utils.hpp index 24f1bb547e6ca2..bdf29be618f185 100644 --- a/src/frontends/common/src/utils.hpp +++ b/src/frontends/common/src/utils.hpp @@ -48,9 +48,3 @@ catch (...) 
{ \ OPENVINO_ASSERT(false, (MESSAGE)); \ } - -namespace ov { -namespace frontend { -std::string get_frontend_library_path(); -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/CMakeLists.txt b/src/frontends/onnx/CMakeLists.txt index c3f02f98cedcf4..8585c8b80c5641 100644 --- a/src/frontends/onnx/CMakeLists.txt +++ b/src/frontends/onnx/CMakeLists.txt @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # - add_subdirectory(onnx_common) add_subdirectory(frontend) diff --git a/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp b/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp index e46c37321820af..ed114552cbdb13 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/core/node.hpp @@ -17,11 +17,11 @@ #include #include -#include "ngraph/deprecated.hpp" -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "onnx_import/onnx_importer_visibility.hpp" +#include "openvino/core/deprecated.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/constant.hpp" namespace ONNX_NAMESPACE { // forward declaration diff --git a/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp b/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp index e55ff9cfbf634f..69bb95a62834cb 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/core/operator_set.hpp @@ -8,7 +8,6 @@ #include #include -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp b/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp index 631c689cf0dd66..8e4de8a6605635 100644 --- a/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp +++ b/src/frontends/onnx/frontend/include/onnx_import/onnx_utils.hpp @@ -17,9 +17,9 @@ #include #include -#include "ngraph/deprecated.hpp" #include "onnx_import/core/operator_set.hpp" #include "onnx_importer_visibility.hpp" +#include "openvino/core/deprecated.hpp" namespace ngraph { namespace onnx_import { diff --git a/src/frontends/onnx/frontend/src/core/attribute.hpp b/src/frontends/onnx/frontend/src/core/attribute.hpp index 4bd9756bccbbda..ea8bedb5a28d0f 100644 --- a/src/frontends/onnx/frontend/src/core/attribute.hpp +++ b/src/frontends/onnx/frontend/src/core/attribute.hpp @@ -8,7 +8,7 @@ #include "core/sparse_tensor.hpp" #include "core/tensor.hpp" -#include "ngraph/except.hpp" +#include "openvino/core/except.hpp" namespace ngraph { namespace onnx_import { diff --git a/src/frontends/onnx/frontend/src/core/graph.cpp b/src/frontends/onnx/frontend/src/core/graph.cpp index ac94cc4af26746..df61ef1a7371db 100644 --- a/src/frontends/onnx/frontend/src/core/graph.cpp +++ b/src/frontends/onnx/frontend/src/core/graph.cpp @@ -11,7 +11,6 @@ #include "core/transform.hpp" #include "core/value_info.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" #include "onnx_framework_node.hpp" #include "onnx_import/core/node.hpp" @@ -148,7 +147,7 @@ Graph::Graph(const std::string& model_dir, for (const auto& initializer_tensor : m_model->get_graph().initializer()) { if (initializer_tensor.has_name()) { Tensor tensor = Tensor{initializer_tensor, m_model_dir, m_mmap_cache}; - std::shared_ptr ov_constant; + std::shared_ptr ov_constant; // For each initializer create a Constant node and store it 
in cache try { ov_constant = tensor.get_ov_constant(); @@ -165,7 +164,7 @@ Graph::Graph(const std::string& model_dir, } } - // Process all ONNX graph inputs, convert them to nGraph nodes and store in cache + // Process all ONNX graph inputs, convert them to OV nodes and store in cache for (const auto& input : m_model->get_graph().input()) { // Check if a Constant node was already created from an initializer if (m_cache->contains(input.name())) { @@ -183,7 +182,7 @@ void Graph::convert_to_ov_nodes() { const float total = static_cast(m_model->get_graph().node().size()); unsigned int completed = 0u; std::map op_statistics; - // Process ONNX graph nodes, convert to nGraph nodes + // Process ONNX graph nodes, convert to OV nodes for (const auto& node_proto : m_model->get_graph().node()) { if (m_extensions.telemetry) { op_statistics[node_proto.op_type()]++; @@ -249,10 +248,10 @@ void Graph::set_metadata(std::shared_ptr& model) const { } } -std::shared_ptr Graph::convert() { +std::shared_ptr Graph::convert() { convert_to_ov_nodes(); remove_dangling_parameters(); - auto function = create_function(); + auto function = create_model(); set_metadata(function); return function; } @@ -263,10 +262,10 @@ OutputVector Graph::make_framework_nodes(const Node& onnx_node) { if (onnx_node.has_subgraphs()) { const auto& subgraphs = onnx_node.get_subgraphs(); auto inputs = onnx_node.get_ng_inputs(); - std::vector> functions; + std::vector> models; for (const auto& kv : subgraphs) { auto& subgraph = kv.second; - functions.push_back(subgraph->decode()); + models.push_back(subgraph->decode()); for (const auto& input : subgraph->get_inputs_from_parent()) { const auto& name = input.get_node()->get_friendly_name(); if (std::find_if(inputs.begin(), inputs.end(), [&name](const Output& n) -> bool { @@ -276,7 +275,7 @@ OutputVector Graph::make_framework_nodes(const Node& onnx_node) { } } } - framework_node = std::make_shared(onnx_node, functions, inputs); + framework_node = std::make_shared(onnx_node, models, inputs); } else { framework_node = std::make_shared(onnx_node); } @@ -287,7 +286,7 @@ void Graph::decode_to_framework_nodes() { const float total = static_cast(m_model->get_graph().node().size()); unsigned int completed = 0u; std::map op_statistics; - // Process ONNX graph nodes, convert to nGraph nodes + // Process ONNX graph nodes, convert to OV nodes for (const auto& node_proto : m_model->get_graph().node()) { if (m_extensions.telemetry) { op_statistics[node_proto.op_type()]++; @@ -312,22 +311,22 @@ void Graph::decode_to_framework_nodes() { } OPENVINO_SUPPRESS_DEPRECATED_END -std::shared_ptr Graph::create_function() { - auto function = std::make_shared(get_ov_outputs(), m_parameters, get_name()); +std::shared_ptr Graph::create_model() { + auto model = std::make_shared(get_ov_outputs(), m_parameters, get_name()); const auto& onnx_outputs = m_model->get_graph().output(); - for (std::size_t i{0}; i < function->get_output_size(); ++i) { - const auto& result_node = function->get_output_op(i); + for (std::size_t i{0}; i < model->get_output_size(); ++i) { + const auto& result_node = model->get_output_op(i); const std::string onnx_output_name = onnx_outputs.Get(static_cast(i)).name(); result_node->set_friendly_name(onnx_output_name + "/sink_port_0"); const auto& previous_operation = result_node->get_input_node_shared_ptr(0); previous_operation->set_friendly_name(onnx_output_name); } - return function; + return model; } -std::shared_ptr Graph::decode() { +std::shared_ptr Graph::decode() { decode_to_framework_nodes(); - auto 
-    auto function = create_function();
+    auto function = create_model();
    auto& rt_info = function->get_rt_info();
    rt_info[ONNX_GRAPH_RT_ATTRIBUTE] = shared_from_this();
    return function;
@@ -486,9 +485,9 @@ Output<ov::Node> Subgraph::get_ov_node_from_cache(const std::string& name) {
    return new_param;
}

-std::shared_ptr<Function> Subgraph::convert() {
+std::shared_ptr<ov::Model> Subgraph::convert() {
    convert_to_ov_nodes();
-    return create_function();
+    return create_model();
}

const std::vector<Output<ov::Node>> Subgraph::get_inputs_from_parent() const {
diff --git a/src/frontends/onnx/frontend/src/core/graph.hpp b/src/frontends/onnx/frontend/src/core/graph.hpp
index bccde080ad1339..f11f0936f5dadb 100644
--- a/src/frontends/onnx/frontend/src/core/graph.hpp
+++ b/src/frontends/onnx/frontend/src/core/graph.hpp
@@ -34,8 +34,8 @@ class Graph : public std::enable_shared_from_this<Graph> {
    Graph& operator=(const Graph&) = delete;
    Graph& operator=(Graph&&) = default;

-    std::shared_ptr<Function> decode();
-    virtual std::shared_ptr<Function> convert();
+    std::shared_ptr<ov::Model> decode();
+    virtual std::shared_ptr<ov::Model> convert();
    OutputVector get_ov_outputs();
    const std::string& get_name() const {
        return m_model->get_graph().name();
@@ -80,7 +80,7 @@ class Graph : public std::enable_shared_from_this<Graph> {
    void convert_to_ov_nodes();
    void remove_dangling_parameters();
    void set_metadata(std::shared_ptr<ov::Model>& model) const;
-    std::shared_ptr<Function> create_function();
+    std::shared_ptr<ov::Model> create_model();

    ParameterVector m_parameters;
    std::unique_ptr<Model> m_model;
@@ -111,7 +111,7 @@ class Subgraph : public Graph {
    /// \return Vector of edge nodes from parent scope.
    const std::vector<Output<ov::Node>> get_inputs_from_parent() const;

-    std::shared_ptr<Function> convert() override;
+    std::shared_ptr<ov::Model> convert() override;

    Subgraph() = delete;
diff --git a/src/frontends/onnx/frontend/src/core/graph_cache.cpp b/src/frontends/onnx/frontend/src/core/graph_cache.cpp
index 5966310d3d4ba0..8ef624c76da4fa 100644
--- a/src/frontends/onnx/frontend/src/core/graph_cache.cpp
+++ b/src/frontends/onnx/frontend/src/core/graph_cache.cpp
@@ -8,7 +8,7 @@
namespace ngraph {
namespace onnx_import {
-void GraphCache::emplace_node(const std::string& name, Output<Node>&& node) {
+void GraphCache::emplace_node(const std::string& name, ov::Output<ov::Node>&& node) {
    m_graph_cache_map[name] = std::move(node);
}

@@ -19,7 +19,7 @@ void GraphCache::remove_node(const std::string& name) {
    }
}

-Output<Node> GraphCache::get_node(const std::string& name) const {
+ov::Output<ov::Node> GraphCache::get_node(const std::string& name) const {
    try {
        return m_graph_cache_map.at(name);
    } catch (const std::out_of_range&) {
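A caller-side sketch of what the rename above means in practice: decode(), convert(), and the new create_model() all traffic in std::shared_ptr<ov::Model>, so an ov::Model reaches downstream code even through the public API. The snippet is illustrative only ("model.onnx" is a placeholder path) and goes through ov::Core rather than the internal Graph class:

#include <iostream>
#include "openvino/openvino.hpp"

int main() {
    ov::Core core;
    // read_model() dispatches to the ONNX frontend, whose Graph::convert()
    // now returns an ov::Model instead of an ngraph::Function.
    std::shared_ptr<ov::Model> model = core.read_model("model.onnx");
    std::cout << model->get_friendly_name() << "\n";
    return 0;
}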
diff --git a/src/frontends/onnx/frontend/src/core/graph_cache.hpp b/src/frontends/onnx/frontend/src/core/graph_cache.hpp
index f763abe73484d0..fe021e64289dfc 100644
--- a/src/frontends/onnx/frontend/src/core/graph_cache.hpp
+++ b/src/frontends/onnx/frontend/src/core/graph_cache.hpp
@@ -21,7 +21,7 @@ class GraphCache {
    ///
    /// \param[in] name  The name of node added to the cache.
    /// \param[in] node  The node added to the cache.
-    void emplace_node(const std::string& name, Output<Node>&& node);
+    void emplace_node(const std::string& name, ov::Output<ov::Node>&& node);

    /// \brief Remove node from the cache
    ///
@@ -35,7 +35,7 @@ class GraphCache {
    /// \param[in] name  The name of the node.
    ///
    /// \return The node named `name`.
-    virtual Output<Node> get_node(const std::string& name) const;
+    virtual ov::Output<ov::Node> get_node(const std::string& name) const;

    /// \brief Return true if the node named `name` exists in the cache.
    ///
@@ -47,7 +47,7 @@ class GraphCache {
    virtual ~GraphCache() = default;

private:
-    std::map<std::string, Output<Node>> m_graph_cache_map;
+    std::map<std::string, ov::Output<ov::Node>> m_graph_cache_map;
};
}  // namespace onnx_import
}  // namespace ngraph
diff --git a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp
index c8818e98114580..eb70a82ea08e16 100644
--- a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp
+++ b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp
@@ -8,8 +8,8 @@

 #include <onnx/onnx_pb.h>

-#include "ngraph/shape.hpp"
-#include "ngraph/type/element_type.hpp"
+#include "openvino/core/shape.hpp"
+#include "openvino/core/type/element_type.hpp"
 #include "tensor.hpp"

namespace ngraph {
@@ -26,7 +26,7 @@ class SparseTensor {
        if (m_shape == Shape{0}) {
            // It's possible to construct a sparse tensor in ONNX with "dims: 0" property
            // Such tensor contains a scalar. This results in a Shape{0} stored in m_shape.
-            // In nGraph a scalar is represented with Shape{} and thus this replacement.
+            // In OpenVINO a scalar is represented with Shape{} and thus this replacement.
            m_shape = Shape{};
        }
    }
diff --git a/src/frontends/onnx/frontend/src/core/transform.hpp b/src/frontends/onnx/frontend/src/core/transform.hpp
index c061b7ab88de80..4a4e3707315043 100644
--- a/src/frontends/onnx/frontend/src/core/transform.hpp
+++ b/src/frontends/onnx/frontend/src/core/transform.hpp
@@ -10,8 +10,11 @@ namespace ngraph {
namespace onnx_import {
namespace transform {

-static const std::vector<std::string> onnx_functions_to_expand = {"Bernoulli",
+static const std::vector<std::string> onnx_functions_to_expand = {"AffineGrid",
+                                                                  "Bernoulli",
                                                                   "Celu",
+                                                                  "CenterCropPad",
+                                                                  "Gelu",
                                                                   "NegativeLogLikelihoodLoss",
                                                                   "SoftmaxCrossEntropyLoss",
                                                                   "LayerNormalization"};
@@ -48,7 +51,7 @@ static const std::vector<std::string> legacy_ops_to_fixup = {"DeformableConv2D",
/// Some legacy models use custom operators (listed in legacy_ops_to_fixup vector) which
/// were registered in the default ONNX domain. This function updates nodes with these
/// operations to use OPENVINO_ONNX_DOMAIN in order to process them correctly
-/// in the nGraph ONNX Importer.
+/// in the OpenVINO ONNX Frontend.
///
/// \param model_proto Protobuf message with ONNX model to transform.
void fixup_legacy_operators(ONNX_NAMESPACE::ModelProto& model_proto);
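The onnx_functions_to_expand list above drives a plain name lookup: ops on the list are expanded from their ONNX function definitions before import. A minimal sketch of that check (the helper name should_expand is illustrative, not part of the patch):

#include <algorithm>
#include <string>
#include <vector>

static const std::vector<std::string> onnx_functions_to_expand = {"AffineGrid", "Bernoulli", "Celu",
                                                                  "CenterCropPad", "Gelu"};

// Returns true when an ONNX node's op_type matches one of the listed functions.
bool should_expand(const std::string& op_type) {
    return std::find(onnx_functions_to_expand.begin(), onnx_functions_to_expand.end(), op_type) !=
           onnx_functions_to_expand.end();
}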
diff --git a/src/frontends/onnx/frontend/src/core/value_info.hpp b/src/frontends/onnx/frontend/src/core/value_info.hpp
index 99ac5b3bca3dba..5004064c425fd3 100644
--- a/src/frontends/onnx/frontend/src/core/value_info.hpp
+++ b/src/frontends/onnx/frontend/src/core/value_info.hpp
@@ -7,17 +7,16 @@

 #include <onnx/onnx_pb.h>

 #include "core/tensor.hpp"
-#include "default_opset.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/parameter.hpp"
-#include "ngraph/partial_shape.hpp"
-#include "ngraph/type/element_type.hpp"
 #include "onnx_common/utils.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/parameter.hpp"
 #include "utils/common.hpp"

using namespace ov::frontend::onnx::common;

+using namespace ov::op;
+
namespace ngraph {
namespace onnx_import {
class ValueInfo {
@@ -49,7 +48,7 @@ class ValueInfo {
        if (m_value_info_proto->type().tensor_type().has_elem_type()) {
            return common::get_ov_element_type(m_value_info_proto->type().tensor_type().elem_type());
        }
-        return ngraph::element::dynamic;
+        return ov::element::dynamic;
    }

    std::shared_ptr<ov::Node> get_ov_node(ParameterVector& parameters,
@@ -63,14 +62,14 @@ class ValueInfo {
    }

protected:
-    std::shared_ptr<ngraph::op::Parameter> get_ov_parameter() const {
-        auto parameter = std::make_shared<ngraph::op::Parameter>(get_element_type(), get_shape());
+    std::shared_ptr<v0::Parameter> get_ov_parameter() const {
+        auto parameter = std::make_shared<v0::Parameter>(get_element_type(), get_shape());
        parameter->set_friendly_name(get_name());
        parameter->get_output_tensor(0).set_names({get_name()});
        return parameter;
    }

-    std::shared_ptr<ngraph::op::Constant> get_ov_constant(const Tensor& tensor) const {
+    std::shared_ptr<v0::Constant> get_ov_constant(const Tensor& tensor) const {
        return tensor.get_ov_constant();
    }
diff --git a/src/frontends/onnx/frontend/src/edge_mapper.cpp b/src/frontends/onnx/frontend/src/edge_mapper.cpp
index 8d81ba4361f562..22e1fb4b8bc009 100644
--- a/src/frontends/onnx/frontend/src/edge_mapper.cpp
+++ b/src/frontends/onnx/frontend/src/edge_mapper.cpp
@@ -8,7 +8,7 @@

 #include <onnx/onnx_pb.h>

-#include "ngraph/except.hpp"
+#include "openvino/core/except.hpp"
 #include "openvino/frontend/exception.hpp"

using namespace ov;
diff --git a/src/frontends/onnx/frontend/src/edge_mapper.hpp b/src/frontends/onnx/frontend/src/edge_mapper.hpp
index 11e1f76deae1a5..b4b4d846337bbb 100644
--- a/src/frontends/onnx/frontend/src/edge_mapper.hpp
+++ b/src/frontends/onnx/frontend/src/edge_mapper.hpp
@@ -39,8 +39,8 @@ class EdgeMapper {
    /// In such a case the algorithm tries to match the given node name
    /// with the input name (providing an input index is not enough).
    /// If a unique edge is found, it will be returned.
-    /// If InputEdge cannot be determined based on parameter values an ngraph_error
-    /// exception will be thrown.
+    /// If InputEdge cannot be determined based on parameter values an
+    /// ov::Exception will be thrown.
    ///
    /// \param node An EditorNode helper structure created based on a node name
    ///             or a node output name.
@@ -56,8 +56,8 @@ class EdgeMapper {
    /// In such a case the algorithm will try to match the given node name
    /// with the output name (providing an output index is not enough).
    /// If after such operation a found edge is unique, it is returned.
-    /// If OutputEdge cannot be determined based on given params the ngraph_error
-    /// exception is thrown.
+    /// If OutputEdge cannot be determined based on given params an
+    /// ov::Exception is thrown.
    ///
    /// \param node An EditorNode helper structure created based on a node name
    ///             or a node output name.
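Since the edge-resolution failures documented above are now reported as ov::Exception rather than ngraph_error, callers catch the OpenVINO type. A hedged sketch of the caller side (the OPENVINO_THROW call stands in for the real failure path inside the edge mapper):

#include <iostream>
#include "openvino/core/except.hpp"

void resolve_edge_or_report() {
    try {
        OPENVINO_THROW("cannot determine a unique input edge");  // stand-in for the mapper's failure
    } catch (const ov::Exception& e) {
        std::cerr << "edge resolution failed: " << e.what() << "\n";
    }
}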
diff --git a/src/frontends/onnx/frontend/src/editor.cpp b/src/frontends/onnx/frontend/src/editor.cpp
index 0ea9b1455f0ded..906675a0e4998a 100644
--- a/src/frontends/onnx/frontend/src/editor.cpp
+++ b/src/frontends/onnx/frontend/src/editor.cpp
@@ -22,7 +22,7 @@ using namespace ov;
using namespace ov::onnx_editor;
using namespace ov::frontend::onnx::common;

-NGRAPH_SUPPRESS_DEPRECATED_START
+OPENVINO_SUPPRESS_DEPRECATED_START

namespace {
using namespace ONNX_NAMESPACE;
@@ -97,7 +97,7 @@ void add_dim_to_onnx_shape(const Dimension& dim, ONNX_NAMESPACE::TensorShapeProto& onnx_shape) {
    if (dim.is_static()) {
        new_dim->set_dim_value(dim.get_length());
    } else {
-        // nGraph Dimension is also considered dynamic if it represents a constrained range
+        // Dimension is also considered dynamic if it represents a constrained range
        // of allowed values as well as if it's unconstrained at all. ONNX cannot represent
        // ranged dimensions so this might not be 100% accurate. The modified ONNX model will
        // always have a fully dynamic dimension in this case.
@@ -140,7 +140,7 @@ std::string extract_name(const T& input_or_initializer) {

void modify_initializer(TensorProto& initializer,
                        const std::string& name,
-                        const std::shared_ptr<ngraph::op::Constant> values,
+                        const std::shared_ptr<ov::op::v0::Constant> values,
                        ValueInfoProto* input) {
    const auto elem_type = values->get_element_type();
    OPENVINO_ASSERT(is_supported_ov_type(elem_type),
@@ -392,7 +392,7 @@ element::Type_t onnx_editor::ONNXModelEditor::get_input_type(const std::string& tensor_name) const {
    return ngraph::onnx_import::common::get_ov_element_type(type);
}

-void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map<std::string, ngraph::PartialShape>& input_shapes) {
+void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map<std::string, ov::PartialShape>& input_shapes) {
    auto* onnx_graph = m_pimpl->m_model_proto->mutable_graph();

    for (const auto& input_desc : input_shapes) {
@@ -540,7 +540,7 @@ std::shared_ptr<Model> onnx_editor::ONNXModelEditor::get_function() const {
}

void onnx_editor::ONNXModelEditor::set_input_values(
-    const std::map<std::string, std::shared_ptr<ngraph::op::Constant>>& input_values) {
+    const std::map<std::string, std::shared_ptr<ov::op::v0::Constant>>& input_values) {
    auto onnx_graph = m_pimpl->m_model_proto->mutable_graph();

    for (const auto& input : input_values) {
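With the signature change above, reshape requests to the editor are keyed by input name and carry ov::PartialShape values. A small sketch of building such a request (the input name "data" is illustrative):

#include <map>
#include <string>
#include "openvino/core/partial_shape.hpp"

std::map<std::string, ov::PartialShape> make_reshape_request() {
    // Fix batch and channels, leave the spatial dimensions dynamic.
    return {{"data", ov::PartialShape{1, 3, ov::Dimension::dynamic(), ov::Dimension::dynamic()}}};
}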
diff --git a/src/frontends/onnx/frontend/src/editor.hpp b/src/frontends/onnx/frontend/src/editor.hpp
index 3edb098e77291c..1a6a5faf60c6a6 100644
--- a/src/frontends/onnx/frontend/src/editor.hpp
+++ b/src/frontends/onnx/frontend/src/editor.hpp
@@ -9,15 +9,13 @@
 #include <map>

 #include "editor_types.hpp"
-#include "ngraph/deprecated.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/partial_shape.hpp"
-#include "ngraph/type/element_type.hpp"
 #include "onnx_import/onnx_importer_visibility.hpp"
+#include "openvino/core/deprecated.hpp"
+#include "openvino/core/model.hpp"
 #include "openvino/frontend/extension/holder.hpp"
 #include "openvino/frontend/extension/progress_reporter.hpp"
 #include "openvino/frontend/extension/telemetry.hpp"
+#include "openvino/op/constant.hpp"
 #include "utils/tensor_external_data.hpp"

namespace ov {
namespace onnx_editor {
/// \brief A class representing a set of utilities allowing modification of an ONNX model
///
/// \note This class can be used to modify an ONNX model before it gets translated to
-///       an ngraph::Function by the import_onnx_model function. It lets you modify the
+///       an ov::Model by the frontend->convert method. It lets you modify the
///       model's input types and shapes, extract a subgraph and more.
class ONNX_IMPORTER_API ONNXModelEditor final {
public:
@@ -73,7 +71,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    ///        be used to modify the ONNX model loaded from a file. This
    ///        method throws an exception if the model doesn't contain any of
    ///        the inputs specified in its parameter.
-    void set_input_shapes(const std::map<std::string, ngraph::PartialShape>& input_shapes);
+    void set_input_shapes(const std::map<std::string, ov::PartialShape>& input_shapes);

    /// \brief Get shape of ONNX tensor indicated by the tensor_name.
    ///
@@ -109,7 +107,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    /// \param input_values A collection of pairs {input_name: new_input_values} used to
    ///                     update the ONNX model. Initializers already existing are
    ///                     overwritten.
-    void set_input_values(const std::map<std::string, std::shared_ptr<ngraph::op::Constant>>& input_values);
+    void set_input_values(const std::map<std::string, std::shared_ptr<ov::op::v0::Constant>>& input_values);

    /// \brief Changes the name of given tensor.
    ///
@@ -154,7 +152,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    /// \brief Returns a serialized ONNX model, possibly modified by the editor.
    std::string model_string() const;

-    /// \brief Converts an edited ONNX model to an nGraph Function representation.
+    /// \brief Converts an edited ONNX model to an OpenVINO Model representation.
    std::shared_ptr<Model> get_function() const;

    /// \brief Returns a list of all inputs of the in-memory model.
@@ -204,8 +202,8 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    /// In such a case the algorithm tries to match the given node name
    /// with the input name (providing an input index is not enough).
    /// If a unique edge is found, it will be returned.
-    /// If InputEdge cannot be determined based on parameter values an ngraph_error
-    /// exception will be thrown.
+    /// If InputEdge cannot be determined based on parameter values an ov::Exception
+    /// will be thrown.
    ///
    /// \param node A node helper structure created based on a node name
    ///             or a node output name.
@@ -221,8 +219,8 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    /// In such a case the algorithm will try to match the given node name
    /// with the output name (providing an output index is not enough).
    /// If after such operation a found edge is unique, it is returned.
-    /// If OutputEdge cannot be determined based on given params the ngraph_error
-    /// exception is thrown.
+    /// If OutputEdge cannot be determined based on given params an ov::Exception
+    /// will be thrown.
    ///
    /// \param node A node helper structure created based on a node name
    ///             or a node output name.
@@ -287,7 +285,7 @@ class ONNX_IMPORTER_API ONNXModelEditor final {
    ///
    std::vector<EditorOutput> get_output_ports(const EditorNode& node) const;

-    /// \brief Returns a nGraph function based on edited model
+    /// \brief Returns an OpenVINO Model based on edited model
    ///        decoded to framework nodes
    ///
    std::shared_ptr<Model> decode();
diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp
index b4cb7c168da1e6..29ffcccbd63603 100644
--- a/src/frontends/onnx/frontend/src/input_model.cpp
+++ b/src/frontends/onnx/frontend/src/input_model.cpp
@@ -96,6 +96,8 @@ ov::frontend::Place::Ptr InputModel::get_place_by_operation_name_and_output_port(
}

void InputModel::set_name_for_tensor(const ov::frontend::Place::Ptr& tensor, const std::string& new_name) {
+    FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place.");
+
    const auto onnx_tensor = std::dynamic_pointer_cast<PlaceTensor>(tensor);
    FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type.");
    const auto original_name = onnx_tensor->get_names().at(0);
@@ -113,6 +115,8 @@ void InputModel::set_name_for_tensor(const ov::frontend::Place::Ptr& tensor, const std::string& new_name) {
}

void InputModel::set_name_for_operation(const ov::frontend::Place::Ptr& operation, const std::string& new_name) {
+    FRONT_END_GENERAL_CHECK(operation, __FUNCTION__, " expects a pointer to place.");
+
    const auto onnx_operation = std::dynamic_pointer_cast<PlaceOp>(operation);
    FRONT_END_GENERAL_CHECK(onnx_operation, __FUNCTION__, " expects a pointer to place of ONNX operation type.");
    onnx_operation->set_name(new_name);
@@ -125,12 +129,15 @@ void InputModel::free_name_for_operation(const std::string& name) {
void InputModel::set_name_for_dimension(const ov::frontend::Place::Ptr& tensor,
                                        size_t shape_dim_index,
                                        const std::string& dim_name) {
+    FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place.");
+
    const auto onnx_tensor = std::dynamic_pointer_cast<PlaceTensor>(tensor);
    FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type.");
    onnx_tensor->set_name_for_dimension(shape_dim_index, dim_name);
}

void InputModel::add_name_for_tensor(const ov::frontend::Place::Ptr& tensor, const std::string& new_name) {
+    FRONT_END_GENERAL_CHECK(tensor, __FUNCTION__, " expects a pointer to place.");
    FRONT_END_GENERAL_CHECK(!new_name.empty(), "The additional tensor name cannot be empty.");

    ov::frontend::Place::Ptr tensor_place = tensor;
@@ -153,6 +160,8 @@ void InputModel::free_name_for_tensor(const std::string&) {
}

void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& shape) {
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::string input_name;  // name of the model input which should be reshaped
    const auto input_edge = std::dynamic_pointer_cast<PlaceInputEdge>(place);
    if (input_edge) {
@@ -173,6 +182,8 @@ void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& shape) {
}

ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const {
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::string tensor_name;  // name of the model input which should be reshaped
    const auto input_edge = std::dynamic_pointer_cast<PlaceInputEdge>(place);
    const auto output_edge = std::dynamic_pointer_cast<PlaceOutputEdge>(place);
@@ -194,13 +205,16 @@ ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const {
}

void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) {
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::map<std::string, ov::element::Type_t> m;
    m[place->get_names().at(0)] = type;
    m_editor->set_input_types(m);
}

ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& place) const {
-    OPENVINO_ASSERT(place, "Cannot return a type for nullptr Place.");
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::string tensor_name;
    const auto input_edge = std::dynamic_pointer_cast<PlaceInputEdge>(place);
    const auto output_edge = std::dynamic_pointer_cast<PlaceOutputEdge>(place);
@@ -333,6 +347,8 @@ void InputModel::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
}

ov::frontend::Place::Ptr InputModel::add_output(const ov::frontend::Place::Ptr& place) {
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::string name = place->get_names().at(0);

    const auto& outputs = m_editor->model_outputs();
@@ -364,6 +380,8 @@ ov::frontend::Place::Ptr InputModel::add_output(const ov::frontend::Place::Ptr& place) {
}

void InputModel::remove_output(const ov::frontend::Place::Ptr& place) {
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
+
    std::string name = place->get_names().at(0);
    std::vector<ov::frontend::Place::Ptr> outputs = get_outputs();
    const auto& output_names = m_editor->model_outputs();
@@ -383,12 +401,14 @@ void InputModel::remove_output(const ov::frontend::Place::Ptr& place) {
}

void InputModel::cut_and_add_new_input(const ov::frontend::Place::Ptr& place, const std::string& new_name_optional) {
-    std::vector<ov::frontend::Place::Ptr> inputs = get_inputs();
-    std::vector<ov::frontend::Place::Ptr> outputs = get_outputs();
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");

    if (place->is_input())
        return;

+    std::vector<ov::frontend::Place::Ptr> inputs = get_inputs();
+    std::vector<ov::frontend::Place::Ptr> outputs = get_outputs();
+
    const auto edge_place = convert_place_to_input_edge({place});
    const auto edge_outputs = convert_place_to_output_edge(outputs);

@@ -404,15 +424,17 @@ void InputModel::cut_and_add_new_input(const ov::frontend::Place::Ptr& place, const std::string& new_name_optional) {
}

void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) {
-    std::map<std::string, std::shared_ptr<ngraph::op::Constant>> map;
+    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");

    if (const auto var_place = std::dynamic_pointer_cast<PlaceTensor>(place)) {
+        std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> map;
+
        auto name = place->get_names().at(0);
        auto p_shape = m_editor->get_tensor_shape(name);
        auto el_type = m_editor->get_input_type(name);

-        std::shared_ptr<ngraph::op::Constant> constant =
-            ngraph::op::Constant::create(el_type, p_shape.to_shape(), value);
+        std::shared_ptr<ov::op::v0::Constant> constant =
+            ov::op::v0::Constant::create(el_type, p_shape.to_shape(), value);

        constant->set_friendly_name(name);
        map.emplace(name, constant);
@@ -488,6 +510,8 @@ std::vector<OutputEdge> InputModel::convert_place_to_output_edge(
}

void InputModel::add_tensor_names(std::shared_ptr<Model>& model) {
+    FRONT_END_GENERAL_CHECK(model, __FUNCTION__, " expects a pointer to model.");
+
    auto model_inputs = model->inputs();
    const auto find_input_by_tensor_name = [&model_inputs](const std::string& name) {
        return std::find_if(std::begin(model_inputs),
@@ -508,6 +532,8 @@ void InputModel::add_tensor_names(std::shared_ptr<Model>& model) {
}

void InputModel::reshape_model_inputs(std::shared_ptr<Model>& model) {
+    FRONT_END_GENERAL_CHECK(model, __FUNCTION__, " expects a pointer to model.");
+
    const auto& inputs = model->inputs();
    const auto is_input_name = [&inputs](const std::string& name) {
        return std::find_if(std::begin(inputs), std::end(inputs), [&name](const OutputVector::value_type& input) {
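All of the InputModel entry points above now share the same guard: validate the incoming Place (or Model) pointer before any dynamic_pointer_cast or dereference. Reduced to its essentials, the pattern looks like this (use_place is an illustrative free function, not part of the patch):

#include "openvino/frontend/exception.hpp"

void use_place(const ov::frontend::Place::Ptr& place) {
    // Fail fast with a frontend exception instead of dereferencing a null pointer.
    FRONT_END_GENERAL_CHECK(place, __FUNCTION__, " expects a pointer to place.");
    // ... `place` is safe to use from here on
}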
diff --git a/src/frontends/onnx/frontend/src/onnx.cpp b/src/frontends/onnx/frontend/src/onnx.cpp
index b9dc2f40f644cc..3ad2995f70b446 100644
--- a/src/frontends/onnx/frontend/src/onnx.cpp
+++ b/src/frontends/onnx/frontend/src/onnx.cpp
@@ -39,7 +39,7 @@ std::shared_ptr<Function> import_onnx_model(std::istream& stream,
                                            std::move(extensions));
    OPENVINO_SUPPRESS_DEPRECATED_END
    const auto error_message = common::collect_translation_exceptions(model);
-    NGRAPH_CHECK(error_message.empty(), error_message);
+    OPENVINO_ASSERT(error_message.empty(), error_message);
    return model;
}

@@ -54,7 +54,7 @@ std::shared_ptr<Function> import_onnx_model(const std::string& file_path,
    const auto model = import_onnx_model(model_stream, file_path, enable_mmap);
    OPENVINO_SUPPRESS_DEPRECATED_END
    const auto error_message = common::collect_translation_exceptions(model);
-    NGRAPH_CHECK(error_message.empty(), error_message);
+    OPENVINO_ASSERT(error_message.empty(), error_message);
    return model;
}
diff --git a/src/frontends/onnx/frontend/src/op/abs.hpp b/src/frontends/onnx/frontend/src/op/abs.hpp
index 34b21c91920062..135dc4b3116200 100644
--- a/src/frontends/onnx/frontend/src/op/abs.hpp
+++ b/src/frontends/onnx/frontend/src/op/abs.hpp
@@ -7,12 +7,9 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/abs.hpp"

namespace ngraph {
namespace onnx_import {
@@ -22,7 +19,7 @@ inline OutputVector abs(const Node& node) {
    CHECK_VALID_NODE(node,
                     !node.has_attribute("consumed_inputs"),
                     "consumed_inputs legacy attribute of Abs op is not supported");
-    return {std::make_shared<default_opset::Abs>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Abs>(node.get_ng_inputs().at(0))};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/acos.hpp b/src/frontends/onnx/frontend/src/op/acos.hpp
index ab9c64421a80ce..65181f0601efb6 100644
--- a/src/frontends/onnx/frontend/src/op/acos.hpp
+++ b/src/frontends/onnx/frontend/src/op/acos.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/acos.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_7 {
inline OutputVector acos(const Node& node) {
-    return {std::make_shared<default_opset::Acos>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Acos>(node.get_ng_inputs().at(0))};
}

}  // namespace set_7
diff --git a/src/frontends/onnx/frontend/src/op/acosh.hpp b/src/frontends/onnx/frontend/src/op/acosh.hpp
index 745a8fa8c5c2cd..eefa36762db808 100644
--- a/src/frontends/onnx/frontend/src/op/acosh.hpp
+++ b/src/frontends/onnx/frontend/src/op/acosh.hpp
@@ -7,16 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/acosh.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_9 {
inline OutputVector acosh(const Node& node) {
-    return {std::make_shared<default_opset::Acosh>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v3::Acosh>(node.get_ng_inputs().at(0))};
}

}  // namespace set_9
diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp
index 0aec90df094022..29540fa3175df2 100644
--- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp
+++ b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp
@@ -4,8 +4,10 @@

 #include "op/adaptive_avg_pooling2d.hpp"

-#include "default_opset.hpp"
 #include "exceptions.hpp"
+#include "openvino/op/adaptive_avg_pool.hpp"
+
+using namespace ov::op;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
@@ -19,7 +21,7 @@ OutputVector adaptive_avg_pooling2d(const Node& node) {
    CHECK_VALID_NODE(node, num_inputs == 2, "adaptive_avg_pooling2d expects 2 input tensors. Got: ", num_inputs);

-    return {std::make_shared<default_opset::AdaptiveAvgPool>(inputs[0], inputs[1])};
+    return {std::make_shared<v8::AdaptiveAvgPool>(inputs[0], inputs[1])};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/add.cpp b/src/frontends/onnx/frontend/src/op/add.cpp
index 61ac900e731775..3cadf83099eb02 100644
--- a/src/frontends/onnx/frontend/src/op/add.cpp
+++ b/src/frontends/onnx/frontend/src/op/add.cpp
@@ -4,11 +4,12 @@

 #include "op/add.hpp"

-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/shape.hpp"
+#include "openvino/op/add.hpp"
 #include "utils/common.hpp"

+using namespace ov::op;
+
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
@@ -18,19 +19,19 @@ OutputVector add(const Node& node) {
    CHECK_VALID_NODE(node,
                     !node.has_attribute("consumed_inputs"),
                     "consumed_inputs legacy attribute of Add op is not supported");
-    return common::handle_opset6_binary_op<default_opset::Add>(node);
+    return common::handle_opset6_binary_op<v1::Add>(node);
}

}  // namespace set_1

namespace set_6 {
OutputVector add(const Node& node) {
-    return common::handle_opset6_binary_op<default_opset::Add>(node);
+    return common::handle_opset6_binary_op<v1::Add>(node);
}
}  // namespace set_6

namespace set_7 {
OutputVector add(const Node& node) {
-    return {std::make_shared<default_opset::Add>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
+    return {std::make_shared<v1::Add>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
}
}  // namespace set_7
diff --git a/src/frontends/onnx/frontend/src/op/add.hpp b/src/frontends/onnx/frontend/src/op/add.hpp
index 12f9dd548e8008..ed8d9e8aa1a95f 100644
--- a/src/frontends/onnx/frontend/src/op/add.hpp
+++ b/src/frontends/onnx/frontend/src/op/add.hpp
@@ -7,9 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/affine.cpp b/src/frontends/onnx/frontend/src/op/affine.cpp
index 443a353ff853b1..419a3c3f0da14f 100644
--- a/src/frontends/onnx/frontend/src/op/affine.cpp
+++ b/src/frontends/onnx/frontend/src/op/affine.cpp
@@ -4,9 +4,11 @@

 #include "op/affine.hpp"

-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/shape.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/multiply.hpp"
+
+using namespace ov::op;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
@@ -28,8 +30,7 @@ OutputVector affine(const Node& node) {
    const auto alpha_const = node.get_attribute_as_constant<float>("alpha", data.get_element_type());
    const auto beta_const = node.get_attribute_as_constant<float>("beta", data.get_element_type());

-    return {
-        std::make_shared<default_opset::Add>(std::make_shared<default_opset::Multiply>(data, alpha_const), beta_const)};
+    return {std::make_shared<v1::Add>(std::make_shared<v1::Multiply>(data, alpha_const), beta_const)};
}

}  // namespace set_1
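The Affine importer above is just the y = alpha * x + beta contract lowered to Multiply followed by Add. A scalar restatement of that arithmetic (values are illustrative):

#include <cassert>

int main() {
    const float alpha = 2.0f, beta = 1.0f, x = 3.0f;
    const float y = alpha * x + beta;  // Add(Multiply(data, alpha_const), beta_const)
    assert(y == 7.0f);
    return 0;
}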
"openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/and.hpp b/src/frontends/onnx/frontend/src/op/and.hpp index f40216e8e14981..c10dd2740896ae 100644 --- a/src/frontends/onnx/frontend/src/op/and.hpp +++ b/src/frontends/onnx/frontend/src/op/and.hpp @@ -7,26 +7,21 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/and.hpp" #include "onnx_import/core/node.hpp" -#include "utils/common.hpp" +#include "openvino/op/logical_and.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector logical_and(const Node& node) { - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { inline OutputVector logical_and(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/argmax.cpp b/src/frontends/onnx/frontend/src/op/argmax.cpp index 5e9fae310c1cfd..0147316231c13e 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.cpp +++ b/src/frontends/onnx/frontend/src/op/argmax.cpp @@ -5,7 +5,6 @@ #include "op/argmax.hpp" #include "exceptions.hpp" -#include "onnx_import/core/node.hpp" #include "utils/arg_min_max_factory.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/argmax.hpp b/src/frontends/onnx/frontend/src/op/argmax.hpp index c88d63dc3fc8de..8e2aec04362cf1 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.hpp +++ b/src/frontends/onnx/frontend/src/op/argmax.hpp @@ -7,29 +7,28 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -/// \brief Convert ONNX ArgMax operation to an nGraph node. +/// \brief Convert ONNX ArgMax operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. OutputVector argmax(const Node& node); } // namespace set_1 namespace set_12 { -/// \brief Convert ONNX ArgMax operation to an nGraph node. +/// \brief Convert ONNX ArgMax operation to an OV node. /// /// \param node The ONNX node object representing this operation. /// -/// \return The vector containing an Ngraph node which produces the output +/// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. 
OutputVector argmax(const Node& node);
diff --git a/src/frontends/onnx/frontend/src/op/argmin.cpp b/src/frontends/onnx/frontend/src/op/argmin.cpp
index 8c69cb77539742..62d8d272b7c5fb 100644
--- a/src/frontends/onnx/frontend/src/op/argmin.cpp
+++ b/src/frontends/onnx/frontend/src/op/argmin.cpp
@@ -5,7 +5,6 @@
 #include "op/argmin.hpp"

 #include "exceptions.hpp"
-#include "onnx_import/core/node.hpp"
 #include "utils/arg_min_max_factory.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
diff --git a/src/frontends/onnx/frontend/src/op/argmin.hpp b/src/frontends/onnx/frontend/src/op/argmin.hpp
index 7bacbc7b42caf5..1a3ed89a45cc87 100644
--- a/src/frontends/onnx/frontend/src/op/argmin.hpp
+++ b/src/frontends/onnx/frontend/src/op/argmin.hpp
@@ -7,29 +7,28 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
-/// \brief Convert ONNX ArgMin operation to an nGraph node.
+/// \brief Convert ONNX ArgMin operation to an OV node.
///
/// \param node  The ONNX node object representing this operation.
///
-/// \return The vector containing an Ngraph node which produces the output
+/// \return The vector containing an OV node which produces the output
///         of an ONNX ArgMin operation.
OutputVector argmin(const Node& node);

}  // namespace set_1

namespace set_12 {
-/// \brief Convert ONNX ArgMin operation to an nGraph node.
+/// \brief Convert ONNX ArgMin operation to an OV node.
///
/// \param node  The ONNX node object representing this operation.
///
-/// \return The vector containing an Ngraph node which produces the output
+/// \return The vector containing an OV node which produces the output
///         of an ONNX ArgMin operation.
OutputVector argmin(const Node& node);
diff --git a/src/frontends/onnx/frontend/src/op/asin.hpp b/src/frontends/onnx/frontend/src/op/asin.hpp
index b4a9590ad17999..b8845871268d85 100644
--- a/src/frontends/onnx/frontend/src/op/asin.hpp
+++ b/src/frontends/onnx/frontend/src/op/asin.hpp
@@ -7,19 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/asin.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/asin.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
inline OutputVector asin(const Node& node) {
-    return {std::make_shared<default_opset::Asin>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Asin>(node.get_ng_inputs().at(0))};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/asinh.hpp b/src/frontends/onnx/frontend/src/op/asinh.hpp
index 317c4c4e183fe2..e402d6e29d81f0 100644
--- a/src/frontends/onnx/frontend/src/op/asinh.hpp
+++ b/src/frontends/onnx/frontend/src/op/asinh.hpp
@@ -7,16 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/asinh.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
inline OutputVector asinh(const Node& node) {
-    return {std::make_shared<default_opset::Asinh>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v3::Asinh>(node.get_ng_inputs().at(0))};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/atan.hpp b/src/frontends/onnx/frontend/src/op/atan.hpp
index 414256dc420c90..aa251a8210ed02 100644
--- a/src/frontends/onnx/frontend/src/op/atan.hpp
+++ b/src/frontends/onnx/frontend/src/op/atan.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/atan.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
inline OutputVector atan(const Node& node) {
-    return {std::make_shared<default_opset::Atan>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Atan>(node.get_ng_inputs().at(0))};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/atanh.hpp b/src/frontends/onnx/frontend/src/op/atanh.hpp
index ef3bd26c48ab7f..c7879925a0f7ee 100644
--- a/src/frontends/onnx/frontend/src/op/atanh.hpp
+++ b/src/frontends/onnx/frontend/src/op/atanh.hpp
@@ -7,16 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/atanh.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
inline OutputVector atanh(const Node& node) {
-    return {std::make_shared<default_opset::Atanh>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v3::Atanh>(node.get_ng_inputs().at(0))};
}
}  // namespace set_1
}  // namespace op
diff --git a/src/frontends/onnx/frontend/src/op/aten.cpp b/src/frontends/onnx/frontend/src/op/aten.cpp
index d8103000b5d72a..380718b2745674 100644
--- a/src/frontends/onnx/frontend/src/op/aten.cpp
+++ b/src/frontends/onnx/frontend/src/op/aten.cpp
@@ -4,12 +4,21 @@

 #include "op/aten.hpp"

-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "onnx_import/core/node.hpp"
 #include "onnx_import/core/null_node.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/concat.hpp"
"openvino/op/constant.hpp" +#include "openvino/op/embeddingbag_offsets_sum.hpp" +#include "openvino/op/embeddingbag_packedsum.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" #include "openvino/opsets/opset8.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -41,11 +50,11 @@ OutputVector aten(const Node& node) { Output embedding_bag; if (is_packed_two_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1]); + embedding_bag = std::make_shared(inputs[0], inputs[1]); } else if (is_packed_three_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[3]); + embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[3]); } else if (is_offsets_three_inputs) { - embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[2]); + embedding_bag = std::make_shared(inputs[0], inputs[1], inputs[2]); } else if (inputs.size() >= 4) { // Need to expand embedding table with zeros (default values for empty bags) const auto& emb_tbl_in = inputs[0]; @@ -56,30 +65,28 @@ OutputVector aten(const Node& node) { const auto data_type = emb_tbl_in.get_element_type(); const auto ind_type = indices_in.get_element_type(); - const auto zero_const = std::make_shared(ind_type, Shape{}, 0); + const auto zero_const = std::make_shared(ind_type, Shape{}, 0); // Shape aligned node, filled with zeros - const auto zero_of_data_type_const = std::make_shared(data_type, Shape{1}, 0); - const auto weights_shape_node = std::make_shared(emb_tbl_in, ind_type); - const auto weights_last_dim_idx = std::make_shared(element::i32, Shape{1}, -1); + const auto zero_of_data_type_const = std::make_shared(data_type, Shape{1}, 0); + const auto weights_shape_node = std::make_shared(emb_tbl_in, ind_type); + const auto weights_last_dim_idx = std::make_shared(element::i32, Shape{1}, -1); const auto weights_last_dim = - std::make_shared(weights_shape_node, weights_last_dim_idx, zero_const); - const auto zero_col_node = - std::make_shared(zero_of_data_type_const, weights_last_dim); - const auto default_embeddings_node = std::make_shared(zero_col_node, zero_const); + std::make_shared(weights_shape_node, weights_last_dim_idx, zero_const); + const auto zero_col_node = std::make_shared(zero_of_data_type_const, weights_last_dim); + const auto default_embeddings_node = std::make_shared(zero_col_node, zero_const); // Expanded embedding table weights - const auto weights_concat = - std::make_shared(OutputVector{emb_tbl_in, default_embeddings_node}, 0); + const auto weights_concat = std::make_shared(OutputVector{emb_tbl_in, default_embeddings_node}, 0); // Index in embedding table to fill empty bags - const auto weights_first_dim = std::make_shared( - std::make_shared(weights_shape_node, zero_const, zero_const)); - - embedding_bag = std::make_shared(weights_concat, - indices_in, - offsets_in, - weights_first_dim, // default index - per_sample_weights_in); + const auto weights_first_dim = + std::make_shared(std::make_shared(weights_shape_node, zero_const, zero_const)); + + embedding_bag = std::make_shared(weights_concat, + indices_in, + offsets_in, + weights_first_dim, // default index + per_sample_weights_in); } else { OPENVINO_THROW("Unsupported inputs configuration for ATen `embedding_bag` operation."); diff --git a/src/frontends/onnx/frontend/src/op/aten.hpp b/src/frontends/onnx/frontend/src/op/aten.hpp index 9420bcfd16b6b0..7c6d2198651710 100644 --- 
diff --git a/src/frontends/onnx/frontend/src/op/aten.hpp b/src/frontends/onnx/frontend/src/op/aten.hpp
index 9420bcfd16b6b0..7c6d2198651710 100644
--- a/src/frontends/onnx/frontend/src/op/aten.hpp
+++ b/src/frontends/onnx/frontend/src/op/aten.hpp
@@ -7,8 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/average_pool.cpp b/src/frontends/onnx/frontend/src/op/average_pool.cpp
index ff9bcb9bfbf791..d27df456c9993c 100644
--- a/src/frontends/onnx/frontend/src/op/average_pool.cpp
+++ b/src/frontends/onnx/frontend/src/op/average_pool.cpp
@@ -4,7 +4,6 @@

 #include "op/average_pool.hpp"

-#include "ngraph/node.hpp"
 #include "utils/pooling_factory.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
diff --git a/src/frontends/onnx/frontend/src/op/average_pool.hpp b/src/frontends/onnx/frontend/src/op/average_pool.hpp
index b3b0d086b6c3d4..03d2bfd36dc9b0 100644
--- a/src/frontends/onnx/frontend/src/op/average_pool.hpp
+++ b/src/frontends/onnx/frontend/src/op/average_pool.hpp
@@ -7,18 +7,17 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
-/// \brief Convert ONNX AveragePool operation to an nGraph node.
+/// \brief Convert ONNX AveragePool operation to an OV node.
///
/// \param node  The ONNX node object representing this operation.
///
-/// \return The vector containing Ngraph nodes producing output of ONNX AveragePool
+/// \return The vector containing OV nodes producing output of ONNX AveragePool
///         operation.
OutputVector average_pool(const Node& node);
diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.cpp b/src/frontends/onnx/frontend/src/op/batch_norm.cpp
index db49ff8d74e7a4..e187128e8bc1f8 100644
--- a/src/frontends/onnx/frontend/src/op/batch_norm.cpp
+++ b/src/frontends/onnx/frontend/src/op/batch_norm.cpp
@@ -7,9 +7,11 @@
 #include <cstdint>
 #include <memory>

-#include "default_opset.hpp"
 #include "exceptions.hpp"
 #include "onnx_import/core/null_node.hpp"
+#include "openvino/op/batch_norm.hpp"
+
+using namespace ov::op;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
@@ -22,8 +24,8 @@ OutputVector batch_norm(const Node& node) {
    auto x = inputs.at(0);
    auto scale = inputs.at(1);
    auto bias = inputs.at(2);
-    Output<ngraph::Node> mean;
-    Output<ngraph::Node> var;
+    Output<ov::Node> mean;
+    Output<ov::Node> var;

    double epsilon{node.get_attribute_value<double>("epsilon", 1e-5)};

@@ -40,7 +42,7 @@ OutputVector batch_norm(const Node& node) {
    if (inputs.size() >= 5) {
        mean = inputs.at(3);
        var = inputs.at(4);
-        return {std::make_shared<default_opset::BatchNormInference>(x, scale, bias, mean, var, epsilon),
+        return {std::make_shared<v5::BatchNormInference>(x, scale, bias, mean, var, epsilon),
                after_bn_mean,
                after_bn_var,
                saved_mean,
@@ -67,7 +69,7 @@ OutputVector batch_norm(const Node& node) {

    CHECK_VALID_NODE(node, node.get_outputs_size() == 1, "Training mode of BatchNormalization is not supported.");

-    return {std::make_shared<default_opset::BatchNormInference>(x, scale, bias, mean, var, epsilon)};
+    return {std::make_shared<v5::BatchNormInference>(x, scale, bias, mean, var, epsilon)};
}

}  // namespace set_7
diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.hpp b/src/frontends/onnx/frontend/src/op/batch_norm.hpp
index bcbf7b4cef1fdc..e8bbbebe828481 100644
--- a/src/frontends/onnx/frontend/src/op/batch_norm.hpp
+++ b/src/frontends/onnx/frontend/src/op/batch_norm.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/bitshift.cpp b/src/frontends/onnx/frontend/src/op/bitshift.cpp
index 87354dbe88539d..317ad25ee57c33 100644
--- a/src/frontends/onnx/frontend/src/op/bitshift.cpp
+++ b/src/frontends/onnx/frontend/src/op/bitshift.cpp
@@ -4,9 +4,13 @@

 #include "op/bitshift.hpp"

-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/shape.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/power.hpp"
+
+using namespace ov::op;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
@@ -14,8 +18,8 @@ namespace onnx_import {
namespace op {
namespace set_1 {
OutputVector bitshift(const Node& node) {
-    const Output<ngraph::Node> input_x = node.get_ng_inputs().at(0);
-    const Output<ngraph::Node> input_y = node.get_ng_inputs().at(1);
+    const Output<ov::Node> input_x = node.get_ng_inputs().at(0);
+    const Output<ov::Node> input_y = node.get_ng_inputs().at(1);

    std::string direction = node.get_attribute_value<std::string>("direction", "");

@@ -27,14 +31,12 @@ OutputVector bitshift(const Node& node) {
                     "attribute. Given: ",
                     direction);

-    auto shift = std::make_shared<default_opset::Power>(
-        default_opset::Constant::create(input_y.get_element_type(), Shape{1}, {2}),
-        input_y);
+    auto shift = std::make_shared<v1::Power>(v0::Constant::create(input_y.get_element_type(), Shape{1}, {2}), input_y);

    if (direction == "RIGHT") {
-        return {std::make_shared<default_opset::Divide>(input_x, shift)};
+        return {std::make_shared<v1::Divide>(input_x, shift)};
    } else {
-        return {std::make_shared<default_opset::Multiply>(input_x, shift)};
+        return {std::make_shared<v1::Multiply>(input_x, shift)};
    }
}
diff --git a/src/frontends/onnx/frontend/src/op/bitshift.hpp b/src/frontends/onnx/frontend/src/op/bitshift.hpp
index b6ac40023d35f1..24caba1911b770 100644
--- a/src/frontends/onnx/frontend/src/op/bitshift.hpp
+++ b/src/frontends/onnx/frontend/src/op/bitshift.hpp
@@ -7,9 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include <memory>
-
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp
index 54961812505cb5..c82d7dbac27b42 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp
@@ -5,7 +5,7 @@
 #include "op/bitwise_and.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
+#include "openvino/op/bitwise_and.hpp"

using namespace ov::op;
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp
index d0f66569c95228..e1d81434342855 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
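BitShift above is emulated arithmetically: shift = 2^y via Power, then Divide for a right shift or Multiply for a left shift. A self-contained check of that equivalence on integers:

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
    const uint32_t x = 20, y = 2;
    const uint32_t shift = static_cast<uint32_t>(std::pow(2.0, y));  // Power(2, y)
    assert(x / shift == (x >> y));  // direction == "RIGHT" -> Divide
    assert(x * shift == (x << y));  // direction == "LEFT"  -> Multiply
    return 0;
}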
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp
new file mode 100644
index 00000000000000..403a65c86ab287
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op/bitwise_not.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "default_opset.hpp"
+
+using namespace ov::op;
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector bitwise_not(const Node& node) {
+    return {std::make_shared<v13::BitwiseNot>(node.get_ng_inputs().at(0))};
+}
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.hpp b/src/frontends/onnx/frontend/src/op/bitwise_not.hpp
new file mode 100644
index 00000000000000..be3112b9de7e49
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/bitwise_not.hpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/core/deprecated.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "ngraph/node.hpp"
+#include "onnx_import/core/node.hpp"
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector bitwise_not(const Node& node);
+
+}  // namespace set_1
+
+}  // namespace op
+
+}  // namespace onnx_import
+
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp
index 38d9f04a48b0fa..adb642d2195391 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp
@@ -5,7 +5,7 @@
 #include "op/bitwise_or.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
+#include "openvino/op/bitwise_or.hpp"

using namespace ov::op;
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp
index 8bb00623c48a0c..22c17690ee1cc2 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp
index 0fc5e36cd629a8..315538f5c21cac 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp
@@ -5,7 +5,7 @@
 #include "op/bitwise_xor.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "default_opset.hpp"
+#include "openvino/op/bitwise_xor.hpp"

using namespace ov::op;
diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp
index fc0ae510742994..8f0d8a364cd5e1 100644
--- a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp
+++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp
index d4bb144725dbfe..b50af889a5377b 100644
--- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp
+++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp
@@ -7,11 +7,18 @@

 #include <math.h>

-#include <memory>
-
-#include "default_opset.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/cos.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/subtract.hpp"
 #include "utils/common.hpp"

+using namespace ov::op;
+
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
@@ -27,57 +34,46 @@ OutputVector blackmanwindow(const Node& node) {
    // Weights as described in ONNX BlackmanWindow docs
    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow
-    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
-    const auto a_0 =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.42f});
-    const auto a_1 =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{-0.50f});
-    const auto a_2 =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.08f});
+    const auto float_size = std::make_shared<v0::Convert>(size, ov::element::f32);
+    const auto a_0 = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.42f});
+    const auto a_1 = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{-0.50f});
+    const auto a_2 = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.08f});

-    const auto start =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
-    const auto one_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
-    const auto two_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
-    const auto four_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{4.0f});
-    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
-    const auto pi =
-        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    const auto start = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto four_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{4.0f});
+    const auto range = std::make_shared<v4::Range>(start, size, one_const, ov::element::f32);
+    const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
    std::shared_ptr<ov::Node> factor_1, factor_2;
    if (periodic) {
-        factor_1 = std::make_shared<default_opset::Multiply>(
+        factor_1 = std::make_shared<v1::Multiply>(
            range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    float_size));
-        factor_2 = std::make_shared<default_opset::Multiply>(
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const), float_size));
+        factor_2 = std::make_shared<v1::Multiply>(
            range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, four_const),
-                                                    float_size));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, four_const), float_size));
    } else {
-        factor_1 = std::make_shared<default_opset::Multiply>(
+        factor_1 = std::make_shared<v1::Multiply>(
            range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
-        factor_2 = std::make_shared<default_opset::Multiply>(
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const),
+                                         std::make_shared<v1::Subtract>(float_size, one_const)));
+        factor_2 = std::make_shared<v1::Multiply>(
            range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, four_const),
-                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, four_const),
+                                         std::make_shared<v1::Subtract>(float_size, one_const)));
    }

-    const auto cos_1 = std::make_shared<default_opset::Cos>(factor_1);
-    const auto cos_2 = std::make_shared<default_opset::Cos>(factor_2);
-    const auto scaled_cos_1 = std::make_shared<default_opset::Multiply>(cos_1, a_1);
-    const auto scaled_cos_2 = std::make_shared<default_opset::Multiply>(cos_2, a_2);
-    const auto y_values =
-        std::make_shared<default_opset::Add>(std::make_shared<default_opset::Add>(a_0, scaled_cos_1), scaled_cos_2);
+    const auto cos_1 = std::make_shared<v0::Cos>(factor_1);
+    const auto cos_2 = std::make_shared<v0::Cos>(factor_2);
+    const auto scaled_cos_1 = std::make_shared<v1::Multiply>(cos_1, a_1);
+    const auto scaled_cos_2 = std::make_shared<v1::Multiply>(cos_2, a_2);
+    const auto y_values = std::make_shared<v1::Add>(std::make_shared<v1::Add>(a_0, scaled_cos_1), scaled_cos_2);

    if (output_datatype == element::f32) {
        return {y_values};
    } else {
-        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+        return {std::make_shared<v0::Convert>(y_values, output_datatype)};
    }
}

}  // namespace set_1
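The graph built above evaluates the Blackman window w[n] = 0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N), with N = size in the periodic case and size - 1 otherwise. A scalar cross-check of the same formula:

#include <cmath>
#include <cstdio>

int main() {
    const float pi = 3.14159265358979f;
    const unsigned size = 8;
    const bool periodic = true;                     // mirrors the ONNX attribute (default 1)
    const float N = periodic ? size : size - 1.0f;  // denominator choice mirrored from the code above
    for (unsigned n = 0; n < size; ++n) {
        const float w = 0.42f - 0.5f * std::cos(2.0f * pi * n / N) + 0.08f * std::cos(4.0f * pi * n / N);
        std::printf("w[%u] = %f\n", n, w);
    }
    return 0;
}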
diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp
index ccff09c84817af..ca708f68d1f951 100644
--- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp
+++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp
@@ -6,7 +6,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/cast.cpp b/src/frontends/onnx/frontend/src/op/cast.cpp
index ba53f7aa9e3d13..4e1d01f26f6a8f 100644
--- a/src/frontends/onnx/frontend/src/op/cast.cpp
+++ b/src/frontends/onnx/frontend/src/op/cast.cpp
@@ -4,12 +4,11 @@

 #include "op/cast.hpp"

-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/type/element_type.hpp"
+#include "openvino/op/convert.hpp"
 #include "utils/common.hpp"

+using namespace ov::op;
+
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
@@ -21,7 +20,7 @@ OutputVector cast(const Node& node) {
    int64_t target_type = node.get_attribute_value<int64_t>("to");
    element::Type elem_type = common::get_ov_element_type(target_type);

-    return {std::make_shared<default_opset::Convert>(data, elem_type)};
+    return {std::make_shared<v0::Convert>(data, elem_type)};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/cast.hpp b/src/frontends/onnx/frontend/src/op/cast.hpp
index f645bbc3666d58..ff0afe050e4a84 100644
--- a/src/frontends/onnx/frontend/src/op/cast.hpp
+++ b/src/frontends/onnx/frontend/src/op/cast.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/cast_like.cpp b/src/frontends/onnx/frontend/src/op/cast_like.cpp
index 6d9edc85a3043b..f678057194a632 100644
--- a/src/frontends/onnx/frontend/src/op/cast_like.cpp
+++ b/src/frontends/onnx/frontend/src/op/cast_like.cpp
@@ -4,11 +4,9 @@

 #include "op/cast_like.hpp"

-#include <memory>
+#include "openvino/op/convert_like.hpp"

-#include "default_opset.hpp"
-#include "ngraph/type/element_type.hpp"
-#include "utils/common.hpp"
+using namespace ov::op;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
@@ -18,7 +16,7 @@ namespace set_1 {
OutputVector cast_like(const Node& node) {
    auto inputs = node.get_ng_inputs();
-    return {std::make_shared<default_opset::ConvertLike>(inputs.at(0), inputs.at(1))};
+    return {std::make_shared<v1::ConvertLike>(inputs.at(0), inputs.at(1))};
}

}  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/cast_like.hpp b/src/frontends/onnx/frontend/src/op/cast_like.hpp
index 84387c59e59749..a51d209f1b1c65 100644
--- a/src/frontends/onnx/frontend/src/op/cast_like.hpp
+++ b/src/frontends/onnx/frontend/src/op/cast_like.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/ceil.hpp b/src/frontends/onnx/frontend/src/op/ceil.hpp
index 283c7c372338ce..c31dc61e0490d1 100644
--- a/src/frontends/onnx/frontend/src/op/ceil.hpp
+++ b/src/frontends/onnx/frontend/src/op/ceil.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
"default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/ceiling.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector ceil(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index cad2cc6ebcbd67..06b634797c46c1 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -5,11 +5,14 @@ #include "op/clip.hpp" #include -#include -#include "default_opset.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -23,7 +26,7 @@ OutputVector clip(const Node& node) { const double min_value = node.get_attribute_value("min", std::numeric_limits::lowest()); - return {std::make_shared(data, min_value, max_value)}; + return {std::make_shared(data, min_value, max_value)}; } } // namespace set_1 @@ -31,10 +34,10 @@ OutputVector clip(const Node& node) { namespace set_11 { OutputVector clip(const Node& node) { const OutputVector inputs{node.get_ng_inputs()}; - const Output data = inputs.at(0); + const Output data = inputs.at(0); const element::Type data_type = data.get_element_type(); - Output min; - Output max; + Output min; + Output max; // If second input is provided, assign to min input, otherwise set lowest // numeric limit of data type as min input. @@ -56,9 +59,9 @@ OutputVector clip(const Node& node) { OPENVINO_SUPPRESS_DEPRECATED_END } - const auto max_of_min_and_data = std::make_shared(min, data); + const auto max_of_min_and_data = std::make_shared(min, data); - return {std::make_shared(max, max_of_min_and_data)}; + return {std::make_shared(max, max_of_min_and_data)}; } } // namespace set_11 diff --git a/src/frontends/onnx/frontend/src/op/clip.hpp b/src/frontends/onnx/frontend/src/op/clip.hpp index 6281e318eda278..dfddb5cd7e8f59 100644 --- a/src/frontends/onnx/frontend/src/op/clip.hpp +++ b/src/frontends/onnx/frontend/src/op/clip.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index c5ad926f30de80..4b2f326c784499 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -4,7 +4,6 @@ #include "op/com.microsoft/attention.hpp" -#include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" diff --git a/src/frontends/onnx/frontend/src/op/compress.cpp b/src/frontends/onnx/frontend/src/op/compress.cpp index d1d31f02a192fd..50141a03ef2a68 100644 --- a/src/frontends/onnx/frontend/src/op/compress.cpp +++ b/src/frontends/onnx/frontend/src/op/compress.cpp @@ -4,11 +4,14 @@ #include "op/compress.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/non_zero.hpp" +#include "openvino/op/squeeze.hpp" #include 
"ov_models/ov_builders/reshape.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -22,14 +25,16 @@ OutputVector compress(const Node& node) { if (node.has_attribute("axis")) { axis = node.get_attribute_value("axis"); } else { - data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); + data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); + data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); + data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); } - auto axis_node = default_opset::Constant::create(element::i64, Shape{}, {axis}); - auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - auto result = std::make_shared( - data, - std::make_shared(std::make_shared(condition), zero_node), - axis_node); + auto axis_node = v0::Constant::create(element::i64, Shape{}, {axis}); + auto zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + auto result = + std::make_shared(data, + std::make_shared(std::make_shared(condition), zero_node), + axis_node); return {result}; } diff --git a/src/frontends/onnx/frontend/src/op/compress.hpp b/src/frontends/onnx/frontend/src/op/compress.hpp index 9135b07155005a..e667aa04ee60f8 100644 --- a/src/frontends/onnx/frontend/src/op/compress.hpp +++ b/src/frontends/onnx/frontend/src/op/compress.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/output_vector.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/concat.cpp b/src/frontends/onnx/frontend/src/op/concat.cpp index 63931703559f19..2a121cf6b9567b 100644 --- a/src/frontends/onnx/frontend/src/op/concat.cpp +++ b/src/frontends/onnx/frontend/src/op/concat.cpp @@ -4,11 +4,11 @@ #include "op/concat.hpp" -#include - -#include "default_opset.hpp" +#include "openvino/op/concat.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -21,7 +21,7 @@ OutputVector concat(const Node& node) { std::copy_if(inputs.begin(), inputs.end(), std::back_inserter(valid_inputs), [](ov::Output& in) -> bool { return !common::is_failsafe_node(in.get_node_shared_ptr()); }); - return {std::make_shared(valid_inputs, axis)}; + return {std::make_shared(valid_inputs, axis)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/concat.hpp b/src/frontends/onnx/frontend/src/op/concat.hpp index 65e51e0e17823b..be3736f1a46ed0 100644 --- a/src/frontends/onnx/frontend/src/op/concat.hpp +++ b/src/frontends/onnx/frontend/src/op/concat.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp index 1893cfa63c4d39..087bbe5eaadb29 100644 --- a/src/frontends/onnx/frontend/src/op/constant.cpp +++ b/src/frontends/onnx/frontend/src/op/constant.cpp @@ -9,10 +9,11 @@ #include "core/attribute.hpp" #include "core/sparse_tensor.hpp" #include "core/tensor.hpp" -#include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/constant.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace 
diff --git a/src/frontends/onnx/frontend/src/op/concat.cpp b/src/frontends/onnx/frontend/src/op/concat.cpp
index 63931703559f19..2a121cf6b9567b 100644
--- a/src/frontends/onnx/frontend/src/op/concat.cpp
+++ b/src/frontends/onnx/frontend/src/op/concat.cpp
@@ -4,11 +4,11 @@
 
 #include "op/concat.hpp"
 
-#include <memory>
-
-#include "default_opset.hpp"
+#include "openvino/op/concat.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -21,7 +21,7 @@ OutputVector concat(const Node& node) {
     std::copy_if(inputs.begin(), inputs.end(), std::back_inserter(valid_inputs), [](ov::Output<ov::Node>& in) -> bool {
         return !common::is_failsafe_node(in.get_node_shared_ptr());
     });
 
-    return {std::make_shared<default_opset::Concat>(valid_inputs, axis)};
+    return {std::make_shared<v0::Concat>(valid_inputs, axis)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/concat.hpp b/src/frontends/onnx/frontend/src/op/concat.hpp
index 65e51e0e17823b..be3736f1a46ed0 100644
--- a/src/frontends/onnx/frontend/src/op/concat.hpp
+++ b/src/frontends/onnx/frontend/src/op/concat.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp
index 1893cfa63c4d39..087bbe5eaadb29 100644
--- a/src/frontends/onnx/frontend/src/op/constant.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant.cpp
@@ -9,10 +9,11 @@
 #include "core/attribute.hpp"
 #include "core/sparse_tensor.hpp"
 #include "core/tensor.hpp"
-#include "default_opset.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/validation_util.hpp"
+#include "openvino/core/validation_util.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/constant.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -36,24 +37,24 @@ std::vector<T> get_dense_vector(const std::vector<T>& values, const std::vector<
 }
 
 template <typename T>
-std::shared_ptr<default_opset::Constant> make_dense_tensor_as_constant(const std::vector<int64_t>& indices,
-                                                                       const Tensor& values_tensor,
-                                                                       const Shape& shape) {
+std::shared_ptr<v0::Constant> make_dense_tensor_as_constant(const std::vector<int64_t>& indices,
+                                                            const Tensor& values_tensor,
+                                                            const Shape& shape) {
     auto values = values_tensor.get_data<T>();
     auto dense_vector = get_dense_vector<T>(values, indices, shape_size(shape));
-    return default_opset::Constant::create(values_tensor.get_ov_type(), shape, dense_vector);
+    return v0::Constant::create(values_tensor.get_ov_type(), shape, dense_vector);
 }
 
-std::shared_ptr<default_opset::Constant> get_dense_tensor_as_constant(const std::vector<int64_t>& absolute_indices,
-                                                                      const Tensor& values_tensor,
-                                                                      const Shape& shape) {
+std::shared_ptr<v0::Constant> get_dense_tensor_as_constant(const std::vector<int64_t>& absolute_indices,
+                                                           const Tensor& values_tensor,
+                                                           const Shape& shape) {
     switch (values_tensor.get_ov_type()) {
     case element::boolean:
         return make_dense_tensor_as_constant<char>(absolute_indices, values_tensor, shape);
     case element::f32:
         return make_dense_tensor_as_constant<float>(absolute_indices, values_tensor, shape);
     case element::f16:
-        return make_dense_tensor_as_constant<ngraph::float16>(absolute_indices, values_tensor, shape);
+        return make_dense_tensor_as_constant<ov::float16>(absolute_indices, values_tensor, shape);
     case element::f64:
         return make_dense_tensor_as_constant<double>(absolute_indices, values_tensor, shape);
     case element::i8:
@@ -73,7 +74,7 @@ std::shared_ptr<v0::Constant> get_dense_tensor_as_constant(const std:
     case element::u64:
         return make_dense_tensor_as_constant<uint64_t>(absolute_indices, values_tensor, shape);
     case element::bf16:
-        return make_dense_tensor_as_constant<ngraph::bfloat16>(absolute_indices, values_tensor, shape);
+        return make_dense_tensor_as_constant<ov::bfloat16>(absolute_indices, values_tensor, shape);
     default:
         FRONT_END_THROW("Tensor has an unsupported data type");
     }
@@ -124,15 +125,15 @@ OutputVector constant(const onnx_import::Node& node) {
     auto& attribute = node.get_attribute(attributes_names[0]);
     if (attribute.is_float()) {
-        return {default_opset::Constant::create(element::f32, ngraph::Shape{}, {attribute.get_float()})};
+        return {v0::Constant::create(element::f32, ov::Shape{}, {attribute.get_float()})};
     } else if (attribute.is_float_array()) {
         auto values = attribute.get_float_array();
-        return {default_opset::Constant::create(element::f32, ngraph::Shape{values.size()}, values)};
+        return {v0::Constant::create(element::f32, ov::Shape{values.size()}, values)};
     } else if (attribute.is_integer()) {
-        return {default_opset::Constant::create(element::i64, ngraph::Shape{}, {attribute.get_integer()})};
+        return {v0::Constant::create(element::i64, ov::Shape{}, {attribute.get_integer()})};
     } else if (attribute.is_integer_array()) {
         auto values = attribute.get_integer_array();
-        return {default_opset::Constant::create(element::i64, ngraph::Shape{values.size()}, values)};
+        return {v0::Constant::create(element::i64, ov::Shape{values.size()}, values)};
     } else if (attribute.is_sparse_tensor()) {
         auto sparse_tensor = attribute.get_sparse_tensor();
         const Tensor& values_tensor = sparse_tensor.get_values();
diff --git a/src/frontends/onnx/frontend/src/op/constant.hpp b/src/frontends/onnx/frontend/src/op/constant.hpp
index fd95f64261c6ff..d4e89450c44dc8 100644
--- a/src/frontends/onnx/frontend/src/op/constant.hpp
+++ b/src/frontends/onnx/frontend/src/op/constant.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.cpp b/src/frontends/onnx/frontend/src/op/constant_fill.cpp
index 7dbbb02d8567a5..e4641c29be1cf3 100644
--- a/src/frontends/onnx/frontend/src/op/constant_fill.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant_fill.cpp
@@ -6,13 +6,12 @@
 
 #include <onnx/onnx_pb.h>  // onnx types
 
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/concat.hpp"
-#include "ngraph/op/constant.hpp"
 #include "onnx_common/utils.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/concat.hpp"
 
+using namespace ov::op;
 using namespace ov::frontend::onnx::common;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
@@ -21,7 +20,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 OutputVector constant_fill(const Node& node) {
-    Output<ngraph::Node> target_shape;
+    Output<ov::Node> target_shape;
     const auto dtype = node.get_attribute_value<int64_t>("dtype", static_cast<int64_t>(TensorProto_DataType_FLOAT));
     const auto ng_type = onnx_to_ov_data_type(static_cast<TensorProto_DataType>(dtype));
     const auto const_val_to_fill = node.get_attribute_as_constant<float>("value", 0.f, ng_type);
@@ -35,14 +34,14 @@ OutputVector constant_fill(const Node& node) {
         if (node.has_attribute("extra_shape")) {
             const auto extra_shape_const =
                 node.get_attribute_as_constant<std::vector<int64_t>>("extra_shape", target_shape.get_element_type());
-            target_shape = std::make_shared<default_opset::Concat>(OutputVector{target_shape, extra_shape_const}, 0);
+            target_shape = std::make_shared<v0::Concat>(OutputVector{target_shape, extra_shape_const}, 0);
         }
     } else  // use shape attribute as target shape
     {
         target_shape = node.get_attribute_as_constant<std::vector<int64_t>>("shape", ng_type);
     }
 
-    return {std::make_shared<default_opset::Broadcast>(const_val_to_fill, target_shape)};
+    return {std::make_shared<v3::Broadcast>(const_val_to_fill, target_shape)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.hpp b/src/frontends/onnx/frontend/src/op/constant_fill.hpp
index 1f3b9c618d7561..f0bbf50855a76f 100644
--- a/src/frontends/onnx/frontend/src/op/constant_fill.hpp
+++ b/src/frontends/onnx/frontend/src/op/constant_fill.hpp
@@ -7,9 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
index 559b497ac80e6f..e9d628ce628db7 100644
--- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
@@ -5,33 +5,35 @@
 #include "op/constant_of_shape.hpp"
 
 #include "core/tensor.hpp"
-#include "default_opset.hpp"
-#include "ngraph/op/constant.hpp"
 #include "onnx_import/core/null_node.hpp"
 #include "op/constant.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
 #include "utils/common.hpp"
 #include "utils/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 OutputVector constant_of_shape(const onnx_import::Node& node) {
-    Output<ngraph::Node> constant_value;
+    Output<ov::Node> constant_value;
     if (node.has_attribute("value")) {
         auto value_tensor = node.get_attribute_value<Tensor>("value");
         constant_value = value_tensor.get_ov_constant();
         constant_value = reshape::interpret_as_scalar(constant_value);
     } else {
-        constant_value = default_opset::Constant::create(element::f32, {}, {0});
+        constant_value = v0::Constant::create(element::f32, {}, {0});
     }
     const auto& inputs = node.get_ng_inputs();
     if (inputs.size() == 0 || common::is_failsafe_node(inputs[0].get_node_shared_ptr()) ||
         ov::op::util::is_null(inputs[0])) {
         return {constant_value};
     }
-    return {std::make_shared<default_opset::Broadcast>(constant_value, inputs[0])};
+    return {std::make_shared<v3::Broadcast>(constant_value, inputs[0])};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
index 3bd2fbd16e7f4b..9f09462ac3f9a2 100644
--- a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
+++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp
index e105b51e0f7e5a..35c4228dca6c74 100644
--- a/src/frontends/onnx/frontend/src/op/conv.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv.cpp
@@ -4,40 +4,33 @@
 
 #include "op/conv.hpp"
 
-#include <cstddef>
-#include <memory>
-#include <vector>
-
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/op/group_conv.hpp"
-#include "ngraph/op/util/attr_types.hpp"
 #include "onnx_import/core/null_node.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "ov_models/ov_builders/reshape.hpp"
 #include "utils/conv_factory.hpp"
 #include "utils/convpool.hpp"
 #include "utils/reshape.hpp"
 
 OPENVINO_SUPPRESS_DEPRECATED_START
+using namespace ov::op;
+
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 namespace detail {
-std::shared_ptr<ngraph::Node> add_bias(const Output<ngraph::Node>& ng_conv, const Output<ngraph::Node>& bias) {
-    const auto conv_shape = std::make_shared<default_opset::ShapeOf>(ng_conv);
-    const auto conv_rank = std::make_shared<default_opset::ShapeOf>(conv_shape);
+std::shared_ptr<ov::Node> add_bias(const Output<ov::Node>& ng_conv, const Output<ov::Node>& bias) {
+    const auto conv_shape = std::make_shared<v3::ShapeOf>(ng_conv);
+    const auto conv_rank = std::make_shared<v3::ShapeOf>(conv_shape);
 
-    return {
-        std::make_shared<default_opset::Add>(ng_conv, reshape::reshape_channel_shaped_node_to_nchw(bias, conv_rank))};
+    return {std::make_shared<v1::Add>(ng_conv, reshape::reshape_channel_shaped_node_to_nchw(bias, conv_rank))};
 }
 
-OutputVector conv(const Node& node,
-                  Output<ngraph::Node> data,
-                  Output<ngraph::Node> filters,
-                  Output<ngraph::Node> bias) {
+OutputVector conv(const Node& node, Output<ov::Node> data, Output<ov::Node> filters, Output<ov::Node> bias) {
     // in the current implementation we assume that the data input rank is static
     // and only the 'batch' dimension can be dynamic
     const auto groups = node.get_attribute_value<int64_t>("group", 1);
@@ -48,7 +41,7 @@ OutputVector conv(const Node& node,
     const auto strides = convpool::get_strides(node);
     const auto dilations = convpool::get_dilations(node);
     const auto paddings = convpool::get_pads(node);
-    const ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
+    const ov::op::PadType auto_pad_type = convpool::get_auto_pad(node);
     const auto& padding_below = paddings.first;
     const auto& padding_above = paddings.second;
 
diff --git a/src/frontends/onnx/frontend/src/op/conv.hpp b/src/frontends/onnx/frontend/src/op/conv.hpp
index 90eaaa206df9f2..85e75b5cf61202 100644
--- a/src/frontends/onnx/frontend/src/op/conv.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv.hpp
@@ -7,21 +7,21 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/core/node.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 namespace detail {
-OutputVector conv(const Node& node, Output<ngraph::Node> data, Output<ngraph::Node> filters, Output<ngraph::Node> bias);
+OutputVector conv(const Node& node, Output<ov::Node> data, Output<ov::Node> filters, Output<ov::Node> bias);
 }
 /// \brief Performs ONNX Conv operation.
 ///
 /// \param node   The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of ONNX convolution
+/// \return The vector containing OV nodes producing output of ONNX convolution
 ///         operation.
 OutputVector conv(const Node& node);
 
diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.cpp b/src/frontends/onnx/frontend/src/op/conv_integer.cpp
index ad01ea55f39aeb..4f6c2b057c3d43 100644
--- a/src/frontends/onnx/frontend/src/op/conv_integer.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv_integer.cpp
@@ -4,35 +4,41 @@
 
 #include "op/conv_integer.hpp"
 
-#include "default_opset.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/subtract.hpp"
+#include "openvino/op/unsqueeze.hpp"
 #include "utils/conv_factory.hpp"
 #include "utils/convpool.hpp"
 #include "utils/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
 namespace {
-std::shared_ptr<ngraph::Node> get_filter_zero_point(const OutputVector& inputs) {
+std::shared_ptr<ov::Node> get_filter_zero_point(const OutputVector& inputs) {
     const auto& original_zero_point =
-        (inputs.size() > 3) ? inputs.at(3) : ngraph::op::Constant::create(ngraph::element::i32, {}, {0});
+        (inputs.size() > 3) ? inputs.at(3) : v0::Constant::create(ov::element::i32, {}, {0});
 
     const auto filter_zero_point_rank = original_zero_point.get_partial_shape().rank();
     if (filter_zero_point_rank.is_static() && filter_zero_point_rank.get_length() == 0) {
-        return std::make_shared<default_opset::Convert>(original_zero_point, element::i32);
+        return std::make_shared<v0::Convert>(original_zero_point, element::i32);
     } else {
         // in case of 1D zero point filter, it has to be unsqueezed to match the data input's rank
-        const auto& converted_filter_zero_point =
-            std::make_shared<default_opset::Convert>(original_zero_point, element::i32);
-        const auto& input_shape = std::make_shared<default_opset::ShapeOf>(inputs.at(0), element::i32);
-        const auto& input_rank = std::make_shared<default_opset::ShapeOf>(input_shape, element::i32);
+        const auto& converted_filter_zero_point = std::make_shared<v0::Convert>(original_zero_point, element::i32);
+        const auto& input_shape = std::make_shared<v3::ShapeOf>(inputs.at(0), element::i32);
+        const auto& input_rank = std::make_shared<v3::ShapeOf>(input_shape, element::i32);
         const auto& input_rank_scalar = reshape::interpret_as_scalar(input_rank);
 
-        const auto& one_node = ngraph::op::Constant::create(ngraph::element::i32, {}, {1});
+        const auto& one_node = v0::Constant::create(ov::element::i32, {}, {1});
         const auto& missing_dimensions =
-            std::make_shared<default_opset::Range>(one_node, input_rank_scalar, one_node, element::i32);
+            std::make_shared<v4::Range>(one_node, input_rank_scalar, one_node, element::i32);
 
-        return std::make_shared<default_opset::Unsqueeze>(converted_filter_zero_point, missing_dimensions);
+        return std::make_shared<v0::Unsqueeze>(converted_filter_zero_point, missing_dimensions);
     }
 }
 }  // namespace
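ConvInteger performs the convolution on zero-point-shifted integer operands: both input and filter are widened to i32 and their zero points subtracted before the convolution itself (the helper above only unsqueezes a 1-D filter zero point so it broadcasts against the data rank). A scalar sketch of that preprocessing step, illustration only:

    #include <cstdint>
    #include <vector>

    // Zero-point shift built by the Convert/Subtract pairs below: widen u8 data
    // to i32 and subtract the (per-tensor) zero point before the integer convolution.
    std::vector<int32_t> shift_by_zero_point(const std::vector<uint8_t>& x, int32_t zero_point) {
        std::vector<int32_t> shifted(x.size());
        for (size_t i = 0; i < x.size(); ++i) {
            shifted[i] = static_cast<int32_t>(x[i]) - zero_point;
        }
        return shifted;
    }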
@@ -44,23 +50,22 @@ OutputVector conv_integer(const Node& node) {
     const auto& inputs = node.get_ng_inputs();
 
     const auto& input = inputs.at(0);
     const auto& filter = inputs.at(1);
 
-    const auto& input_zero_point =
-        (inputs.size() > 2) ? inputs.at(2) : ngraph::op::Constant::create(ngraph::element::i32, {}, {0});
+    const auto& input_zero_point = (inputs.size() > 2) ? inputs.at(2) : v0::Constant::create(ov::element::i32, {}, {0});
 
-    const auto& converted_input = std::make_shared<default_opset::Convert>(input, element::i32);
-    const auto& converted_filter = std::make_shared<default_opset::Convert>(filter, element::i32);
+    const auto& converted_input = std::make_shared<v0::Convert>(input, element::i32);
+    const auto& converted_filter = std::make_shared<v0::Convert>(filter, element::i32);
 
-    const auto& converted_input_zero_point = std::make_shared<default_opset::Convert>(input_zero_point, element::i32);
+    const auto& converted_input_zero_point = std::make_shared<v0::Convert>(input_zero_point, element::i32);
     const auto& filter_zero_point = get_filter_zero_point(inputs);
 
-    const auto& shifted_input = std::make_shared<default_opset::Subtract>(converted_input, converted_input_zero_point);
-    const auto& shifted_filter = std::make_shared<default_opset::Subtract>(converted_filter, filter_zero_point);
+    const auto& shifted_input = std::make_shared<v1::Subtract>(converted_input, converted_input_zero_point);
+    const auto& shifted_filter = std::make_shared<v1::Subtract>(converted_filter, filter_zero_point);
 
     const auto& groups = node.get_attribute_value<int64_t>("group", 1);
     const auto& strides = convpool::get_strides(node);
     const auto& dilations = convpool::get_dilations(node);
     const auto& paddings = convpool::get_pads(node);
-    const ngraph::op::PadType& auto_pad_type = convpool::get_auto_pad(node);
+    const ov::op::PadType& auto_pad_type = convpool::get_auto_pad(node);
     const auto& padding_below = paddings.first;
     const auto& padding_above = paddings.second;
 
diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.hpp b/src/frontends/onnx/frontend/src/op/conv_integer.hpp
index 4a933a224c3e5e..01cd9b505fec6a 100644
--- a/src/frontends/onnx/frontend/src/op/conv_integer.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv_integer.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
@@ -18,7 +17,7 @@ namespace set_1 {
 ///
 /// \param node   The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of quantized ONNX
+/// \return The vector containing OV nodes producing output of quantized ONNX
 ///         convolution operation.
 OutputVector conv_integer(const Node& node);
 
diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
index 0fcd58d900f310..565696be7fb86d 100644
--- a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
@@ -4,41 +4,39 @@
 
 #include "op/conv_transpose.hpp"
 
-#include <cstddef>
-#include <cstdint>
-#include <iterator>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/output_vector.hpp"
-#include "ngraph/partial_shape.hpp"
-#include "ngraph/shape.hpp"
-#include "ngraph/validation_util.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convolution.hpp"
+#include "openvino/op/group_conv.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/strided_slice.hpp"
+#include "openvino/op/subtract.hpp"
 #include "ov_models/ov_builders/reshape.hpp"
 #include "utils/convpool.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 namespace {
-Output<ngraph::Node> make_group_conv_backprop(const Output<ngraph::Node>& data,
-                                              const Output<ngraph::Node>& filters,
-                                              const Strides& strides,
-                                              const Strides& dilations,
-                                              const CoordinateDiff& pads_begin,
-                                              const CoordinateDiff& pads_end,
-                                              const ngraph::op::PadType& auto_pad_type,
-                                              const std::vector<std::int64_t>& output_shape,
-                                              const std::vector<std::int64_t>& output_padding) {
+Output<ov::Node> make_group_conv_backprop(const Output<ov::Node>& data,
+                                          const Output<ov::Node>& filters,
+                                          const Strides& strides,
+                                          const Strides& dilations,
+                                          const CoordinateDiff& pads_begin,
+                                          const CoordinateDiff& pads_end,
+                                          const ov::op::PadType& auto_pad_type,
+                                          const std::vector<std::int64_t>& output_shape,
+                                          const std::vector<std::int64_t>& output_padding) {
     if (output_shape.empty()) {
-        return std::make_shared<default_opset::GroupConvolutionBackpropData>(
+        return std::make_shared<v1::GroupConvolutionBackpropData>(
             data,
             filters,
             strides,
@@ -48,10 +46,10 @@ Output<ov::Node> make_group_conv_backprop(const Output<ov::Node>& data,
             auto_pad_type,
             CoordinateDiff(std::begin(output_padding), std::end(output_padding)));
     } else {
-        return std::make_shared<default_opset::GroupConvolutionBackpropData>(
+        return std::make_shared<v1::GroupConvolutionBackpropData>(
             data,
             filters,
-            default_opset::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
+            v0::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
             strides,
             dilations,
             auto_pad_type,
@@ -59,17 +57,17 @@ Output<ov::Node> make_group_conv_backprop(const Output<ov::Node>& data,
     }
 }
 
-Output<ngraph::Node> make_conv_backprop(const Output<ngraph::Node>& data,
-                                        const Output<ngraph::Node>& filters,
-                                        const Strides& strides,
-                                        const Strides& dilations,
-                                        const CoordinateDiff& pads_begin,
-                                        const CoordinateDiff& pads_end,
-                                        const ngraph::op::PadType& auto_pad_type,
-                                        const std::vector<std::int64_t>& output_shape,
-                                        const std::vector<std::int64_t>& output_padding) {
+Output<ov::Node> make_conv_backprop(const Output<ov::Node>& data,
+                                    const Output<ov::Node>& filters,
+                                    const Strides& strides,
+                                    const Strides& dilations,
+                                    const CoordinateDiff& pads_begin,
+                                    const CoordinateDiff& pads_end,
+                                    const ov::op::PadType& auto_pad_type,
+                                    const std::vector<std::int64_t>& output_shape,
+                                    const std::vector<std::int64_t>& output_padding) {
     if (output_shape.empty()) {
-        return std::make_shared<default_opset::ConvolutionBackpropData>(
+        return std::make_shared<v1::ConvolutionBackpropData>(
             data,
             filters,
             strides,
@@ -79,10 +77,10 @@ Output<ov::Node> make_conv_backprop(const Output<ov::Node>& data,
             auto_pad_type,
             CoordinateDiff(std::begin(output_padding), std::end(output_padding)));
     } else {
-        return std::make_shared<default_opset::ConvolutionBackpropData>(
+        return std::make_shared<v1::ConvolutionBackpropData>(
             data,
             filters,
-            default_opset::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
+            v0::Constant::create(element::i64, Shape{output_shape.size()}, output_shape),
             strides,
             pads_begin,
             pads_end,
@@ -92,39 +90,37 @@ Output<ov::Node> make_conv_backprop(const Output<ov::Node>& data,
     }
 }
 
-Output<ngraph::Node> get_prepared_bias(const Output<ngraph::Node>& bias, const Output<ngraph::Node>& conv) {
+Output<ov::Node> get_prepared_bias(const Output<ov::Node>& bias, const Output<ov::Node>& conv) {
     // Prepare bias shape [1, C, 1, 1]
     const auto& conv_pshape = conv.get_partial_shape();
-    std::shared_ptr<ngraph::Node> bias_shape_node;
+    std::shared_ptr<ov::Node> bias_shape_node;
 
     if (conv_pshape.rank().is_static() && conv_pshape[1].is_static()) {
         Shape new_bias_shape(conv_pshape.rank().get_length(), 1);
         new_bias_shape[1] = conv_pshape[1].get_length();
 
-        bias_shape_node = default_opset::Constant::create(element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
+        bias_shape_node = v0::Constant::create(element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
     } else {
-        const auto conv_shape = std::make_shared<default_opset::ShapeOf>(conv);
-        const auto conv_rank = std::make_shared<default_opset::ShapeOf>(conv_shape);
+        const auto conv_shape = std::make_shared<v3::ShapeOf>(conv);
+        const auto conv_rank = std::make_shared<v3::ShapeOf>(conv_shape);
 
         // Prepare new bias shape base: [1, 1, 1, 1, ... ]
-        const auto one_node = default_opset::Constant::create(element::i64, Shape{1}, {1});
-        const auto two_node = default_opset::Constant::create(element::i64, Shape{1}, {2});
-        const auto remaining_shape_length = std::make_shared<default_opset::Subtract>(conv_rank, two_node);
-        const auto remaining_bias_shape_ones =
-            std::make_shared<default_opset::Broadcast>(one_node, remaining_shape_length);
-
-        const auto C_dim = std::make_shared<default_opset::StridedSlice>(conv_shape,
-                                                                         one_node,                  // begin
-                                                                         two_node,                  // end
-                                                                         std::vector<int64_t>{0},   // begin mask
-                                                                         std::vector<int64_t>{0});  // end mask
+        const auto one_node = v0::Constant::create(element::i64, Shape{1}, {1});
+        const auto two_node = v0::Constant::create(element::i64, Shape{1}, {2});
+        const auto remaining_shape_length = std::make_shared<v1::Subtract>(conv_rank, two_node);
+        const auto remaining_bias_shape_ones = std::make_shared<v3::Broadcast>(one_node, remaining_shape_length);
+
+        const auto C_dim = std::make_shared<v1::StridedSlice>(conv_shape,
+                                                              one_node,                  // begin
+                                                              two_node,                  // end
+                                                              std::vector<int64_t>{0},   // begin mask
+                                                              std::vector<int64_t>{0});  // end mask
 
         // Construct new bias shape: [1, C, 1, 1, ... ]
-        bias_shape_node =
-            std::make_shared<default_opset::Concat>(OutputVector{one_node, C_dim, remaining_bias_shape_ones}, 0);
+        bias_shape_node = std::make_shared<v0::Concat>(OutputVector{one_node, C_dim, remaining_bias_shape_ones}, 0);
     }
 
-    return std::make_shared<default_opset::Reshape>(bias, bias_shape_node, false);
+    return std::make_shared<v1::Reshape>(bias, bias_shape_node, false);
 }
 }  // namespace
 
@@ -145,7 +141,7 @@ OutputVector conv_transpose(const Node& node) {
     std::size_t num_spatial_dims = 0;
     Strides strides, dilations;
     std::pair<CoordinateDiff, CoordinateDiff> paddings;
-    ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
+    ov::op::PadType auto_pad_type = convpool::get_auto_pad(node);
 
     // Get attributes or infer them from input data rank if it's static.
     if (data_pshape.rank().is_static()) {
@@ -180,7 +176,7 @@ OutputVector conv_transpose(const Node& node) {
 
     CHECK_VALID_NODE(node, groups >= 0, "Incorrect value of 'group' attribute: ", groups);
 
-    Output<ngraph::Node> conv_node;
+    Output<ov::Node> conv_node;
 
     if (groups > 1) {
         filters = convpool::get_reshaped_filters(filters, groups);
@@ -211,7 +207,7 @@ OutputVector conv_transpose(const Node& node) {
     }
 
     const auto reshaped_bias = get_prepared_bias(inputs[2], conv_node);
 
-    return {std::make_shared<default_opset::Add>(conv_node, reshaped_bias)};
+    return {std::make_shared<v1::Add>(conv_node, reshaped_bias)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
index f5a9866cdfebdb..94fd5cdc1f3efb 100644
--- a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
@@ -18,7 +17,7 @@ namespace set_1 {
 ///
 /// \param node   The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of ONNX convolution
+/// \return The vector containing OV nodes producing output of ONNX convolution
 ///         operation.
 OutputVector conv_transpose(const Node& node);
 
diff --git a/src/frontends/onnx/frontend/src/op/cos.cpp b/src/frontends/onnx/frontend/src/op/cos.cpp
index 05fb3bbd78ffe9..63a565246f4402 100644
--- a/src/frontends/onnx/frontend/src/op/cos.cpp
+++ b/src/frontends/onnx/frontend/src/op/cos.cpp
@@ -4,9 +4,9 @@
 
 #include "op/cos.hpp"
 
-#include <memory>
+#include "openvino/op/cos.hpp"
 
-#include "default_opset.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -14,7 +14,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 OutputVector cos(const Node& node) {
-    return {std::make_shared<default_opset::Cos>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<v0::Cos>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/cos.hpp b/src/frontends/onnx/frontend/src/op/cos.hpp
index f79f066a4ba4d4..b7998734ac3804 100644
--- a/src/frontends/onnx/frontend/src/op/cos.hpp
+++ b/src/frontends/onnx/frontend/src/op/cos.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/output_vector.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/cosh.cpp b/src/frontends/onnx/frontend/src/op/cosh.cpp
index 4928a16bd62db4..eaaf27ab2c886a 100644
--- a/src/frontends/onnx/frontend/src/op/cosh.cpp
+++ b/src/frontends/onnx/frontend/src/op/cosh.cpp
@@ -4,9 +4,9 @@
 
 #include "op/cosh.hpp"
 
-#include <memory>
+#include "openvino/op/cosh.hpp"
 
-#include "default_opset.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -14,7 +14,7 @@ namespace onnx_import {
 namespace op {
 namespace set_1 {
 OutputVector cosh(const Node& node) {
-    return {std::make_shared<default_opset::Cosh>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<v0::Cosh>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/cosh.hpp b/src/frontends/onnx/frontend/src/op/cosh.hpp
index 22c7eb0d3ce3b9..293b2a6534ca76 100644
--- a/src/frontends/onnx/frontend/src/op/cosh.hpp
+++ b/src/frontends/onnx/frontend/src/op/cosh.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/output_vector.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/crop.cpp b/src/frontends/onnx/frontend/src/op/crop.cpp
index 175b18bf218680..cc1d54f3b5a803 100644
--- a/src/frontends/onnx/frontend/src/op/crop.cpp
+++ b/src/frontends/onnx/frontend/src/op/crop.cpp
@@ -4,9 +4,13 @@
 
 #include "op/crop.hpp"
 
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/shape.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/strided_slice.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -23,12 +27,11 @@ OutputVector crop(const Node& node) {
     // Border values: leftBorder, topBorder, rightBorder, bottomBorder.
     const auto border = node.get_attribute_value<std::vector<std::int64_t>>("border");
 
-    std::shared_ptr<ngraph::Node> end;
+    std::shared_ptr<ov::Node> end;
 
     // Set slice begin values to border values (note order of indexes)
-    const auto begin = default_opset::Constant::create(ngraph::element::i64,
-                                                       Shape{4},
-                                                       std::vector<std::int64_t>{0, 0, border[1], border[0]});
+    const auto begin =
+        v0::Constant::create(ov::element::i64, Shape{4}, std::vector<std::int64_t>{0, 0, border[1], border[0]});
 
     // If scale is given, then start crop at left/top `border`
     // and end on left/top `border` + `scale`.
@@ -43,10 +46,9 @@ OutputVector crop(const Node& node) {
 
         // Set slice end values to topBorder+heightScale and leftBorder+widthScale
        // Note that indexes don't match, e.g. border[0] + scale[1]
-        end = default_opset::Constant::create(
-            ngraph::element::i64,
-            Shape{4},
-            std::vector<std::int64_t>{0, 0, border[1] + scale[0], border[0] + scale[1]});
+        end = v0::Constant::create(ov::element::i64,
+                                   Shape{4},
+                                   std::vector<std::int64_t>{0, 0, border[1] + scale[0], border[0] + scale[1]});
     }
     // If scale is not provided, crop the image by values provided in `border`.
     else {
@@ -56,19 +58,17 @@ OutputVector crop(const Node& node) {
                          border.size());
 
         // Calculate ends as shape(input) - border[2:3]
-        const auto input_shape = std::make_shared<default_opset::ShapeOf>(input_data);
+        const auto input_shape = std::make_shared<v3::ShapeOf>(input_data);
         const auto end_offset =
-            default_opset::Constant::create(ngraph::element::i64,
-                                            Shape{4},
-                                            std::vector<std::int64_t>{0, 0, -border[3], -border[2]});
-        end = std::make_shared<default_opset::Add>(input_shape, end_offset);
+            v0::Constant::create(ov::element::i64, Shape{4}, std::vector<std::int64_t>{0, 0, -border[3], -border[2]});
+        end = std::make_shared<v1::Add>(input_shape, end_offset);
     }
 
     // Input data shape [N,C,H,W], slicing only along spatial dimensions
     std::vector<int64_t> begin_mask{1, 1, 0, 0};
     std::vector<int64_t> end_mask{1, 1, 0, 0};
 
-    return {std::make_shared<default_opset::StridedSlice>(input_data, begin, end, begin_mask, end_mask)};
+    return {std::make_shared<v1::StridedSlice>(input_data, begin, end, begin_mask, end_mask)};
 }
 
 }  // namespace set_1
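Crop slices only the spatial dimensions of the [N, C, H, W] input; the begin/end masks keep N and C intact. The bounds the translation builds, as a small sketch (illustration only; note the swapped border/scale indexing called out in the comments above):

    #include <cstdint>
    #include <vector>

    // Slice bounds assembled by the Crop translation.
    // border = {leftBorder, topBorder, rightBorder, bottomBorder}; scale = {heightScale, widthScale}.
    struct CropBounds {
        std::vector<int64_t> begin;
        std::vector<int64_t> end;
    };

    CropBounds crop_bounds(const std::vector<int64_t>& border, const std::vector<int64_t>* scale) {
        CropBounds b;
        b.begin = {0, 0, border[1], border[0]};
        if (scale != nullptr) {
            b.end = {0, 0, border[1] + (*scale)[0], border[0] + (*scale)[1]};
        } else {
            // negative entries are added to the input shape: H - bottomBorder, W - rightBorder
            b.end = {0, 0, -border[3], -border[2]};
        }
        return b;
    }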
diff --git a/src/frontends/onnx/frontend/src/op/crop.hpp b/src/frontends/onnx/frontend/src/op/crop.hpp
index 58310c65a762fb..6acc222c580f2e 100644
--- a/src/frontends/onnx/frontend/src/op/crop.hpp
+++ b/src/frontends/onnx/frontend/src/op/crop.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.cpp b/src/frontends/onnx/frontend/src/op/cum_sum.cpp
index 7f4bef11793799..f3bcf29fde38dc 100644
--- a/src/frontends/onnx/frontend/src/op/cum_sum.cpp
+++ b/src/frontends/onnx/frontend/src/op/cum_sum.cpp
@@ -4,11 +4,12 @@
 
 #include "op/cum_sum.hpp"
 
-#include <memory>
-
-#include "default_opset.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/cum_sum.hpp"
 #include "utils/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -19,16 +20,16 @@ OutputVector cum_sum(const Node& node) {
     auto inputs = node.get_ng_inputs();
     auto data = inputs.at(0);
     bool exclusive = node.get_attribute_value<std::int64_t>("exclusive", 0);
     bool reverse = node.get_attribute_value<std::int64_t>("reverse", 0);
-    Output<ngraph::Node> axis;
+    Output<ov::Node> axis;
 
     if (inputs.size() > 1) {
         // optional input, 0-D or 1-D tensor
         const auto& axis_shape = inputs.at(1).get_partial_shape();
         axis = axis_shape.is_dynamic() ? inputs.at(1) : ngraph::onnx_import::reshape::interpret_as_scalar(inputs.at(1));
     } else {
-        axis = default_opset::Constant::create(element::i64, Shape{}, {0});  // default
+        axis = v0::Constant::create(element::i64, Shape{}, {0});  // default
     }
 
-    return OutputVector{std::make_shared<default_opset::CumSum>(data, axis, exclusive, reverse)};
+    return OutputVector{std::make_shared<v0::CumSum>(data, axis, exclusive, reverse)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.hpp b/src/frontends/onnx/frontend/src/op/cum_sum.hpp
index 4e3a39c8e297c2..f12e32e6ced3df 100644
--- a/src/frontends/onnx/frontend/src/op/cum_sum.hpp
+++ b/src/frontends/onnx/frontend/src/op/cum_sum.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
index 8940e77eb74b35..d644f35447a89d 100644
--- a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
+++ b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
@@ -4,8 +4,10 @@
 
 #include "op/depth_to_space.hpp"
 
-#include "default_opset.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/depth_to_space.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -18,16 +20,16 @@ OutputVector depth_to_space(const Node& node) {
     FRONT_END_GENERAL_CHECK(shape.rank().is_static() && shape.rank().get_length() == 4, "Input must be 4-dimensional");
 
     const auto mode = node.get_attribute_value<std::string>("mode", "DCR");
-    default_opset::DepthToSpace::DepthToSpaceMode ngraph_mode;
+    v0::DepthToSpace::DepthToSpaceMode ov_mode;
     if (mode == "DCR")
-        ngraph_mode = default_opset::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST;
+        ov_mode = v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST;
     else if (mode == "CRD")
-        ngraph_mode = default_opset::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST;
+        ov_mode = v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST;
     else
         FRONT_END_GENERAL_CHECK(false, "only 'DCR' and 'CRD' modes are supported");
 
     const auto block_size = node.get_attribute_value<std::int64_t>("blocksize");
-    return OutputVector{std::make_shared<default_opset::DepthToSpace>(data, ngraph_mode, block_size)};
+    return OutputVector{std::make_shared<v0::DepthToSpace>(data, ov_mode, block_size)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
index dae6bf41e6541a..9e30edc25431e7 100644
--- a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
+++ b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
index ac3b334f4f47d2..5e234a39b1a5d0 100644
--- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
+++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
@@ -7,26 +7,29 @@
 #include <cstdint>
 #include <memory>
 
-#include "default_opset.hpp"
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/convert.hpp"
-#include "ngraph/shape.hpp"
-#include "ngraph/validation_util.hpp"
 #include "onnx_import/core/null_node.hpp"
+#include "openvino/core/validation_util.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/subtract.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace detail {
-std::shared_ptr<ngraph::Node> get_zero_point(const OutputVector& inputs) {
+std::shared_ptr<ov::Node> get_zero_point(const OutputVector& inputs) {
     if (inputs.size() == 3 && !ov::op::util::is_null(inputs[2])) {
         const auto& zero_point = inputs[2];
 
         if (zero_point.get_element_type() != element::f32) {
-            return std::make_shared<default_opset::Convert>(zero_point, element::f32);
+            return std::make_shared<v0::Convert>(zero_point, element::f32);
         }
 
         return zero_point.get_node_shared_ptr();
@@ -48,22 +51,20 @@ OutputVector dequantize_linear(const Node& node) {
 
     common::validate_scalar_input("Dequantization scale", scale.get_node_shared_ptr(), {element::f32});
 
-    const auto converted_x = std::make_shared<default_opset::Convert>(x, element::f32);
+    const auto converted_x = std::make_shared<v0::Convert>(x, element::f32);
 
     if (zero_point) {
         common::validate_scalar_input("Zero point", zero_point);
-        return {std::make_shared<default_opset::Multiply>(
-            std::make_shared<default_opset::Subtract>(converted_x, zero_point),
-            scale)};
+        return {std::make_shared<v1::Multiply>(std::make_shared<v1::Subtract>(converted_x, zero_point), scale)};
     } else {
-        return {std::make_shared<default_opset::Multiply>(converted_x, scale)};
+        return {std::make_shared<v1::Multiply>(converted_x, scale)};
     }
 }
 }  // namespace set_1
(static)."); OPENVINO_SUPPRESS_DEPRECATED_START - axis = ngraph::normalize_axis(node.get_description(), axis, x_shape.rank()); + axis = ov::normalize_axis(node.get_description(), axis, x_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END validate_scale(scale, x, axis); const auto scale_reshaped = reshape_input(scale, axis, x_shape); - const auto converted_x = std::make_shared(x, element::f32); + const auto converted_x = std::make_shared(x, element::f32); if (zero_point) { validate_zero_point(zero_point, x, axis); - return {std::make_shared( - std::make_shared(converted_x, reshape_input(zero_point, axis, x_shape)), + return {std::make_shared( + std::make_shared(converted_x, reshape_input(zero_point, axis, x_shape)), scale_reshaped)}; } else { - return {std::make_shared(converted_x, scale_reshaped)}; + return {std::make_shared(converted_x, scale_reshaped)}; } } } // namespace detail diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp index d44a019adabd8f..7bb121d7e2df29 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp @@ -7,8 +7,8 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/core/node.hpp" namespace ngraph { namespace onnx_import { @@ -21,9 +21,9 @@ OutputVector dequantize_linear(const Node& node); namespace set_13 { namespace detail { -OutputVector dequantize_linear(const Output& x, - const Output& scale, - const std::shared_ptr& zero_point, +OutputVector dequantize_linear(const Output& x, + const Output& scale, + const std::shared_ptr& zero_point, int64_t axis, const Node& node); } diff --git a/src/frontends/onnx/frontend/src/op/dft.hpp b/src/frontends/onnx/frontend/src/op/dft.hpp index 0390bbedc0c875..bae572d84e51a8 100644 --- a/src/frontends/onnx/frontend/src/op/dft.hpp +++ b/src/frontends/onnx/frontend/src/op/dft.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/div.hpp b/src/frontends/onnx/frontend/src/op/div.hpp index 8d37bae67a81f9..a2d6afbfcf4940 100644 --- a/src/frontends/onnx/frontend/src/op/div.hpp +++ b/src/frontends/onnx/frontend/src/op/div.hpp @@ -7,26 +7,22 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include - -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/shape.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/divide.hpp" namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { inline OutputVector div(const Node& node) { - return common::handle_opset6_binary_op(node); + return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { inline OutputVector div(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/frontends/onnx/frontend/src/op/dropout.cpp b/src/frontends/onnx/frontend/src/op/dropout.cpp index 0cfd8f2d941f47..553006e97cef7d 100644 --- a/src/frontends/onnx/frontend/src/op/dropout.cpp +++ b/src/frontends/onnx/frontend/src/op/dropout.cpp @@ -4,14 +4,15 @@ #include "op/dropout.hpp" -#include - -#include "default_opset.hpp" 
#include "exceptions.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/util/op_types.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -24,9 +25,8 @@ OutputVector build_dropout(const Node& node, bool training_mode) { const bool return_mask = node.get_outputs_size() > 1; if (return_mask) { - const auto mask = std::make_shared( - default_opset::Constant::create(ngraph::element::boolean, Shape{}, {true}), - std::make_shared(input_data)); + const auto mask = std::make_shared(v0::Constant::create(ov::element::boolean, Shape{}, {true}), + std::make_shared(input_data)); return {input_data, mask}; } else { return {input_data}; @@ -44,8 +44,7 @@ OutputVector dropout(const Node& node) { CHECK_VALID_NODE(node, ov::op::util::is_constant(ng_inputs.at(2).get_node_shared_ptr()), "Non-constant training_mode input is not supported."); - training_mode = - ov::as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; + training_mode = ov::as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; } return build_dropout(node, training_mode); } diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp index 74a33816db2419..fabe0c784d14b5 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp @@ -4,71 +4,76 @@ #include "op/dynamic_quantize_linear.hpp" -#include -#include - -#include "default_opset.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_max.hpp" +#include "openvino/op/reduce_min.hpp" +#include "openvino/op/round.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" #include "utils/common.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace { -std::shared_ptr find_min_value(const ov::Output& input) { - const auto& zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto& one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); +std::shared_ptr find_min_value(const ov::Output& input) { + const auto& zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto& one_node = v0::Constant::create(element::i64, Shape{}, {1}); - const auto& input_shape = std::make_shared(input); - const auto& input_rank = std::make_shared(input_shape); - const auto& input_rank_as_scalar = std::make_shared(input_rank); + const auto& input_shape = std::make_shared(input); + const auto& input_rank = std::make_shared(input_shape); + const auto& input_rank_as_scalar = std::make_shared(input_rank); - const auto& reduce_axes = - std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); + const auto& reduce_axes = std::make_shared(zero_node, 
input_rank_as_scalar, one_node, element::i64); - const auto& input_min = std::make_shared(input, reduce_axes); + const auto& input_min = std::make_shared(input, reduce_axes); - const auto& zero_node_u8 = default_opset::Constant::create(element::f32, Shape{}, {0}); - return std::make_shared(zero_node_u8, input_min); + const auto& zero_node_u8 = v0::Constant::create(element::f32, Shape{}, {0}); + return std::make_shared(zero_node_u8, input_min); } -std::shared_ptr find_max_value(const ov::Output& input) { - const auto& zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto& one_node = default_opset::Constant::create(element::i64, Shape{}, {1}); +std::shared_ptr find_max_value(const ov::Output& input) { + const auto& zero_node = v0::Constant::create(element::i64, Shape{}, {0}); + const auto& one_node = v0::Constant::create(element::i64, Shape{}, {1}); - const auto& input_shape = std::make_shared(input); - const auto& input_rank = std::make_shared(input_shape); - const auto& input_rank_as_scalar = std::make_shared(input_rank); + const auto& input_shape = std::make_shared(input); + const auto& input_rank = std::make_shared(input_shape); + const auto& input_rank_as_scalar = std::make_shared(input_rank); - const auto& reduce_axes = - std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); + const auto& reduce_axes = std::make_shared(zero_node, input_rank_as_scalar, one_node, element::i64); - const auto& input_max = std::make_shared(input, reduce_axes); + const auto& input_max = std::make_shared(input, reduce_axes); - const auto& zero_node_u8 = default_opset::Constant::create(element::f32, Shape{}, {0}); - return std::make_shared(zero_node_u8, input_max); + const auto& zero_node_u8 = v0::Constant::create(element::f32, Shape{}, {0}); + return std::make_shared(zero_node_u8, input_max); } -std::shared_ptr quantize_linear(Output x, - Output x_span, - Output quant_range_span, - Output y_zero_point) { - const auto& x_scaled = - std::make_shared(std::make_shared(x, quant_range_span), x_span); +std::shared_ptr quantize_linear(Output x, + Output x_span, + Output quant_range_span, + Output y_zero_point) { + const auto& x_scaled = std::make_shared(std::make_shared(x, quant_range_span), x_span); - const auto& x_rounded = - std::make_shared(x_scaled, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); + const auto& x_rounded = std::make_shared(x_scaled, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - const auto& y_zero_point_f32 = std::make_shared(y_zero_point, ov::element::f32); + const auto& y_zero_point_f32 = std::make_shared(y_zero_point, ov::element::f32); - const auto& result_shifted = std::make_shared(x_rounded, y_zero_point_f32); - const auto& result_clamped = std::make_shared(result_shifted, 0, 255); + const auto& result_shifted = std::make_shared(x_rounded, y_zero_point_f32); + const auto& result_clamped = std::make_shared(result_shifted, 0, 255); - return std::make_shared(result_clamped, ov::element::u8); + return std::make_shared(result_clamped, ov::element::u8); } } // namespace namespace op { @@ -78,24 +83,23 @@ OutputVector dynamic_quantize_linear(const Node& node) { const auto& x = inputs.at(0); // quantization range in case of uint8 is [0, 255] - const auto& quant_range_min = default_opset::Constant::create(element::f32, Shape{}, {0}); - const auto& quant_range_max = default_opset::Constant::create(element::f32, Shape{}, {255}); - const auto& quant_range_span = std::make_shared(quant_range_max, quant_range_min); + const auto& quant_range_min = 
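The helpers above follow the ONNX DynamicQuantizeLinear recipe: the data range is first widened to include zero, the scale maps that range onto [0, 255], and the zero point is the rounded, clamped image of zero. A scalar sketch, illustration only (assumes a non-empty input; ties round to even, matching the graph's Round mode):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Per-tensor u8 dynamic quantization parameters, as computed by the subgraph above.
    void dynamic_quantize_params(const std::vector<float>& x, float& y_scale, uint8_t& y_zero_point) {
        const float x_min = std::min(0.0f, *std::min_element(x.begin(), x.end()));  // range must contain 0
        const float x_max = std::max(0.0f, *std::max_element(x.begin(), x.end()));
        y_scale = (x_max - x_min) / 255.0f;
        const float zp = std::nearbyint((0.0f - x_min) / y_scale);  // HALF_TO_EVEN under default rounding
        y_zero_point = static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, zp)));
        // each element then quantizes as clamp(round(v / y_scale) + y_zero_point, 0, 255)
    }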
v0::Constant::create(element::f32, Shape{}, {0}); + const auto& quant_range_max = v0::Constant::create(element::f32, Shape{}, {255}); + const auto& quant_range_span = std::make_shared(quant_range_max, quant_range_min); const auto& x_max = find_max_value(x); const auto& x_min = find_min_value(x); - const auto& x_span = std::make_shared(x_max, x_min); + const auto& x_span = std::make_shared(x_max, x_min); - const auto& y_scale = std::make_shared(x_span, quant_range_max); + const auto& y_scale = std::make_shared(x_span, quant_range_max); - const auto& x_min_shifted = std::make_shared(quant_range_min, x_min); + const auto& x_min_shifted = std::make_shared(quant_range_min, x_min); const auto& intermediate_zero_point = - std::make_shared(std::make_shared(x_min_shifted, y_scale), - ov::op::v5::Round::RoundMode::HALF_TO_EVEN); + std::make_shared(std::make_shared(x_min_shifted, y_scale), + ov::op::v5::Round::RoundMode::HALF_TO_EVEN); - const auto& y_zero_point = std::make_shared( - std::make_shared(intermediate_zero_point, 0, 255), - ov::element::u8); + const auto& y_zero_point = + std::make_shared(std::make_shared(intermediate_zero_point, 0, 255), ov::element::u8); const auto& y = quantize_linear(x, x_span, quant_range_span, y_zero_point); diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp index 84d8bc852628d7..8962f6602145fa 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/einsum.cpp b/src/frontends/onnx/frontend/src/op/einsum.cpp index c2732ae4210198..77375139cc5ac0 100644 --- a/src/frontends/onnx/frontend/src/op/einsum.cpp +++ b/src/frontends/onnx/frontend/src/op/einsum.cpp @@ -4,7 +4,9 @@ #include "op/einsum.hpp" -#include "default_opset.hpp" +#include "openvino/op/einsum.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,7 +16,7 @@ namespace set_1 { OutputVector einsum(const Node& node) { const std::string& equation{node.get_attribute_value("equation")}; - return OutputVector{std::make_shared(node.get_ng_inputs(), equation)}; + return OutputVector{std::make_shared(node.get_ng_inputs(), equation)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/einsum.hpp b/src/frontends/onnx/frontend/src/op/einsum.hpp index 7252006fdffba6..b037fd8652f99b 100644 --- a/src/frontends/onnx/frontend/src/op/einsum.hpp +++ b/src/frontends/onnx/frontend/src/op/einsum.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/elu.cpp b/src/frontends/onnx/frontend/src/op/elu.cpp index 4af7e99c1a549a..ca74d8fe92c044 100644 --- a/src/frontends/onnx/frontend/src/op/elu.cpp +++ b/src/frontends/onnx/frontend/src/op/elu.cpp @@ -4,10 +4,9 @@ #include "op/elu.hpp" -#include -#include +#include "openvino/op/elu.hpp" -#include "default_opset.hpp" +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -18,7 +17,7 @@ OutputVector elu(const Node& node) { auto data = node.get_ng_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1); - return 
diff --git a/src/frontends/onnx/frontend/src/op/einsum.cpp b/src/frontends/onnx/frontend/src/op/einsum.cpp
index c2732ae4210198..77375139cc5ac0 100644
--- a/src/frontends/onnx/frontend/src/op/einsum.cpp
+++ b/src/frontends/onnx/frontend/src/op/einsum.cpp
@@ -4,7 +4,9 @@
 
 #include "op/einsum.hpp"
 
-#include "default_opset.hpp"
+#include "openvino/op/einsum.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -14,7 +16,7 @@ namespace set_1 {
 OutputVector einsum(const Node& node) {
     const std::string& equation{node.get_attribute_value<std::string>("equation")};
 
-    return OutputVector{std::make_shared<default_opset::Einsum>(node.get_ng_inputs(), equation)};
+    return OutputVector{std::make_shared<v7::Einsum>(node.get_ng_inputs(), equation)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/einsum.hpp b/src/frontends/onnx/frontend/src/op/einsum.hpp
index 7252006fdffba6..b037fd8652f99b 100644
--- a/src/frontends/onnx/frontend/src/op/einsum.hpp
+++ b/src/frontends/onnx/frontend/src/op/einsum.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/elu.cpp b/src/frontends/onnx/frontend/src/op/elu.cpp
index 4af7e99c1a549a..ca74d8fe92c044 100644
--- a/src/frontends/onnx/frontend/src/op/elu.cpp
+++ b/src/frontends/onnx/frontend/src/op/elu.cpp
@@ -4,10 +4,9 @@
 
 #include "op/elu.hpp"
 
-#include <memory>
-#include <vector>
+#include "openvino/op/elu.hpp"
 
-#include "default_opset.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -18,7 +17,7 @@ OutputVector elu(const Node& node) {
     auto data = node.get_ng_inputs().at(0);
     double alpha = node.get_attribute_value<double>("alpha", 1);
 
-    return OutputVector{std::make_shared<default_opset::Elu>(data, alpha)};
+    return OutputVector{std::make_shared<v0::Elu>(data, alpha)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/elu.hpp b/src/frontends/onnx/frontend/src/op/elu.hpp
index d0b6cc0daf107e..4304a197ec23c8 100644
--- a/src/frontends/onnx/frontend/src/op/elu.hpp
+++ b/src/frontends/onnx/frontend/src/op/elu.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/equal.hpp b/src/frontends/onnx/frontend/src/op/equal.hpp
index 108ab06527bbcf..cf8be7ceb632b9 100644
--- a/src/frontends/onnx/frontend/src/op/equal.hpp
+++ b/src/frontends/onnx/frontend/src/op/equal.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/equal.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector equal(const Node& node) {
-    return {std::make_shared<default_opset::Equal>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
+    return {std::make_shared<ov::op::v1::Equal>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/erf.hpp b/src/frontends/onnx/frontend/src/op/erf.hpp
index 76933ef8cd370a..1dd303743352c2 100644
--- a/src/frontends/onnx/frontend/src/op/erf.hpp
+++ b/src/frontends/onnx/frontend/src/op/erf.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/erf.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector erf(const Node& node) {
-    return {std::make_shared<default_opset::Erf>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Erf>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/exp.hpp b/src/frontends/onnx/frontend/src/op/exp.hpp
index ecd5a7c6b46b53..33a7ba16c5da65 100644
--- a/src/frontends/onnx/frontend/src/op/exp.hpp
+++ b/src/frontends/onnx/frontend/src/op/exp.hpp
@@ -7,18 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/exp.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector exp(const Node& node) {
-    return {std::make_shared<default_opset::Exp>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Exp>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/expand.cpp b/src/frontends/onnx/frontend/src/op/expand.cpp
index 12abc218ecf1db..cb5c1d462dede6 100644
--- a/src/frontends/onnx/frontend/src/op/expand.cpp
+++ b/src/frontends/onnx/frontend/src/op/expand.cpp
@@ -4,33 +4,29 @@
 
 #include "op/expand.hpp"
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/multiply.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 OutputVector expand(const Node& node) {
-    const Output<ngraph::Node> data{node.get_ng_inputs().at(0)};
-    const Output<ngraph::Node> shape{node.get_ng_inputs().at(1)};
+    const Output<ov::Node> data{node.get_ng_inputs().at(0)};
+    const Output<ov::Node> shape{node.get_ng_inputs().at(1)};
 
     if (common::is_failsafe_node(shape.get_node_shared_ptr())) {
         // in case the "shape" input is connected to a failsafe node created in place of an invalid initializer
         // the target shape should be ignored and this Expand operation should not modify its input tensor
         // the Broadcast created below should be eliminated later on by an appropriate optimization pass
-        const auto identity_broadcast = default_opset::Constant::create(element::i64, Shape{1}, {1});
-        return {std::make_shared<default_opset::Broadcast>(data,
-                                                           identity_broadcast,
-                                                           ngraph::op::BroadcastType::BIDIRECTIONAL)};
+        const auto identity_broadcast = v0::Constant::create(element::i64, Shape{1}, {1});
+        return {std::make_shared<v3::Broadcast>(data, identity_broadcast, ov::op::BroadcastType::BIDIRECTIONAL)};
     } else {
-        return {std::make_shared<default_opset::Broadcast>(data, shape, ngraph::op::BroadcastType::BIDIRECTIONAL)};
+        return {std::make_shared<v3::Broadcast>(data, shape, ov::op::BroadcastType::BIDIRECTIONAL)};
     }
 }
 
diff --git a/src/frontends/onnx/frontend/src/op/expand.hpp b/src/frontends/onnx/frontend/src/op/expand.hpp
index d76745d0cd7fd5..0f9454b9bd1464 100644
--- a/src/frontends/onnx/frontend/src/op/expand.hpp
+++ b/src/frontends/onnx/frontend/src/op/expand.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
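A standalone C++ sketch (not part of the patch, plain C++ with no OpenVINO dependency) of the bidirectional, NumPy-style broadcast shape rule: a target shape of {1} can never change the data shape, which is why it works as the identity placeholder in the failsafe branch above.

#include <algorithm>
#include <cassert>
#include <vector>

static std::vector<size_t> bidirectional_broadcast(std::vector<size_t> a, std::vector<size_t> b) {
    while (a.size() < b.size()) a.insert(a.begin(), 1);  // left-pad the shorter rank with 1s
    while (b.size() < a.size()) b.insert(b.begin(), 1);
    std::vector<size_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        assert(a[i] == b[i] || a[i] == 1 || b[i] == 1);  // dimensions must be compatible
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}

int main() {
    assert((bidirectional_broadcast({2, 3}, {1}) == std::vector<size_t>{2, 3}));          // identity
    assert((bidirectional_broadcast({2, 1, 3}, {4, 3}) == std::vector<size_t>{2, 4, 3})); // expansion
    return 0;
}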
diff --git a/src/frontends/onnx/frontend/src/op/eye_like.cpp b/src/frontends/onnx/frontend/src/op/eye_like.cpp
index 30bb9369d288ba..b617bda7f1d47a 100644
--- a/src/frontends/onnx/frontend/src/op/eye_like.cpp
+++ b/src/frontends/onnx/frontend/src/op/eye_like.cpp
@@ -4,16 +4,15 @@
 
 #include "op/eye_like.hpp"
 
-#include <memory>
-
 #include "exceptions.hpp"
-#include "ngraph/output_vector.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/eye.hpp"
 #include "openvino/op/gather.hpp"
 #include "openvino/op/shape_of.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -22,12 +21,10 @@ namespace detail {
 namespace {
 
 /// \brief Split a shape returned by a ShapeOf operation into two outputs: width and height.
-OutputVector get_shape_width_and_height(const Output<ngraph::Node>& shape) {
-    const auto axis = ngraph::op::Constant::create(ngraph::element::i64, {1}, {0});
-    const auto height =
-        std::make_shared<v8::Gather>(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}), axis);
-    const auto width =
-        std::make_shared<v8::Gather>(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {1}), axis);
+OutputVector get_shape_width_and_height(const Output<ov::Node>& shape) {
+    const auto axis = v0::Constant::create(ov::element::i64, {1}, {0});
+    const auto height = std::make_shared<v8::Gather>(shape, v0::Constant::create(ov::element::i64, {1}, {0}), axis);
+    const auto width = std::make_shared<v8::Gather>(shape, v0::Constant::create(ov::element::i64, {1}, {1}), axis);
 
     return {width, height};
 }
@@ -59,9 +56,9 @@ OutputVector eye_like(const Node& node) {
     const auto width = dims.at(0);
     const auto height = dims.at(1);
     const auto k =
-        ov::op::v0::Constant::create(ngraph::element::i64, {1}, {node.get_attribute_value<std::int64_t>("k", 0)});
+        ov::op::v0::Constant::create(ov::element::i64, {1}, {node.get_attribute_value<std::int64_t>("k", 0)});
 
-    const auto output = std::make_shared<ov::op::v9::Eye>(height, width, k, target_type);
+    const auto output = std::make_shared<v9::Eye>(height, width, k, target_type);
 
     return {output};
 }
diff --git a/src/frontends/onnx/frontend/src/op/eye_like.hpp b/src/frontends/onnx/frontend/src/op/eye_like.hpp
index 011495f1bac732..de3c5fa1d6b24d 100644
--- a/src/frontends/onnx/frontend/src/op/eye_like.hpp
+++ b/src/frontends/onnx/frontend/src/op/eye_like.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/flatten.cpp b/src/frontends/onnx/frontend/src/op/flatten.cpp
index ebf4499a279d4a..cbdc74697540b1 100644
--- a/src/frontends/onnx/frontend/src/op/flatten.cpp
+++ b/src/frontends/onnx/frontend/src/op/flatten.cpp
@@ -4,12 +4,12 @@
 
 #include "op/flatten.hpp"
 
-#include <cstdint>
-
 #include "exceptions.hpp"
-#include "ngraph/validation_util.hpp"
+#include "openvino/core/validation_util.hpp"
 #include "ov_models/ov_builders/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -25,7 +25,7 @@ OutputVector flatten(const Node& node) {
         const std::int64_t data_rank_value = data_rank.get_length();
         // Accepted range is [-r, r] where r = rank(input).
 OPENVINO_SUPPRESS_DEPRECATED_START
-        axis = ngraph::normalize_axis(node.get_description(), axis, data_rank_value, -data_rank_value, data_rank_value);
+        axis = ov::normalize_axis(node.get_description(), axis, data_rank_value, -data_rank_value, data_rank_value);
 OPENVINO_SUPPRESS_DEPRECATED_END
     }
     return {ov::op::util::flatten(data, static_cast<int>(axis))};
diff --git a/src/frontends/onnx/frontend/src/op/flatten.hpp b/src/frontends/onnx/frontend/src/op/flatten.hpp
index 8f0c7e8458c543..f810b438f12779 100644
--- a/src/frontends/onnx/frontend/src/op/flatten.hpp
+++ b/src/frontends/onnx/frontend/src/op/flatten.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/floor.hpp b/src/frontends/onnx/frontend/src/op/floor.hpp
index 535dc4771207b0..b721a5c4a20efd 100644
--- a/src/frontends/onnx/frontend/src/op/floor.hpp
+++ b/src/frontends/onnx/frontend/src/op/floor.hpp
@@ -7,19 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/floor.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/floor.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector floor(const Node& node) {
-    return {std::make_shared<default_opset::Floor>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v0::Floor>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/gather.hpp b/src/frontends/onnx/frontend/src/op/gather.hpp
index 15f826f5f809b6..f56adcb5531851 100644
--- a/src/frontends/onnx/frontend/src/op/gather.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather.hpp
@@ -7,11 +7,8 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "ngraph/node.hpp"
-#include "ngraph/validation_util.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/constant.hpp"
 #include "openvino/op/gather.hpp"
 
 namespace ngraph {
@@ -26,7 +23,7 @@ inline OutputVector gather(const Node& node) {
 
     return {std::make_shared<ov::op::v8::Gather>(data,
                                                  indices,
-                                                 default_opset::Constant::create(element::i64, Shape{}, {axis}))};
+                                                 ov::op::v0::Constant::create(element::i64, Shape{}, {axis}))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/gather_elements.hpp b/src/frontends/onnx/frontend/src/op/gather_elements.hpp
index a5052a52fc87df..3785be1a672a2a 100644
--- a/src/frontends/onnx/frontend/src/op/gather_elements.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather_elements.hpp
@@ -7,8 +7,7 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "default_opset.hpp"
-#include "ngraph/output_vector.hpp"
+#include "openvino/op/gather_elements.hpp"
 
 namespace ngraph {
 namespace onnx_import {
@@ -20,7 +19,7 @@ inline OutputVector gather_elements(const Node& node) {
     auto data = ng_inputs.at(0);
     auto indices = ng_inputs.at(1);
     auto axis = node.get_attribute_value<std::int64_t>("axis", 0);
 
-    return {std::make_shared<default_opset::GatherElements>(data, indices, axis)};
+    return {std::make_shared<ov::op::v6::GatherElements>(data, indices, axis)};
 }
 }  // namespace set_1
 }  // namespace op
diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.cpp b/src/frontends/onnx/frontend/src/op/gather_nd.cpp
index fe50c1689b30ac..3332bb923c2c02 100644
--- a/src/frontends/onnx/frontend/src/op/gather_nd.cpp
+++ b/src/frontends/onnx/frontend/src/op/gather_nd.cpp
@@ -7,8 +7,9 @@
 
 #include "op/gather_nd.hpp"
 
-#include "default_opset.hpp"
-#include "utils/common.hpp"
+#include "openvino/op/gather_nd.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -21,7 +22,7 @@ OutputVector gather_nd(const Node& node) {
     const auto indices = ng_inputs.at(1);
     const auto batch_dims = node.get_attribute_value<int64_t>("batch_dims", 0);
 
-    return {std::make_shared<default_opset::GatherND>(data, indices, batch_dims)};
+    return {std::make_shared<v8::GatherND>(data, indices, batch_dims)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.hpp b/src/frontends/onnx/frontend/src/op/gather_nd.hpp
index ad2092f21275b7..c16e40b89baa6b 100644
--- a/src/frontends/onnx/frontend/src/op/gather_nd.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather_nd.hpp
@@ -10,7 +10,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp
index 7807539f72f4cf..a2988cc013aa2a 100644
--- a/src/frontends/onnx/frontend/src/op/gemm.cpp
+++ b/src/frontends/onnx/frontend/src/op/gemm.cpp
@@ -4,15 +4,14 @@
 
 #include "op/gemm.hpp"
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/matmul.hpp"
-#include "ngraph/op/multiply.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/matmul.hpp"
+#include "openvino/op/multiply.hpp"
 #include "ov_models/ov_builders/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -20,14 +19,14 @@ namespace op {
 namespace set_1 {
 OutputVector gemm(const Node& node) {
     OutputVector inputs{node.get_ng_inputs()};
-    Output<ngraph::Node> input_a = inputs.at(0);
-    Output<ngraph::Node> input_b = inputs.at(1);
-    Output<ngraph::Node> input_c;
+    Output<ov::Node> input_a = inputs.at(0);
+    Output<ov::Node> input_b = inputs.at(1);
+    Output<ov::Node> input_c;
 
     if (inputs.size() == 3) {
         input_c = inputs.at(2);
     } else {
-        input_c = default_opset::Constant::create(input_b.get_element_type(), ngraph::Shape{}, {0});
+        input_c = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {0});
     }
 
     const auto alpha = node.get_attribute_value<float>("alpha", 1);
@@ -47,16 +46,16 @@ OutputVector gemm(const Node& node) {
     input_a = ov::op::util::flatten(input_a, 1);
     input_b = ov::op::util::flatten(input_b, 1);
 
-    std::shared_ptr<ngraph::Node> matmul_node = std::make_shared<default_opset::MatMul>(input_a, input_b);
+    std::shared_ptr<ov::Node> matmul_node = std::make_shared<v0::MatMul>(input_a, input_b);
 
     if (alpha != 1) {
-        const auto alpha_node = default_opset::Constant::create(input_b.get_element_type(), Shape{}, {alpha});
-        matmul_node = std::make_shared<default_opset::Multiply>(matmul_node, alpha_node);
+        const auto alpha_node = v0::Constant::create(input_b.get_element_type(), Shape{}, {alpha});
+        matmul_node = std::make_shared<v1::Multiply>(matmul_node, alpha_node);
     }
 
-    auto beta_times_input_c = std::make_shared<default_opset::Multiply>(beta_node, input_c);
+    auto beta_times_input_c = std::make_shared<v1::Multiply>(beta_node, input_c);
 
-    return OutputVector{std::make_shared<default_opset::Add>(matmul_node, beta_times_input_c)};
+    return OutputVector{std::make_shared<v1::Add>(matmul_node, beta_times_input_c)};
 }
 
 }  // namespace set_1
@@ -64,14 +63,14 @@ OutputVector gemm(const Node& node) {
 namespace set_6 {
 OutputVector gemm(const Node& node) {
     OutputVector inputs{node.get_ng_inputs()};
-    Output<ngraph::Node> input_a = inputs.at(0);
-    Output<ngraph::Node> input_b = inputs.at(1);
-    Output<ngraph::Node> input_c;
+    Output<ov::Node> input_a = inputs.at(0);
+    Output<ov::Node> input_b = inputs.at(1);
+    Output<ov::Node> input_c;
 
     if (inputs.size() == 3) {
         input_c = inputs.at(2);
     } else {
-        input_c = default_opset::Constant::create(input_b.get_element_type(), ngraph::Shape{}, {0});
+        input_c = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {0});
     }
 
     const auto alpha_node = node.get_attribute_as_constant<float>("alpha", 1, input_b.get_element_type());
@@ -80,13 +79,13 @@ OutputVector gemm(const Node& node) {
     const bool trans_a = node.get_attribute_value<int64_t>("transA", 0);
     const bool trans_b = node.get_attribute_value<int64_t>("transB", 0);
 
-    const auto matmul_node = std::make_shared<default_opset::MatMul>(input_a, input_b, trans_a, trans_b);
-    const auto matmul_times_alpha = std::make_shared<default_opset::Multiply>(matmul_node, alpha_node);
+    const auto matmul_node = std::make_shared<v0::MatMul>(input_a, input_b, trans_a, trans_b);
+    const auto matmul_times_alpha = std::make_shared<v1::Multiply>(matmul_node, alpha_node);
 
-    const auto beta_times_input_c = std::make_shared<default_opset::Multiply>(beta_node, input_c);
+    const auto beta_times_input_c = std::make_shared<v1::Multiply>(beta_node, input_c);
     const std::string onnx_name = !node.get_name().empty() ? node.get_name() : node.output(0);
     matmul_node->set_friendly_name(onnx_name + "/WithoutBiases");
 
-    return {std::make_shared<default_opset::Add>(matmul_times_alpha, beta_times_input_c)};
+    return {std::make_shared<v1::Add>(matmul_times_alpha, beta_times_input_c)};
 }
 
 }  // namespace set_6
diff --git a/src/frontends/onnx/frontend/src/op/gemm.hpp b/src/frontends/onnx/frontend/src/op/gemm.hpp
index fd28462c9c1c5f..7b1e801404a88b 100644
--- a/src/frontends/onnx/frontend/src/op/gemm.hpp
+++ b/src/frontends/onnx/frontend/src/op/gemm.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp
index 750c97c6c2d5f4..83ff50bb6c7279 100644
--- a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp
+++ b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp
@@ -4,11 +4,13 @@
 
 #include "op/global_average_pool.hpp"
 
-#include <memory>
-#include <vector>
+#include "openvino/op/constant.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/reduce_mean.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/squeeze.hpp"
 
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -27,18 +29,17 @@ OutputVector global_average_pool(const Node& node) {
     // Expected spatial dims indexes: [2, 3, 4]
     auto data = node.get_ng_inputs()[0];
 
-    const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0});
-    const auto one_node = default_opset::Constant::create(element::i64, Shape{}, {1});
-    const auto two_node = default_opset::Constant::create(element::i64, Shape{}, {2});
+    const auto zero_node = v0::Constant::create(element::i64, Shape{}, {0});
+    const auto one_node = v0::Constant::create(element::i64, Shape{}, {1});
+    const auto two_node = v0::Constant::create(element::i64, Shape{}, {2});
 
-    const auto data_shape = std::make_shared<default_opset::ShapeOf>(data);
-    const auto data_rank = std::make_shared<default_opset::ShapeOf>(data_shape);
-    const auto data_rank_as_scalar = std::make_shared<default_opset::Squeeze>(data_rank);
+    const auto data_shape = std::make_shared<v3::ShapeOf>(data);
+    const auto data_rank = std::make_shared<v3::ShapeOf>(data_shape);
+    const auto data_rank_as_scalar = std::make_shared<v0::Squeeze>(data_rank);
 
-    const auto reduce_axes =
-        std::make_shared<default_opset::Range>(two_node, data_rank_as_scalar, one_node, element::i64);
+    const auto reduce_axes = std::make_shared<v4::Range>(two_node, data_rank_as_scalar, one_node, element::i64);
 
-    return {std::make_shared<default_opset::ReduceMean>(data, reduce_axes, true)};
+    return {std::make_shared<v1::ReduceMean>(data, reduce_axes, true)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp
index 6d267bab74ba61..e503ce011cc4ab 100644
--- a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp
+++ b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp
@@ -7,18 +7,17 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
-/// \brief Convert ONNX GlobalAveragePool operation to an nGraph node.
+/// \brief Convert ONNX GlobalAveragePool operation to an OV node.
 ///
 /// \param node   The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of ONNX
+/// \return The vector containing OV nodes producing output of ONNX
 ///         GlobalAveragePool operation.
 OutputVector global_average_pool(const Node& node);
 
diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp
index e7a90f31af0635..2a28a21eeb2c5c 100644
--- a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp
+++ b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp
@@ -4,11 +4,13 @@
 
 #include "op/global_max_pool.hpp"
 
-#include <memory>
-#include <vector>
+#include "openvino/op/constant.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/reduce_max.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/squeeze.hpp"
 
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -27,18 +29,17 @@ OutputVector global_max_pool(const Node& node) {
     // Expected spatial dims indexes: [2, 3, 4]
     auto data = node.get_ng_inputs()[0];
 
-    const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0});
-    const auto one_node = default_opset::Constant::create(element::i64, Shape{}, {1});
-    const auto two_node = default_opset::Constant::create(element::i64, Shape{}, {2});
+    const auto zero_node = v0::Constant::create(element::i64, Shape{}, {0});
+    const auto one_node = v0::Constant::create(element::i64, Shape{}, {1});
+    const auto two_node = v0::Constant::create(element::i64, Shape{}, {2});
 
-    const auto data_shape = std::make_shared<default_opset::ShapeOf>(data);
-    const auto data_rank = std::make_shared<default_opset::ShapeOf>(data_shape);
-    const auto data_rank_as_scalar = std::make_shared<default_opset::Squeeze>(data_rank);
+    const auto data_shape = std::make_shared<v3::ShapeOf>(data);
+    const auto data_rank = std::make_shared<v3::ShapeOf>(data_shape);
+    const auto data_rank_as_scalar = std::make_shared<v0::Squeeze>(data_rank);
 
-    const auto reduce_axes =
-        std::make_shared<default_opset::Range>(two_node, data_rank_as_scalar, one_node, element::i64);
+    const auto reduce_axes = std::make_shared<v4::Range>(two_node, data_rank_as_scalar, one_node, element::i64);
 
-    return {std::make_shared<default_opset::ReduceMax>(data, reduce_axes, true)};
+    return {std::make_shared<v1::ReduceMax>(data, reduce_axes, true)};
 }
 
 }  // namespace set_1
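A standalone C++ sketch (not part of the patch) showing what the Range(2, rank, 1) subgraph built in both global pools evaluates to: it simply enumerates the spatial axes, so each pool reduces to ReduceMean/ReduceMax over those axes with keep_dims = true.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    const int64_t rank = 4;  // e.g. an NCHW input
    std::vector<int64_t> reduce_axes;
    for (int64_t axis = 2; axis < rank; ++axis) {  // Range(start = 2, stop = rank, step = 1)
        reduce_axes.push_back(axis);
    }
    assert((reduce_axes == std::vector<int64_t>{2, 3}));  // a rank-5 NCDHW input would give {2, 3, 4}
    return 0;
}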
diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp
index f3e1c6221f30b0..2b9053ddf4528b 100644
--- a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp
+++ b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp
@@ -7,18 +7,17 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
-/// \brief Convert ONNX GlobalMaxPool operation to an nGraph node.
+/// \brief Convert ONNX GlobalMaxPool operation to an OV node.
 ///
 /// \param node   The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of ONNX
+/// \return The vector containing OV nodes producing output of ONNX
 ///         GlobalMaxPool operation.
 OutputVector global_max_pool(const Node& node);
 
diff --git a/src/frontends/onnx/frontend/src/op/greater.hpp b/src/frontends/onnx/frontend/src/op/greater.hpp
index f39f1744ca34c1..da9d950c3353be 100644
--- a/src/frontends/onnx/frontend/src/op/greater.hpp
+++ b/src/frontends/onnx/frontend/src/op/greater.hpp
@@ -7,19 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include <memory>
-
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/greater.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/greater.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector greater(const Node& node) {
-    return {std::make_shared<default_opset::Greater>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
+    return {std::make_shared<ov::op::v1::Greater>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp
index ce239143c7edd5..9eb595e216f62d 100644
--- a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp
+++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp
@@ -4,11 +4,11 @@
 
 #include "op/greater_or_equal.hpp"
 
-#include <memory>
-#include <vector>
-
-#include "default_opset.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/greater.hpp"
+#include "openvino/op/greater_eq.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -22,7 +22,7 @@ OutputVector greater_or_equal(const Node& node) {
     FRONT_END_GENERAL_CHECK(A.get_element_type() != ov::element::bf16 && B.get_element_type() != ov::element::bf16,
                             "The input data bfloat16 isn't supported in opset 12");
 
-    const auto C = std::make_shared<default_opset::GreaterEqual>(A, B);
+    const auto C = std::make_shared<v1::GreaterEqual>(A, B);
 
     return {C};
 }
@@ -33,7 +33,7 @@ OutputVector greater_or_equal(const Node& node) {
     const auto A = node.get_ng_inputs().at(0);
     const auto B = node.get_ng_inputs().at(1);
 
-    const auto C = std::make_shared<default_opset::GreaterEqual>(A, B);
+    const auto C = std::make_shared<v1::GreaterEqual>(A, B);
 
     return {C};
 }
diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp
index c623c17b964e06..30e00067477fba 100644
--- a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp
+++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.cpp b/src/frontends/onnx/frontend/src/op/grid_sample.cpp
index 63129b4c3dec18..651ca69f5e3d89 100644
--- a/src/frontends/onnx/frontend/src/op/grid_sample.cpp
+++ b/src/frontends/onnx/frontend/src/op/grid_sample.cpp
@@ -4,7 +4,9 @@
 
 #include "op/grid_sample.hpp"
 
-#include "openvino/opsets/opset9.hpp"
+#include "openvino/op/grid_sample.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -15,16 +17,16 @@ OutputVector grid_sample(const Node& node) {
     const auto data = node.get_ng_inputs().at(0);
     const auto grid = node.get_ng_inputs().at(1);
 
-    ov::opset9::GridSample::Attributes attributes{};
+    v9::GridSample::Attributes attributes{};
     attributes.align_corners = node.get_attribute_value<int64_t>("align_corners", 0);
-    attributes.mode = EnumNames<ov::opset9::GridSample::InterpolationMode>::as_enum(
+    attributes.mode = EnumNames<v9::GridSample::InterpolationMode>::as_enum(
         node.get_attribute_value<std::string>("mode", "bilinear"));
-    attributes.padding_mode = EnumNames<ov::opset9::GridSample::PaddingMode>::as_enum(
-        node.get_attribute_value<std::string>("padding_mode", "zeros"));
+    attributes.padding_mode =
+        EnumNames<v9::GridSample::PaddingMode>::as_enum(node.get_attribute_value<std::string>("padding_mode", "zeros"));
 
-    return {std::make_shared<ov::opset9::GridSample>(data, grid, attributes)};
+    return {std::make_shared<v9::GridSample>(data, grid, attributes)};
 }
 }  // namespace set_1
 }  // namespace op
diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.hpp b/src/frontends/onnx/frontend/src/op/grid_sample.hpp
index 5c2a4715fbe2f6..b081064bda7196 100644
--- a/src/frontends/onnx/frontend/src/op/grid_sample.hpp
+++ b/src/frontends/onnx/frontend/src/op/grid_sample.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.cpp b/src/frontends/onnx/frontend/src/op/group_normalization.cpp
index b0dc8f786d8aef..1217ed03232e14 100644
--- a/src/frontends/onnx/frontend/src/op/group_normalization.cpp
+++ b/src/frontends/onnx/frontend/src/op/group_normalization.cpp
@@ -4,7 +4,16 @@
 
 #include "op/group_normalization.hpp"
 
-#include "default_opset.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/gather.hpp"
+#include "openvino/op/group_normalization.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/unsqueeze.hpp"
+
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -22,26 +31,25 @@ OutputVector group_normalization(const Node& node) {
     const auto eps = node.get_attribute_value<float>("epsilon", 1e-05f);
     const auto num_groups = node.get_attribute_value<int64_t>("num_groups");
 
-    const auto zero = default_opset::Constant::create(element::i64, Shape{1}, {0});
-    const auto one = default_opset::Constant::create(element::i64, Shape{1}, {1});
-    const auto c_dim =
-        std::make_shared<default_opset::Gather>(std::make_shared<default_opset::ShapeOf>(data), one, zero);
-    const auto g_dim = default_opset::Constant::create(element::i64, Shape{1}, {num_groups});
+    const auto zero = v0::Constant::create(element::i64, Shape{1}, {0});
+    const auto one = v0::Constant::create(element::i64, Shape{1}, {1});
+    const auto c_dim = std::make_shared<v8::Gather>(std::make_shared<v3::ShapeOf>(data), one, zero);
+    const auto g_dim = v0::Constant::create(element::i64, Shape{1}, {num_groups});
 
-    const auto c_g_div = std::make_shared<default_opset::Divide>(c_dim, g_dim);
+    const auto c_g_div = std::make_shared<v1::Divide>(c_dim, g_dim);
 
     // Adjust scale and bias shape, [G] -> [G, C/G] -> [C]
-    const auto scale_unsq = std::make_shared<default_opset::Unsqueeze>(scale, one);
+    const auto scale_unsq = std::make_shared<v0::Unsqueeze>(scale, one);
     const auto broadcast_scale =
-        std::make_shared<default_opset::Broadcast>(scale_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL);
-    const auto c_scale = std::make_shared<default_opset::Reshape>(broadcast_scale, c_dim, false);
+        std::make_shared<v3::Broadcast>(scale_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL);
+    const auto c_scale = std::make_shared<v1::Reshape>(broadcast_scale, c_dim, false);
 
-    const auto bias_unsq = std::make_shared<default_opset::Unsqueeze>(bias, one);
+    const auto bias_unsq = std::make_shared<v0::Unsqueeze>(bias, one);
     const auto broadcast_bias =
-        std::make_shared<default_opset::Broadcast>(bias_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL);
-    const auto c_bias = std::make_shared<default_opset::Reshape>(broadcast_bias, c_dim, false);
+        std::make_shared<v3::Broadcast>(bias_unsq, c_g_div, ov::op::BroadcastType::BIDIRECTIONAL);
+    const auto c_bias = std::make_shared<v1::Reshape>(broadcast_bias, c_dim, false);
 
-    return {std::make_shared<default_opset::GroupNormalization>(data, c_scale, c_bias, num_groups, eps)};
+    return {std::make_shared<v12::GroupNormalization>(data, c_scale, c_bias, num_groups, eps)};
 }
 }  // namespace set_1
 }  // namespace op
diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.hpp b/src/frontends/onnx/frontend/src/op/group_normalization.hpp
index fbd38d3667d4dd..4484c6a052e7cc 100644
--- a/src/frontends/onnx/frontend/src/op/group_normalization.hpp
+++ b/src/frontends/onnx/frontend/src/op/group_normalization.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
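A standalone C++ sketch (not part of the patch) of the [G] -> [G, C/G] -> [C] adjustment above: each per-group value is repeated C/G times, turning the ONNX per-group scale/bias into the per-channel scale/bias that GroupNormalization consumes.

#include <cassert>
#include <vector>

int main() {
    const size_t C = 6, G = 2;                      // channels, num_groups
    const std::vector<float> scale = {0.5f, 2.0f};  // one value per group, shape [G]
    std::vector<float> c_scale;                     // shape [C] after the adjustment
    for (float s : scale) {
        // Unsqueeze to [G, 1], bidirectional Broadcast to [G, C/G], Reshape to [C]
        c_scale.insert(c_scale.end(), C / G, s);
    }
    assert((c_scale == std::vector<float>{0.5f, 0.5f, 0.5f, 2.0f, 2.0f, 2.0f}));
    return 0;
}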
diff --git a/src/frontends/onnx/frontend/src/op/gru.cpp b/src/frontends/onnx/frontend/src/op/gru.cpp
index 4a88d3cbfc0ab9..fa38b87c7c7e44 100644
--- a/src/frontends/onnx/frontend/src/op/gru.cpp
+++ b/src/frontends/onnx/frontend/src/op/gru.cpp
@@ -4,16 +4,17 @@
 
 #include "op/gru.hpp"
 
-#include <memory>
-#include <vector>
-
-#include "default_opset.hpp"
-#include "ngraph/shape.hpp"
 #include "onnx_import/core/null_node.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/gru_sequence.hpp"
 #include "ov_models/ov_builders/reshape.hpp"
 #include "ov_models/ov_builders/split.hpp"
 #include "utils/recurrent.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -34,8 +35,8 @@ struct GRUInputMap : public recurrent::OpInputMap {
             // gates_count * 2 since B is: [Wb, Rb]
             const int split_parts = 2 * 3;
             const auto split_bias = ov::op::util::split(bias, split_parts, 1);
-            const auto wr_z_bias = std::make_shared<default_opset::Add>(split_bias.at(0), split_bias.at(3));
-            const auto wr_r_bias = std::make_shared<default_opset::Add>(split_bias.at(1), split_bias.at(4));
+            const auto wr_z_bias = std::make_shared<v1::Add>(split_bias.at(0), split_bias.at(3));
+            const auto wr_r_bias = std::make_shared<v1::Add>(split_bias.at(1), split_bias.at(4));
             // The result has shape: [num_directions, 4 * hidden_size]
             // and data layout:
             // [
@@ -45,17 +46,17 @@ struct GRUInputMap : public recurrent::OpInputMap {
             //      [Rb_h],
             //      // num_directions times
             // ]
-            m_map[recurrent::OpInput::B] = std::make_shared<default_opset::Concat>(
-                OutputVector{wr_z_bias, wr_r_bias, split_bias.at(2), split_bias.at(5)},
-                1);
+            m_map[recurrent::OpInput::B] =
+                std::make_shared<v0::Concat>(OutputVector{wr_z_bias, wr_r_bias, split_bias.at(2), split_bias.at(5)},
+                                             1);
         } else {
             const std::size_t hidden_size = m_map[recurrent::OpInput::R].get_shape().back();
             const std::size_t num_directions = m_map[recurrent::OpInput::W].get_shape().front();
 
             m_map[recurrent::OpInput::B] =
-                std::make_shared<default_opset::Constant>(el_type,
-                                                          Shape{num_directions, (gates_count + 1) * hidden_size},
-                                                          0.f);
+                std::make_shared<v0::Constant>(el_type,
+                                               Shape{num_directions, (gates_count + 1) * hidden_size},
+                                               0.f);
         }
     }
 }
@@ -81,19 +82,19 @@ OutputVector gru(const Node& node) {
     GRUInputMap input_map{node, gates_count};
     GRUAttributes attributes{node};
 
-    auto gru_sequence = std::make_shared<default_opset::GRUSequence>(input_map.at(recurrent::OpInput::X),
-                                                                     input_map.at(recurrent::OpInput::INIT_H),
-                                                                     input_map.at(recurrent::OpInput::SEQ_LENGTHS),
-                                                                     input_map.at(recurrent::OpInput::W),
-                                                                     input_map.at(recurrent::OpInput::R),
-                                                                     input_map.at(recurrent::OpInput::B),
-                                                                     attributes.m_hidden_size,
-                                                                     attributes.m_direction,
-                                                                     attributes.m_activations,
-                                                                     attributes.m_activations_alpha,
-                                                                     attributes.m_activations_beta,
-                                                                     attributes.m_clip_threshold,
-                                                                     attributes.m_linear_before_reset);
+    auto gru_sequence = std::make_shared<v5::GRUSequence>(input_map.at(recurrent::OpInput::X),
+                                                          input_map.at(recurrent::OpInput::INIT_H),
+                                                          input_map.at(recurrent::OpInput::SEQ_LENGTHS),
+                                                          input_map.at(recurrent::OpInput::W),
+                                                          input_map.at(recurrent::OpInput::R),
+                                                          input_map.at(recurrent::OpInput::B),
+                                                          attributes.m_hidden_size,
+                                                          attributes.m_direction,
+                                                          attributes.m_activations,
+                                                          attributes.m_activations_alpha,
+                                                          attributes.m_activations_beta,
+                                                          attributes.m_clip_threshold,
+                                                          attributes.m_linear_before_reset);
 
     const auto Y = gru_sequence->output(0);
     const auto Y_h = gru_sequence->output(1);
diff --git a/src/frontends/onnx/frontend/src/op/gru.hpp b/src/frontends/onnx/frontend/src/op/gru.hpp
index 94f0dcba72533e..b77d6c0ab66246 100644
--- a/src/frontends/onnx/frontend/src/op/gru.hpp
+++ b/src/frontends/onnx/frontend/src/op/gru.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp
index 702e0695422037..f59b6e3e3bf7d5 100644
--- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp
+++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp
@@ -7,11 +7,17 @@
 
 #include <math.h>
 
-#include <memory>
-
-#include "default_opset.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/cos.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/subtract.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -27,43 +33,38 @@ OutputVector hammingwindow(const Node& node) {
 
     // Weights as described in ONNX HammingWindow docs
     // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow
-    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
-    const auto a_0 = std::make_shared<default_opset::Divide>(
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{25.0f}),
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{46.0f}));
-    const auto a_1 = std::make_shared<default_opset::Subtract>(
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f}),
+    const auto float_size = std::make_shared<v0::Convert>(size, ov::element::f32);
+    const auto a_0 = std::make_shared<v1::Divide>(
+        std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{25.0f}),
+        std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{46.0f}));
+    const auto a_1 = std::make_shared<v1::Subtract>(
+        std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f}),
         a_0);
 
-    const auto start =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
-    const auto one_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
-    const auto two_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
-    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
-    const auto pi =
-        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    const auto start = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto range = std::make_shared<v4::Range>(start, size, one_const, ov::element::f32);
+    const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
     std::shared_ptr<ov::Node> factor;
     if (periodic) {
-        factor = std::make_shared<default_opset::Multiply>(
+        factor = std::make_shared<v1::Multiply>(
             range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    float_size));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const), float_size));
     } else {
-        factor = std::make_shared<default_opset::Multiply>(
+        factor = std::make_shared<v1::Multiply>(
             range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const),
+                                         std::make_shared<v1::Subtract>(float_size, one_const)));
     }
 
-    const auto cos = std::make_shared<default_opset::Cos>(factor);
-    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
-    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);
+    const auto cos = std::make_shared<v0::Cos>(factor);
+    const auto scaled_cos = std::make_shared<v1::Multiply>(cos, a_1);
+    const auto y_values = std::make_shared<v1::Subtract>(a_0, scaled_cos);
     if (output_datatype == element::f32) {
         return {y_values};
     } else {
-        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+        return {std::make_shared<v0::Convert>(y_values, output_datatype)};
     }
 }
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp
index d088b4105abc3a..c6523178aae138 100644
--- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp
+++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp
@@ -6,7 +6,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
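A standalone C++ sketch (not part of the patch) of the formula the subgraph above evaluates: w[n] = a0 - a1 * cos(2*pi*n / N'), with a0 = 25/46 and a1 = 1 - a0, where N' = N for the periodic window and N - 1 for the symmetric one. HannWindow below is the same shape with a0 = a1 = 0.5.

#define _USE_MATH_DEFINES
#include <cmath>
#include <vector>

std::vector<float> hamming_window(int N, bool periodic) {
    const float a0 = 25.0f / 46.0f;
    const float a1 = 1.0f - a0;
    const float denom = periodic ? static_cast<float>(N) : static_cast<float>(N - 1);
    std::vector<float> w(N);
    for (int n = 0; n < N; ++n) {
        w[n] = a0 - a1 * std::cos(2.0f * static_cast<float>(M_PI) * n / denom);
    }
    return w;  // w[0] == a0 - a1, roughly 0.087, in either mode
}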
diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp
index 47911bf1771c36..33d306636eee1a 100644
--- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp
+++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp
@@ -7,11 +7,17 @@
 
 #include <math.h>
 
-#include <memory>
-
-#include "default_opset.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/cos.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/subtract.hpp"
 #include "utils/common.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -27,39 +33,34 @@ OutputVector hannwindow(const Node& node) {
 
     // Weights as described in ONNX HannWindow docs
    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow
-    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
-    const auto a_0 = std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.5f});
+    const auto float_size = std::make_shared<v0::Convert>(size, ov::element::f32);
+    const auto a_0 = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.5f});
     const auto a_1 = a_0;
 
-    const auto start =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
-    const auto one_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
-    const auto two_const =
-        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
-    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
-    const auto pi =
-        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    const auto start = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const = std::make_shared<v0::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto range = std::make_shared<v4::Range>(start, size, one_const, ov::element::f32);
+    const auto pi = v0::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
     std::shared_ptr<ov::Node> factor;
     if (periodic) {
-        factor = std::make_shared<default_opset::Multiply>(
+        factor = std::make_shared<v1::Multiply>(
             range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    float_size));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const), float_size));
     } else {
-        factor = std::make_shared<default_opset::Multiply>(
+        factor = std::make_shared<v1::Multiply>(
             range,
-            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
-                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+            std::make_shared<v1::Divide>(std::make_shared<v1::Multiply>(pi, two_const),
+                                         std::make_shared<v1::Subtract>(float_size, one_const)));
     }
 
-    const auto cos = std::make_shared<default_opset::Cos>(factor);
-    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
-    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);
+    const auto cos = std::make_shared<v0::Cos>(factor);
+    const auto scaled_cos = std::make_shared<v1::Multiply>(cos, a_1);
+    const auto y_values = std::make_shared<v1::Subtract>(a_0, scaled_cos);
     if (output_datatype == element::f32) {
         return {y_values};
     } else {
-        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+        return {std::make_shared<v0::Convert>(y_values, output_datatype)};
     }
 }
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp
index 0c9e6993048ef3..0798b5f132208b 100644
--- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp
+++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp
@@ -6,7 +6,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp
index 0d7703373671a2..d23deeae05fd2c 100644
--- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp
+++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp
@@ -4,9 +4,10 @@
 
 #include "op/hard_sigmoid.hpp"
 
-#include <memory>
+#include "openvino/op/constant.hpp"
+#include "openvino/op/hard_sigmoid.hpp"
 
-#include "default_opset.hpp"
+using namespace ov::op;
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -17,16 +18,15 @@ OutputVector hard_sigmoid(const Node& node) {
     const auto data = node.get_ng_inputs().at(0);
 
     const auto alpha =
-        default_opset::Constant::create(data.get_element_type(),
-                                        Shape{},
-                                        std::vector<double>{node.get_attribute_value<double>("alpha", 0.2)});
+        v0::Constant::create(data.get_element_type(),
+                             Shape{},
+                             std::vector<double>{node.get_attribute_value<double>("alpha", 0.2)});
 
-    const auto beta =
-        default_opset::Constant::create(data.get_element_type(),
-                                        Shape{},
-                                        std::vector<double>{node.get_attribute_value<double>("beta", 0.5)});
+    const auto beta = v0::Constant::create(data.get_element_type(),
+                                           Shape{},
+                                           std::vector<double>{node.get_attribute_value<double>("beta", 0.5)});
 
-    return {std::make_shared<default_opset::HardSigmoid>(data, alpha, beta)};
+    return {std::make_shared<v0::HardSigmoid>(data, alpha, beta)};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp
index 66d4c0b5ac04f1..e3161760a75741 100644
--- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp
+++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/hard_swish.hpp b/src/frontends/onnx/frontend/src/op/hard_swish.hpp
index 7a771ab39d2cdd..50c2e7000364e5 100644
--- a/src/frontends/onnx/frontend/src/op/hard_swish.hpp
+++ b/src/frontends/onnx/frontend/src/op/hard_swish.hpp
@@ -7,16 +7,15 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/hswish.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector hard_swish(const Node& node) {
-    return {std::make_shared<default_opset::HSwish>(node.get_ng_inputs().at(0))};
+    return {std::make_shared<ov::op::v4::HSwish>(node.get_ng_inputs().at(0))};
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp
index cb799fb66d8e6f..3bb60bfee46e9d 100644
--- a/src/frontends/onnx/frontend/src/op/hardmax.cpp
+++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp
@@ -5,7 +5,7 @@
 #include "op/hardmax.hpp"
 
 #include "exceptions.hpp"
-#include "ngraph/validation_util.hpp"
+#include "openvino/core/validation_util.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/convert.hpp"
 #include "openvino/op/gather.hpp"
@@ -17,6 +17,8 @@
 #include "utils/common.hpp"
 #include "utils/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -29,7 +31,7 @@ OutputVector hardmax(const Node& node) {
     auto axis = node.get_attribute_value<std::int64_t>("axis", 1);
     if (input_shape.rank().is_static()) {
 OPENVINO_SUPPRESS_DEPRECATED_START
-        axis = ngraph::normalize_axis(node.get_description(), axis, input_shape.rank());
+        axis = ov::normalize_axis(node.get_description(), axis, input_shape.rank());
 OPENVINO_SUPPRESS_DEPRECATED_END
     }
 
@@ -37,29 +39,26 @@ OutputVector hardmax(const Node& node) {
     const auto coerced_tensor = ov::op::util::flatten(input, static_cast<int>(axis));
 
     const auto coerced_tensor_shape = std::make_shared<ov::op::v3::ShapeOf>(coerced_tensor);
-    Output<ov::Node> row_size =
-        std::make_shared<ov::op::v8::Gather>(coerced_tensor_shape,
-                                             ov::op::v0::Constant::create(element::i64, {1}, {1}),
-                                             ov::op::v0::Constant::create(element::i64, {}, {0}));
+    Output<ov::Node> row_size = std::make_shared<v8::Gather>(coerced_tensor_shape,
+                                                             ov::op::v0::Constant::create(element::i64, {1}, {1}),
+                                                             ov::op::v0::Constant::create(element::i64, {}, {0}));
     row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size);
 
     const auto indices_axis = 1;
-    const auto topk =
-        std::make_shared<ov::op::v11::TopK>(coerced_tensor,
-                                            ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}),
-                                            indices_axis,
-                                            ov::op::v11::TopK::Mode::MAX,
-                                            ov::op::v11::TopK::SortType::NONE);
+    const auto topk = std::make_shared<v11::TopK>(coerced_tensor,
+                                                  ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}),
+                                                  indices_axis,
+                                                  ov::op::v11::TopK::Mode::MAX,
+                                                  ov::op::v11::TopK::SortType::NONE);
 
-    const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1});
-    const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0});
+    const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1});
+    const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0});
 
-    const auto results =
-        std::make_shared<ov::op::v1::OneHot>(topk->output(1), row_size, on_value, off_value, indices_axis);
-    const auto converted_results = std::make_shared<ov::op::v0::Convert>(results, input.get_element_type());
+    const auto results = std::make_shared<v1::OneHot>(topk->output(1), row_size, on_value, off_value, indices_axis);
+    const auto converted_results = std::make_shared<v0::Convert>(results, input.get_element_type());
 
     const auto output_shape = std::make_shared<ov::op::v3::ShapeOf>(input);
 
-    return {std::make_shared<ov::op::v1::Reshape>(converted_results, output_shape, false)};
+    return {std::make_shared<v1::Reshape>(converted_results, output_shape, false)};
 }
 
 }  // namespace set_1
@@ -70,31 +69,29 @@ OutputVector hardmax(const Node& node) {
 
     auto axis = node.get_attribute_value<std::int64_t>("axis", -1);
 OPENVINO_SUPPRESS_DEPRECATED_START
-    axis = ngraph::normalize_axis(node.get_description(), axis, input_shape.rank());
+    axis = ov::normalize_axis(node.get_description(), axis, input_shape.rank());
 OPENVINO_SUPPRESS_DEPRECATED_END
 
     const auto input_runtime_shape = std::make_shared<ov::op::v3::ShapeOf>(input);
-    Output<ov::Node> row_size =
-        std::make_shared<ov::op::v8::Gather>(input_runtime_shape,
-                                             ov::op::v0::Constant::create(element::i64, {1}, {axis}),
-                                             ov::op::v0::Constant::create(element::i64, {}, {0}));
+    Output<ov::Node> row_size = std::make_shared<v8::Gather>(input_runtime_shape,
+                                                             ov::op::v0::Constant::create(element::i64, {1}, {axis}),
+                                                             ov::op::v0::Constant::create(element::i64, {}, {0}));
 
     row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size);
 
-    const auto topk =
-        std::make_shared<ov::op::v11::TopK>(input,
-                                            ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}),
-                                            axis,
-                                            ov::op::v11::TopK::Mode::MAX,
-                                            ov::op::v11::TopK::SortType::NONE);
+    const auto topk = std::make_shared<v11::TopK>(input,
+                                                  ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}),
+                                                  axis,
+                                                  ov::op::v11::TopK::Mode::MAX,
+                                                  ov::op::v11::TopK::SortType::NONE);
 
-    const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1});
-    const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0});
+    const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1});
+    const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0});
 
-    const auto results = std::make_shared<ov::op::v1::OneHot>(topk->output(1), row_size, on_value, off_value, axis);
-    const auto converted_results = std::make_shared<ov::op::v0::Convert>(results, input.get_element_type());
+    const auto results = std::make_shared<v1::OneHot>(topk->output(1), row_size, on_value, off_value, axis);
+    const auto converted_results = std::make_shared<v0::Convert>(results, input.get_element_type());
 
     const auto output_shape = std::make_shared<ov::op::v3::ShapeOf>(input);
 
-    return {std::make_shared<ov::op::v1::Reshape>(converted_results, output_shape, false)};
+    return {std::make_shared<v1::Reshape>(converted_results, output_shape, false)};
 }
 
 }  // namespace set_13
diff --git a/src/frontends/onnx/frontend/src/op/hardmax.hpp b/src/frontends/onnx/frontend/src/op/hardmax.hpp
index 2bc5bf414472d7..820bac69e5e3ab 100644
--- a/src/frontends/onnx/frontend/src/op/hardmax.hpp
+++ b/src/frontends/onnx/frontend/src/op/hardmax.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.cpp b/src/frontends/onnx/frontend/src/op/instance_norm.cpp
index a082aed15cd869..73addcfa76a55a 100644
--- a/src/frontends/onnx/frontend/src/op/instance_norm.cpp
+++ b/src/frontends/onnx/frontend/src/op/instance_norm.cpp
@@ -10,11 +10,7 @@
 #include "default_opset.hpp"
 #include "exceptions.hpp"
 #include "ngraph/axis_set.hpp"
-#include "ngraph/op/add.hpp"
 #include "ngraph/op/divide.hpp"
-#include "ngraph/op/multiply.hpp"
-#include "ngraph/op/sqrt.hpp"
-#include "ngraph/op/subtract.hpp"
 #include "ngraph/partial_shape.hpp"
 #include "utils/common.hpp"
 #include "utils/reshape.hpp"
diff --git a/src/frontends/onnx/frontend/src/op/less.hpp b/src/frontends/onnx/frontend/src/op/less.hpp
index 9903323219033e..8f5350476645c3 100644
--- a/src/frontends/onnx/frontend/src/op/less.hpp
+++ b/src/frontends/onnx/frontend/src/op/less.hpp
@@ -11,7 +11,6 @@
 OPENVINO_SUPPRESS_DEPRECATED_START
 #include "default_opset.hpp"
 #include "ngraph/node.hpp"
-#include "ngraph/op/less.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/loop.cpp b/src/frontends/onnx/frontend/src/op/loop.cpp
index 40dcb2ef103c9f..7cc7c16a3ee9a4 100644
--- a/src/frontends/onnx/frontend/src/op/loop.cpp
+++ b/src/frontends/onnx/frontend/src/op/loop.cpp
@@ -4,17 +4,18 @@
 
 #include "op/loop.hpp"
 
-#include <limits>
-#include <memory>
-
 #include "core/graph.hpp"
-#include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/op/util/op_types.hpp"
 #include "onnx_import/core/null_node.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/loop.hpp"
+#include "openvino/op/unsqueeze.hpp"
+#include "openvino/op/util/op_types.hpp"
 #include "utils/reshape.hpp"
 
+using namespace ov::op;
+
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
 namespace onnx_import {
@@ -25,7 +26,7 @@ namespace {
 /// iterations.
 /// It allows replacing the termination condition body output with a
 /// Constant.
-/// As a result ngraph Loop shape inference is able to handle more
+/// As a result OV Loop shape inference is able to handle more
 /// cases.
 ///
 /// \param[in] cond_in   boolean input to the loop body depicting loop termination condition
 ///
 /// \param[in] cond_out  loop termination condition computed after each iteration
 ///
 /// \return true if termination condition is not modified during loop iterations, false otherwise.
-bool is_termination_condition_always_true(const ngraph::Node* cond_in, const ngraph::Node* cond_out) {
+bool is_termination_condition_always_true(const ov::Node* cond_in, const ov::Node* cond_out) {
     return cond_in == cond_out;
 }
 }  // namespace
@@ -55,25 +56,24 @@ OutputVector loop(const Node& node) {
     }
 
     // optional inputs
-    Output<ngraph::Node> trip_count;
+    Output<ov::Node> trip_count;
     // trip count skipped or has value max(int64_t) means infinite loop
     if (ov::op::util::is_null(ng_inputs.at(0)) ||
-        (ngraph::op::is_constant(ng_inputs.at(0).get_node_shared_ptr()) &&
-         ov::as_type_ptr<default_opset::Constant>(ng_inputs.at(0).get_node_shared_ptr())->cast_vector<int64_t>()[0] ==
+        (ov::op::util::is_constant(ng_inputs.at(0).get_node_shared_ptr()) &&
+         ov::as_type_ptr<v0::Constant>(ng_inputs.at(0).get_node_shared_ptr())->cast_vector<int64_t>()[0] ==
              std::numeric_limits<int64_t>::max())) {
         // -1 means infinite Loop
-        trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1});
+        trip_count = v0::Constant::create(ov::element::i64, {1}, {-1});
     } else {
         trip_count = ng_inputs.at(0);
     }
 
-    Output<ngraph::Node> termination_cond;  // true means that first iteration should be run
+    Output<ov::Node> termination_cond;      // true means that first iteration should be run
     if (ov::op::util::is_null(ng_inputs.at(1).get_node_shared_ptr()))  // termination condition skipped
     {
-        termination_cond = ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
-    } else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
-               ov::as_type_ptr<default_opset::Constant>(ng_inputs.at(1).get_node_shared_ptr())
-                       ->cast_vector<bool>()[0] == false) {
+        termination_cond = v0::Constant::create(ov::element::boolean, {1}, {true});
+    } else if (ov::op::util::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
+               ov::as_type_ptr<v0::Constant>(ng_inputs.at(1).get_node_shared_ptr())->cast_vector<bool>()[0] == false) {
         // no iteration is performed so initial values are returned
         OutputVector node_outputs;
         // final values
@@ -90,17 +90,17 @@ OutputVector loop(const Node& node) {
     }
 
     const int64_t concat_axis = 0;
-    const auto concat_axis_const = ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis});
+    const auto concat_axis_const = v0::Constant::create(ov::element::i64, {1}, {concat_axis});
     // add dimension along which scan outputs will be concatenated
     for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size(); ++i) {
-        body_outputs[i] = std::make_shared<default_opset::Unsqueeze>(body_outputs[i], concat_axis_const);
+        body_outputs[i] = std::make_shared<v0::Unsqueeze>(body_outputs[i], concat_axis_const);
     }
 
     const auto& cond_in = body_inputs[1];
     const auto& cond_out = body_outputs[0];
     // optimization that improves OV Loop shape inference
     if (is_termination_condition_always_true(cond_in.get(), cond_out.get_node())) {
-        body_outputs[0] = ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
+        body_outputs[0] = v0::Constant::create(ov::element::boolean, {1}, {true});
     }
 
     CHECK_VALID_NODE(node,
@@ -123,9 +123,9 @@ OutputVector loop(const Node& node) {
     ParameterVector body_params(body_inputs.begin() + 2, body_inputs.end());
     body_params.emplace(body_params.begin(), body_inputs[0]);  // current iteration body input
 
-    const auto body = std::make_shared<Function>(body_outputs, body_params);
-    auto loop = std::make_shared<default_opset::Loop>(trip_count, termination_cond);
-    default_opset::Loop::SpecialBodyPorts spec_ports{0, 0};
+    const auto body = std::make_shared<ov::Model>(body_outputs, body_params);
+    auto loop = std::make_shared<v5::Loop>(trip_count, termination_cond);
+    v5::Loop::SpecialBodyPorts spec_ports{0, 0};
     loop->set_special_body_ports(spec_ports);
     loop->set_function(body);
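A standalone C++ sketch (not part of the patch) of the trip-count normalization performed above: an absent ONNX trip count or one pinned to max(int64_t) both become the -1 "run forever" marker that ov::op::v5::Loop understands.

#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>

int64_t normalize_trip_count(std::optional<int64_t> onnx_trip_count) {
    // An absent input and a constant equal to max(int64_t) both mean "no iteration limit".
    if (!onnx_trip_count || *onnx_trip_count == std::numeric_limits<int64_t>::max()) {
        return -1;  // v5::Loop convention for an infinite loop
    }
    return *onnx_trip_count;
}

int main() {
    assert(normalize_trip_count(std::nullopt) == -1);
    assert(normalize_trip_count(std::numeric_limits<int64_t>::max()) == -1);
    assert(normalize_trip_count(10) == 10);  // a real bound passes through unchanged
    return 0;
}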
diff --git a/src/frontends/onnx/frontend/src/op/loop.hpp b/src/frontends/onnx/frontend/src/op/loop.hpp
index 9e7af71c3daab4..d5a67533367b69 100644
--- a/src/frontends/onnx/frontend/src/op/loop.hpp
+++ b/src/frontends/onnx/frontend/src/op/loop.hpp
@@ -7,21 +7,20 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 
 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
-/// \brief Creates nGraph node representing ONNX loop operator.
+/// \brief Creates OV node representing ONNX loop operator.
 ///
 /// \note Details available here:
 ///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop
 ///
 /// \param[in] node The input ONNX node representing this operation.
 ///
-/// \return Vector of nodes containing resulting nGraph nodes.
+/// \return Vector of nodes containing resulting OV nodes.
 ///
 OutputVector loop(const Node& node);
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp
index 495d34f119dc6f..8281f19fe33be1 100644
--- a/src/frontends/onnx/frontend/src/op/lstm.cpp
+++ b/src/frontends/onnx/frontend/src/op/lstm.cpp
@@ -14,9 +14,6 @@
 #include "default_opset.hpp"
 #include "exceptions.hpp"
 #include "ngraph/enum_names.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/lstm_sequence.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type.hpp"
@@ -58,18 +55,18 @@ struct LSTMNgInputMap {
         // Weight tensor for the gates.
         // Shape: [num_directions, 4*hidden_size, input_size]
         m_input_map[LSTMInput::LSTM_INPUT_W] =
-            ngraph::op::util::convert_lstm_node_format(ng_inputs.at(1),
-                                                       ngraph::op::util::LSTMWeightsFormat::IOFC,
-                                                       ngraph::op::util::LSTMWeightsFormat::FICO,
-                                                       1);
+            ov::op::util::convert_lstm_node_format(ng_inputs.at(1),
+                                                   ov::op::util::LSTMWeightsFormat::IOFC,
+                                                   ov::op::util::LSTMWeightsFormat::FICO,
+                                                   1);
 
         // The recurrence weight tensor.
         // The recurrence weight tensor.
         // Shape: [num_directions, 4*hidden_size, hidden_size]
         m_input_map[LSTMInput::LSTM_INPUT_R] =
-            ngraph::op::util::convert_lstm_node_format(ng_inputs.at(2),
-                                                       ngraph::op::util::LSTMWeightsFormat::IOFC,
-                                                       ngraph::op::util::LSTMWeightsFormat::FICO,
-                                                       1);
+            ov::op::util::convert_lstm_node_format(ng_inputs.at(2),
+                                                   ov::op::util::LSTMWeightsFormat::IOFC,
+                                                   ov::op::util::LSTMWeightsFormat::FICO,
+                                                   1);

         // Get dimensions needed for default inputs creation
         auto shape_of_x = std::make_shared<default_opset::ShapeOf>(m_input_map[LSTMInput::LSTM_INPUT_X]);
@@ -103,10 +100,10 @@ struct LSTMNgInputMap {
             m_input_map[LSTMInput::LSTM_INPUT_B] =
                 std::make_shared<default_opset::Add>(split_bias.at(0), split_bias.at(1));
             m_input_map[LSTMInput::LSTM_INPUT_B] =
-                ngraph::op::util::convert_lstm_node_format(m_input_map[LSTMInput::LSTM_INPUT_B],
-                                                           ngraph::op::util::LSTMWeightsFormat::IOFC,
-                                                           ngraph::op::util::LSTMWeightsFormat::FICO,
-                                                           1);
+                ov::op::util::convert_lstm_node_format(m_input_map[LSTMInput::LSTM_INPUT_B],
+                                                       ov::op::util::LSTMWeightsFormat::IOFC,
+                                                       ov::op::util::LSTMWeightsFormat::FICO,
+                                                       1);
         } else {
             auto b_shape = std::make_shared<default_opset::Concat>(
                 OutputVector{num_directions_node,
diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp
index 26f0100c92e171..a467b4cc82b452 100644
--- a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp
+++ b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp
@@ -4,11 +4,13 @@

 #include "op/matmul_integer.hpp"

-#include
-#include
-#include
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/matmul.hpp"
+#include "openvino/op/subtract.hpp"
+#include "openvino/op/unsqueeze.hpp"

-#include "default_opset.hpp"
+using namespace ov::op;

 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -20,33 +22,30 @@ OutputVector matmul_integer(const Node& node) {
     const auto& A = inputs.at(0);
     const auto& B = inputs.at(1);

-    const auto& A_zero_point =
-        (inputs.size() > 2) ? inputs.at(2) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0});
-    const auto& B_zero_point =
-        (inputs.size() > 3) ? inputs.at(3) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0});
+    const auto& A_zero_point = (inputs.size() > 2) ? inputs.at(2) : v0::Constant::create(ov::element::i32, {1}, {0});
+    const auto& B_zero_point = (inputs.size() > 3) ? inputs.at(3) : v0::Constant::create(ov::element::i32, {1}, {0});

-    const auto& converted_A = std::make_shared<default_opset::Convert>(A, element::i32);
-    const auto& converted_B = std::make_shared<default_opset::Convert>(B, element::i32);
+    const auto& converted_A = std::make_shared<v0::Convert>(A, element::i32);
+    const auto& converted_B = std::make_shared<v0::Convert>(B, element::i32);

-    const auto& converted_A_zero_point = std::make_shared<default_opset::Convert>(A_zero_point, element::i32);
-    const auto& converted_B_zero_point = std::make_shared<default_opset::Convert>(B_zero_point, element::i32);
+    const auto& converted_A_zero_point = std::make_shared<v0::Convert>(A_zero_point, element::i32);
+    const auto& converted_B_zero_point = std::make_shared<v0::Convert>(B_zero_point, element::i32);

     const auto& A_zero_point_rank = A_zero_point.get_partial_shape().rank();

-    Output<ngraph::Node> shifted_A;
+    Output<ov::Node> shifted_A;
     if (A_zero_point_rank.is_static() && A_zero_point_rank.get_length() == 1) {
-        const auto& one_node = ngraph::op::Constant::create(ngraph::element::i32, {1}, {1});
-        const auto& reshaped_A_zero_point =
-            std::make_shared<default_opset::Unsqueeze>(converted_A_zero_point, one_node);
+        const auto& one_node = v0::Constant::create(ov::element::i32, {1}, {1});
+        const auto& reshaped_A_zero_point = std::make_shared<v0::Unsqueeze>(converted_A_zero_point, one_node);

-        shifted_A = std::make_shared<default_opset::Subtract>(converted_A, reshaped_A_zero_point);
+        shifted_A = std::make_shared<v1::Subtract>(converted_A, reshaped_A_zero_point);
     } else {
-        shifted_A = std::make_shared<default_opset::Subtract>(converted_A, converted_A_zero_point);
+        shifted_A = std::make_shared<v1::Subtract>(converted_A, converted_A_zero_point);
     }

-    const auto& shifted_B = std::make_shared<default_opset::Subtract>(converted_B, converted_B_zero_point);
+    const auto& shifted_B = std::make_shared<v1::Subtract>(converted_B, converted_B_zero_point);

-    const auto& result = std::make_shared<default_opset::MatMul>(shifted_A, shifted_B);
+    const auto& result = std::make_shared<v0::MatMul>(shifted_A, shifted_B);

     return {result};
 }
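To make the decomposition above concrete: MatMulInteger computes Y = (A - A_zero_point) * (B - B_zero_point) with the arithmetic widened to i32 so the u8/i8 inputs cannot overflow. For example, with 1x1 operands A = 200 (u8), A_zero_point = 128, B = 100 (u8), B_zero_point = 128, the result is (200 - 128) * (100 - 128) = 72 * (-28) = -2016, which is representable in i32 but in neither 8-bit type. The Unsqueeze branch only reshapes a rank-1 per-row zero point to [M, 1] so it broadcasts across A's columns before the subtraction.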
diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp
index 3da61339848ca6..3bb07670c5778c 100644
--- a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp
+++ b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp
@@ -7,7 +7,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

 namespace ngraph {
@@ -18,7 +17,7 @@ namespace set_1 {
 ///
 /// \param node The ONNX node object representing this operation.
 ///
-/// \return The vector containing Ngraph nodes producing output of ONNX quantizied
+/// \return The vector containing OV nodes producing output of ONNX quantized
 ///         matrix multiplication integer operation.
 OutputVector matmul_integer(const Node& node);
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp
index acc595b2751bc9..cc5b5092ff003d 100644
--- a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp
+++ b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp
@@ -23,9 +23,9 @@ OutputVector max_roi_pool(const Node& node) {
     const auto X = inputs.at(0);
     const auto rois = inputs.at(1);

-    NGRAPH_CHECK(X.get_element_type() == element::f16 || X.get_element_type() == element::f32 ||
-                     X.get_element_type() == element::f64,
-                 "MaxRoiPool operator only supports float16, float32 and float64 datatypes.");
+    OPENVINO_ASSERT(X.get_element_type() == element::f16 || X.get_element_type() == element::f32 ||
+                        X.get_element_type() == element::f64,
+                    "MaxRoiPool operator only supports float16, float32 and float64 datatypes.");

     const auto pooled_shape = node.get_attribute_value<std::vector<std::size_t>>("pooled_shape");
     const auto spatial_scale = node.get_attribute_value<float>("spatial_scale", 1.0);
diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp
index 6c94dabce3be4d..1c30edb27bac07 100644
--- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp
+++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp
@@ -9,8 +9,9 @@

 #include "default_opset.hpp"
 #include "ngraph/axis_set.hpp"
-#include "ngraph/op/mvn.hpp"
 #include "ngraph/validation_util.hpp"
+#include "openvino/op/mvn.hpp"
+#include "openvino/opsets/opset5.hpp"

 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -36,7 +37,7 @@ OutputVector mean_variance_normalization(const Node& node) {
         ngraph::normalize_axes(node.get_description(), axes, data.get_partial_shape().rank());
     OPENVINO_SUPPRESS_DEPRECATED_END
     auto const_axes = default_opset::Constant::create(element::i64, Shape{normalized_axes.size()}, normalized_axes);
-    return {std::make_shared<ngraph::op::v6::MVN>(data, const_axes, true, 1e-09f, ngraph::op::MVNEpsMode::OUTSIDE_SQRT)};
+    return {std::make_shared<ov::op::v6::MVN>(data, const_axes, true, 1e-09f, ov::op::MVNEpsMode::OUTSIDE_SQRT)};
 }

 }  // namespace set_9
diff --git a/src/frontends/onnx/frontend/src/op/mod.cpp b/src/frontends/onnx/frontend/src/op/mod.cpp
index 9f52d7251f7aed..755bd28ff08547 100644
--- a/src/frontends/onnx/frontend/src/op/mod.cpp
+++ b/src/frontends/onnx/frontend/src/op/mod.cpp
@@ -2,16 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/op/mod.hpp"
+#include "op/mod.hpp"

 #include

 #include "default_opset.hpp"
 #include "exceptions.hpp"
-#include "ngraph/op/abs.hpp"
 #include "ngraph/op/util/attr_types.hpp"
-#include "op/mod.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/abs.hpp"

 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/mul.hpp b/src/frontends/onnx/frontend/src/op/mul.hpp
index f9a1d01a236472..6ad8680e19eed8 100644
--- a/src/frontends/onnx/frontend/src/op/mul.hpp
+++ b/src/frontends/onnx/frontend/src/op/mul.hpp
@@ -11,8 +11,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START

 #include "default_opset.hpp"
 #include "ngraph/node.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/multiply.hpp"
 #include "onnx_import/core/node.hpp"

 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/neg.hpp b/src/frontends/onnx/frontend/src/op/neg.hpp
index 19f7cbdcbb82e0..0ae61f23e64a1d 100644
--- a/src/frontends/onnx/frontend/src/op/neg.hpp
+++ b/src/frontends/onnx/frontend/src/op/neg.hpp
@@ -8,15 +8,15 @@ OPENVINO_SUPPRESS_DEPRECATED_START

 #include "ngraph/node.hpp"
-#include "ngraph/op/negative.hpp"
 #include "onnx_import/core/node.hpp"
+#include "openvino/op/negative.hpp"

 namespace ngraph {
 namespace onnx_import {
 namespace op {
 namespace set_1 {
 inline OutputVector neg(const Node& node) {
-    return {-node.get_ng_inputs().at(0)};
+    return {std::make_shared<ov::op::v0::Negative>(node.get_ng_inputs().at(0))};
 }
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp
index 27ac97eb60218a..cd6044489de204 100644
--- a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp
+++ b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/op/non_max_suppression.hpp"
+#include "op/non_max_suppression.hpp"

 #include

@@ -10,7 +10,6 @@
 #include "exceptions.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "onnx_import/core/null_node.hpp"
-#include "op/non_max_suppression.hpp"
 #include "utils/reshape.hpp"

 OPENVINO_SUPPRESS_DEPRECATED_START
diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
index 1eef678c6ec2b0..c17db4f66676b0 100644
--- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
+++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
@@ -2,10 +2,9 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/op/detection_output.hpp"
+#include "op/org.openvinotoolkit/detection_output.hpp"

 #include "onnx_import/core/node.hpp"
-#include "op/org.openvinotoolkit/detection_output.hpp"
 #include "openvino/frontend/exception.hpp"
 #include "openvino/op/detection_output.hpp"

diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp
index 63c26e61c9f4fd..e686bc345b4344 100644
--- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp
+++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp
@@ -2,11 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/op/prior_box.hpp"
+#include "op/org.openvinotoolkit/prior_box.hpp"

 #include "exceptions.hpp"
 #include "onnx_import/core/node.hpp"
-#include "op/org.openvinotoolkit/prior_box.hpp"
 #include "openvino/frontend/exception.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/prior_box.hpp"
diff --git a/src/frontends/onnx/frontend/src/op/pad.cpp b/src/frontends/onnx/frontend/src/op/pad.cpp
index 1f7368e70c9340..e02b526a1c702e 100644
--- a/src/frontends/onnx/frontend/src/op/pad.cpp
+++ b/src/frontends/onnx/frontend/src/op/pad.cpp
@@ -2,19 +2,16 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/op/pad.hpp"
+#include "op/pad.hpp"

 #include

 #include "default_opset.hpp"
 #include "exceptions.hpp"
 #include "ngraph/coordinate_diff.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/convert.hpp"
 #include "ngraph/op/util/op_types.hpp"
 #include "ngraph/shape.hpp"
 #include "onnx_import/core/null_node.hpp"
-#include "op/pad.hpp"
 #include "ov_models/ov_builders/split.hpp"
 #include "utils/convpool.hpp"
 #include "utils/reshape.hpp"
diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp
index 95ab25b8f79470..408c2e8ab4fe60 100644
--- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp
+++ 
b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -6,8 +6,8 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" +#include "openvino/opsets/opset8.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index 6fbaba619cf5dc..0537d141ea3520 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -6,8 +6,8 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" +#include "openvino/op/random_uniform.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.cpp b/src/frontends/onnx/frontend/src/op/reciprocal.cpp index cb698716c9ad48..ab530c0cdb6dc1 100644 --- a/src/frontends/onnx/frontend/src/op/reciprocal.cpp +++ b/src/frontends/onnx/frontend/src/op/reciprocal.cpp @@ -8,7 +8,6 @@ #include #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/roi_align.cpp b/src/frontends/onnx/frontend/src/op/roi_align.cpp index fbdb77e0246e3a..4efd14a6a727a8 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.cpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.cpp @@ -13,7 +13,9 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { + namespace set_1 { + OutputVector roi_align(const Node& node) { const auto inputs = node.get_ng_inputs(); diff --git a/src/frontends/onnx/frontend/src/op/selu.cpp b/src/frontends/onnx/frontend/src/op/selu.cpp index 16f8bca9149cfd..9eba44e5fd6fd3 100644 --- a/src/frontends/onnx/frontend/src/op/selu.cpp +++ b/src/frontends/onnx/frontend/src/op/selu.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/selu.hpp" +#include "op/selu.hpp" #include #include #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "op/selu.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/softsign.cpp b/src/frontends/onnx/frontend/src/op/softsign.cpp index c6fd91a190b111..2228f59c39cd3c 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.cpp +++ b/src/frontends/onnx/frontend/src/op/softsign.cpp @@ -9,6 +9,7 @@ #include "default_opset.hpp" #include "ngraph/shape.hpp" +#include "openvino/opsets/opset9.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/squeeze.cpp b/src/frontends/onnx/frontend/src/op/squeeze.cpp index 8bfc2035cb2315..3c01b1ffe13e53 100644 --- a/src/frontends/onnx/frontend/src/op/squeeze.cpp +++ b/src/frontends/onnx/frontend/src/op/squeeze.cpp @@ -2,11 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/squeeze.hpp" +#include "op/squeeze.hpp" #include "default_opset.hpp" -#include "ngraph/op/constant.hpp" -#include "op/squeeze.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/topk.cpp b/src/frontends/onnx/frontend/src/op/topk.cpp index 1e7a67b71d3395..b19eb8f53ccd33 100644 --- a/src/frontends/onnx/frontend/src/op/topk.cpp +++ b/src/frontends/onnx/frontend/src/op/topk.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/topk.hpp" 
+#include "op/topk.hpp" #include #include #include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" -#include "op/topk.hpp" #include "openvino/frontend/exception.hpp" #include "utils/reshape.hpp" diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index fc584553a54baf..faff600625a6cf 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -29,6 +29,7 @@ #include "op/batch_norm.hpp" #include "op/bitshift.hpp" #include "op/bitwise_and.hpp" +#include "op/bitwise_not.hpp" #include "op/bitwise_or.hpp" #include "op/bitwise_xor.hpp" #include "op/blackmanwindow.hpp" @@ -356,6 +357,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); REGISTER_OPERATOR("BitShift", 1, bitshift); REGISTER_OPERATOR("BitwiseAnd", 1, bitwise_and); + REGISTER_OPERATOR("BitwiseNot", 1, bitwise_not); REGISTER_OPERATOR("BitwiseOr", 1, bitwise_or); REGISTER_OPERATOR("BitwiseXor", 1, bitwise_xor); REGISTER_OPERATOR("BlackmanWindow", 1, blackmanwindow); diff --git a/src/frontends/onnx/frontend/src/ops_bridge.hpp b/src/frontends/onnx/frontend/src/ops_bridge.hpp index 7cbc25f302b196..2618260bf9129e 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.hpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.hpp @@ -13,7 +13,6 @@ #include "onnx_import/core/operator_set.hpp" #include "openvino/core/deprecated.hpp" -#include "openvino/core/except.hpp" #include "version_range.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/utils/common.cpp b/src/frontends/onnx/frontend/src/utils/common.cpp index aeda69b6063f58..206f0b0325127f 100644 --- a/src/frontends/onnx/frontend/src/utils/common.cpp +++ b/src/frontends/onnx/frontend/src/utils/common.cpp @@ -6,7 +6,6 @@ #include // onnx types -#include "ngraph/graph_util.hpp" #include "onnx_framework_node.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/frontend/exception.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp index 6cd180b6ecc64f..da42025c29bbd4 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp @@ -4,7 +4,6 @@ #include "utils/conv_factory.hpp" -#include "default_opset.hpp" #include "exceptions.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/op/group_conv.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/convpool.hpp b/src/frontends/onnx/frontend/src/utils/convpool.hpp index 36d880abcbffe9..e275271918ff2a 100644 --- a/src/frontends/onnx/frontend/src/utils/convpool.hpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.hpp @@ -4,7 +4,6 @@ #pragma once -#include "ngraph/coordinate_diff.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/reshape.hpp b/src/frontends/onnx/frontend/src/utils/reshape.hpp index e40c119a7ce7e3..57d76d08823f29 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.hpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.hpp @@ -9,8 +9,7 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" +#include "openvino/core/node.hpp" namespace ngraph { namespace onnx_import { @@ -44,7 +43,7 @@ std::vector 
infer_dimensions(const std::string& node_name,
 ///
 /// \return Original node or a node representing a reshape of the original.
 ///
-Output<ngraph::Node> interpret_as_scalar(const Output<ngraph::Node>& node);
+Output<ov::Node> interpret_as_scalar(const Output<ov::Node>& node);

 /// \brief Reshape node from shape {C} to {1, C, 1, 1,...}
 ///
@@ -58,8 +57,8 @@ Output<ngraph::Node> interpret_as_scalar(const Output<ngraph::Node>& node);
 ///
 /// \return Original node or a node representing a reshape of the original.
 ///
-Output<ngraph::Node> reshape_channel_shaped_node_to_nchw(const Output<ngraph::Node>& node,
-                                                         const Output<ngraph::Node>& expected_rank);
+Output<ov::Node> reshape_channel_shaped_node_to_nchw(const Output<ov::Node>& node,
+                                                     const Output<ov::Node>& expected_rank);
 }  // namespace reshape
 }  // namespace onnx_import
diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
index 96dd0a4e0380f5..d96f354c65e1c5 100644
--- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
+++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp
@@ -58,9 +58,7 @@ Buffer TensorExternalData::load_external_mmap_data(const std::
 Buffer TensorExternalData::load_external_data(const std::string& model_dir) const {
     auto full_path = ov::util::path_join({model_dir, m_data_location});
 #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
-    NGRAPH_SUPPRESS_DEPRECATED_START
     ov::util::convert_path_win_style(full_path);
-    NGRAPH_SUPPRESS_DEPRECATED_END
     std::ifstream external_data_stream(ov::util::string_to_wstring(full_path).c_str(),
                                        std::ios::binary | std::ios::in | std::ios::ate);
 #else
diff --git a/src/frontends/onnx/frontend/src/utils/variadic.hpp b/src/frontends/onnx/frontend/src/utils/variadic.hpp
index fe48ea92b34938..50e8a5ebadcff5 100644
--- a/src/frontends/onnx/frontend/src/utils/variadic.hpp
+++ b/src/frontends/onnx/frontend/src/utils/variadic.hpp
@@ -6,10 +6,6 @@

 #include

-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/shape.hpp"
 #include "onnx_import/core/node.hpp"
 #include "openvino/core/deprecated.hpp"
 #include "utils/common.hpp"
@@ -17,24 +13,23 @@ namespace ngraph {
 namespace onnx_import {
 namespace variadic {
-/// \brief Create an nGraph version of an ONNX variadic operation.
+/// \brief Create an OpenVINO version of an ONNX variadic operation.
 ///        This creates a subgraph with a series of binary operations.
 ///
 /// \param node Incoming ONNX operation.
 ///
-/// \tparam T   Class of an nGraph binary operation (e.g. Add, Minimum, Maximum)
+/// \tparam T   Class of an OpenVINO binary operation (e.g. Add, Minimum, Maximum)
 ///
-/// \return nGraph node equivalent of the ONNX operation
+/// \return OpenVINO node equivalent of the ONNX operation
 OPENVINO_SUPPRESS_DEPRECATED_START
 template <typename T>
 inline OutputVector make_ng_variadic_op(
     const Node& node,
-    const ngraph::op::AutoBroadcastSpec& auto_broadcast = ngraph::op::AutoBroadcastType::NUMPY) {
+    const ov::op::AutoBroadcastSpec& auto_broadcast = ov::op::AutoBroadcastType::NUMPY) {
     const OutputVector ng_inputs{node.get_ng_inputs()};

     // Templated binary operation - Creates Add, Minimum, Maximum, etc.
-    const auto binary_operation = [&auto_broadcast](const Output<ngraph::Node>& arg0,
-                                                    const Output<ngraph::Node>& arg1) {
+    const auto binary_operation = [&auto_broadcast](const Output<ov::Node>& arg0, const Output<ov::Node>& arg1) {
         return std::make_shared<T>(arg0, arg1, auto_broadcast);
     };
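For context, the helper in the hunk above folds all inputs of a variadic ONNX op into a chain of binary nodes. A self-contained illustration for Add (our own example under the assumption of ov::op::v1::Add; the real helper is templated on the op type):

#include <numeric>
#include "openvino/op/add.hpp"

// Builds Add(Add(a, b), c)... for inputs {a, b, c, ...}: a left fold over the
// input list, each node created with NUMPY-style auto-broadcasting.
ov::Output<ov::Node> fold_with_add(const ov::OutputVector& inputs) {
    return std::accumulate(std::next(inputs.begin()),
                           inputs.end(),
                           inputs.front(),
                           [](const ov::Output<ov::Node>& lhs, const ov::Output<ov::Node>& rhs) {
                               return ov::Output<ov::Node>{
                                   std::make_shared<ov::op::v1::Add>(lhs, rhs, ov::op::AutoBroadcastType::NUMPY)};
                           });
}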
diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt
index 0a9c36c711d151..eea80d57ecbf1a 100644
--- a/src/frontends/onnx/tests/CMakeLists.txt
+++ b/src/frontends/onnx/tests/CMakeLists.txt
@@ -74,10 +74,8 @@ set(SRC
     onnx_editor.cpp
     onnx_editor_topological_sort.cpp
     onnx_import_exceptions.cpp
-    onnx_import_library.cpp
     onnx_importer_test.cpp
     onnx_tensor_names.cpp
-    onnx_test_util.cpp
     onnx_utils.cpp
     onnx_transformations.cpp
     op_extension.cpp
@@ -118,11 +116,13 @@ foreach(BACKEND_NAME IN LISTS ACTIVE_BACKEND_LIST)
     message(STATUS "Adding unit test for backend ${BACKEND_NAME}")
 endforeach()

+# Create target
+
 add_executable(ov_onnx_frontend_tests ${SRC})

 add_test(NAME ov_onnx_frontend_tests COMMAND ov_onnx_frontend_tests --gtest_filter=-*IE_GPU*)
 set_property(TEST ov_onnx_frontend_tests PROPERTY LABELS OV UNIT ONNX_FE)

-add_dependencies(ov_onnx_frontend_tests template_extension)
+add_dependencies(ov_onnx_frontend_tests openvino_template_extension)

 target_include_directories(ov_onnx_frontend_tests PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")

@@ -141,27 +141,14 @@ endif()
 target_link_libraries(ov_onnx_frontend_tests PRIVATE
     gtest_main_manifest
     frontend_shared_test_classes
-    openvino::runtime::dev
-    openvino_onnx_frontend
-    openvino_onnx_common
+    openvino::frontend::onnx
     func_test_utils)

-# It's needed by onnx_import_library.cpp and onnx_import_exceptions.cpp tests to include onnx_pb.h.
-# Not linking statically to libprotobuf (linked into libonnx) avoids false-failing onnx_editor tests.
-target_include_directories(ov_onnx_frontend_tests - SYSTEM PRIVATE - $ - $ - $) -target_compile_definitions(ov_onnx_frontend_tests PRIVATE $) -target_compile_definitions(ov_onnx_frontend_tests PRIVATE ENABLE_OV_ONNX_FRONTEND) - if(OV_COMPILER_IS_CLANG) target_compile_options(ov_onnx_frontend_tests PRIVATE -Wno-undef -Wno-reserved-id-macro) endif() -target_include_directories(ov_onnx_frontend_tests PRIVATE - $) +# Install rules install(TARGETS ov_onnx_frontend_tests RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py index e47f08323e0a48..f2843e89ce33b2 100644 --- a/src/frontends/onnx/tests/__init__.py +++ b/src/frontends/onnx/tests/__init__.py @@ -168,7 +168,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): # ONNX 1.15 xfail_issue_125485 = xfail_test(reason="AffineGrid operation is not supported") -xfail_issue_125486 = xfail_test(reason="Gelu operation is not supported") xfail_issue_125488 = xfail_test(reason="ImageDecoder operation is not supported") skip_issue_125487 = pytest.mark.skip(reason="GridSample doesn't support cubic and linear modes, and 4D tensor") # Need to enable after bumping to 1.15 skip_issue_125489 = pytest.mark.skip(reason="IsInf changed behavior since opset-20") # Need to enable after opset-20 will be released diff --git a/src/frontends/onnx/tests/models/bitwise_not.prototxt b/src/frontends/onnx/tests/models/bitwise_not.prototxt new file mode 100644 index 00000000000000..29e97d88172b18 --- /dev/null +++ b/src/frontends/onnx/tests/models/bitwise_not.prototxt @@ -0,0 +1,41 @@ +ir_version: 9 +producer_name: "BitwiseNotModel" +graph { + node { + input: "x" + output: "y" + name: "BitwiseNotNode" + op_type: "BitwiseNot" + } + name: "BitwiseNotGraph" + input { + name: "x" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 5 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 5 + } + } + } + } + } +} +opset_import { + domain: "" + version: 16 +} diff --git a/src/frontends/onnx/tests/onnx_editor.cpp b/src/frontends/onnx/tests/onnx_editor.cpp index b3c8038165c4d8..56aa60642ec667 100644 --- a/src/frontends/onnx/tests/onnx_editor.cpp +++ b/src/frontends/onnx/tests/onnx_editor.cpp @@ -6,16 +6,15 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" using namespace ov; -using namespace ov::onnx_editor; +using namespace ov::frontend; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); @@ -42,13 +41,13 @@ std::shared_ptr find_input(const ParameterVector& inputs, con OPENVINO_TEST(onnx_editor, types__single_input_type_substitution) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_abc.onnx", &front_end); - editor.set_input_types({{"A", element::i64}}); + input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i64); - const auto function = editor.get_function(); - 
const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); const auto float_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::f32)); @@ -64,14 +63,16 @@ OPENVINO_TEST(onnx_editor, types__single_input_type_substitution) { OPENVINO_TEST(onnx_editor, types__all_inputs_type_substitution) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_abc.onnx", &front_end); - editor.set_input_types({{"A", element::i8}, {"B", element::i8}, {"C", element::i8}}); + input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i8); + input_model->set_element_type(input_model->get_place_by_tensor_name("B"), element::i8); + input_model->set_element_type(input_model->get_place_by_tensor_name("C"), element::i8); - const auto function = editor.get_function(); + const auto model = front_end->convert(input_model); - const auto graph_inputs = function->get_parameters(); + const auto graph_inputs = model->get_parameters(); const auto float_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::f32)); @@ -84,85 +85,80 @@ OPENVINO_TEST(onnx_editor, types__all_inputs_type_substitution) { } OPENVINO_TEST(onnx_editor, types__missing_type_in_input_descriptor) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/invalid_input_no_type.onnx"})}; + auto input_model = load_model("model_editor/invalid_input_no_type.onnx"); // input A doesn't have the "type" field in the model and so the data type cannot be modified - EXPECT_THROW(editor.set_input_types({{"A", element::f32}}), ov::Exception); + EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::f32), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__missing_tensor_type_in_input_descriptor) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/invalid_input_no_tensor_type.onnx"})}; + auto input_model = load_model("model_editor/invalid_input_no_tensor_type.onnx"); // input A doesn't have the "tensor_type" field in the model - EXPECT_THROW(editor.set_input_types({{"A", element::f32}}), ov::Exception); + EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::f32), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__unsupported_data_type_passed) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + auto input_model = load_model("model_editor/add_abc.onnx"); - EXPECT_THROW(editor.set_input_types({{"A", element::dynamic}}), ov::Exception); + EXPECT_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::dynamic), + ov::Exception); } OPENVINO_TEST(onnx_editor, types__incorrect_input_name_passed) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; + auto input_model = load_model("model_editor/add_abc.onnx"); - 
EXPECT_THROW(editor.set_input_types({{"ShiaLaBeouf", element::i64}}), ov::Exception); + EXPECT_EQ(input_model->get_place_by_tensor_name("ShiaLaBeouf"), nullptr); } OPENVINO_TEST(onnx_editor, types__elem_type_missing_in_input) { // the original model contains 2 inputs with i64 data type and one f32 input - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/elem_type_missing_in_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/elem_type_missing_in_input.onnx", &front_end); // the "elem_type" is missing in the model but it should be possible to set the type anyway - EXPECT_NO_THROW(editor.set_input_types({{"A", element::i64}})); + EXPECT_NO_THROW(input_model->set_element_type(input_model->get_place_by_tensor_name("A"), element::i64)); - const auto function = editor.get_function(); + const auto model = front_end->convert(input_model); - const auto graph_inputs = function->get_parameters(); + const auto graph_inputs = model->get_parameters(); const auto integer_inputs_count = std::count_if(std::begin(graph_inputs), std::end(graph_inputs), element_type_is(element::i64)); EXPECT_EQ(integer_inputs_count, 2); - const auto function_result = function->get_result(); + const auto function_result = model->get_result(); EXPECT_EQ(function_result->get_element_type(), element::i64); } OPENVINO_TEST(onnx_editor, shapes__modify_single_input) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{1}; - editor.set_input_shapes({{"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "B")->get_partial_shape().same_scheme(new_shape)); } OPENVINO_TEST(onnx_editor, shapes__modify_all_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{1, 2, 3, 5, 8, 13}; - editor.set_input_shapes({{"A", new_shape}, {"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); for (const auto& input : graph_inputs) { EXPECT_TRUE(input->get_partial_shape().same_scheme(new_shape)); @@ -170,48 +166,46 @@ OPENVINO_TEST(onnx_editor, shapes__modify_all_inputs) { } OPENVINO_TEST(onnx_editor, shapes__dynamic_rank_in_model) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__dynamic_rank_in_model.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = 
load_model("model_editor/shapes__dynamic_rank_in_model.onnx", &front_end); // input A in the model doesn't have the "shape" field meaning it has dynamic rank // it should still be possible to set such input's shape to some custom value const auto expected_shape_of_A = PartialShape{1, 2}; - EXPECT_NO_THROW(editor.set_input_shapes({{"A", expected_shape_of_A}})); + EXPECT_NO_THROW(input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), expected_shape_of_A)); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "A")->get_partial_shape().same_scheme(expected_shape_of_A)); } OPENVINO_TEST(onnx_editor, shapes__set_dynamic_dimension) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{Dimension::dynamic()}; - editor.set_input_shapes({{"A", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); EXPECT_TRUE(find_input(graph_inputs, "A")->get_partial_shape().same_scheme(new_shape)); } OPENVINO_TEST(onnx_editor, shapes__set_mixed_dimensions) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape_A = PartialShape{21, Dimension::dynamic()}; const auto new_shape_B = PartialShape{Dimension::dynamic(), 37}; - editor.set_input_shapes({{"A", new_shape_A}, {"B", new_shape_B}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape_A); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape_B); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); const auto input_A = find_input(graph_inputs, "A"); EXPECT_TRUE(input_A->get_partial_shape().same_scheme(new_shape_A)); @@ -221,16 +215,16 @@ OPENVINO_TEST(onnx_editor, shapes__set_mixed_dimensions) { } OPENVINO_TEST(onnx_editor, shapes__set_scalar_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape{}; - editor.set_input_shapes({{"A", new_shape}, {"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto 
graph_inputs = model->get_parameters(); const auto input_A = find_input(graph_inputs, "A"); EXPECT_TRUE(input_A->get_partial_shape().same_scheme(new_shape)); @@ -240,16 +234,16 @@ OPENVINO_TEST(onnx_editor, shapes__set_scalar_inputs) { } OPENVINO_TEST(onnx_editor, shapes__static_to_dynamic_rank_substitution) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/shapes__add_two_inputs.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/shapes__add_two_inputs.onnx", &front_end); const auto new_shape = PartialShape::dynamic(); - editor.set_input_shapes({{"A", new_shape}, {"B", new_shape}}); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("A"), new_shape); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("B"), new_shape); - const auto function = editor.get_function(); - const auto graph_inputs = function->get_parameters(); + const auto model = front_end->convert(input_model); + const auto graph_inputs = model->get_parameters(); for (const auto& input : graph_inputs) { EXPECT_TRUE(input->get_partial_shape().same_scheme(new_shape)); @@ -257,1614 +251,802 @@ OPENVINO_TEST(onnx_editor, shapes__static_to_dynamic_rank_substitution) { } OPENVINO_TEST(onnx_editor, subgraph__linear_model_head_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(1, 0)}}, {}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("relu1")}, {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_head_cut_ins_and_outs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(1, 0)}}, {{OutputEdge(2, 0)}}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("relu1")}, {input_model->get_outputs()[0]}); - // expected to behave the same way as subgraph__linear_model_head_cut - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - 
EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_deeper_head_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge(2, 0)}}, {}); + input_model->extract_subgraph({input_model->get_place_by_operation_name("maxpool1")}, {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_deeper_head_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_deeper_head_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_operation_name("relu1")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_tail_cut_ins_and_outs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 0}}}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({input_model->get_inputs()[0]}, {input_model->get_place_by_operation_name("relu1")}); - // expected to behave the same way as subgraph__linear_model_tail_cut - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = 
FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_with_initializer_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head_with_initializer.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head_with_initializer.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_2")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__initializer_without_matching_input_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__initializer_without_matching_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__initializer_without_matching_input.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{1, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__initializer_without_matching_input_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__linear_model_deeper_tail_cut) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__linear_model_deeper_tail_cut.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = 
convert_model("model_editor/reference/subgraph__linear_model_deeper_tail_cut.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__no_input_params) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end); - ONNXModelEditor editor{model_path}; + input_model->extract_subgraph({}, {}); - editor.extract_subgraph({}, {}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/subgraph__inception_head.onnx"); - const auto result = compare_onnx_models(editor.model_string(), model_path); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__initializer_to_input_replacement) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head_with_initializer.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__inception_head_with_initializer.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 2}}}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_b_0")}, + {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__initializer_to_input_replacement.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__initializer_to_input_replacement.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__initializer_to_input_replacement_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__initializer_without_matching_input.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph__initializer_without_matching_input.onnx", &front_end); - editor.extract_subgraph({{InputEdge{0, 2}}}, {{OutputEdge{0, 0}}}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_b_0")}, + {input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}); - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__initializer_to_input_replacement.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = 
convert_model("model_editor/reference/subgraph__initializer_to_input_replacement.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__multiout_op_output_edge) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - editor.extract_subgraph({}, {{OutputEdge{5, 1}}}); + input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("split2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__multiout_op_output_edge.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = convert_model("model_editor/reference/subgraph__multiout_op_output_edge.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__existing_inputs_and_outputs_based_extraction) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - editor.extract_subgraph({{InputEdge{1, 1}, InputEdge{2, 0}}}, {{OutputEdge{4, 0}}}); + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("in1"), input_model->get_place_by_tensor_name("in3")}, + {input_model->get_place_by_tensor_name("mul2")}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__existing_inputs_and_outputs_based_extraction.onnx"}); + auto model = front_end->convert(input_model); + auto model_ref = + convert_model("model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__twice_input_edge_from_tensor_with_single_consumer) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab.onnx", &front_end); - editor.extract_subgraph({InputEdge{1, 1}}, {}); + input_model->extract_subgraph( + {input_model->get_place_by_tensor_name("X")->get_consuming_operations()[0]->get_input_port(1)}, + {}); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - 
 
 OPENVINO_TEST(onnx_editor, subgraph__twice_input_edge_from_tensor_with_single_consumer) {
-    ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_ab.onnx", &front_end);
 
-    editor.extract_subgraph({InputEdge{1, 1}}, {});
+    input_model->extract_subgraph(
+        {input_model->get_place_by_tensor_name("X")->get_consuming_operations()[0]->get_input_port(1)},
+        {});
 
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx"});
+    auto model = front_end->convert(input_model);
+    auto model_ref =
+        convert_model("model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx");
 
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-    EXPECT_TRUE(result.is_ok) << result.error_message;
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
 
 OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end);
 
-    editor.extract_subgraph({{InputEdge{1, 0}, InputEdge{6, 0}}}, {{OutputEdge{6, 0}, OutputEdge{4, 0}}});
+    auto relu_node = input_model->get_place_by_operation_name("relu1_name");
+    auto relu_consumers = relu_node->get_consuming_operations();
 
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__input_edge_from_tensor_with_multiple_consumers.onnx"});
+    input_model->extract_subgraph(
+        {relu_consumers[0]->get_input_port(0), relu_consumers[2]->get_input_port(0)},
+        {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("mul2")});
+    auto model = front_end->convert(input_model);
 
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
+    auto model_ref =
+        convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.onnx");
 
-    EXPECT_TRUE(result.is_ok) << result.error_message;
-}
-
-OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_2) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    editor.extract_subgraph({{InputEdge{3, 0}, InputEdge{3, 1}}}, {{OutputEdge{3, 0}, OutputEdge{4, 0}}});
-
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__input_edge_from_tensor_with_multiple_consumers_2.onnx"});
-
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
-
-    EXPECT_TRUE(result.is_ok) << result.error_message;
-}
-
-OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_3) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    editor.extract_subgraph({{InputEdge{3, 0}, InputEdge{6, 0}}}, {{OutputEdge{6, 0}, OutputEdge{5, 1}}});
-
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__input_edge_from_tensor_with_multiple_consumers_3.onnx"});
-
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
-
-    EXPECT_TRUE(result.is_ok) << result.error_message;
-}
-
-OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_4) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    editor.extract_subgraph({{InputEdge{1, 0}, InputEdge{3, 0}}}, {});
-
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__input_edge_from_tensor_with_multiple_consumers_4.onnx"});
-
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
-
-    EXPECT_TRUE(result.is_ok) << result.error_message;
-}
-
-OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_5) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    editor.extract_subgraph({InputEdge{3, 0}}, {{OutputEdge{6, 0}, OutputEdge{5, 1}}});
-
-    // expected to behave the same way as the test above
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__input_edge_from_tensor_with_multiple_consumers_5.onnx"});
-
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-    EXPECT_TRUE(result.is_ok) << result.error_message;
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
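When a tensor feeds several nodes, the migrated tests reach the exact edge to cut by walking from a place to its consumers and then to a specific input port. A minimal sketch of that navigation, with placeholder names, and of why `CONSUMERS_COUNT` is switched on:

```cpp
// Sketch: "relu1_name" is a placeholder operation name from these test models.
auto relu_node = input_model->get_place_by_operation_name("relu1_name");
auto relu_consumers = relu_node->get_consuming_operations();

// Cutting at consumer[1]'s input port detaches only that edge; the other
// consumers keep reading the original tensor.
input_model->extract_subgraph({relu_consumers[1]->get_input_port(0)}, {});

// CONSUMERS_COUNT additionally verifies how many readers each tensor has,
// which is exactly what a wrong multi-consumer cut would change.
FunctionsComparator func_comparator = FunctionsComparator::with_default();
func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
```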
"model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{2, 0}, InputEdge{3, 0}}}, {}); - - // same as above - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_relu2_and_init) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; - - editor.extract_subgraph({{InputEdge{5, 0}, InputEdge{3, 0}}}, {}); - - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__invalid_edge_idx) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{15, 0}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The specified node index is out of range of nodes in the original model") != - std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, subgraph__invalid_port_idx) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{0, 3}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The specified node with index: 0 has not input port with index: 3") != std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, subgraph__inputs_getter) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - EXPECT_EQ(editor.model_inputs(), (std::vector{"data_0", "conv1/7x7_s2_w_0", "conv1/7x7_s2_b_0"})); - - editor.extract_subgraph({{InputEdge{1, 0}}}, {}); - - EXPECT_EQ(editor.model_inputs(), (std::vector{"conv1/7x7_s2_1"})); -} - -OPENVINO_TEST(onnx_editor, subgraph__custom_input_name_already_exist) { - const auto model_path = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"}); - - ONNXModelEditor editor{model_path}; - try { - editor.extract_subgraph({{InputEdge{1, 0, "conv1/7x7_s2_b_0"}}}, {}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - 
EXPECT_TRUE(msg.find("New custom input name: conv1/7x7_s2_b_0 already exist in the graph") != - std::string::npos); - } -} - -// HIGHT LEVEL API TESTS -// INPUT EDGES TEST -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_output_name_and_input_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = - editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_2"}}, EditorInput{"conv1/7x7_s2_1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{"data_0"}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 0); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_output_name_and_input_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_2"}}, EditorInput{0}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{1}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 1); - - const InputEdge edge3 = editor.find_input_edge(EditorNode{EditorOutput{"conv1/7x7_s2_1"}}, EditorInput{2}); - EXPECT_EQ(edge3.m_node_idx, 0); - EXPECT_EQ(edge3.m_port_idx, 2); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph__inception_head.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1"}, EditorInput{"conv1/7x7_s2_1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"conv1"}, EditorInput{"conv1/7x7_s2_w_0"}); - EXPECT_EQ(edge2.m_node_idx, 0); - EXPECT_EQ(edge2.m_port_idx, 1); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1_name"}, EditorInput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"split_name"}, EditorInput{0}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_name_and_input_index_custom_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{"relu1_name"}, EditorInput{0, "custom_input_name_1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - EXPECT_EQ(edge.m_new_input_name, "custom_input_name_1"); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{"split_name"}, EditorInput{0, "custom_input_name_2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); - EXPECT_EQ(edge2.m_new_input_name, 
"custom_input_name_2"); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_node_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const InputEdge edge = editor.find_input_edge(EditorNode{0}, EditorInput{0, "custom_input_name_1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - EXPECT_EQ(edge.m_new_input_name, "custom_input_name_1"); - - const InputEdge edge2 = editor.find_input_edge(EditorNode{5}, EditorInput{0}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 0); - - try { - editor.find_input_edge(EditorNode{99}, EditorInput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_empty_node_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - try { - editor.find_input_edge(EditorNode{""}, EditorInput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Node with name: not_given and output_name: not_given was not found") != - std::string::npos); - } -} - -// OUTPUT EDGES TEST -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const OutputEdge edge = editor.find_output_edge(EditorNode{EditorOutput{"mul2"}}, EditorOutput{"mul2"}); - EXPECT_EQ(edge.m_node_idx, 4); - EXPECT_EQ(edge.m_port_idx, 0); - - const OutputEdge edge2 = editor.find_output_edge(EditorNode{EditorOutput{"split1"}}, EditorOutput{"split2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); - - // simplified overload - const OutputEdge edge3 = editor.find_output_edge("mul2"); - EXPECT_EQ(edge3.m_node_idx, 4); - EXPECT_EQ(edge3.m_port_idx, 0); +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_2) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - const OutputEdge edge4 = editor.find_output_edge("split2"); - EXPECT_EQ(edge4.m_node_idx, 5); - EXPECT_EQ(edge4.m_port_idx, 1); -} + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_output_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + input_model->extract_subgraph( + {relu_consumers[1]->get_input_port(0), relu_consumers[1]->get_input_port(1)}, + {input_model->get_place_by_tensor_name("mul2"), relu_consumers[1]->get_output_port()}); + auto model = front_end->convert(input_model); - const OutputEdge edge = editor.find_output_edge(EditorNode{EditorOutput{"add2"}}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 3); - EXPECT_EQ(edge.m_port_idx, 0); + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.onnx"); - const OutputEdge edge2 = 
editor.find_output_edge(EditorNode{EditorOutput{"split1"}}, EditorOutput{1}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - const OutputEdge edge3 = editor.find_output_edge(EditorNode{EditorOutput{"split2"}}, EditorOutput{0}); - EXPECT_EQ(edge3.m_node_idx, 5); - EXPECT_EQ(edge3.m_port_idx, 0); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_name_and_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const OutputEdge edge = editor.find_output_edge(EditorNode{"relu1_name"}, EditorOutput{"relu1"}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); - - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{"split2"}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); -} - -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_3) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); - const OutputEdge edge = editor.find_output_edge(EditorNode{"relu1_name"}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{1}); - EXPECT_EQ(edge2.m_node_idx, 5); - EXPECT_EQ(edge2.m_port_idx, 1); -} + input_model->extract_subgraph( + {relu_consumers[1]->get_input_port(0), relu_consumers[2]->get_input_port(0)}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("split2")}); + auto model = front_end->convert(input_model); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_node_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto model_ref = + convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.onnx"); - const OutputEdge edge = editor.find_output_edge(EditorNode{5}, EditorOutput{1}); - EXPECT_EQ(edge.m_node_idx, 5); - EXPECT_EQ(edge.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - try { - editor.find_output_edge(EditorNode{99}, EditorOutput{"conv1/7x7_s2_1"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_edge_const_network) { - ONNXModelEditor 
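A detail worth calling out in the `_2` test above: `extract_subgraph` accepts a mix of place kinds, so a new model output can be a named tensor or an operation's output port. A small sketch under the same placeholder names:

```cpp
// Sketch: a Place may denote a tensor, an operation, or one of its ports.
auto mul2 = input_model->get_place_by_tensor_name("mul2");                   // tensor place
auto relu = input_model->get_place_by_operation_name("relu1_name");          // operation place
auto port = relu->get_consuming_operations()[1]->get_output_port();          // port place

// New outputs may freely combine a named tensor and an operation's port.
input_model->extract_subgraph({}, {mul2, port});
```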
 
-OPENVINO_TEST(onnx_editor, editor_api_select_edge_const_network) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests_2.onnx"})};
-
-    const InputEdge edge = editor.find_input_edge(EditorNode{EditorOutput{"relu4"}}, EditorInput{0});
-    EXPECT_EQ(edge.m_node_idx, 3);
-    EXPECT_EQ(edge.m_port_idx, 0);
-
-    const OutputEdge edge2 = editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{0});
-    EXPECT_EQ(edge2.m_node_idx, 3);
-    EXPECT_EQ(edge2.m_port_idx, 0);
-
-    const OutputEdge edge3 = editor.find_output_edge(EditorNode{"add1_name"}, EditorOutput{0});
-    EXPECT_EQ(edge3.m_node_idx, 4);
-    EXPECT_EQ(edge3.m_port_idx, 0);
-}
-
-OPENVINO_TEST(onnx_editor, editor_api_select_edge_error_handling) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests_2.onnx"})};
-
-    // node with given output name not found
-    try {
-        editor.find_input_edge(EditorNode{EditorOutput{"not_existed"}}, EditorInput{0});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with name: not_given and output_name: not_existed was not found") !=
-                    std::string::npos);
-    }
+OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_4) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end);
 
-    // node with given name not found
-    try {
-        editor.find_input_edge(EditorNode{"not_existed"}, EditorInput{0});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with name: not_existed and output_name: not_given was not found") !=
-                    std::string::npos);
-    }
+    auto relu_node = input_model->get_place_by_operation_name("relu1_name");
+    auto relu_consumers = relu_node->get_consuming_operations();
 
-    // input index out of scope
-    try {
-        editor.find_input_edge(EditorNode{"relu4_name"}, EditorInput{1});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with index: 3 has not input with index: 1") != std::string::npos);
-    }
+    input_model->extract_subgraph({relu_consumers[0]->get_input_port(0), relu_consumers[1]->get_input_port(0)}, {});
+    auto model = front_end->convert(input_model);
 
-    // output index out of scope
-    try {
-        editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{1});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with index: 3 has not output with index: 1") != std::string::npos);
-    }
+    auto model_ref =
+        convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.onnx");
 
-    // input name not found
-    try {
-        editor.find_input_edge(EditorNode{"relu4_name"}, EditorInput{"not_existed"});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with index: 3 has not input with name: not_existed") != std::string::npos);
-    }
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-    // output name not found
-    try {
-        editor.find_output_edge(EditorNode{"relu4_name"}, EditorOutput{"not_existed"});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with index: 3 has not output with name: not_existed") != std::string::npos);
-    }
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
 
-// Nodes with ambiguous node names tests
-OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_but_matched_input) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    InputEdge edge = editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"in2"});
-    EXPECT_EQ(edge.m_node_idx, 1);
-    EXPECT_EQ(edge.m_port_idx, 1);
-
-    const InputEdge edge2 = editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"add1"});
-    EXPECT_EQ(edge2.m_node_idx, 3);
-    EXPECT_EQ(edge2.m_port_idx, 1);
-}
+OPENVINO_TEST(onnx_editor, subgraph__input_edge_from_tensor_with_multiple_consumers_5) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end);
 
-OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_and_not_matched_input) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    auto relu_node = input_model->get_place_by_operation_name("relu1_name");
+    auto relu_consumers = relu_node->get_consuming_operations();
 
-    try {
-        editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"in3"});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Input edge described by: add_ambiguous_name and input name: in3 was not found") !=
-                    std::string::npos);
-    }
+    input_model->extract_subgraph(
+        {relu_consumers[1]->get_input_port(0)},
+        {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("split2")});
+    auto model = front_end->convert(input_model);
 
-    try {
-        editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{"relu1"});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(
-            msg.find(
-                "Given node name: add_ambiguous_name and input name: relu1 are ambiguous to determine input edge") !=
-            std::string::npos);
-    }
-}
+    auto model_ref =
+        convert_model("model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.onnx");
 
-OPENVINO_TEST(onnx_editor, editor_api_select_input_edge_by_ambiguous_node_name_and_input_index) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-    try {
-        editor.find_input_edge(EditorNode{"add_ambiguous_name"}, EditorInput{0});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(
-            msg.find("Given node name: add_ambiguous_name and input index: 0 are ambiguous to determine input edge") !=
-            std::string::npos);
-    }
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
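The `_4` and `_5` tests above exercise the two one-sided forms of the cut. A minimal sketch, with a placeholder tensor name, of how either argument of `extract_subgraph` may be left empty:

```cpp
// Sketch: "mid" is a placeholder tensor name.
// Inputs only: the model's existing outputs are kept as-is.
input_model->extract_subgraph({input_model->get_place_by_tensor_name("mid")}, {});

// Outputs only: the model's existing inputs are kept as-is.
input_model->extract_subgraph({}, {input_model->get_place_by_tensor_name("mid")});
```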
&front_end); - const OutputEdge edge = editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"add1"}); - EXPECT_EQ(edge.m_node_idx, 1); - EXPECT_EQ(edge.m_port_idx, 0); + auto relu_node = input_model->get_place_by_operation_name("relu1_name"); + auto relu_consumers = relu_node->get_consuming_operations(); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"add2"}); - EXPECT_EQ(edge2.m_node_idx, 3); - EXPECT_EQ(edge2.m_port_idx, 0); -} + input_model->cut_and_add_new_input(relu_consumers[0]->get_input_port(0), "new_name_1"); + input_model->cut_and_add_new_input(relu_consumers[2]->get_input_port(0), "new_name_2"); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_the_same_node_name_and_output_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests_2.onnx"})}; + input_model->extract_subgraph( + {}, + {input_model->get_place_by_tensor_name("mul1"), input_model->get_place_by_tensor_name("mul2")}); - const OutputEdge edge = editor.find_output_edge(EditorNode{"add1"}, EditorOutput{0}); - EXPECT_EQ(edge.m_node_idx, 0); - EXPECT_EQ(edge.m_port_idx, 0); + auto model = front_end->convert(input_model); - const OutputEdge edge2 = editor.find_output_edge(EditorNode{EditorOutput{"add1"}}, EditorOutput{0}); - EXPECT_EQ(edge2.m_node_idx, 4); - EXPECT_EQ(edge2.m_port_idx, 0); -} + auto model_ref = convert_model( + "model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.onnx"); -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_ambiguous_node_name_and_not_matched_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - try { - editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{"split2"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE(msg.find("Output edge described by: add_ambiguous_name and output name: split2 was not found") != - std::string::npos); - } + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_select_output_edge_by_ambiguous_node_name_and_output_index) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - try { - editor.find_output_edge(EditorNode{"add_ambiguous_name"}, EditorOutput{0}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "Given node name: add_ambiguous_name and output index: 0 are ambiguous to determine output edge") != - std::string::npos); - } -} - -OPENVINO_TEST(onnx_editor, editor_api_use_edge_mapper_with_graph_cutter) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{1, "in2"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(1)); - // InputEdge{2, "in3"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("conv1")), EditorInput(0)); - - const auto output_edge 
= editor.find_output_edge(EditorNode(EditorOutput("mul2")), EditorOutput(0)); - // OutputEdge{4, "mul2"} - editor.extract_subgraph({input_edge_1, input_edge_2}, {output_edge}); - - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__existing_inputs_and_outputs_based_extraction.onnx"}); +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_input_relu2) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("relu1")}, {}); - EXPECT_TRUE(result.is_ok) << result.error_message; + auto model = front_end->convert(input_model); - // check if mapper was updated after the model changed - const auto input_edge_4 = editor.find_input_edge(EditorNode(EditorOutput("relu1")), EditorInput(0)); - EXPECT_EQ(input_edge_4.m_node_idx, 0); - EXPECT_EQ(input_edge_4.m_port_idx, 0); + auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.onnx"); - const auto input_edge_5 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(1)); - EXPECT_EQ(input_edge_5.m_node_idx, 1); - EXPECT_EQ(input_edge_5.m_port_idx, 1); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - const auto output_edge_3 = editor.find_output_edge("mul2"); - EXPECT_EQ(output_edge_3.m_node_idx, 3); - EXPECT_EQ(output_edge_3.m_port_idx, 0); + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_use_edge_mapper_with_graph_cutter_custom_names) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; +OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer) { + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end); - const auto input_edge_1 = editor.find_input_edge(EditorNode{EditorOutput{"mul2"}}, EditorInput{1, "new_name_1"}); - const auto input_edge_2 = - editor.find_input_edge(EditorNode{EditorOutput{"split2"}}, EditorInput{"add2", "new_name_2"}); + input_model->extract_subgraph({input_model->get_place_by_tensor_name("in2")}, {}); - editor.extract_subgraph({input_edge_1, input_edge_2}, {}); + auto model = front_end->convert(input_model); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/" - "subgraph__use_edge_mapper_with_graph_cutter_custom_names.onnx"}); + auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } -OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers) { - ONNXModelEditor 
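The custom-names test above replaces the old `InputEdge{idx, port, "name"}` form with `cut_and_add_new_input()`, which performs the cut and names the new input in one step. A minimal sketch, with placeholder names:

```cpp
// Sketch: cut one edge and name the Parameter the cut creates.
auto consumers =
    input_model->get_place_by_operation_name("relu1_name")->get_consuming_operations();
input_model->cut_and_add_new_input(consumers[0]->get_input_port(0), "new_name_1");

// Reusing a name that already exists in the graph throws, which the
// subgraph__custom_input_name_already_exist test below relies on.
```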
 
-OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    std::vector<InputEdge> output_consumers = editor.find_output_consumers("relu1");
-    EXPECT_EQ(output_consumers.size(), 3);
-    EXPECT_EQ(output_consumers[0].m_node_idx, 1);
-    EXPECT_EQ(output_consumers[0].m_port_idx, 0);
-    EXPECT_EQ(output_consumers[1].m_node_idx, 3);
-    EXPECT_EQ(output_consumers[1].m_port_idx, 0);
-    EXPECT_EQ(output_consumers[2].m_node_idx, 6);
-    EXPECT_EQ(output_consumers[2].m_port_idx, 0);
-
-    output_consumers = editor.find_output_consumers("add1");
-    EXPECT_EQ(output_consumers.size(), 2);
-    EXPECT_EQ(output_consumers[0].m_node_idx, 3);
-    EXPECT_EQ(output_consumers[0].m_port_idx, 1);
-    EXPECT_EQ(output_consumers[1].m_node_idx, 4);
-    EXPECT_EQ(output_consumers[1].m_port_idx, 0);
-
-    output_consumers = editor.find_output_consumers("in3");
-    EXPECT_EQ(output_consumers.size(), 1);
-    EXPECT_EQ(output_consumers[0].m_node_idx, 2);
-    EXPECT_EQ(output_consumers[0].m_port_idx, 0);
-}
+OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_2) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end);
 
-OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers_empty_result) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    input_model->extract_subgraph(
+        {input_model->get_place_by_tensor_name("in2"), input_model->get_place_by_tensor_name("in1")},
+        {});
 
-    const std::vector<InputEdge> output_consumers = editor.find_output_consumers("not_existed");
-    EXPECT_EQ(output_consumers.size(), 0);
-}
+    auto model = front_end->convert(input_model);
 
-OPENVINO_TEST(onnx_editor, editor_api_inputs_with_the_same_name) {
-    ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})};
+    auto model_ref = convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.onnx");
 
-    std::vector<InputEdge> output_consumers = editor.find_output_consumers("X");
-    EXPECT_EQ(output_consumers[0].m_node_idx, 1);
-    EXPECT_EQ(output_consumers[0].m_port_idx, 0);
-    EXPECT_EQ(output_consumers[1].m_node_idx, 1);
-    EXPECT_EQ(output_consumers[1].m_port_idx, 1);
-}
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-OPENVINO_TEST(onnx_editor, editor_api_find_output_consumers_name) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests_3.onnx"})};
-    const std::string output_name{"2891"};
-
-    std::vector<InputEdge> output_consumers = editor.find_output_consumers(output_name);
-    EXPECT_EQ(output_consumers[0].m_node_idx, 3);
-    EXPECT_EQ(output_consumers[0].m_port_idx, 0);
-    EXPECT_EQ(output_consumers[0].m_new_input_name, output_name);
-    EXPECT_EQ(output_consumers[1].m_node_idx, 4);
-    EXPECT_EQ(output_consumers[1].m_port_idx, 0);
-    EXPECT_EQ(output_consumers[1].m_new_input_name, output_name);
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
 
-OPENVINO_TEST(onnx_editor, editor_api_is_correct_and_unambiguous_node) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    bool is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"relu1"}});
-    EXPECT_EQ(is_correct_node, true);
-
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"mul2"}});
-    EXPECT_EQ(is_correct_node, true);
-
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"split2"}});
-    EXPECT_EQ(is_correct_node, true);
-
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"relu1_name"});
-    EXPECT_EQ(is_correct_node, true);
+OPENVINO_TEST(onnx_editor, subgraph__multiple_consumers_of_graph_initializer_relu2_and_init) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph_extraction_tests_2.onnx", &front_end);
 
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{2});
-    EXPECT_EQ(is_correct_node, true);
+    input_model->extract_subgraph(
+        {input_model->get_place_by_tensor_name("in2"),
+         input_model->get_place_by_tensor_name("relu3")->get_consuming_operations()[0]->get_input_port(0)},
+        {});
 
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{99});
-    EXPECT_EQ(is_correct_node, false);
+    auto model = front_end->convert(input_model);
 
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{EditorOutput{"in3"}});
-    EXPECT_EQ(is_correct_node, false);
+    auto model_ref =
+        convert_model("model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.onnx");
 
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"add_ambiguous_name"});
-    EXPECT_EQ(is_correct_node, false);
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
+    func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT);
 
-    is_correct_node = editor.is_correct_and_unambiguous_node(EditorNode{"not_exist"});
-    EXPECT_EQ(is_correct_node, false);
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
 
-OPENVINO_TEST(onnx_editor, editor_api_get_node_index) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+OPENVINO_TEST(onnx_editor, subgraph__inputs_getter) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end);
 
-    EXPECT_EQ(editor.get_node_index(EditorNode{2}), 2);
-    EXPECT_EQ(editor.get_node_index(EditorNode{EditorOutput{"relu1"}}), 0);
-    EXPECT_EQ(editor.get_node_index(EditorNode{EditorOutput{"split2"}}), 5);
-    EXPECT_EQ(editor.get_node_index(EditorNode{"relu1_name"}), 0);
+    auto inputs = input_model->get_inputs();
+    auto inputs_ref = std::vector<std::string>{"data_0", "conv1/7x7_s2_w_0", "conv1/7x7_s2_b_0"};
 
-    try {
-        editor.get_node_index(EditorNode{99});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Provided node index: 99 is out of scope") != std::string::npos);
+    EXPECT_EQ(inputs.size(), inputs_ref.size());
+    for (size_t idx = 0; idx < inputs_ref.size(); ++idx) {
+        EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]);
     }
-    try {
-        editor.get_node_index(EditorNode{"add_ambiguous_name"});
-    } catch (const std::exception& e) {
-        std::string msg{e.what()};
-        EXPECT_TRUE(
-            msg.find(
-                "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") !=
-            std::string::npos);
-    }
-}
-
-OPENVINO_TEST(onnx_editor, editor_api_input_edge_from_tensor_with_single_consumer) {
-    ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})};
-
-    const auto edge = editor.find_input_edge(EditorNode{EditorOutput{"Y"}}, EditorInput{1});
-    editor.extract_subgraph({edge}, {});
+    input_model->extract_subgraph({input_model->get_place_by_tensor_name("conv1/7x7_s2_1")}, {});
 
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/"
-                                                "subgraph__twice_input_edge_from_tensor_with_single_consumer.onnx"});
-
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
-
-    EXPECT_TRUE(result.is_ok) << result.error_message;
+    inputs = input_model->get_inputs();
+    EXPECT_EQ(inputs.size(), 1);
+    EXPECT_EQ(inputs[0]->get_names()[0], "conv1/7x7_s2_1");
 }
 
-OPENVINO_TEST(onnx_editor, editor_api_input_edge_from_tensor_with_single_consumer_ambiguous) {
-    ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"})};
-
+OPENVINO_TEST(onnx_editor, subgraph__custom_input_name_already_exist) {
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end);
     try {
-        editor.find_input_edge(EditorNode{EditorOutput{"Y"}}, EditorInput{"X"});
+        input_model->cut_and_add_new_input(input_model->get_place_by_operation_name("relu1"), "conv1/7x7_s2_b_0");
     } catch (const std::exception& e) {
         std::string msg{e.what()};
-        EXPECT_TRUE(msg.find("Node with index: 1 has more than one inputs with name: X") != std::string::npos);
+        EXPECT_TRUE(msg.find("The name 'conv1/7x7_s2_b_0' is already used by another tensor.") != std::string::npos);
     }
 }
 
 OPENVINO_TEST(onnx_editor, values__append_one_initializer) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D.onnx", &front_end);
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 2}));
-    editor.set_input_values(in_vals);
+    auto place = input_model->get_place_by_tensor_name("A");
+    input_model->set_tensor_value(place, std::vector<int64_t>{1, 2}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_input<int64_t>(Shape{2}, {5, 6});
     test_case.add_expected_output<int64_t>(Shape{2}, {6, 8});
     test_case.run();
 }
 
+/*
+// Not applicable for InputModel
 OPENVINO_TEST(onnx_editor, values__append_two_initializers_to_invalid) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D_invalid.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D_invalid.onnx", &front_end);
     std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    // in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 2}));
+    // in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 3}));
+    // editor.set_input_values(in_vals);
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 2}));
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 3}));
-    editor.set_input_values(in_vals);
+    auto place = input_model->get_place_by_operation_name_and_input_port("add_node", 0);
+    input_model->set_tensor_value(place, std::vector<int64_t>{3, 2}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    place = input_model->get_place_by_operation_name_and_input_port("add_node", 1);
+    input_model->set_tensor_value(place, std::vector<int64_t>{1, 3}.data());
+
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int64_t>(Shape{2}, {5, 5});
     test_case.run();
 }
+*/
 
 OPENVINO_TEST(onnx_editor, values__modify_one_initializer) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                             TEST_ONNX_MODELS_DIRNAME,
-                                                             "model_editor/add_1D_with_initializers.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D_with_initializers.onnx", &front_end);
 
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 4}));
-    editor.set_input_values(in_vals);
+    auto place = input_model->get_place_by_tensor_name("B");
+    input_model->set_tensor_value(place, std::vector<int64_t>{3, 4}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int64_t>(Shape{2}, {4, 6});
     test_case.run();
 }
 
 OPENVINO_TEST(onnx_editor, values__modify_two_initializers) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                             TEST_ONNX_MODELS_DIRNAME,
-                                                             "model_editor/add_1D_with_initializers.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D_with_initializers.onnx", &front_end);
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 6}));
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 1}));
-    editor.set_input_values(in_vals);
+    auto place = input_model->get_place_by_tensor_name("A");
+    input_model->set_tensor_value(place, std::vector<int64_t>{3, 6}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    place = input_model->get_place_by_tensor_name("B");
+    input_model->set_tensor_value(place, std::vector<int64_t>{2, 1}.data());
+
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int64_t>(Shape{2}, {5, 7});
     test_case.run();
 }
 
+/*
+// Not applicable for InputModel
 OPENVINO_TEST(onnx_editor, values__no_inputs_modify_two_initializers) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                             TEST_ONNX_MODELS_DIRNAME,
-                                                             "model_editor/add_1D_with_initializers_only.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D_with_initializers_only.onnx", &front_end);
     std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 2}));
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i64, Shape{2}, {11, 22}));
-    editor.set_input_values(in_vals);
+    auto place = input_model->get_place_by_tensor_name("A");
+    input_model->set_tensor_value(place, std::vector<int64_t>{1, 2}.data());
+
+    place = input_model->get_place_by_tensor_name("B");
+    input_model->set_tensor_value(place, std::vector<int64_t>{11, 22}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int64_t>(Shape{2}, {12, 24});
     test_case.run();
 }
+*/
 
 OPENVINO_TEST(onnx_editor, values__append_two_initializers_change_shape_type) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_1D.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/add_1D.onnx", &front_end);
+
+    auto place = input_model->get_place_by_tensor_name("A");
+    input_model->set_element_type(place, element::i8);
+    input_model->set_partial_shape(place, Shape{2, 1});
+    input_model->set_tensor_value(place, std::vector<int8_t>{-1, 1}.data());
 
-    in_vals.emplace("A", ov::op::v0::Constant::create(element::i8, Shape{2, 1}, {-1, 1}));
-    in_vals.emplace("B", ov::op::v0::Constant::create(element::i8, Shape{2, 1}, {-2, 2}));
-    editor.set_input_values(in_vals);
+    place = input_model->get_place_by_tensor_name("B");
+    input_model->set_element_type(place, element::i8);
+    input_model->set_partial_shape(place, Shape{2, 1});
+    input_model->set_tensor_value(place, std::vector<int8_t>{-2, 2}.data());
 
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int8_t>(Shape{2, 1}, {-3, 3});
     test_case.run();
 }
 
 OPENVINO_TEST(onnx_editor, values__append_two_initializers_mixed_types) {
-    onnx_editor::ONNXModelEditor editor{ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "gather_elements_float_3D_axis_2.onnx"})};
-    std::map<std::string, std::shared_ptr<ov::op::v0::Constant>> in_vals;
-
-    in_vals.emplace("data", ov::op::v0::Constant::create(element::i16, Shape{2, 2, 2}, {1, 2, 3, 4, 5, 6, 7, 8}));
-    in_vals.emplace("indices", ov::op::v0::Constant::create(element::i32, Shape{2, 2, 1}, {0, 1, 0, 1}));
-    editor.set_input_values(in_vals);
-
-    const auto function = editor.get_function();
-    auto test_case = ov::test::TestCase(function);
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("gather_elements_float_3D_axis_2.onnx", &front_end);
+    auto place = input_model->get_place_by_tensor_name("data");
+    input_model->set_element_type(place, element::i16);
+    input_model->set_partial_shape(place, Shape{2, 2, 2});
+    input_model->set_tensor_value(place, std::vector<int16_t>{1, 2, 3, 4, 5, 6, 7, 8}.data());
+
+    place = input_model->get_place_by_tensor_name("indices");
+    input_model->set_element_type(place, element::i32);
+    input_model->set_partial_shape(place, Shape{2, 2, 1});
+    input_model->set_tensor_value(place, std::vector<int32_t>{0, 1, 0, 1}.data());
+
+    const auto model = front_end->convert(input_model);
+    auto test_case = ov::test::TestCase(model);
     test_case.add_expected_output<int16_t>(Shape{2, 2, 1}, {1, 4, 5, 8});
     test_case.run();
 }
 
-OPENVINO_TEST(onnx_editor, read_model_from_stream) {
-    std::string path = ov::util::path_join(
-        {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "external_data/external_data.onnx"});
-    std::ifstream stream{path, std::ios::in | std::ios::binary};
-    ASSERT_TRUE(stream.is_open());
-    ONNXModelEditor editor{stream, path};
-
-    auto test_case = ov::test::TestCase(editor.get_function());
-    test_case.add_input<float>({1.f, 2.f, 3.f, 4.f});
-    test_case.add_expected_output<float>(Shape{2, 2}, {3.f, 6.f, 9.f, 12.f});
-
-    test_case.run();
-
-    stream.close();
-}
-
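The `values__*` tests above all replace `set_input_values()` with the InputModel freeze flow: optionally retype and reshape a place, then bind a raw buffer to it. A minimal sketch, with placeholder names; the pointer only needs to stay valid for the duration of the `set_tensor_value` call, which is why the tests can pass a temporary vector's `.data()`:

```cpp
// Sketch: freeze input "A" (placeholder name) as an i8 constant of shape {2, 1}.
auto place = input_model->get_place_by_tensor_name("A");
input_model->set_element_type(place, element::i8);    // optional retype
input_model->set_partial_shape(place, Shape{2, 1});   // optional reshape
std::vector<int8_t> values{-1, 1};
input_model->set_tensor_value(place, values.data());  // copied during the call

auto model = front_end->convert(input_model);         // "A" is now a Constant
```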
 OPENVINO_TEST(onnx_editor, combined__cut_and_replace_shape) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph__inception_head.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/subgraph__inception_head.onnx", &front_end);
 
     const auto new_shape = PartialShape({1, 64, 112, 112});
-    editor.extract_subgraph({{InputEdge(1, 0)}}, {});
-    editor.set_input_shapes({{"conv1/7x7_s2_1", new_shape}});
+    auto place = input_model->get_place_by_tensor_name("conv1/7x7_s2_1");
+    input_model->extract_subgraph({place}, {});
+    input_model->set_partial_shape(place, new_shape);
 
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/subgraph__linear_model_head_cut.onnx"});
+    auto model = front_end->convert(input_model);
+    const auto model_ref = convert_model("model_editor/reference/subgraph__linear_model_head_cut.onnx");
 
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
 
-    EXPECT_TRUE(result.is_ok) << result.error_message;
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 
-    const auto graph_inputs = editor.get_function()->get_parameters();
+    const auto graph_inputs = model->get_parameters();
     EXPECT_TRUE(find_input(graph_inputs, "conv1/7x7_s2_1")->get_partial_shape().same_scheme(new_shape));
 }
 
 OPENVINO_TEST(onnx_editor, cut_operator_with_no_schema) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/unknown_input_value_info.onnx"})};
+    FrontEnd::Ptr front_end;
+    auto input_model = load_model("model_editor/unknown_input_value_info.onnx", &front_end);
+    input_model->extract_subgraph({input_model->get_place_by_tensor_name("X")}, {});
 
-    editor.extract_subgraph({{InputEdge{1, 0}}}, {});
+    auto model = front_end->convert(input_model);
+    const auto model_ref = convert_model("model_editor/reference/unknown_input_value_info.onnx");
 
-    const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/reference/unknown_input_value_info.onnx"});
+    FunctionsComparator func_comparator = FunctionsComparator::with_default();
 
-    const auto result = compare_onnx_models(editor.model_string(), ref_model);
-
-    EXPECT_TRUE(result.is_ok) << result.error_message;
-}
-
-OPENVINO_TEST(onnx_editor, get_source_tensor_name) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    EXPECT_EQ(editor.get_source_tensor_name(InputEdge{0, 0}), "in1");
-    EXPECT_EQ(editor.get_source_tensor_name(InputEdge{1, 0}), "relu1");
-    EXPECT_EQ(editor.get_source_tensor_name(InputEdge{1, 1}), "in2");
-    const auto edge1 = editor.find_input_edge(EditorOutput{"conv1"}, 1);
-    EXPECT_EQ(editor.get_source_tensor_name(edge1), "in4");
-    const auto edge2 = editor.find_input_edge(EditorOutput{"split2"}, 0);
-    EXPECT_EQ(editor.get_source_tensor_name(edge2), "add2");
-    EXPECT_EQ(editor.get_source_tensor_name(InputEdge{999, 999}), "");
+    const FunctionsComparator::Result res = func_comparator(model, model_ref);
+    ASSERT_TRUE(res.valid) << res.message;
 }
 
 OPENVINO_TEST(onnx_editor, is_model_input) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    EXPECT_TRUE(editor.is_input(InputEdge{0, 0}));
-    const auto edge1 = editor.find_input_edge(EditorOutput{"add1"}, 1);
-    EXPECT_TRUE(editor.is_input(edge1));
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx");
 
-    EXPECT_FALSE(editor.is_input(InputEdge{1, 2}));
-    EXPECT_FALSE(editor.is_input(InputEdge{3, 0}));
-    EXPECT_FALSE(editor.is_input(InputEdge{11, 0}));
-    const auto edge2 = editor.find_input_edge(EditorOutput{"conv1"}, 2);
-    EXPECT_FALSE(editor.is_input(edge2));
-    EXPECT_FALSE(editor.is_input(InputEdge{2, 1}));  // initializer is not treated as input
-    const auto edge3 = editor.find_input_edge(EditorOutput{"conv1"}, EditorInput{"in4"});
-    EXPECT_FALSE(editor.is_input(edge3));
-}
-
-OPENVINO_TEST(onnx_editor, get_target_tensor_name) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
-
-    EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{0, 0}), "relu1");
-    EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{1, 0}), "add1");
-    EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{4, 0}), "mul2");
-    const auto edge1 = editor.find_output_edge("split1");
-    EXPECT_EQ(editor.get_target_tensor_name(edge1), "split1");
-    EXPECT_EQ(editor.get_target_tensor_name(OutputEdge{999, 999}), "");
+    EXPECT_TRUE(input_model->get_place_by_tensor_name("in2")->is_input());
+    EXPECT_FALSE(input_model->get_place_by_tensor_name("conv1")->is_input());
 }
 
 OPENVINO_TEST(onnx_editor, is_model_output) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx");
 
-    EXPECT_TRUE(editor.is_output(OutputEdge{4, 0}));
-    EXPECT_TRUE(editor.is_output(OutputEdge{5, 1}));
-    const auto edge1 = editor.find_output_edge(EditorNode{"split_name"}, EditorOutput{"split2"});
-    EXPECT_TRUE(editor.is_output(edge1));
-
-    EXPECT_FALSE(editor.is_output(OutputEdge{4, 1}));
-    EXPECT_FALSE(editor.is_output(OutputEdge{0, 0}));
-    EXPECT_FALSE(editor.is_output(OutputEdge{11, 0}));
-    const auto edge2 = editor.find_output_edge("add2");
-    EXPECT_FALSE(editor.is_output(edge2));
+    EXPECT_TRUE(input_model->get_place_by_tensor_name("split2")->is_output());
+    EXPECT_FALSE(input_model->get_place_by_tensor_name("add2")->is_output());
 }
 
 OPENVINO_TEST(onnx_editor, model_inputs) {
-    ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(),
-                                                TEST_ONNX_MODELS_DIRNAME,
-                                                "model_editor/subgraph_extraction_tests.onnx"})};
+    auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx");
+
+    auto inputs = input_model->get_inputs();
+    auto inputs_ref = std::vector<std::string>{"in1", "in2", "in3"};
 
-    const auto inputs = editor.model_inputs();
-    EXPECT_TRUE(inputs == (std::vector<std::string>{"in1", "in2", "in3"}));  // in4 is initializer
+    EXPECT_EQ(inputs.size(), inputs_ref.size());
+    for (size_t idx = 0; idx < inputs_ref.size(); ++idx) {
+        EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]);
+    }
 }
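The `is_model_input`/`is_model_output` and `model_inputs` tests above show the query side of the API: `get_inputs()`/`get_outputs()` return lists of `Place` pointers, and each place can carry several names. A short sketch of iterating them; the printing is illustrative only:

```cpp
// Sketch: enumerate the model's inputs; the tests above compare only
// get_names()[0], since a place may hold more than one name.
#include <iostream>

for (const auto& place : input_model->get_inputs()) {
    std::cout << place->get_names()[0] << "\n";
}
```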
load_model("instance_norm_dynamic.onnx"); + auto inputs = input_model->get_inputs(); + auto inputs_ref = std::vector{"input"}; - const auto inputs = editor.model_inputs(); - EXPECT_TRUE(inputs == (std::vector{"input"})); + EXPECT_EQ(inputs.size(), inputs_ref.size()); + for (size_t idx = 0; idx < inputs_ref.size(); ++idx) { + EXPECT_EQ(inputs[idx]->get_names()[0], inputs_ref[idx]); + } } OPENVINO_TEST(onnx_editor, model_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + auto outputs = input_model->get_outputs(); + auto outputs_ref = std::vector{"mul1", "split2", "mul2"}; - const auto outputs = editor.model_outputs(); - EXPECT_TRUE(outputs == (std::vector{"mul1", "split2", "mul2"})); + EXPECT_EQ(outputs.size(), outputs_ref.size()); + for (size_t idx = 0; idx < outputs_ref.size(); ++idx) { + EXPECT_EQ(outputs[idx]->get_names()[0], outputs_ref[idx]); + } } OPENVINO_TEST(onnx_editor, get_tensor_shape) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_tensor_shape("mul2"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("in1"), (PartialShape{2, 2})); - EXPECT_EQ(editor.get_tensor_shape("in2"), (PartialShape{})); - EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("relu1"), (PartialShape{2, 2})); - EXPECT_EQ(editor.get_tensor_shape("add1"), (PartialShape{2, 2})); + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("mul2")), + (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in1")), (PartialShape{2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in2")), (PartialShape{})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("relu1")), (PartialShape{2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("add1")), (PartialShape{2, 2})); try { - editor.get_tensor_shape("not_existed"); + input_model->get_partial_shape(input_model->get_place_by_tensor_name("not_existed")); } catch (const std::exception& e) { std::string msg{e.what()}; - EXPECT_TRUE(msg.find("The tensor: not_existed was not found in the graph") != std::string::npos); + EXPECT_TRUE(msg.find("expects a pointer") != std::string::npos); } + EXPECT_THROW(input_model->get_partial_shape(nullptr), ov::Exception); } OPENVINO_TEST(onnx_editor, get_tensor_shape_after_modification) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("conv1"), (PartialShape{1, 1, 2, 2})); - EXPECT_EQ(editor.get_tensor_shape("mul2"), (PartialShape{1, 1, 2, 2})); - editor.set_input_shapes({{"in3", (PartialShape{1, 1, 4, 4})}}); - EXPECT_EQ(editor.get_tensor_shape("conv1"), (PartialShape{1, 1, 4, 4})); - 
EXPECT_EQ(editor.get_tensor_shape("in3"), (PartialShape{1, 1, 4, 4})); + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("conv1")), + (PartialShape{1, 1, 2, 2})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("mul2")), + (PartialShape{1, 1, 2, 2})); + input_model->set_partial_shape(input_model->get_place_by_tensor_name("in3"), PartialShape{1, 1, 4, 4}); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("conv1")), + (PartialShape{1, 1, 4, 4})); + EXPECT_EQ(input_model->get_partial_shape(input_model->get_place_by_tensor_name("in3")), (PartialShape{1, 1, 4, 4})); } OPENVINO_TEST(onnx_editor, is_correct_tensor_name) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - EXPECT_TRUE(editor.is_correct_tensor_name("in1")); - EXPECT_TRUE(editor.is_correct_tensor_name("relu1")); - EXPECT_TRUE(editor.is_correct_tensor_name("split2")); - EXPECT_TRUE(editor.is_correct_tensor_name("mul2")); - EXPECT_TRUE(editor.is_correct_tensor_name("in4")); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end); + EXPECT_TRUE(input_model->get_place_by_tensor_name("in1")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("relu1")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("split2")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("mul2")); + EXPECT_TRUE(input_model->get_place_by_tensor_name("in4")); + EXPECT_FALSE(input_model->get_place_by_operation_name("add_ambiguous_name")); + EXPECT_FALSE(input_model->get_place_by_operation_name("")); - EXPECT_FALSE(editor.is_correct_tensor_name("relu1_name")); - EXPECT_FALSE(editor.is_correct_tensor_name("not_existed")); - EXPECT_FALSE(editor.is_correct_tensor_name("")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("relu1_name")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("not_existed")); + EXPECT_FALSE(input_model->get_place_by_tensor_name("")); } OPENVINO_TEST(onnx_editor, get_input_ports) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const auto ports_1 = editor.get_input_ports(EditorNode{"relu1_name"}); - EXPECT_EQ(ports_1.size(), 1); - EXPECT_EQ(ports_1[0], "in1"); - const auto ports_2 = editor.get_input_ports(EditorNode{"split_name"}); - EXPECT_EQ(ports_2.size(), 1); - EXPECT_EQ(ports_2[0], "add2"); - const auto ports_3 = editor.get_input_ports(EditorNode{EditorOutput{"add2"}}); - EXPECT_EQ(ports_3.size(), 2); - EXPECT_EQ(ports_3[0], "relu1"); - EXPECT_EQ(ports_3[1], "add1"); - try { - editor.get_input_ports(EditorNode{"add_ambiguous_name"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } - try { - editor.get_input_ports(EditorNode{""}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find("The node with name: not_given, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } + auto 
input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + const auto ports_1 = input_model->get_place_by_operation_name("relu1_name"); + EXPECT_EQ(ports_1->get_input_port()->get_source_tensor()->get_names()[0], "in1"); + EXPECT_FALSE(ports_1->get_input_port(1)); + const auto ports_2 = input_model->get_place_by_operation_name("split_name"); + EXPECT_EQ(ports_2->get_input_port(0)->get_source_tensor()->get_names()[0], "add2"); + EXPECT_FALSE(ports_2->get_input_port(1)); + const auto ports_3 = input_model->get_place_by_tensor_name("add2")->get_producing_operation(); + EXPECT_EQ(ports_3->get_input_port(0)->get_source_tensor()->get_names()[0], "relu1"); + EXPECT_EQ(ports_3->get_input_port(1)->get_source_tensor()->get_names()[0], "add1"); + EXPECT_FALSE(ports_3->get_input_port(2)); } + OPENVINO_TEST(onnx_editor, get_output_ports) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - const auto ports_1 = editor.get_output_ports(EditorNode{"relu1_name"}); - EXPECT_EQ(ports_1.size(), 1); - EXPECT_EQ(ports_1[0], "relu1"); - const auto ports_2 = editor.get_output_ports(EditorNode{"split_name"}); - EXPECT_EQ(ports_2.size(), 2); - EXPECT_EQ(ports_2[0], "split1"); - EXPECT_EQ(ports_2[1], "split2"); - const auto ports_3 = editor.get_output_ports(EditorNode{EditorOutput{"add2"}}); - EXPECT_EQ(ports_3.size(), 1); - EXPECT_EQ(ports_3[0], "add2"); - try { - editor.get_output_ports(EditorNode{"add_ambiguous_name"}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find( - "The node with name: add_ambiguous_name, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } - try { - editor.get_output_ports(EditorNode{""}); - } catch (const std::exception& e) { - std::string msg{e.what()}; - EXPECT_TRUE( - msg.find("The node with name: not_given, output_name: not_given, node_index: not_given is ambiguous") != - std::string::npos); - } + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + const auto ports_1 = input_model->get_place_by_operation_name("relu1_name"); + EXPECT_EQ(ports_1->get_output_port(0)->get_target_tensor()->get_names()[0], "relu1"); + EXPECT_FALSE(ports_1->get_output_port(1)); + const auto ports_2 = input_model->get_place_by_operation_name("split_name"); + EXPECT_EQ(ports_2->get_output_port(0)->get_target_tensor()->get_names()[0], "split1"); + EXPECT_EQ(ports_2->get_output_port(1)->get_target_tensor()->get_names()[0], "split2"); + EXPECT_FALSE(ports_2->get_output_port(2)); + const auto ports_3 = input_model->get_place_by_tensor_name("add2")->get_producing_operation(); + EXPECT_EQ(ports_3->get_output_port()->get_target_tensor()->get_names()[0], "add2"); + EXPECT_FALSE(ports_3->get_output_port(1)); } OPENVINO_TEST(onnx_editor, add_output) { - ONNXModelEditor editor{ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_abc.onnx"})}; - - editor.add_output({OutputEdge{0, 0}}); - - const auto edge1 = editor.find_output_edge(EditorNode{"add_node1"}, EditorOutput{"X"}); - EXPECT_TRUE(editor.is_output(edge1)); -} + auto input_model = load_model("model_editor/add_abc.onnx"); -OPENVINO_TEST(onnx_editor, get_tensor_element_type) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - 
EXPECT_EQ(editor.get_input_type("in1"), (element::f32)); - EXPECT_EQ(editor.get_input_type("in2"), (element::f32)); - editor.set_input_types({{"in3", (element::f16)}}); - EXPECT_EQ(editor.get_input_type("in3"), (element::f16)); -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_one_edge_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{1, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1}}, {}, true); - - const auto ref_model = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.onnx"}); - - auto result = compare_onnx_models(editor.model_string(), ref_model); - - // InputEdge{5, "add2"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("split1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_2}}, {}, true); - - const auto ref_model1 = - ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.onnx"}); + input_model->add_output(input_model->get_place_by_operation_name("add_node1")->get_target_tensor()); - result = compare_onnx_models(editor.model_string(), ref_model1); + EXPECT_EQ(input_model->get_outputs().size(), 2); - EXPECT_TRUE(result.is_ok) << result.error_message; + EXPECT_THROW(input_model->add_output(nullptr), ov::Exception); } -OPENVINO_TEST(onnx_editor, subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "add1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(1)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), 
ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{6, "relu1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul1")), EditorInput(0)); - // InputEdge{1, "relu1"} - const auto input_edge_3 = editor.find_input_edge(EditorNode(EditorOutput("add1")), EditorInput(0)); - - editor.extract_subgraph({{input_edge_1, input_edge_2, input_edge_3}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; -} - -OPENVINO_TEST(onnx_editor, subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/subgraph_extraction_tests.onnx"})}; - - // InputEdge{3, "relu1"} - const auto input_edge_1 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(0)); - // InputEdge{4, "add1"} - const auto input_edge_2 = editor.find_input_edge(EditorNode(EditorOutput("mul2")), EditorInput(0)); - // InputEdge{3, "add1"} - const auto input_edge_3 = editor.find_input_edge(EditorNode(EditorOutput("add2")), EditorInput(1)); - - editor.extract_subgraph({{input_edge_2, input_edge_1, input_edge_3}}, {}, true); - - const auto ref_model = ov::util::path_join( - {ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.onnx"}); - - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; +OPENVINO_TEST(onnx_editor, get_tensor_element_type) { + auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx"); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in1")), element::f32); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in2")), element::f32); + input_model->set_element_type(input_model->get_place_by_tensor_name("in3"), element::f16); + EXPECT_EQ(input_model->get_element_type(input_model->get_place_by_tensor_name("in3")), element::f16); + EXPECT_THROW(input_model->get_element_type(nullptr), ov::Exception); } OPENVINO_TEST(onnx_editor, subgraph__duplicated_output) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"})}; - - const auto y_out_edge = editor.find_output_edge("Y"); - editor.extract_subgraph({}, {{y_out_edge}}); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab_duplicated_output.onnx", &front_end); + const auto y_out = input_model->get_place_by_tensor_name("Y"); + EXPECT_TRUE(y_out); + input_model->extract_subgraph({}, {y_out}); - const auto ref_model = ov::util::path_join( - 
{ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "model_editor/add_ab.onnx"}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/add_ab.onnx"); - const auto result = compare_onnx_models(editor.model_string(), ref_model); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, subgraph__duplicated_output_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"})}; + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/add_ab_duplicated_output.onnx", &front_end); + const auto y_out_1 = input_model->get_place_by_tensor_name("Y"); + const auto y_out_2 = input_model->get_place_by_tensor_name("Y"); + EXPECT_TRUE(y_out_1); + EXPECT_TRUE(y_out_2); + input_model->extract_subgraph({}, {y_out_1, y_out_2}); - const auto y_out_edge_1 = editor.find_output_edge("Y"); - const auto y_out_edge_2 = editor.find_output_edge("Y"); - editor.extract_subgraph({}, {{y_out_edge_1, y_out_edge_2}}); + auto model = front_end->convert(input_model); + const auto model_ref = convert_model("model_editor/add_ab_duplicated_output.onnx"); - const auto ref_model = ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/add_ab_duplicated_output.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.enable(FunctionsComparator::CONSUMERS_COUNT); - // Model not changed - const auto result = compare_onnx_models(editor.model_string(), ref_model); - - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } OPENVINO_TEST(onnx_editor, onnx_shape_infer_exception) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/onnx_shape_infer_exception.onnx"})}; - - const auto input_edge = editor.find_input_edge(EditorNode(EditorOutput("input_ReduceMin")), EditorInput(0)); + auto input_model = load_model("model_editor/onnx_shape_infer_exception.onnx"); - EXPECT_NO_THROW(editor.extract_subgraph({{input_edge}}, {})); + EXPECT_NO_THROW(input_model->extract_subgraph({input_model->get_place_by_operation_name("input_ReduceMin")}, {})); } diff --git a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp index 7b8a36ad209f1c..7caea8199c64e3 100644 --- a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp +++ b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp @@ -7,64 +7,59 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; -using namespace ov::onnx_editor; +using namespace ov::frontend; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); OPENVINO_TEST(onnx_editor, topological_sort_two_nodes_swap) { - ONNXModelEditor 
editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/topological_sort/two_nodes_swap.onnx"})}; - ASSERT_NO_THROW(editor.get_function()); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/topological_sort/two_nodes_swap.onnx", &front_end); + ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/topological_sort/completely_unsorted.onnx"})}; - ASSERT_NO_THROW(editor.get_function()); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/topological_sort/completely_unsorted.onnx", &front_end); + ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2) { - ONNXModelEditor editor{ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/topological_sort/completely_unsorted_2.onnx"})}; - ASSERT_NO_THROW(editor.get_function()); + FrontEnd::Ptr front_end; + auto input_model = load_model("model_editor/topological_sort/completely_unsorted_2.onnx", &front_end); + ASSERT_NO_THROW(front_end->convert(input_model)); } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2_wstring) { - ONNXModelEditor editor{ - ov::util::string_to_wstring(ov::util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "model_editor/topological_sort/completely_unsorted_2.onnx"}))}; - ASSERT_NO_THROW(editor.get_function()); + FrontEnd::Ptr front_end; + auto input_model = load_model(L"model_editor/topological_sort/completely_unsorted_2.onnx", &front_end); + ASSERT_NO_THROW(front_end->convert(input_model)); } #endif OPENVINO_TEST(onnx_editor, topological_sort_constant_node_in_the_graph) { const std::string rel_path_to_model = "model_editor/topological_sort/add_abc_const_node_unsorted.onnx"; - ONNXModelEditor editor{ - ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, rel_path_to_model})}; + FrontEnd::Ptr front_end; + auto input_model = load_model(rel_path_to_model, &front_end); - ASSERT_NO_THROW(editor.get_function()); + ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_multioutput_node) { const std::string rel_path_to_model = "model_editor/topological_sort/multioutput_split_unsorted.onnx"; - ONNXModelEditor editor{ - ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, rel_path_to_model})}; + FrontEnd::Ptr front_end; + auto input_model = load_model(rel_path_to_model, &front_end); - ASSERT_NO_THROW(editor.get_function()); + ASSERT_NO_THROW(front_end->convert(input_model)); } /* // No suitable functionality yet OPENVINO_TEST(onnx_editor, topological_sort_graph_not_changed_if_the_same_name_of_unsorted_node_and_initializer) { const std::string rel_path_to_model = "model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.onnx"; @@ -91,3 +86,4 @@ OPENVINO_TEST(onnx_editor, topological_sort_graph_not_changed_if_empty_input_nam const auto result = compare_onnx_models(editor.model_string(), ref_model); EXPECT_TRUE(result.is_ok) << result.error_message; } +*/ \ No newline at end of file
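[Editor's note: the tests ported above all follow the flow this patch introduces in onnx_utils: load an InputModel, optionally customize it through the Place API, then convert it with the FrontEnd that loaded it. A minimal sketch of that flow, reusing a model path from the tests above; the concrete shape value is illustrative only:

FrontEnd::Ptr front_end;
auto input_model = load_model("model_editor/subgraph_extraction_tests.onnx", &front_end);
// optional customization before conversion, e.g. overriding an input shape
input_model->set_partial_shape(input_model->get_place_by_tensor_name("in1"), PartialShape{4, 4});
std::shared_ptr<ov::Model> model = front_end->convert(input_model);

]
diff --git 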
a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 5269cf3b0ce355..6d014e79ad7f3e 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6219,3 +6219,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_bitwise_xor_broadcast_condition) { test_case.run(); } + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_bitwise_not) { + auto model = convert_model("bitwise_not.onnx"); + + auto test_case = ov::test::TestCase(model, s_device); + test_case.add_input(Shape{5}, {5, 10, 200, 35, 1}); + test_case.add_expected_output(Shape{5}, {-6, -11, -201, -36, -2}); + + test_case.run(); +} diff --git a/src/frontends/onnx/tests/onnx_import_exceptions.cpp b/src/frontends/onnx/tests/onnx_import_exceptions.cpp index 09446f39738db7..62a4b332f148a7 100644 --- a/src/frontends/onnx/tests/onnx_import_exceptions.cpp +++ b/src/frontends/onnx/tests/onnx_import_exceptions.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/type_prop.hpp" -#include "exceptions.hpp" #include "gtest/gtest.h" #include "onnx_utils.hpp" @@ -34,10 +33,10 @@ TEST(onnx_importer, exception_msg_onnx_node_validation_failure) { convert_model("instance_norm_bad_scale_type.onnx"); // Should have thrown, so fail if it didn't FAIL() << "ONNX Importer did not detected incorrect model!"; - } catch (const ::ov::frontend::onnx_error::OnnxNodeValidationFailure& e) { + } catch (const ::ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("While validating ONNX node ' - -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_control.hpp" -#include "gtest/gtest.h" -#include "onnx_utils.hpp" - -using namespace ov::frontend::onnx::tests; - -static std::string s_manifest = onnx_backend_manifest(MANIFEST); - -OPENVINO_TEST(onnx, check_ir_version_support) { - // It appears you've changed the ONNX library version used by OpenVINO. Please update the value - // tested below to make sure it equals the current IR_VERSION enum value defined in ONNX headers - // - // You should also check the onnx_common/src/onnx_model_validator.cpp file and make sure that - // the details::onnx::is_correct_onnx_field() handles any new fields added in the new release - // of the ONNX library. Make sure to update the "Field" enum and the function mentioned above. - // - // The last step is to also update the details::onnx::contains_onnx_model_keys() function - // in the same file to make sure that prototxt format validation also covers the changes in ONNX - EXPECT_EQ(ONNX_NAMESPACE::Version::IR_VERSION, 9) - << "The IR_VERSION defined in ONNX does not match the version that OpenVINO supports. " - "Please check the source code of this test for details and explanation how to proceed."; -} diff --git a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp index eb7edeccbb8198..28814dd17b3efa 100644 --- a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp @@ -117,7 +117,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_priorbox_clustered_first_input_bad_shape) { } catch (const ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("Only 4D inputs are supported. First input rank: 5 (should be 4)")); } catch (...) 
{ - FAIL() << "Expected OnnxNodeValidationFailure exception was not thrown"; + FAIL() << "Expected ov::Exception was not thrown"; } } @@ -128,7 +128,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_priorbox_clustered_second_input_bad_shape) { } catch (const ov::Exception& e) { EXPECT_HAS_SUBSTRING(e.what(), std::string("Only 4D inputs are supported. Second input rank: 5 (should be 4)")); } catch (...) { - FAIL() << "Expected OnnxNodeValidationFailure exception was not thrown"; + FAIL() << "Expected ov::Exception was not thrown"; } } diff --git a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp index d0b9cb86391d43..b365c64f253e6c 100644 --- a/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_with_editor.in.cpp @@ -14,7 +14,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" @@ -24,7 +23,7 @@ using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); static std::string s_device = backend_name_to_device("${BACKEND_NAME}"); - +/* // ############################################################################ CORE TESTS OPENVINO_TEST(${BACKEND_NAME}, onnx_compress_axis_0) { ov::onnx_editor::ONNXModelEditor editor{ @@ -140,3 +139,4 @@ REGISTER_TYPED_TEST_SUITE_P(ElemTypesTests, onnx_test_split_multioutput_set_precission); typedef ::testing::Types ElemTypes; INSTANTIATE_TYPED_TEST_SUITE_P(${BACKEND_NAME}, ElemTypesTests, ElemTypes); +*/ diff --git a/src/frontends/onnx/tests/onnx_ops_registration.cpp b/src/frontends/onnx/tests/onnx_ops_registration.cpp index 9424fead9882a4..2ac414abb6d5b2 100644 --- a/src/frontends/onnx/tests/onnx_ops_registration.cpp +++ b/src/frontends/onnx/tests/onnx_ops_registration.cpp @@ -10,17 +10,14 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; -using namespace ov::onnx_editor; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); - +/* OPENVINO_TEST(ops_registration, check_importing_abs_in_all_opset_versions) { ONNXModelEditor editor{ ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, "abs.onnx"})}; @@ -51,3 +48,4 @@ OPENVINO_TEST(ops_registration, check_importing_add_in_different_opsets) { } } } +*/ diff --git a/src/frontends/onnx/tests/onnx_test_util.cpp b/src/frontends/onnx/tests/onnx_test_util.cpp deleted file mode 100644 index 4a1025cc9103ef..00000000000000 --- a/src/frontends/onnx/tests/onnx_test_util.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "onnx_test_util.hpp" - -#include - -#include -#include -#include - -#include "onnx_common/parser.hpp" - -using namespace ov::frontend::onnx::tests; -using namespace ov::frontend::onnx::common; - -namespace { -ComparisonResult compare_nodes(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph, - CompType comp) { - if (graph.node_size() != ref_graph.node_size()) { - return ComparisonResult::fail("The number of nodes in 
compared models doesn't match"); - } else { - for (int i = 0; i < graph.node_size(); ++i) { - const auto& lhs = graph.node(i); - const auto& rhs = ref_graph.node(i); - - if (lhs.op_type() != rhs.op_type()) { - return ComparisonResult::fail("Operation types are different at index " + std::to_string(i) + ": " + - lhs.op_type() + " vs " + rhs.op_type()); - } - - for (int j = 0; j < lhs.input_size(); ++j) { - if (!comp(lhs.input(j), rhs.input(j))) { - return ComparisonResult::fail("Input names don't match for nodes at index " + std::to_string(i) + - ": " + lhs.input(j) + " vs " + rhs.input(j)); - } - } - - for (int j = 0; j < lhs.output_size(); ++j) { - if (!comp(lhs.output(j), rhs.output(j))) { - return ComparisonResult::fail("Output names don't match for nodes at index " + std::to_string(i) + - ": " + lhs.output(j) + " vs " + rhs.output(j)); - } - } - } - } - - return ComparisonResult::pass(); -} - -ComparisonResult compare_value_info(const ONNX_NAMESPACE::ValueInfoProto& lhs, - const ONNX_NAMESPACE::ValueInfoProto& rhs, - const std::string& item_type) { - if (lhs.name() != rhs.name()) { - return ComparisonResult::fail(item_type + " names in the graph don't match: " + lhs.name() + " vs " + - rhs.name()); - } - - const auto& lhs_tensor = lhs.type().tensor_type(); - const auto& rhs_tensor = rhs.type().tensor_type(); - if (lhs_tensor.elem_type() != rhs_tensor.elem_type()) { - return ComparisonResult::fail("Element types don't match for " + item_type + " " + lhs.name() + ": " + - std::to_string(lhs_tensor.elem_type()) + " vs " + - std::to_string(rhs_tensor.elem_type())); - } - - const auto& lhs_shape = lhs_tensor.shape(); - const auto& rhs_shape = rhs_tensor.shape(); - if (lhs_shape.dim_size() != rhs_shape.dim_size()) { - return ComparisonResult::fail("Tensor ranks don't match for " + item_type + " " + lhs.name() + ": " + - std::to_string(lhs_shape.dim_size()) + " vs " + - std::to_string(rhs_shape.dim_size())); - } else { - for (int j = 0; j < lhs_shape.dim_size(); ++j) { - const auto& lhs_dim = lhs_shape.dim(j); - const auto& rhs_dim = rhs_shape.dim(j); - if ((lhs_dim.has_dim_value() && rhs_dim.has_dim_param()) || - (rhs_dim.has_dim_value() && lhs_dim.has_dim_param())) { - return ComparisonResult::fail("Dynamic vs static dimension mismatch for " + item_type + " " + - lhs.name() + " at index: " + std::to_string(j)); - } else if (lhs_dim.has_dim_value() && lhs_dim.dim_value() != rhs_dim.dim_value()) { - return ComparisonResult::fail("Shape dimensions don't match for " + item_type + " " + lhs.name() + - " at index: " + std::to_string(j) + ". 
" + - std::to_string(lhs_dim.dim_value()) + " vs " + - std::to_string(rhs_dim.dim_value())); - } - } - } - - return ComparisonResult::pass(); -} - -ComparisonResult compare_inputs(const ONNX_NAMESPACE::GraphProto& graph, const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.input_size() != ref_graph.input_size()) { - return ComparisonResult::fail( - "The number of inputs in compared models doesn't match: " + std::to_string(graph.input_size()) + " vs " + - std::to_string(ref_graph.input_size())); - } else { - for (int i = 0; i < graph.input_size(); ++i) { - const auto& lhs = graph.input(i); - const auto& rhs = ref_graph.input(i); - - const auto res = compare_value_info(lhs, rhs, "input"); - if (!res.is_ok) { - return res; - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_outputs(const ONNX_NAMESPACE::GraphProto& graph, const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.output_size() != ref_graph.output_size()) { - return ComparisonResult::fail("The number of outputs in compared models doesn't match" + - std::to_string(graph.output_size()) + " vs " + - std::to_string(ref_graph.output_size())); - } else { - for (int i = 0; i < graph.output_size(); ++i) { - const auto& lhs = graph.output(i); - const auto& rhs = ref_graph.output(i); - - const auto res = compare_value_info(lhs, rhs, "output"); - if (!res.is_ok) { - return res; - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_initializers(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph) { - if (graph.initializer_size() != ref_graph.initializer_size()) { - return ComparisonResult::fail("The number of initializers in compared models doesn't match" + - std::to_string(graph.initializer_size()) + " vs " + - std::to_string(ref_graph.initializer_size())); - } else { - for (int i = 0; i < graph.initializer_size(); ++i) { - const auto& lhs = graph.initializer(i); - const auto& rhs = ref_graph.initializer(i); - - if (lhs.name() != rhs.name()) { - return ComparisonResult::fail("Initializer names in the graph don't match: " + lhs.name() + " vs " + - rhs.name()); - } else if (lhs.data_type() != rhs.data_type()) { - return ComparisonResult::fail( - "Initializer data types in the graph don't match: " + std::to_string(lhs.data_type()) + " vs " + - std::to_string(rhs.data_type())); - } else if (lhs.dims_size() != rhs.dims_size()) { - return ComparisonResult::fail( - "Initializer ranks in the graph don't match: " + std::to_string(lhs.dims_size()) + " vs " + - std::to_string(rhs.dims_size())); - } else { - for (int j = 0; j < lhs.dims_size(); ++j) { - if (lhs.dims(j) != rhs.dims(j)) { - return ComparisonResult::fail("Shape dimensions don't match for initializer " + lhs.name() + - " at index: " + std::to_string(j) + ". 
" + - std::to_string(lhs.dims(j)) + " vs " + - std::to_string(rhs.dims(j))); - } - } - } - } - - return ComparisonResult::pass(); - } -} - -ComparisonResult compare_onnx_graphs(const ONNX_NAMESPACE::GraphProto& graph, - const ONNX_NAMESPACE::GraphProto& ref_graph, - CompType comp = default_name_comparator) { - ComparisonResult comparison = compare_inputs(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - comparison = compare_outputs(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - comparison = compare_initializers(graph, ref_graph); - if (!comparison.is_ok) { - return comparison; - } - - return compare_nodes(graph, ref_graph, comp); -} -} // namespace -namespace ov { -namespace frontend { -namespace onnx { -namespace tests { - -bool default_name_comparator(std::string lhs, std::string rhs) { - return lhs == rhs; -} - -ComparisonResult compare_onnx_models(const std::string& model, const std::string& reference_model_path, CompType comp) { - std::stringstream model_stream{model}; - const auto model_proto = parse_from_istream(model_stream); - const auto ref_model = parse_from_file(reference_model_path); - return compare_onnx_graphs(model_proto.graph(), ref_model.graph(), comp); -} - -std::string change_opset_version(const std::string& model, - const std::vector& new_opset_version, - const std::string& domain) { - std::stringstream model_stream{model}; - auto model_proto = parse_from_istream(model_stream); - model_proto.clear_opset_import(); - for (const auto& opset_version : new_opset_version) { - auto* opset_import = model_proto.add_opset_import(); - opset_import->set_version(opset_version); - opset_import->set_domain(domain); - } - - return model_proto.SerializeAsString(); -} - -} // namespace tests -} // namespace onnx -} // namespace frontend -} // namespace ov \ No newline at end of file diff --git a/src/frontends/onnx/tests/onnx_test_util.hpp b/src/frontends/onnx/tests/onnx_test_util.hpp deleted file mode 100644 index a2aa222905e0c5..00000000000000 --- a/src/frontends/onnx/tests/onnx_test_util.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -namespace ov { -namespace frontend { -namespace onnx { -namespace tests { -struct ComparisonResult { - ComparisonResult() = default; - ComparisonResult(std::string error) : is_ok{false}, error_message{std::move(error)} {} - ComparisonResult(ComparisonResult&&) = default; - ComparisonResult(const ComparisonResult&) = default; - ComparisonResult& operator=(ComparisonResult&&) = default; - ComparisonResult& operator=(const ComparisonResult&) = default; - - bool is_ok = true; - std::string error_message; - - static ComparisonResult pass() { - return {}; - } - static ComparisonResult fail(std::string error) { - return ComparisonResult{std::move(error)}; - } -}; - -bool default_name_comparator(std::string lhs, std::string rhs); - -// comp is a function to compare inputs and outputs names (as default it is a usual std::string comparison) -using CompType = std::function; -ComparisonResult compare_onnx_models(const std::string& model, - const std::string& reference_model_path, - CompType comp = default_name_comparator); - -std::string change_opset_version(const std::string& model, - const std::vector& new_opset_version, - const std::string& domain = "ai.onnx"); -} // namespace tests -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git 
a/src/frontends/onnx/tests/onnx_transformations.cpp b/src/frontends/onnx/tests/onnx_transformations.cpp index 8f3d2117b5aaa9..389032c9bbde34 100644 --- a/src/frontends/onnx/tests/onnx_transformations.cpp +++ b/src/frontends/onnx/tests/onnx_transformations.cpp @@ -3,14 +3,12 @@ // #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_control.hpp" -#include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_test_util.hpp" #include "onnx_utils.hpp" using namespace ov; -using namespace ov::onnx_editor; using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); @@ -20,6 +18,8 @@ namespace { // As a result, the names are different during each tests execution. // It requires custom way of input/output names comparison. // https://github.com/onnx/onnx/blob/767f752829f83dbc9bd0a364d6138890f667fc38/onnx/defs/function.cc#L23 +/* +// Could be used later bool after_func_expand_name_comp(std::string lhs, std::string rhs) { // it is equivalent (simplified) to (0x)?[0-9A-Fa-f]{8,} regex, but GCC 4.8 has limited support auto cut_hex_address = [](std::string& name) { @@ -58,37 +58,32 @@ bool after_func_expand_name_comp(std::string lhs, std::string rhs) { }; return cut_hex_address(lhs) == cut_hex_address(rhs); } +*/ } // namespace OPENVINO_TEST(onnx_transformations, expand_function_greater_or_equal) { - ONNXModelEditor editor{util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/greater_or_equal.onnx"})}; - editor.decode(); // onnx transformations are applied + auto model = convert_model("transformations/greater_or_equal.onnx"); + auto model_ref = convert_model("transformations/reference/greater_or_equal_expanded.onnx"); - const auto ref_model = util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/reference/" - "greater_or_equal_expanded.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.disable(FunctionsComparator::TENSOR_NAMES); - const auto result = compare_onnx_models(editor.model_string(), ref_model, after_func_expand_name_comp); + const FunctionsComparator::Result res = func_comparator(model, model_ref); // After operation translation was implemented - check it doesn't apply - EXPECT_FALSE(result.is_ok) << result.error_message; + ASSERT_TRUE(!res.valid) << res.message; } // Disabled, ticket: #81976 +/* OPENVINO_TEST(onnx_transformations, DISABLED_expand_function_softmax_crossentropy) { - ONNXModelEditor editor{util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/softmax_crossentropy_consumed.onnx"})}; - editor.decode(); // onnx transformations are applied + auto model = convert_model("transformations/softmax_crossentropy_consumed.onnx"); + auto model_ref = convert_model("transformations/reference/softmax_crossentropy_consumed_expanded.onnx"); - const auto ref_model = util::path_join({ov::test::utils::getExecutableDirectory(), - TEST_ONNX_MODELS_DIRNAME, - "transformations/reference/" - "softmax_crossentropy_consumed_expanded.onnx"}); + FunctionsComparator func_comparator = FunctionsComparator::with_default(); + func_comparator.disable(FunctionsComparator::TENSOR_NAMES); - const auto result = compare_onnx_models(editor.model_string(), ref_model, after_func_expand_name_comp); - EXPECT_TRUE(result.is_ok) << result.error_message; + const FunctionsComparator::Result 
res = func_comparator(model, model_ref); + ASSERT_TRUE(res.valid) << res.message; } +*/ diff --git a/src/frontends/onnx/tests/onnx_utils.cpp b/src/frontends/onnx/tests/onnx_utils.cpp index 9795e1f4e09cc9..3b401578ab0793 100644 --- a/src/frontends/onnx/tests/onnx_utils.cpp +++ b/src/frontends/onnx/tests/onnx_utils.cpp @@ -23,13 +23,32 @@ namespace tests { const std::string ONNX_FE = ::ONNX_FE; -shared_ptr convert_model(const string& model_path, const ov::frontend::ConversionExtensionBase::Ptr& conv_ext) { - auto fem = FrontEndManager(); - FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); +static FrontEnd::Ptr get_onnx_frontend(bool default_front_end = true) { + static FrontEnd::Ptr _front_end = nullptr; + + FrontEnd::Ptr front_end = nullptr; + + if (default_front_end) { + if (_front_end == nullptr) { + auto fem = FrontEndManager(); + _front_end = fem.load_by_framework(ONNX_FE); + } + front_end = _front_end; + } else { + auto fem = FrontEndManager(); + front_end = fem.load_by_framework(ONNX_FE); + } + if (!front_end) { throw "ONNX FrontEnd is not initialized"; } + return front_end; +} + +shared_ptr convert_model(const string& model_path, const ov::frontend::ConversionExtensionBase::Ptr& conv_ext) { + auto front_end = get_onnx_frontend(conv_ext == nullptr); + if (conv_ext) { front_end->add_extension(conv_ext); } @@ -49,11 +68,7 @@ shared_ptr convert_model(const string& model_path, const ov::frontend::Co } shared_ptr convert_model(ifstream& model_stream) { - auto fem = FrontEndManager(); - FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); - if (!front_end) { - throw "ONNX FrontEnd is not initialized"; - } + auto front_end = get_onnx_frontend(); InputModel::Ptr input_model = front_end->load(dynamic_cast(&model_stream)); if (!input_model) { @@ -69,11 +84,7 @@ shared_ptr convert_model(ifstream& model_stream) { } shared_ptr convert_partially(const string& model_path) { - auto fem = FrontEndManager(); - FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); - if (!front_end) { - throw "ONNX FrontEnd is not initialized"; - } + auto front_end = get_onnx_frontend(); auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path); InputModel::Ptr input_model = front_end->load(full_path); @@ -89,6 +100,39 @@ shared_ptr convert_partially(const string& model_path) { return model; } +InputModel::Ptr load_model(const string& model_path, FrontEnd::Ptr* return_front_end) { + auto front_end = get_onnx_frontend(); + + auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path); + InputModel::Ptr input_model = front_end->load(full_path); + if (!input_model) { + throw "Input Model is not loaded"; + } + + if (return_front_end != nullptr) { + *return_front_end = front_end; + } + + return input_model; +} + +InputModel::Ptr load_model(const wstring& model_path, FrontEnd::Ptr* return_front_end) { + auto front_end = get_onnx_frontend(); + + auto full_path = + FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + ov::util::wstring_to_string(model_path)); + InputModel::Ptr input_model = front_end->load(ov::util::string_to_wstring(full_path)); + if (!input_model) { + throw "Input Model is not loaded"; + } + + if (return_front_end != nullptr) { + *return_front_end = front_end; + } + + return input_model; +} + std::string onnx_backend_manifest(const std::string& manifest) { return ov::util::path_join({ov::test::utils::getExecutableDirectory(), manifest}); } @@ -96,4 +140,4 @@ std::string onnx_backend_manifest(const 
std::string& manifest) { } // namespace tests } // namespace onnx } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/onnx/tests/onnx_utils.hpp b/src/frontends/onnx/tests/onnx_utils.hpp index eea1e10475a764..6a263c72546d02 100644 --- a/src/frontends/onnx/tests/onnx_utils.hpp +++ b/src/frontends/onnx/tests/onnx_utils.hpp @@ -6,11 +6,12 @@ #include -#include -#include #include #include "common_test_utils/test_constants.hpp" +#include "openvino/core/model.hpp" +#include "openvino/frontend/extension.hpp" +#include "openvino/frontend/manager.hpp" // Resolves different backend names to an internal device enumeration inline std::string backend_name_to_device(const std::string& backend_name) { @@ -38,7 +39,10 @@ std::shared_ptr convert_model(std::ifstream& model_stream); // A wrapper to create ONNX Frontend and configure the conversion pipeline to get // a model with possible Framework Nodes std::shared_ptr convert_partially(const std::string& model_path); - +// Returns loaded InputModel for customizing before conversion +// If FrontEnd::Ptr has been passed - return a FrontEnd object which was used for loading model +InputModel::Ptr load_model(const std::string& model_path, ov::frontend::FrontEnd::Ptr* return_front_end = nullptr); +InputModel::Ptr load_model(const std::wstring& model_path, ov::frontend::FrontEnd::Ptr* return_front_end = nullptr); // Returns path to a manifest file std::string onnx_backend_manifest(const std::string& manifest); } // namespace tests diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index de362b10af46f7..3f59e94c3f3bd1 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -74,7 +74,6 @@ xfail_issue_119925, xfail_issue_119926, xfail_issue_125485, - xfail_issue_125486, xfail_issue_125488, skip_issue_125487, skip_issue_125489, @@ -418,19 +417,14 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ( xfail_issue_99949, - "OnnxBackendNodeModelTest.test_bitwise_not_2d_cpu", "OnnxBackendNodeModelTest.test_bitwise_not_3d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_not_4d_cpu", ), ( xfail_issue_99950, - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_and_pad_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_expanded_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_pad_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_cpu", "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu", ), @@ -702,13 +696,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_cpu", "OnnxBackendNodeModelTest.test_affine_grid_3d_cpu", ), - ( - xfail_issue_125486, - "OnnxBackendNodeModelTest.test_gelu_default_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_default_2_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_2_cpu", - ), ( xfail_issue_125488, "OnnxBackendNodeModelTest.test_image_decoder_decode_bmp_rgb_cpu", diff --git a/src/frontends/onnx/tests/tests_python/test_frontend_extension.py 
b/src/frontends/onnx/tests/tests_python/test_frontend_extension.py index 6cdc890465c374..c747919c8f6600 100644 --- a/src/frontends/onnx/tests/tests_python/test_frontend_extension.py +++ b/src/frontends/onnx/tests/tests_python/test_frontend_extension.py @@ -64,6 +64,31 @@ def custom_converter(node: NodeContext): assert fe.check_conversion_extension_registered("CustomConverter") +@skip_if_frontend_is_disabled(TENSORFLOW_FRONTEND_NAME) +@skip_if_tensorflow_not_install_by_wheel_pkg() +def test_tensorflow_multiple_extensions_fe_wrapper(): + from openvino.frontend.tensorflow import ConversionExtension + from openvino.frontend import NodeContext + + fe = FrontEndWrapperTensorflow() + + def custom_converter_one(node: NodeContext): + node.get_input(0) + node.get_attribute("alpha") + + def custom_converter_two(node: NodeContext): + node.get_input(0) + node.get_attribute("beta") + + extensions = [ + ConversionExtension("CustomConverterOne", custom_converter_one), + ConversionExtension("CustomConverterTwo", custom_converter_two), + ] + fe.add_extension(extensions) + assert fe.check_conversion_extension_registered("CustomConverterOne") + assert fe.check_conversion_extension_registered("CustomConverterTwo") + + @skip_if_frontend_is_disabled(PADDLE_FRONTEND_NAME) def test_paddle_conversion_extension_fe_wrapper(): from openvino.frontend.paddle import ConversionExtension diff --git a/src/frontends/paddle/src/internal/pass/transform_if.cpp b/src/frontends/paddle/src/internal/pass/transform_if.cpp index 203fcb1fb61696..61edc4cf1aa5cb 100644 --- a/src/frontends/paddle/src/internal/pass/transform_if.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_if.cpp @@ -58,10 +58,10 @@ ov::frontend::paddle::pass::TransformIf::TransformIf(std::vectorset_else_body(else_branch); const auto then_branch_inputs_from_parent = conditional_block->get_inputs_from_parent(); - NGRAPH_CHECK(then_branch_inputs_from_parent.size() == then_params.size(), - "Number of inputs to 'then_branch' is invalid. Expected " + - std::to_string(then_branch_inputs_from_parent.size()) + ", actual " + - std::to_string(then_params.size())); + OPENVINO_ASSERT(then_branch_inputs_from_parent.size() == then_params.size(), + "Number of inputs to 'then_branch' is invalid. 
Expected " + + std::to_string(then_branch_inputs_from_parent.size()) + ", actual " + + std::to_string(then_params.size())); auto then_param = then_params.cbegin(); for (const auto& from_parent : then_branch_inputs_from_parent) { if_node->set_input(from_parent, *then_param, nullptr); diff --git a/src/frontends/pytorch/src/node_context.cpp b/src/frontends/pytorch/src/node_context.cpp index 364999ccbe13cb..0ebd26d3b8e901 100644 --- a/src/frontends/pytorch/src/node_context.cpp +++ b/src/frontends/pytorch/src/node_context.cpp @@ -43,7 +43,6 @@ OutputVector NodeContext::as_constant() const { } else { auto c_outs = m_decoder->as_constant(); FRONT_END_OP_CONVERSION_CHECK(c_outs.size() == 1, "Constant must have exactly one output."); - c_outs[0].get_node_shared_ptr()->set_friendly_name(m_decoder->get_output_debug_name(0)); return c_outs; } } diff --git a/src/frontends/pytorch/src/op/gcd.cpp b/src/frontends/pytorch/src/op/gcd.cpp new file mode 100644 index 00000000000000..70301185afcb7a --- /dev/null +++ b/src/frontends/pytorch/src/op/gcd.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/loop.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reduce_logical_or.hpp" +#include "openvino/op/select.hpp" +#include "openvino/openvino.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_gcd(const NodeContext& context) { + num_inputs_check(context, 2, 2); + auto x = context.get_input(0); + auto y = context.get_input(1); + align_eltwise_input_types(context, x, y, true); + auto zero_i32 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); + + auto trip_count = std::make_shared(element::i32, Shape{}, 1000); + auto exec_condition = std::make_shared(element::boolean, Shape{}, true); + + auto loop = std::make_shared(trip_count, exec_condition); + + auto x_input = std::make_shared(x.get_element_type(), x.get_partial_shape()); + auto y_input = std::make_shared(y.get_element_type(), y.get_partial_shape()); + + x_input->set_element_type(x.get_element_type()); + y_input->set_element_type(y.get_element_type()); + auto zero = std::make_shared(zero_i32, x_input); + auto condition = std::make_shared(y_input, zero); + auto mod = std::make_shared(x_input, y_input); + auto new_x = std::make_shared(condition, y_input, x_input); + auto new_y = std::make_shared(condition, mod, zero); + + auto reduced_condition = std::make_shared(condition, zero); + + auto body = + std::make_shared(OutputVector{new_x, new_y, reduced_condition}, ParameterVector{x_input, y_input}); + loop->set_function(body); + + loop->set_special_body_ports({-1, 2}); + + loop->set_merged_input(x_input, x, new_x); + loop->set_merged_input(y_input, y, new_y); + + auto gcd_output = loop->get_iter_value(new_x, -1); + auto gcd_node = gcd_output.get_node_shared_ptr(); + + auto marked_gcd_node = context.mark_node(gcd_node); + return {marked_gcd_node}; +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/get_attr.cpp b/src/frontends/pytorch/src/op/get_attr.cpp index 9896ec65525ba9..58bc63e60a700e 100644 --- a/src/frontends/pytorch/src/op/get_attr.cpp +++ 
b/src/frontends/pytorch/src/op/get_attr.cpp @@ -17,6 +17,10 @@ OutputVector translate_get_attr(const NodeContext& context) { "Failed to obtain data from GetAttr with output tensor name: ", context.get_decoder()->get_output_debug_name(0)); if (res.size() == 1) { + auto node = res[0].get_node(); + if (node->get_friendly_name() != node->get_name()) { + res[0].add_names({node->get_friendly_name()}); + } return res; } else { // Packed params case diff --git a/src/frontends/pytorch/src/op/index_add.cpp b/src/frontends/pytorch/src/op/index_add.cpp new file mode 100644 index 00000000000000..d42253cb724036 --- /dev/null +++ b/src/frontends/pytorch/src/op/index_add.cpp @@ -0,0 +1,95 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/scatter_elements_update.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" +#include "openvino/op/subtract.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_index_add(const NodeContext& context) { + // aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + // aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> + // Tensor(a!) + num_inputs_check(context, 5, 6); + auto input = context.get_input(0); + auto dim = context.get_input(1); + auto index = context.mark_node(std::make_shared(context.get_input(2), element::i32)); + auto src = context.get_input(3); + auto alpha = context.get_input(4); + auto converted_alpha = context.mark_node(std::make_shared(alpha, src)); + auto alpha_src = context.mark_node(std::make_shared(converted_alpha, src)); + auto input_shape_rank = get_shape_rank(context, input); + auto const_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto const_one_0d = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto inp_rank = std::get<1>(input_shape_rank); + // ScatterElementsUpdate requires that index, source and update have the same rank + // in aten::index_add the index is a 1D array for the specific dim, and the update may differ in size + // from the source in non-indexing axes + // slice src so that only the relevant data is kept + auto src_broadcast_shape = context.mark_node(std::make_shared(const_one, inp_rank)); + auto src_broadcasted = context.mark_node( + std::make_shared(alpha_src, src_broadcast_shape, BroadcastType::BIDIRECTIONAL)); + auto src_shape_rank = get_shape_rank(context, src_broadcasted); + auto const_zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto src_rank = std::get<1>(src_shape_rank); + auto slice_start = context.mark_node(std::make_shared(const_zero, inp_rank)); + auto axes = get_node_axes_range(context, src_broadcasted); + auto const_inf = + context.mark_node(v0::Constant::create(element::i32, Shape{1}, {std::numeric_limits::max()})); + auto slice_end = context.mark_node(std::make_shared(const_inf, src_rank)); + auto slice_step = context.mark_node(std::make_shared(const_one, src_rank)); + auto dim_1d = context.mark_node(std::make_shared(dim, const_one));
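+ // Editor's note on the next few nodes: slice_end starts as INT_MAX on every axis, and the ScatterElementsUpdate below overwrites slice_end[dim] with 1, so the following Slice keeps the input's full extent on every axis except 'dim', which is cut to a single element. +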
+ slice_end = + context.mark_node(std::make_shared(slice_end, + dim_1d, + const_one, + const_zero, + v12::ScatterElementsUpdate::Reduction::NONE)); + auto new_shape = context.mark_node(std::make_shared(input, slice_start, slice_end, slice_step, axes)); + new_shape = context.mark_node(std::make_shared(new_shape, element::i32)); + auto src_ = + context.mark_node(std::make_shared(src_broadcasted, new_shape, BroadcastType::BIDIRECTIONAL)); + auto src_input_dtype = context.mark_node(std::make_shared(src_, input)); + // broadcast index to input rank size + src_rank = context.mark_node(std::make_shared(new_shape, element::i32)); + auto new_index_shape = context.mark_node(std::make_shared(const_one, src_rank)); + auto const_minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + new_index_shape = context.mark_node( + std::make_shared(new_index_shape, dim_1d, const_minus_one, const_zero)); + // preserve index locations for the specific dim + auto reshaped_index = context.mark_node(std::make_shared(index, new_index_shape, false)); + auto broadcasted_index = + context.mark_node(std::make_shared(reshaped_index, new_shape, BroadcastType::BIDIRECTIONAL)); + auto scatter_result = + context.mark_node(std::make_shared(input, + broadcasted_index, + src_, + dim, + v12::ScatterElementsUpdate::Reduction::SUM)); + if (!context.input_is_none(5)) { + context.mutate_input(5, scatter_result); + } + return {scatter_result}; +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 629b2888aeab53..82674ea59dbf71 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -84,6 +84,7 @@ OP_CONVERTER(translate_frobenius_norm); OP_CONVERTER(translate_full); OP_CONVERTER(translate_full_like); OP_CONVERTER(translate_gather); +OP_CONVERTER(translate_gcd); OP_CONVERTER(translate_gelu); OP_CONVERTER(translate_get_attr); OP_CONVERTER(translate_getitem); @@ -94,6 +95,7 @@ OP_CONVERTER(translate_gru); OP_CONVERTER(translate_hardtanh); OP_CONVERTER(translate_if); OP_CONVERTER(translate_im2col); +OP_CONVERTER(translate_index_add); OP_CONVERTER(translate_index_put_); OP_CONVERTER(translate_index_select); OP_CONVERTER(translate_instance_norm); @@ -385,6 +387,7 @@ const std::map get_supported_ops_ts() { {"aten::full", op::translate_full}, {"aten::full_like", op::translate_full_like}, {"aten::gather", op::translate_gather}, + {"aten::gcd", op::translate_gcd}, {"aten::ge", op::translate_1to1_match_2_inputs_align_types}, {"aten::gelu", op::translate_gelu}, {"aten::glu", op::translate_glu}, @@ -401,6 +404,8 @@ const std::map get_supported_ops_ts() { // aten::imag - Supported in limited set of patterns // aten::index - Supported in limited set of patterns {"aten::index_put_", op::inplace_op}, + {"aten::index_add", op::translate_index_add}, + {"aten::index_add_", op::inplace_op}, {"aten::index_select", op::translate_index_select}, {"aten::instance_norm", op::translate_instance_norm}, {"aten::Int", op::translate_int}, diff --git a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp index 141243a5a6e3af..8d2b3975769917 100644 --- a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp @@ -134,21 +134,10 @@ AtenIndexPutReplacer::AtenIndexPutReplacer() { auto input_shape = rg.make(input,
element::i32); auto input_rank = rg.make(input_shape, element::i32); auto one_const = v0::Constant::create(element::i32, Shape{1}, {1}); - auto expand_shape = rg.make(one_const, input_rank, BroadcastType::BIDIRECTIONAL); - auto expanded_mask = rg.make(index, expand_shape, BroadcastType::BIDIRECTIONAL); - auto nonzero = rg.make(expanded_mask, element::i32); + auto nonzero = rg.make(index, element::i32); auto input_order = v0::Constant::create(element::i32, Shape{2}, {1, 0}); index = rg.make(nonzero, input_order); - // source can be arbitary shape, select only relevant data - auto const_minus_1 = v0::Constant::create(element::i32, Shape{1}, {-1}); - auto flatten_values = rg.make(values, const_minus_1, false); - auto const_0 = v0::Constant::create(element::i32, Shape{1}, {0}); - - auto index_shape = rg.make(index, element::i32); - auto index_dim_zero = rg.make(index_shape, const_0, const_0); - auto slice_steps = v0::Constant::create(element::i32, Shape{1}, {1}); - auto sliced_source = rg.make(flatten_values, const_0, index_dim_zero, slice_steps, const_0); - auto result = rg.make(input, index, sliced_source); + auto result = rg.make(input, index, values); copy_runtime_info_and_name(index_op, rg.get(), rt_copy_from); replace_node(index_op, result); return true; diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.cpp b/src/frontends/pytorch/src/transforms/dict_resolver.cpp index d51eb793813bf7..a4c237210ae4d9 100644 --- a/src/frontends/pytorch/src/transforms/dict_resolver.cpp +++ b/src/frontends/pytorch/src/transforms/dict_resolver.cpp @@ -48,6 +48,7 @@ bool DictParameterResolver::run_on_model(const std::shared_ptr& model) { getitem_node->get_output_partial_shape(0)); new_param->set_friendly_name(name); getitem_node->output(0).replace(new_param); + new_param->output(0).set_names({name}); new_params.push_back(new_param); changed = true; } else { diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 15b8c1cd6e07a7..e228cedd8d834e 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -111,6 +111,10 @@ Output reshape_kernel_for_group(const NodeContext& context, const Output get_axes_range(const NodeContext& context, int input_id) { auto x = context.get_input(input_id); + return get_node_axes_range(context, x); +}; + +std::shared_ptr get_node_axes_range(const NodeContext& context, const Output& x) { auto start = std::make_shared(element::i32, Shape{}, 0); auto step = std::make_shared(element::i32, Shape{}, 1); Output reduced_rank; diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 41e19bf03f92b2..e8a9f49e7d6445 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -40,6 +40,8 @@ Output reshape_kernel_for_group(const NodeContext& context, const Output get_axes_range(const NodeContext& context, int input_id); +std::shared_ptr get_node_axes_range(const NodeContext& context, const Output& x); + Output normalize_axis(const NodeContext& context, const Output& axis, const Output& input_node); std::shared_ptr numel(const NodeContext& context, const Output& x); diff --git a/src/frontends/tensorflow/src/input_model.cpp b/src/frontends/tensorflow/src/input_model.cpp index 459ce69b5466bb..87ebc3e605b348 100644 --- a/src/frontends/tensorflow/src/input_model.cpp +++ b/src/frontends/tensorflow/src/input_model.cpp @@ -295,8 +295,9 @@ void InputModel::InputModelTFImpl::load_places() { auto output_place = std::make_shared(m_input_model, 
ov::PartialShape({}), ov::element::dynamic, - std::vector{output_name}); - m_tensor_places[output_name] = output_place; + std::vector{output_name + ":0"}); + // TODO: Create tensor places for each output port, ticket-129464 + m_tensor_places[output_name + ":0"] = output_place; m_outputs.push_back(output_place); } return; diff --git a/src/frontends/tensorflow/src/translate_session.cpp b/src/frontends/tensorflow/src/translate_session.cpp index cc21022c61a71b..65f9372737ef1a 100644 --- a/src/frontends/tensorflow/src/translate_session.cpp +++ b/src/frontends/tensorflow/src/translate_session.cpp @@ -639,12 +639,11 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu } } else if (port_type == "out") { const auto& node_outputs = indexed_from_named(ng_op_map[operation_name]); - FRONT_END_GENERAL_CHECK(node_outputs.size() > port_index, - "Output port with index " + std::to_string(port_index) + " of " + - operation_name + "node specified as custom output does not exist"); - auto result_node = std::make_shared(node_outputs[port_index]); - result_node->set_friendly_name(model_output_name); - results.push_back(result_node); + if (node_outputs.size() > port_index) { + auto result_node = std::make_shared(node_outputs[port_index]); + result_node->set_friendly_name(model_output_name); + results.push_back(result_node); + } } else if (port_type == "in") { // TODO: avoid this traversing by having a map for OpPlace objects, for example std::shared_ptr operation_place = nullptr; diff --git a/src/frontends/tensorflow/tests/compilation.cpp b/src/frontends/tensorflow/tests/compilation.cpp index 09e2466f5d7471..c4dff3f40f925e 100644 --- a/src/frontends/tensorflow/tests/compilation.cpp +++ b/src/frontends/tensorflow/tests/compilation.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - #include "gtest/gtest.h" +#include "openvino/frontend/manager.hpp" +#include "openvino/openvino.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "tf_utils.hpp" #include "utils.hpp" @@ -54,7 +53,7 @@ TEST_F(CompileModelsTests, ModelWithSplitConvConcat) ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); const auto runtime_model = compiled_model.get_runtime_model(); auto get_layer_type = [](const std::shared_ptr& node) { - return node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); }; const auto ops = runtime_model->get_ops(); EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr& node) { @@ -76,7 +75,7 @@ TEST_F(CompileModelsTests, ModelWithShapeOf) { ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); const auto runtime_model = compiled_model.get_runtime_model(); auto get_layer_type = [](const std::shared_ptr& node) { - return node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); }; const auto ops = runtime_model->get_ops(); // one Input, one Eltwise and one Output diff --git a/src/frontends/tensorflow/tests/convert_saved_model.cpp b/src/frontends/tensorflow/tests/convert_saved_model.cpp index 2c9a8a6362d16f..9a41c19167c4da 100644 --- a/src/frontends/tensorflow/tests/convert_saved_model.cpp +++ b/src/frontends/tensorflow/tests/convert_saved_model.cpp @@ -178,7 +178,7 @@ TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelWithNumericalNames) { auto z = make_shared(element::f32, Shape{1}); z->output(0).set_names({"2"});
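The ":0" suffix introduced above follows TensorFlow's <node_name>:<port> convention for tensor names, so an output place is now registered under its port-qualified alias only. A toy sketch of what that means on the OpenVINO side (a hypothetical example built on the public ov:: API, not code from the patch):

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> build_toy_model() {
    auto x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
    auto y = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
    auto add = std::make_shared<ov::op::v1::Add>(x, y);
    add->output(0).set_names({"3:0"});  // port-qualified name only
    auto res = std::make_shared<ov::op::v0::Result>(add);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{res}, ov::ParameterVector{x, y});
    // model->output("3:0") now resolves; a lookup by the bare name "3" would fail.
    return model;
}

This is the shape of the expectation encoded in the SavedModelWithNumericalNames reference test that continues below.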
auto add = make_shared(x, y); - add->output(0).set_names({"3", "3:0"}); + add->output(0).set_names({"3:0"}); auto sub = make_shared(add, z); sub->output(0).set_names({"4"}); auto result = make_shared(sub); diff --git a/src/frontends/tensorflow_common/include/utils.hpp b/src/frontends/tensorflow_common/include/utils.hpp index 1fa5d0083fde55..3ee268a9ebfded 100644 --- a/src/frontends/tensorflow_common/include/utils.hpp +++ b/src/frontends/tensorflow_common/include/utils.hpp @@ -6,16 +6,12 @@ #include "openvino/core/validation_util.hpp" #include "openvino/frontend/node_context.hpp" -#include "openvino/opsets/opset10.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/transpose.hpp" #include "openvino/pass/graph_rewrite.hpp" -#ifndef TENSORFLOW_OP_VALIDATION -# define TENSORFLOW_OP_VALIDATION(node_context, ...) \ - OPENVINO_ASSERT_HELPER(::ov::frontend::OpValidationFailure, \ - ("While validating node '" + node_context.get_op_type() + "'"), \ - __VA_ARGS__) -#endif - namespace ov { namespace frontend { namespace tensorflow { @@ -35,11 +31,11 @@ bool is_conditional_edge(const std::string& input_tensor_name); template ov::Output create_same_type_const_scalar(const ov::Output& same_type_output, const T& value) { if (same_type_output.get_element_type().is_static()) { - return std::make_shared(same_type_output.get_element_type(), ov::Shape{}, value); + return std::make_shared(same_type_output.get_element_type(), ov::Shape{}, value); } else { ov::Output const_res = - std::make_shared(ov::element::from(), ov::Shape{}, value); - const_res = std::make_shared(const_res, same_type_output); + std::make_shared(ov::element::from(), ov::Shape{}, value); + const_res = std::make_shared(const_res, same_type_output); return const_res; } } @@ -49,10 +45,10 @@ ov::Output create_same_type_const(const ov::Output& same_typ const std::vector& value, const ov::Shape& shape) { if (same_type_output.get_element_type().is_static()) { - return std::make_shared(same_type_output.get_element_type(), shape, value); + return std::make_shared(same_type_output.get_element_type(), shape, value); } else { - ov::Output const_res = std::make_shared(ov::element::from(), shape, value); - const_res = std::make_shared(const_res, same_type_output); + ov::Output const_res = std::make_shared(ov::element::from(), shape, value); + const_res = std::make_shared(const_res, same_type_output); return const_res; } } @@ -103,11 +99,11 @@ Output compute_subgraph_scalar_rank(const Output& output, element::Type output_type, bool as_scalar = false); -std::shared_ptr make_transpose(const ov::Output& arg, - const ov::AxisVector& input_order); +std::shared_ptr make_transpose(const ov::Output& arg, + const ov::AxisVector& input_order); -std::shared_ptr make_reshape(const ov::Output& arg, - const std::vector& new_shape); +std::shared_ptr make_reshape(const ov::Output& arg, + const std::vector& new_shape); template void convert_nhwc_to_hw(const std::vector& src, std::vector& dst) { diff --git a/src/frontends/tensorflow_common/src/op/binary_op.cpp b/src/frontends/tensorflow_common/src/op/binary_op.cpp index b2accf847a8984..07d047251ab9b2 100644 --- a/src/frontends/tensorflow_common/src/op/binary_op.cpp +++ b/src/frontends/tensorflow_common/src/op/binary_op.cpp @@ -14,6 +14,7 @@ #include "openvino/op/equal.hpp" #include "openvino/op/floor.hpp" #include "openvino/op/floor_mod.hpp" +#include "openvino/op/gather.hpp" #include "openvino/op/greater.hpp" #include 
"openvino/op/greater_eq.hpp" #include "openvino/op/less.hpp" diff --git a/src/frontends/tensorflow_common/src/op/conv_2d.cpp b/src/frontends/tensorflow_common/src/op/conv_2d.cpp index 726f232b3e3c5d..aa8515d19fe66d 100644 --- a/src/frontends/tensorflow_common/src/op/conv_2d.cpp +++ b/src/frontends/tensorflow_common/src/op/conv_2d.cpp @@ -3,12 +3,8 @@ // #include "common_op_table.hpp" -#include "openvino/opsets/opset8.hpp" #include "utils.hpp" -using namespace std; -using namespace ov::opset8; - namespace ov { namespace frontend { namespace tensorflow { diff --git a/src/frontends/tensorflow_common/src/op/conv_3d.cpp b/src/frontends/tensorflow_common/src/op/conv_3d.cpp index ce2ef9244566d9..a4d7e71a0ed155 100644 --- a/src/frontends/tensorflow_common/src/op/conv_3d.cpp +++ b/src/frontends/tensorflow_common/src/op/conv_3d.cpp @@ -3,10 +3,7 @@ // #include "common_op_table.hpp" -#include "openvino/opsets/opset8.hpp" - -using namespace std; -using namespace ov::opset8; +#include "utils.hpp" // Translate Conv3D Op namespace ov { diff --git a/src/frontends/tensorflow_common/src/op/div.cpp b/src/frontends/tensorflow_common/src/op/div.cpp index 2d9ead42d50f6e..c77ad4b9ad052f 100644 --- a/src/frontends/tensorflow_common/src/op/div.cpp +++ b/src/frontends/tensorflow_common/src/op/div.cpp @@ -3,7 +3,13 @@ // #include "common_op_table.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/divide.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/select.hpp" using namespace std; using namespace ov::op; diff --git a/src/frontends/tensorflow_common/src/op/dynamic_partition.cpp b/src/frontends/tensorflow_common/src/op/dynamic_partition.cpp index 648b914aa65f08..d777681e7066ee 100644 --- a/src/frontends/tensorflow_common/src/op/dynamic_partition.cpp +++ b/src/frontends/tensorflow_common/src/op/dynamic_partition.cpp @@ -14,6 +14,7 @@ #include "openvino/op/range.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/scatter_update.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/topk.hpp" #include "openvino/op/unique.hpp" diff --git a/src/frontends/tensorflow_common/src/op/fake_quant_min_max_vars.cpp b/src/frontends/tensorflow_common/src/op/fake_quant_min_max_vars.cpp index d7c5cafbbc24b4..29f9333d81a186 100644 --- a/src/frontends/tensorflow_common/src/op/fake_quant_min_max_vars.cpp +++ b/src/frontends/tensorflow_common/src/op/fake_quant_min_max_vars.cpp @@ -3,6 +3,7 @@ // #include "common_op_table.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/fake_quantize.hpp" @@ -10,6 +11,8 @@ #include "openvino/op/less.hpp" #include "openvino/op/maximum.hpp" #include "openvino/op/minimum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/round.hpp" #include "openvino/op/select.hpp" #include "openvino/op/subtract.hpp" diff --git a/src/frontends/tensorflow_common/src/op/fft.cpp b/src/frontends/tensorflow_common/src/op/fft.cpp index b46a7ce91757cf..184cb3765d731a 100644 --- a/src/frontends/tensorflow_common/src/op/fft.cpp +++ b/src/frontends/tensorflow_common/src/op/fft.cpp @@ -5,6 +5,7 @@ #include "common_op_table.hpp" #include "helper_ops/complex_type_mark.hpp" #include "openvino/op/dft.hpp" +#include "openvino/op/range.hpp" #include "openvino/op/subtract.hpp" #include "utils.hpp" diff --git a/src/frontends/tensorflow_common/src/op/fused_batch_norm.cpp 
b/src/frontends/tensorflow_common/src/op/fused_batch_norm.cpp index 7f1095c8f89f24..02ac8f34bad757 100644 --- a/src/frontends/tensorflow_common/src/op/fused_batch_norm.cpp +++ b/src/frontends/tensorflow_common/src/op/fused_batch_norm.cpp @@ -3,14 +3,17 @@ // #include "common_op_table.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" +#include "openvino/op/gather.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/mvn.hpp" #include "openvino/op/power.hpp" #include "openvino/op/range.hpp" #include "openvino/op/reduce_mean.hpp" +#include "openvino/op/reduce_prod.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/subtract.hpp" #include "openvino/op/unsqueeze.hpp" diff --git a/src/frontends/tensorflow_common/src/op/identity.cpp b/src/frontends/tensorflow_common/src/op/identity.cpp index cfffaafc4f1ccd..3d66e5a5c0218d 100644 --- a/src/frontends/tensorflow_common/src/op/identity.cpp +++ b/src/frontends/tensorflow_common/src/op/identity.cpp @@ -29,7 +29,6 @@ OutputVector translate_identity_op(const NodeContext& node) { // set only tensor names // no need to change node name since Identity node is skipped - set_out_name(node.get_name(), input); set_out_name(node.get_name() + ":" + "0", input); return {input}; } diff --git a/src/frontends/tensorflow_common/src/op/ifft.cpp b/src/frontends/tensorflow_common/src/op/ifft.cpp index 927d7934f549ee..36f61ce872c734 100644 --- a/src/frontends/tensorflow_common/src/op/ifft.cpp +++ b/src/frontends/tensorflow_common/src/op/ifft.cpp @@ -5,6 +5,7 @@ #include "common_op_table.hpp" #include "helper_ops/complex_type_mark.hpp" #include "openvino/op/idft.hpp" +#include "openvino/op/range.hpp" #include "openvino/op/subtract.hpp" #include "utils.hpp" diff --git a/src/frontends/tensorflow_common/src/op/matmul.cpp b/src/frontends/tensorflow_common/src/op/matmul.cpp index 5b3f57f6f8506a..83b1023f2aa47e 100644 --- a/src/frontends/tensorflow_common/src/op/matmul.cpp +++ b/src/frontends/tensorflow_common/src/op/matmul.cpp @@ -5,6 +5,7 @@ #include "openvino/op/matmul.hpp" #include "common_op_table.hpp" +#include "openvino/op/convert.hpp" using namespace std; using namespace ov::op; diff --git a/src/frontends/tensorflow_common/src/op/matrix_diag.cpp b/src/frontends/tensorflow_common/src/op/matrix_diag.cpp index 123fbe0014cb7e..d877bdfd00a414 100644 --- a/src/frontends/tensorflow_common/src/op/matrix_diag.cpp +++ b/src/frontends/tensorflow_common/src/op/matrix_diag.cpp @@ -9,6 +9,7 @@ #include "openvino/op/constant.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" #include "openvino/op/strided_slice.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/op/util/attr_types.hpp" diff --git a/src/frontends/tensorflow_common/src/op/non_max_suppression.cpp b/src/frontends/tensorflow_common/src/op/non_max_suppression.cpp index 961770ac1c2305..2a7f9e0d723b12 100644 --- a/src/frontends/tensorflow_common/src/op/non_max_suppression.cpp +++ b/src/frontends/tensorflow_common/src/op/non_max_suppression.cpp @@ -11,6 +11,7 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" #include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" #include "openvino/op/unsqueeze.hpp" #include "utils.hpp" diff --git a/src/frontends/tensorflow_common/src/op/parallel_dynamic_stitch.cpp b/src/frontends/tensorflow_common/src/op/parallel_dynamic_stitch.cpp index df98a956080869..800351f6b9db6b 100644 --- 
a/src/frontends/tensorflow_common/src/op/parallel_dynamic_stitch.cpp +++ b/src/frontends/tensorflow_common/src/op/parallel_dynamic_stitch.cpp @@ -12,6 +12,7 @@ #include "openvino/op/scatter_update.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" +#include "openvino/op/unsqueeze.hpp" #include "utils.hpp" using namespace std; diff --git a/src/frontends/tensorflow_common/src/op/reduce.cpp b/src/frontends/tensorflow_common/src/op/reduce.cpp index 8affb37dc50ea1..bcca7697a75dc4 100644 --- a/src/frontends/tensorflow_common/src/op/reduce.cpp +++ b/src/frontends/tensorflow_common/src/op/reduce.cpp @@ -9,6 +9,7 @@ #include "openvino/op/reduce_max.hpp" #include "openvino/op/reduce_mean.hpp" #include "openvino/op/reduce_min.hpp" +#include "openvino/op/reduce_prod.hpp" #include "openvino/op/reduce_sum.hpp" using namespace std; diff --git a/src/frontends/tensorflow_common/src/op/reverse.cpp b/src/frontends/tensorflow_common/src/op/reverse.cpp index 68d4a82c766a0f..b161999bd6ae8b 100644 --- a/src/frontends/tensorflow_common/src/op/reverse.cpp +++ b/src/frontends/tensorflow_common/src/op/reverse.cpp @@ -7,6 +7,7 @@ #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/reverse_sequence.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/unsqueeze.hpp" diff --git a/src/frontends/tensorflow_common/src/op/rfft.cpp b/src/frontends/tensorflow_common/src/op/rfft.cpp index 7e38c8651a9058..b32280464e2d95 100644 --- a/src/frontends/tensorflow_common/src/op/rfft.cpp +++ b/src/frontends/tensorflow_common/src/op/rfft.cpp @@ -6,6 +6,7 @@ #include "helper_ops/complex_type_mark.hpp" #include "openvino/core/any.hpp" #include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" #include "openvino/op/rdft.hpp" #include "openvino/op/subtract.hpp" #include "utils.hpp" diff --git a/src/frontends/tensorflow_common/src/op/rsqrt.cpp b/src/frontends/tensorflow_common/src/op/rsqrt.cpp index 12d7f98cb856f2..ea1779dd95e60e 100644 --- a/src/frontends/tensorflow_common/src/op/rsqrt.cpp +++ b/src/frontends/tensorflow_common/src/op/rsqrt.cpp @@ -3,11 +3,11 @@ // #include "common_op_table.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/power.hpp" #include "utils.hpp" using namespace std; -using namespace ov::opset8; +using namespace ov::op; namespace ov { namespace frontend { @@ -18,7 +18,7 @@ OutputVector translate_rsqrt_op(const NodeContext& node) { default_op_checks(node, 1, {"Rsqrt", "RSQRT"}); auto input = node.get_input(0); auto exponent = create_same_type_const_scalar(input, -0.5f); - auto rsqrt = make_shared(input, exponent); + auto rsqrt = make_shared(input, exponent); set_node_name(node.get_name(), rsqrt); return {rsqrt}; } diff --git a/src/frontends/tensorflow_common/src/op/shape.cpp b/src/frontends/tensorflow_common/src/op/shape.cpp index 501480e404be45..0e93c2dc8b743e 100644 --- a/src/frontends/tensorflow_common/src/op/shape.cpp +++ b/src/frontends/tensorflow_common/src/op/shape.cpp @@ -4,8 +4,10 @@ #include "common_op_table.hpp" #include "helper_ops/complex_type_mark.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" +#include "openvino/op/subtract.hpp" using namespace std; using namespace ov; diff --git a/src/frontends/tensorflow_common/src/op/space_to_batch_nd.cpp b/src/frontends/tensorflow_common/src/op/space_to_batch_nd.cpp index 0b8977e701efac..1cf48a1f13956a 100644 --- 
a/src/frontends/tensorflow_common/src/op/space_to_batch_nd.cpp +++ b/src/frontends/tensorflow_common/src/op/space_to_batch_nd.cpp @@ -8,6 +8,7 @@ #include "openvino/op/convert_like.hpp" #include "openvino/op/pad.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/space_to_batch.hpp" #include "openvino/op/split.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/subtract.hpp" diff --git a/src/frontends/tensorflow_common/src/op/truncate_div.cpp b/src/frontends/tensorflow_common/src/op/truncate_div.cpp index b725bbd76b44a3..88b4174e1195d1 100644 --- a/src/frontends/tensorflow_common/src/op/truncate_div.cpp +++ b/src/frontends/tensorflow_common/src/op/truncate_div.cpp @@ -12,7 +12,7 @@ #include "openvino/op/select.hpp" using namespace std; -using namespace ov::opset10; +using namespace ov::op; namespace ov { namespace frontend { @@ -23,9 +23,10 @@ OutputVector translate_truncate_div_op(const NodeContext& node) { auto x = node.get_input(0); auto y = node.get_input(1); - auto res = make_shared(x, y); - auto is_res_negative = make_shared(res, create_same_type_const_scalar(x, 0)); - auto final_res = make_shared(is_y_negative, negative_y, y); + auto negative_y = make_shared(y); + y = make_shared(is_y_negative, negative_y, y); // check if floor_mod == zero - auto floor_mod = make_shared(x, y); - auto is_zero = make_shared(floor_mod, create_same_type_const_scalar(floor_mod, 0)); + auto floor_mod = make_shared(x, y); + auto is_zero = make_shared(floor_mod, create_same_type_const_scalar(floor_mod, 0)); // floor_mod - y - auto other_res = make_shared(floor_mod, y); + auto other_res = make_shared(floor_mod, y); // select operation to handle the sign - auto result = make_shared(is_x_negative, other_res, floor_mod)); + auto result = + make_shared(is_zero, floor_mod, make_shared(is_x_negative, other_res, floor_mod)); set_node_name(node.get_name(), result); return result->outputs(); diff --git a/src/frontends/tensorflow_common/src/op/unravel_index.cpp b/src/frontends/tensorflow_common/src/op/unravel_index.cpp index f649393d5d4c95..3de27fa3586741 100644 --- a/src/frontends/tensorflow_common/src/op/unravel_index.cpp +++ b/src/frontends/tensorflow_common/src/op/unravel_index.cpp @@ -10,6 +10,7 @@ #include "openvino/op/divide.hpp" #include "openvino/op/equal.hpp" #include "openvino/op/floor_mod.hpp" +#include "openvino/op/less_eq.hpp" #include "openvino/op/range.hpp" #include "openvino/op/reduce_prod.hpp" #include "openvino/op/roll.hpp" diff --git a/src/frontends/tensorflow_common/src/op/xlog1py.cpp b/src/frontends/tensorflow_common/src/op/xlog1py.cpp index cf24fec54382d3..a9de65ba47e61e 100644 --- a/src/frontends/tensorflow_common/src/op/xlog1py.cpp +++ b/src/frontends/tensorflow_common/src/op/xlog1py.cpp @@ -10,7 +10,8 @@ #include "openvino/op/select.hpp" using namespace std; -using namespace ov::opset10; +using namespace ov; +using namespace ov::op; namespace ov { namespace frontend { @@ -26,13 +27,13 @@ OutputVector translate_xlog1py_op(const NodeContext& node) { auto one = create_same_type_const_scalar(y, 1); // compute a mask to identify where x is equal to 0 - auto is_zero = make_shared(x, zero); + auto is_zero = make_shared(x, zero); // compute x * log(y + 1) elementwise - auto xlog1py = make_shared(x, make_shared(make_shared(y, one))); + auto xlog1py = make_shared(x, make_shared(make_shared(y, one))); // create the output tensor using Select to handle the x == 0 condition - auto result = make_shared(is_zero, zero, xlog_y); + auto result = make_shared(is_zero, zero, xlog_y); 
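Several of the rewritten ops above (Rsqrt, TruncateDiv, Xlog1py) lean on the create_same_type_const_scalar helper whose definition was reworked earlier in this diff. A paraphrase of its pattern follows, with the template arguments spelled out as an assumption since they are elided above: when the anchor output's element type is static, emit the Constant in that type directly; when it is dynamic, emit a Constant of the C++ type and let ConvertLike resolve the element type at graph level.

#include "openvino/op/constant.hpp"
#include "openvino/op/convert_like.hpp"

template <typename T>
ov::Output<ov::Node> same_type_scalar(const ov::Output<ov::Node>& anchor, const T& value) {
    if (anchor.get_element_type().is_static()) {
        // Static type: bake the scalar constant in directly.
        return std::make_shared<ov::op::v0::Constant>(anchor.get_element_type(), ov::Shape{}, value);
    }
    // Dynamic type: defer the decision to ConvertLike in the graph.
    ov::Output<ov::Node> c =
        std::make_shared<ov::op::v0::Constant>(ov::element::from<T>(), ov::Shape{}, value);
    return std::make_shared<ov::op::v1::ConvertLike>(c, anchor);
}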
set_node_name(node.get_name(), result); return result->outputs(); diff --git a/src/frontends/tensorflow_common/src/utils.cpp b/src/frontends/tensorflow_common/src/utils.cpp index cd5fafa3291bb3..5baa6a24b2dcad 100644 --- a/src/frontends/tensorflow_common/src/utils.cpp +++ b/src/frontends/tensorflow_common/src/utils.cpp @@ -12,11 +12,14 @@ #include "openvino/op/constant.hpp" #include "openvino/op/convolution.hpp" #include "openvino/op/divide.hpp" +#include "openvino/op/group_conv.hpp" #include "openvino/op/maximum.hpp" #include "openvino/op/pad.hpp" +#include "openvino/op/parameter.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" +#include "openvino/op/squeeze.hpp" #include "openvino/op/subtract.hpp" #include "openvino/op/transpose.hpp" @@ -32,7 +35,8 @@ namespace tensorflow { void set_node_name(const string& node_name, const shared_ptr& node) { const auto& outputs = node->outputs(); node->set_friendly_name(node_name); - if (outputs.size() == 1) { + // TODO: Remove this and set single tensor names for model inputs, ticket - 129457 + if (outputs.size() == 1 && as_type_ptr(node)) { set_out_name(node_name, outputs[0]); } for (size_t idx = 0; idx < outputs.size(); ++idx) { diff --git a/src/inference/dev_api/blob_factory.hpp b/src/inference/dev_api/blob_factory.hpp index 3ba23868b92159..f37f813604c62f 100644 --- a/src/inference/dev_api/blob_factory.hpp +++ b/src/inference/dev_api/blob_factory.hpp @@ -4,7 +4,7 @@ /** * @brief A file with helper functions to uniformly create Blob objects - * @file blob_transform.hpp + * @file blob_factory.hpp */ #pragma once @@ -16,7 +16,8 @@ #include "ie_blob.h" #include "ie_data.h" #include "ie_memcpy.h" -#include "ie_preprocess.hpp" +#include "openvino/runtime/itensor.hpp" +#include "openvino/runtime/so_ptr.hpp" IE_SUPPRESS_DEPRECATED_START /** @@ -83,17 +84,6 @@ INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) make_blob_with_precision(const InferenceEngine::TensorDesc& desc, const std::shared_ptr& alloc); -/** - * @brief Creates a plain Blob::Ptr - * @ingroup ie_dev_api_memory - * - * @param[in] prec The Precision value - * @param[in] dims The dims - * @return A Blob::Ptr pointer - */ -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) -make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVector dims); - /** * @brief Creates Blob::Ptr with precision * @ingroup ie_dev_api_memory @@ -149,4 +139,15 @@ void CopyVectorToBlob(const InferenceEngine::Blob::Ptr outputBlob, const std::ve IE_THROW() << "Element size mismatch between blob and vector"; ie_memcpy(outputBlob->buffer().as(), outputBlob->byteSize(), &inputVector[0], inputVector.size() * sizeof(T)); } + +namespace ov { + +ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); + +OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, + bool unwrap = true, + InferenceEngine::TensorDesc desc = {}); + +} // namespace ov + IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/dev_api/caseless.hpp b/src/inference/dev_api/caseless.hpp deleted file mode 100644 index fde83b4b51f5c2..00000000000000 --- a/src/inference/dev_api/caseless.hpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @file caseless.hpp - * @brief A header file with caseless containers - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -namespace InferenceEngine { -namespace details { - -/** - * 
@brief Provides caseless comparison for STL algorithms - * - * @tparam Key type, usually std::string - */ -template -class CaselessLess { -public: - bool operator()(const Key& a, const Key& b) const noexcept { - return std::lexicographical_compare(std::begin(a), - std::end(a), - std::begin(b), - std::end(b), - [](const char& cha, const char& chb) { - return std::tolower(cha) < std::tolower(chb); - }); - } -}; - -/** - * provides caseless eq for stl algorithms - * @tparam Key - */ -template -class CaselessEq { -public: - bool operator()(const Key& a, const Key& b) const noexcept { - return a.size() == b.size() && - std::equal(std::begin(a), std::end(a), std::begin(b), [](const char& cha, const char& chb) { - return std::tolower(cha) == std::tolower(chb); - }); - } -}; - -/** - * To hash caseless - */ -template -class CaselessHash : public std::hash { -public: - size_t operator()(T __val) const noexcept { - T lc; - std::transform(std::begin(__val), std::end(__val), std::back_inserter(lc), [](typename T::value_type ch) { - return std::tolower(ch); - }); - return std::hash()(lc); - } -}; - -template -using caseless_unordered_map = std::unordered_map, CaselessEq>; - -template -using caseless_unordered_multimap = std::unordered_multimap, CaselessEq>; - -template -using caseless_map = std::map>; - -template -using caseless_set = std::set>; - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp index 5378dcfe525345..687a87a3364388 100644 --- a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp +++ b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp @@ -243,25 +243,11 @@ class INFERENCE_ENGINE_1_0_DEPRECATED AsyncInferRequestThreadSafeDefault : publi _syncRequest->SetBlob(name, data); } - void SetBlobs(const std::string& name, const std::vector& blobs) override { - CheckState(); - _syncRequest->SetBlobs(name, blobs); - } - - BatchedBlob::Ptr GetBlobs(const std::string& name) override { - CheckState(); - return _syncRequest->GetBlobs(name); - } - Blob::Ptr GetBlob(const std::string& name) override { CheckState(); return _syncRequest->GetBlob(name); } - const PreProcessInfo& GetPreProcess(const std::string& name) const override { - return _syncRequest->GetPreProcess(name); - } - void SetCallback(Callback callback) override { CheckState(); _callback = std::move(callback); diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp index e488cd406c8c82..6b89010ac995fe 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp @@ -12,8 +12,7 @@ #include "cpp/ie_cnn_network.h" #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace ov { class Function; @@ -29,7 +28,6 @@ namespace InferenceEngine { class IInferencePlugin; class IPluginWrapper; class IInferRequestInternal; -class RemoteContext; class IVariableStateInternal; class ICompiledModelWrapper; @@ -151,12 +149,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IExecutableNetw 
*/ virtual Parameter GetMetric(const std::string& name) const; - /** - * @brief Gets the remote context. - * @return A reference to a context - */ - virtual std::shared_ptr GetContext() const; - /** * @brief Raises the flag that model was loaded from cache */ diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp index 4b22faa5339aeb..438b94acb65c2d 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp @@ -4,17 +4,15 @@ #pragma once -#include #include #include #include #include "cpp/ie_infer_request.hpp" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_input_info.hpp" #include "openvino/core/node_output.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace InferenceEngine { @@ -89,31 +87,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ virtual void SetBlob(const std::string& name, const Blob::Ptr& data); - /** - * @brief Set batch of input data to infer. Default implementation performs basic validation and checks that all - * tensors are not remote. Plugin-specific implementations may override this behavior to handle remote tensors case. - * If plugin expects only memory blobs (not remote blobs), consider to override only SetBlobsImpl and reuse basic - * existing implementation - * @param name - an operation name of input or output blob. - * @param blobs - input blobs. The type of Blob must correspond to the model's input - * precision and size. - */ - virtual void SetBlobs(const std::string& name, const std::vector& blobs); - - /** - * @brief Set batch of input data to infer. Default implementation throws "Not implemented" exception - * To support 'set_input_tensors'/'set_tensors' plugin-specific implementations shall: - * - Inside SetBlobsImpl: update 'InferenceEngine::IInferRequestInternal::batched_inputs' map - * - Inside 'SetBlob': erase appropriate 'InferenceEngine::IInferRequestInternal::_batched_inputs[name]' item - * - Inside 'InferImpl': call 'convertBatchedInputBlobs' on the beginning to convert many user blobs into single - * one - * - If needed, override 'convertBatchedInputBlob' to perform custom concatenation and data copy to input blob - * @param name - an operation name of input or output blob. - * @param batched_blob - input blobs combined in batched blob. Called only if number of blobs > 1 - * precision and size. - */ - virtual void SetBlobsImpl(const std::string& name, const BatchedBlob::Ptr& batched_blob); - /** * @brief Get input/output data to infer * @note Memory allocation doesn't happen @@ -123,21 +96,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ virtual Blob::Ptr GetBlob(const std::string& name); - /** - * @brief Get input/output data to infer - * @note Memory allocation doesn't happen - * @param name - a name of input or output blob. - * @return data - a reference to input batched blob. - */ - virtual BatchedBlob::Ptr GetBlobs(const std::string& name); - - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. - * @param info pointer to a pointer to PreProcessInfo structure - */ - virtual const PreProcessInfo& GetPreProcess(const std::string& name) const; - /** * @brief Queries memory states. 
* @return Returns memory states @@ -283,13 +241,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn */ std::shared_ptr findOutputByNodeName(const std::string& name) const; - /** - * @brief Concatenates _batched_inputs into single blob before inference - * It is expected that _batched_inputs map contains only valid BatchedBlob blobs with 2 or more blobs inside - * @throws Exception if error occurs - */ - void convertBatchedInputBlobs(); - /** * @brief Checks whether pre-processing step is required for a given input * @param info InputInfo corresponding to input blob @@ -303,24 +254,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn void addInputPreProcessingFor(const std::string& name, const Blob::Ptr& from, const Blob::Ptr& to); - /** - * @brief Performs actual concatenation of blobs into single tensor - * Default implementation may allocate memory for new blob containing user's input data - * Plugin is allowed to override this behavior - * @throws Exception if error occurs - */ - virtual void convertBatchedInputBlob(const std::string& name, - const InferenceEngine::BatchedBlob::Ptr& batched_blob); - - /** - * @brief Performs basic validation of user's blobs set via SetBlobs - * @note Plugin-specific implementations may call this function to performs basic validation inside 'SetBlobs' - * @param name - input name. - * @param blobs - input blobs. The type of Blob must correspond to the network input - * precision and size. - */ - virtual void checkBlobsForBatch(const std::string& name, const std::vector& blobs); - InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data InferenceEngine::BlobMap _inputs; //!< A map of user passed blobs for network inputs @@ -328,21 +261,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn InferenceEngine::BlobMap _outputs; //!< A map of user passed blobs for network outputs std::vector> _parameters; //!< A vector of function inputs std::vector> _results; //!< A vector of function outputs - std::map _batched_inputs; //!< A map of user passed blobs for network inputs - - class PreProcessDataPlugin { - public: - void setRoiBlob(const Blob::Ptr& blob) {} - - Blob::Ptr getRoiBlob() const { - return nullptr; - } - - void execute(Blob::Ptr& preprocessedBlob, const PreProcessInfo& info, bool serial, int batchSize = -1) {} - - void isApplicable(const Blob::Ptr& src, const Blob::Ptr& dst) {} - }; - std::map> _preProcData; //!< A map of pre-process data per input /** * @brief A shared pointer to IInferRequestInternal diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index eb0e8d38c46f87..42df35371800a3 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -21,8 +21,8 @@ #include "ie_parameter.hpp" #include "openvino/core/extension.hpp" #include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "openvino/util/pp.hpp" -#include "so_ptr.hpp" using namespace ov::threading; @@ -30,18 +30,9 @@ namespace InferenceEngine { class ExecutorManager; class IExecutableNetworkInternal; -class RemoteContext; class IExtension; class ICore; -/** - * @brief Copies preprocess info - * - * @param[in] from 
PreProcessInfo to copy from - * @return copy of preprocess info - */ -INFERENCE_ENGINE_API_CPP(PreProcessInfo) copyPreProcess(const PreProcessInfo& from); - /** * @brief Copies the values of `std::string` indexed map and apply const cast * @@ -180,18 +171,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, const std::map& config); - /** - * @brief Creates an executable network from network object, on specified remote context - * @param network A network object acquired from InferenceEngine::Core::ReadNetwork - * @param config string-string map of config parameters relevant only for this load operation - * @param context A pointer to plugin context derived from RemoteContext class used to - * execute the network - * @return Created Executable Network object - */ - virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, - const std::map& config, - const std::shared_ptr& context); - /** * @brief Creates an executable network from model file path * @param modelPath A path to model @@ -235,20 +214,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi */ virtual Parameter GetMetric(const std::string& name, const std::map& options) const; - /** - * @brief Creates a remote context instance based on a map of parameters - * @param[in] params The map of parameters - * @return A remote context object - */ - virtual std::shared_ptr CreateContext(const ParamMap& params); - - /** - * @brief Provides a default remote context instance if supported by a plugin - * @param[in] params The map of parameters - * @return The default context. - */ - virtual std::shared_ptr GetDefaultContext(const ParamMap& params); - /** * @deprecated Use ImportNetwork(std::istream& networkModel, const std::map& config) * @brief Creates an executable network from an previously exported network @@ -269,19 +234,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi virtual std::shared_ptr ImportNetwork(std::istream& networkModel, const std::map& config); - /** - * @brief Creates an executable network from an previously exported network using plugin implementation - * and removes Inference Engine magic and plugin name - * @param networkModel Reference to network model output stream - * @param context A pointer to plugin context derived from RemoteContext class used to - * execute the network - * @param config A string -> string map of parameters - * @return An Executable network - */ - virtual std::shared_ptr ImportNetwork(std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config); - /** * @brief Sets pointer to ICore interface * @param core Pointer to Core interface @@ -333,24 +285,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi const CNNNetwork& network, const std::map& config); - /** - * @brief Creates an executable network using remote context from a parsed network object, - * users can create as many networks as they need and use them simultaneously (up to the limitation of the HW - * resources) - * @note The function is used in - * InferencePluginInternal::LoadNetwork(const CNNNetwork&, const std::map&, - * RemoteContext::Ptr) which performs common steps first and calls this plugin-dependent method implementation - * after. 
- * @param network A network object - * @param context A remote context - * @param config string-string map of config parameters relevant only for this load operation - * @return Shared pointer to the ExecutableNetwork object - */ - virtual std::shared_ptr LoadExeNetworkImpl( - const CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config); - /** * @brief Set input and output information to executable network. This method is used to * set addtional information to InferenceEngine::IExecutableNetworkInternal create by device plugin. diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp index a2a0aabf997f70..f04a34bf841e63 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp @@ -8,7 +8,7 @@ #include #include "ie_blob.h" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" namespace InferenceEngine { diff --git a/src/inference/dev_api/debug.h b/src/inference/dev_api/debug.h deleted file mode 100644 index e626d8670a63b0..00000000000000 --- a/src/inference/dev_api/debug.h +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Basic debugging tools - * @file debug.h - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ie_algorithm.hpp" - -namespace InferenceEngine { -namespace details { - -/** - * @brief Serializes a `std::vector` to a `std::ostream` - * @ingroup ie_dev_api_error_debug - * @param out An output stream - * @param vec A vector to serialize - * @return A reference to a `std::stream` - */ -template -inline std::ostream& operator<<(std::ostream& out, const std::vector& vec) { - if (vec.empty()) - return std::operator<<(out, "[]"); - out << "[" << vec[0]; - for (unsigned i = 1; i < vec.size(); i++) { - out << ", " << vec[i]; - } - return out << "]"; -} - -/** - * @brief trim from start (in place) - * @ingroup ie_dev_api_error_debug - * @param s - string to trim - */ -inline void ltrim(std::string& s) { - s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { - return !std::isspace(c); - })); -} - -/** - * @brief trim from end (in place) - * @ingroup ie_dev_api_error_debug - * @param s - string to trim - */ -inline void rtrim(std::string& s) { - s.erase(std::find_if(s.rbegin(), - s.rend(), - [](int c) { - return !std::isspace(c); - }) - .base(), - s.end()); -} - -/** - * @brief Trims std::string from both ends (in place) - * @ingroup ie_dev_api_error_debug - * @param s A reference to a std::tring to trim - * @return A reference to a trimmed std::string - */ -inline std::string& trim(std::string& s) { - ltrim(s); - rtrim(s); - return s; -} - -/** - * @brief split string into a vector of substrings - * @ingroup ie_dev_api_error_debug - * @param src - string to split - * @param delimiter - string used as a delimiter - * @return vector of substrings - */ -inline std::vector split(const std::string& src, const std::string& delimiter) { - std::vector tokens; - std::string tokenBuf; - size_t prev = 0, pos = 0, srcLength = src.length(), delimLength = delimiter.length(); - do { - pos = src.find(delimiter, prev); - if (pos == std::string::npos) { - pos = srcLength; - } - tokenBuf = src.substr(prev, pos - prev); - if 
(!tokenBuf.empty()) { - tokens.push_back(tokenBuf); - } - prev = pos + delimLength; - } while (pos < srcLength && prev < srcLength); - return tokens; -} - -/** - * @brief create a string representation for a vector of values, without any suffixes or prefixes - * @ingroup ie_dev_api_error_debug - * @param vec Vector of values - * @param glue A separator - * @return A string representation - */ -template -std::string joinVec(std::vector const& vec, std::string const& glue = std::string(",")) { - if (vec.empty()) - return ""; - std::stringstream oss; - oss << vec[0]; - for (size_t i = 1; i < vec.size(); i++) - oss << glue << vec[i]; - return oss.str(); -} - -/** - * @brief create a string representation for a vector of values, enclosing text in a square brackets - * @ingroup ie_dev_api_error_debug - * @param vec - vector of values - * @return string representation - */ -template -std::string dumpVec(std::vector const& vec) { - return "[" + joinVec(vec) + "]"; -} - -/** - * @brief multiply vector's values - * @ingroup ie_dev_api_error_debug - * @param vec - vector with values - * @return result of multiplication - */ -template -T product(std::vector const& vec) { - if (vec.empty()) - return 0; - T ret = vec[0]; - for (size_t i = 1; i < vec.size(); ++i) - ret *= vec[i]; - return ret; -} - -/** - * @brief check if vectors contain same values - * @ingroup ie_dev_api_error_debug - * @param v1 - first vector - * @param v2 - second vector - * @return true if vectors contain same values - */ -template -bool equal(const std::vector& v1, const std::vector& v2) { - if (v1.size() != v2.size()) - return false; - for (auto i1 = v1.cbegin(), i2 = v2.cbegin(); i1 != v1.cend(); ++i1, ++i2) { - if (*i1 != *i2) - return false; - } - return true; -} - -#ifdef _WIN32 -# define strncasecmp _strnicmp -#endif - -/** - * @brief Checks whether two `std::string`s are equal - * @ingroup ie_dev_api_error_debug - * @param lhs A first `std::string` to compare - * @param rhs A second `std::string` to compare - * @param ignoreCase Whether to ignore case-sensitivity, default is `true` - * @return `True` in case of `std::string`s are equal, `false` otherwise - */ -inline bool equal(const std::string& lhs, const std::string& rhs, bool ignoreCase = true) { - return (lhs.size() == rhs.size()) && (ignoreCase ? 
0 == strncasecmp(lhs.c_str(), rhs.c_str(), lhs.size()) - : 0 == strncmp(lhs.c_str(), rhs.c_str(), lhs.size())); -} - -/** - * @brief check string end with given substring - * @ingroup ie_dev_api_error_debug - * @param src - string to check - * @param with - given substring - * @return true if string end with given substring - */ -inline bool endsWith(const std::string& src, const char* with) { - int wl = static_cast(strlen(with)); - int so = static_cast(src.length()) - wl; - if (so < 0) - return false; - return 0 == strncmp(with, &src[so], wl); -} - -/** - * @brief Converts all upper-case letters in a std::string to lower case - * @ingroup ie_dev_api_error_debug - * @param s A std::tring to convert - * @return An output std::string in lower case - */ -inline std::string tolower(const std::string& s) { - std::string ret; - ret.resize(s.length()); - std::transform(s.begin(), s.end(), ret.begin(), [](char c) { - return static_cast(::tolower(static_cast(c))); - }); - return ret; -} -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/dev_api/exec_graph_info.hpp b/src/inference/dev_api/exec_graph_info.hpp deleted file mode 100644 index e0541fecfd392e..00000000000000 --- a/src/inference/dev_api/exec_graph_info.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A file defines names to be used by plugins to create execution graph. - * It's an API between plugin and WorkBench tool. - * @file exec_graph_info.hpp - */ - -#pragma once - -#include - -#include "openvino/op/op.hpp" -#include "openvino/runtime/exec_model_info.hpp" - -/** - * @brief A namespace with const values for Execution Graph parameters names. - * @ingroup ie_dev_exec_graph - * Executable Graph Info is represented in CNNNetwork format with general ExecutionNode nodes inside - * including connections between the nodes. Each node describes an executable hardware-specific - * primitive and stores its parameters within ExecutionNode::get_rt_info map. - * There is a list of general keys for the parameters map. 
diff --git a/src/inference/dev_api/ie_algorithm.hpp b/src/inference/dev_api/ie_algorithm.hpp
deleted file mode 100644
index c87d2df7616e98..00000000000000
--- a/src/inference/dev_api/ie_algorithm.hpp
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header file with simple helper functions for STL containers
- * @file ie_algorithm.hpp
- */
-
-#pragma once
-#include <algorithm>
-#include <functional>
-#include <numeric>
-
-namespace InferenceEngine {
-
-/**
- * @brief A namespace with non-public Inference Engine Plugin API
- * @ingroup ie_dev_api
- */
-namespace details {
-
-/**
- * @brief Simple helper function to check element presence in a container;
- * the container must provide an stl-compliant find member function
- *
- * @param container - Container to check
- * @param element - element to check
- *
- * @return true if the element is present in the container
- */
-template <typename C, typename T>
-bool contains(const C& container, const T& element) {
-    return container.find(element) != container.end();
-}
-
-/**
- * @brief Associative containers don't work with the remove_if algorithm
- * @tparam Container
- * @tparam PredicateT
- * @param data An associative container
- * @param predicate A predicate to remove values conditionally
- */
-template <typename Container, typename PredicateT>
-inline void erase_if(Container& data, const PredicateT& predicate) {
-    for (auto it = std::begin(data); it != std::end(data);) {
-        if (predicate(*it)) {
-            it = data.erase(it);
-        } else {
-            ++it;
-        }
-    }
-}
-
-/**
- * @brief Multiplies the values in the given range
- *
- * @param[in] beg The `begin` iterator
- * @param[in] en The `end` iterator
- *
- * @tparam TIterator An iterator type
- *
- * @return A result of multiplication.
- */
-template <typename TIterator>
-auto product(TIterator beg, TIterator en) -> typename std::remove_reference<decltype(*beg)>::type {
-    return std::accumulate(beg,
-                           en,
-                           static_cast<typename std::remove_reference<decltype(*beg)>::type>(1),
-                           std::multiplies<typename std::remove_reference<decltype(*beg)>::type>());
-}
-
-/**
- * @brief Clips element to be in range `[min, max]`
- *
- * @param idx The pointer to element.
- * @param[in] min The minimum value
- * @param[in] max The maximum value
- */
-inline void clipping(int* idx, const int min, const int max) {
-    (*idx) = ((*idx) > min) ? (*idx) : min;
-    (*idx) = ((*idx) < max) ? (*idx) : (max - 1);
-}
-
-/**
- * @brief Set containers intersection
- * @tparam Set
- * @param lhs First set container
- * @param rhs Second set container
- * @return Set intersection
- */
-template <typename Set>
-static Set Intersection(const Set& lhs, const Set& rhs) {
-    Set result;
-    const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
-    const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
-    for (auto&& val : minSizeSet) {
-        if (InferenceEngine::details::contains(maxSizeSet, val)) {
-            result.insert(val);
-        }
-    }
-    return result;
-}
-
-/**
- * @brief Checks whether two sets intersect
- * @tparam Set
- * @param lhs First set container
- * @param rhs Second set container
- * @return true if the two sets intersect, false otherwise
- */
-template <typename Set>
-static bool Intersects(const Set& lhs, const Set& rhs) {
-    const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
-    const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
-    for (auto&& val : minSizeSet) {
-        if (InferenceEngine::details::contains(maxSizeSet, val)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-}  // namespace details
-}  // namespace InferenceEngine
diff --git a/src/inference/dev_api/ie_icore.hpp b/src/inference/dev_api/ie_icore.hpp
index 700be69f9f3691..be2b85118de466 100644
--- a/src/inference/dev_api/ie_icore.hpp
+++ b/src/inference/dev_api/ie_icore.hpp
@@ -16,7 +16,6 @@
 #include "cpp/ie_cnn_network.h"
 #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
 #include "ie_parameter.hpp"
-#include "ie_remote_context.hpp"
 #include "openvino/runtime/icore.hpp"
 #include "openvino/runtime/properties.hpp"
 
@@ -60,22 +59,6 @@ class ICore : public ov::ICore {
                                                      const std::string& deviceName,
                                                      const std::map<std::string, std::string>& config = {}) = 0;
 
-    /**
-     * @brief Creates an executable network from a network object.
-     *
-     * Users can create as many networks as they need and use
-     * them simultaneously (up to the limitation of the hardware resources)
-     *
-     * @param network CNNNetwork object acquired from Core::ReadNetwork
-     * @param remoteCtx "Remote" (non-CPU) accelerator device-specific execution context to use
-     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this
-     * load operation
-     * @return An executable network reference
-     */
-    virtual SoExecutableNetworkInternal LoadNetwork(const CNNNetwork& network,
-                                                    const RemoteContext::Ptr& remoteCtx,
-                                                    const std::map<std::string, std::string>& config = {}) = 0;
-
     /**
      * @brief Creates an executable network from a model memory.
      *
@@ -181,15 +164,6 @@ class ICore : public ov::ICore {
      */
     virtual bool DeviceSupportsModelCaching(const std::string& deviceName) const = 0;
 
-    /**
-     * @brief Create a new shared context object on the specified accelerator device
-     * using specified plugin-specific low-level device API parameters (device handle, pointer, etc.)
-     * @param deviceName Name of a device to create the new shared context on.
-     * @param params Map of device-specific shared context parameters.
-     * @return A shared pointer to the created remote context.
-     */
-    virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, const ov::AnyMap&) = 0;
-
     /**
      * @brief Get only configs that are supported by device
      * @param deviceName Name of a device
@@ -200,13 +174,6 @@ class ICore : public ov::ICore {
                                                             const std::map<std::string, std::string>& config) = 0;
 
     virtual bool isNewAPI() const = 0;
-
-    /**
-     * @brief Get a pointer to the default shared context object for the specified device.
-     * @param deviceName - A name of a device to get the default shared context from.
-     * @return A shared pointer to the default remote context.
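The ie_icore.hpp hunk above (its GetDefaultContext declaration continues just below) strips the remote-context entry points from the internal ICore interface; their public counterparts live on ov::Core. A hedged sketch of the replacement flow, assuming a context-capable "GPU" plugin and a local "model.xml" (both illustrative):

```cpp
// Sketch: ov::Core equivalents of the removed ICore remote-context methods.
// Assumptions: a "GPU" plugin that supports contexts and a readable "model.xml".
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");

    // Counterpart of the removed GetDefaultContext(deviceName).
    ov::RemoteContext ctx = core.get_default_context("GPU");

    // Counterpart of the removed LoadNetwork(network, remoteCtx, config).
    ov::CompiledModel compiled = core.compile_model(model, ctx);
    return 0;
}
```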
- */ - virtual RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) = 0; }; } // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_metric_helpers.hpp b/src/inference/dev_api/ie_metric_helpers.hpp deleted file mode 100644 index e45c9a9760e1dc..00000000000000 --- a/src/inference/dev_api/ie_metric_helpers.hpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Defines set of macro to safely set plugin and executable network metric values - * @file ie_metric_helpers.hpp - */ - -#pragma once - -#include -#include - -/** - * @cond - */ - -namespace InferenceEngine { -namespace Metrics { - -template -struct MetricType; - -#define DECLARE_METRIC_KEY_IMPL(name, ...) \ - struct name {}; \ - template <> \ - struct MetricType { \ - using type = __VA_ARGS__; \ - } - -} // namespace Metrics -} // namespace InferenceEngine - -/** - * @endcond - */ - -/** - * @def IE_SET_METRIC_RETURN(name, ...) - * @ingroup ie_dev_api - * @brief Return metric value with specified @p name and arguments `...`. Example: - * @code - * IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys); - * @endcode - * - * @param name The metric name - * @param ... A metric value - * - * @return A metric value wrapped with Parameter and returned to a calling function - */ -#define IE_SET_METRIC_RETURN(name, ...) \ - typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::name>::type _##name##_value = \ - __VA_ARGS__; \ - return _##name##_value - -#include "ie_plugin_config.hpp" diff --git a/src/inference/dev_api/ie_performance_hints.hpp b/src/inference/dev_api/ie_performance_hints.hpp deleted file mode 100644 index 495ebc17be66d9..00000000000000 --- a/src/inference/dev_api/ie_performance_hints.hpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for config that holds the performance hints - * @file ie_performance_hints.hpp - */ - -#pragma once -#include -#include - -namespace InferenceEngine { -struct PerfHintsConfig { - std::string ovPerfHint = "LATENCY"; - int ovPerfHintNumRequests = 0; - - /** - * @brief Parses configuration key/value pair - * @param key configuration key - * @param value configuration values - */ - void SetConfig(const std::string& key, const std::string& value) { - if (PluginConfigParams::KEY_PERFORMANCE_HINT == key) { - ovPerfHint = CheckPerformanceHintValue(value); - } else if (PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS == key) { - ovPerfHintNumRequests = CheckPerformanceHintRequestValue(value); - } - } - - /** - * @brief Return configuration value - * @param key configuration key - * @return configuration value wrapped into Parameter - */ - Parameter GetConfig(const std::string& key) { - if (PluginConfigParams::KEY_PERFORMANCE_HINT == key) { - return ovPerfHint; - } else if (PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS == key) { - return ovPerfHintNumRequests; - } else { - IE_THROW() << "Unsupported Performance Hint config: " << key << std::endl; - } - } - - /** - * @brief Supported Configuration keys - * @return vector of supported configuration keys - */ - static std::vector SupportedKeys() { - return {PluginConfigParams::KEY_PERFORMANCE_HINT, PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS}; - } - - /** - * @brief Checks configuration key and value, otherwise throws - * @param configuration key + value - * @return void - */ - static void 
CheckConfigAndValue(std::pair kvp) { - if (kvp.first == PluginConfigParams::KEY_PERFORMANCE_HINT) - CheckPerformanceHintValue(kvp.second); - else if (kvp.first == PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS) - CheckPerformanceHintRequestValue(kvp.second); - else - IE_THROW() << "Unsupported Performance Hint config: " << kvp.first << std::endl; - } - - /** - * @brief Returns configuration value if it is valid, otherwise throws - * @param configuration value - * @return configuration value - */ - static std::string CheckPerformanceHintValue(const std::string& val) { - if (val == PluginConfigParams::LATENCY || val == PluginConfigParams::THROUGHPUT || - val == PluginConfigParams::CUMULATIVE_THROUGHPUT || val == PluginConfigParams::UNDEFINED) - return val; - else - IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_PERFORMANCE_HINT - << ". Expected only " << PluginConfigParams::LATENCY << "/" << PluginConfigParams::THROUGHPUT - << "/" << PluginConfigParams::CUMULATIVE_THROUGHPUT << "/" << PluginConfigParams::UNDEFINED; - } - - /** - * @brief Returns configuration value if it is valid, otherwise throws - * @param configuration value as string - * @return configuration value as number - */ - static int CheckPerformanceHintRequestValue(const std::string& val) { - int val_i = -1; - try { - val_i = std::stoi(val); - if (val_i >= 0) - return val_i; - else - throw std::logic_error("wrong val"); - } catch (const std::exception&) { - IE_THROW() << "Wrong value of " << val << " for property key " - << PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS - << ". Expected only positive integer numbers"; - } - } -}; -} // namespace InferenceEngine diff --git a/src/inference/dev_api/ie_system_conf.h b/src/inference/dev_api/ie_system_conf.h deleted file mode 100644 index c0d2d81704f432..00000000000000 --- a/src/inference/dev_api/ie_system_conf.h +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Abstraction over platform specific implementations - * @file ie_system_conf.h - */ - -#pragma once - -#include -#include - -#include "openvino/runtime/system_conf.hpp" -#include "openvino/runtime/threading/cpu_streams_info.hpp" - -namespace InferenceEngine { - -/** - * @brief Checks whether OpenMP environment variables are defined - * @ingroup ie_dev_api_system_conf - * - * @param[in] includeOMPNumThreads Indicates if the omp number threads is included - * @return `True` if any OpenMP environment variable is defined, `false` otherwise - */ -inline bool checkOpenMpEnvVars(bool includeOMPNumThreads = true) { - return ov::check_open_mp_env_vars(includeOMPNumThreads); -} - -/** - * @brief Returns available CPU NUMA nodes (on Linux, and Windows [only with TBB], single node is assumed on all - * other OSes) - * @ingroup ie_dev_api_system_conf - * @return NUMA nodes - */ -inline std::vector getAvailableNUMANodes() { - return ov::get_available_numa_nodes(); -} - -/** - * @brief Returns available CPU cores types (on Linux, and Windows) and ONLY with TBB, single core type is assumed - * otherwise - * @ingroup ie_dev_api_system_conf - * @return Vector of core types - */ -inline std::vector getAvailableCoresTypes() { - return ov::get_available_cores_types(); -} - -/** - * @brief Returns number of CPU physical cores on Linux/Windows (which is considered to be more performance - * friendly for servers) (on other OSes it simply relies on the original parallel API of choice, which usually uses the - * logical 
cores). call function with 'false' to get #phys cores of all types call function with 'true' to get #phys - * 'Big' cores number of 'Little' = 'all' - 'Big' - * @ingroup ie_dev_api_system_conf - * @param[in] bigCoresOnly Additionally limits the number of reported cores to the 'Big' cores only. - * @return Number of physical CPU cores. - */ -inline int getNumberOfCPUCores(bool bigCoresOnly = false) { - return ov::get_number_of_cpu_cores(bigCoresOnly); -} - -/** - * @brief Returns number of CPU logical cores on Linux/Windows (on other OSes it simply relies on the original - * parallel API of choice, which uses the 'all' logical cores). call function with 'false' to get #logical cores of - * all types call function with 'true' to get #logical 'Big' cores number of 'Little' = 'all' - 'Big' - * @ingroup ie_dev_api_system_conf - * @param[in] bigCoresOnly Additionally limits the number of reported cores to the 'Big' cores only. - * @return Number of logical CPU cores. - */ -inline int getNumberOfLogicalCPUCores(bool bigCoresOnly = false) { - return ov::get_number_of_logical_cpu_cores(bigCoresOnly); -} - -/** - * @brief Returns number of blocked CPU cores. Please note that this is a temporary interface for performance - * optimization on a specific platform. May be removed in future release. - * @ingroup ov_dev_api_system_conf - * @return Number of blocked CPU cores. - */ -using ov::get_number_of_blocked_cores; - -/** - * @brief Checks whether CPU supports SSE 4.2 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is SSE 4.2 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_sse42; - -/** - * @brief Checks whether CPU supports AVX capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx; - -/** - * @brief Checks whether CPU supports AVX2 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX2 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx2; - -/** - * @brief Checks whether CPU supports AVX2_VNNI capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX2_VNNI instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx2_vnni; - -/** - * @brief Checks whether CPU supports AVX 512 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F (foundation) instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512f; - -/** - * @brief Checks whether CPU supports AVX 512 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F, AVX512BW, AVX512DQ instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core; - -/** - * @brief Checks whether CPU supports AVX 512 VNNI capability - * @ingroup ie_dev_api_system_conf - * @return `True` is AVX512F, AVX512BW, AVX512DQ, AVX512_VNNI instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_vnni; - -/** - * @brief Checks whether CPU supports BFloat16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAVX512_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_bfloat16; - -/** - * @brief Checks whether CPU supports fp16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAVX512_FP16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_fp16; - -/** - * @brief Checks whether CPU supports AMX int8 capability - * @ingroup ie_dev_api_system_conf - * 
@return `True` is tAMX_INT8 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx_int8; - -/** - * @brief Checks whether CPU supports AMX bf16 capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAMX_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx_bf16; - -/** - * @brief Checks whether CPU supports AMX capability - * @ingroup ie_dev_api_system_conf - * @return `True` is tAMX_INT8 or tAMX_BF16 instructions are available, `false` otherwise - */ -using ov::with_cpu_x86_avx512_core_amx; - -/** - * @brief Checks whether CPU mapping Available - * @ingroup ie_dev_api_system_conf - * @return `True` is CPU mapping is available, `false` otherwise - */ -using ov::is_cpu_map_available; - -/** - * @brief Get number of numa nodes - * @ingroup ie_dev_api_system_conf - * @return Number of numa nodes - */ -using ov::get_num_numa_nodes; - -/** - * @brief Get number of sockets - * @ingroup ie_dev_api_system_conf - * @return Number of sockets - */ -using ov::get_num_sockets; - -/** - * @brief Returns number of CPU cores on Linux/Windows - * @ingroup ie_dev_api_system_conf - * @param[in] plugin_task plugin task. - * @return Number of CPU cores with core_type. - */ -using ov::get_proc_type_table; - -/** - * @brief Returns original number of CPU cores on Linux/Windows - * @ingroup ie_dev_api_system_conf - * @param[in] plugin_task plugin task. - * @return Number of original CPU cores with core_type. - */ -using ov::get_org_proc_type_table; - -/** - * @brief Get and reserve available cpu ids - * @ingroup ie_dev_api_system_conf - * @param[in] streams_info_table streams information table. - * @param[in] stream_processors processors grouped in stream - * @param[in] cpu_status set cpu status - */ -using ov::reserve_available_cpus; - -/** - * @brief Set flag bit 'Used' of CPU - * @ingroup ie_dev_api_system_conf - * @param[in] cpu_ids cpus in cup_mapping. - * @param[in] used flag bit - */ -using ov::set_cpu_used; - -/** - * @brief Get socket id by current numa node id - * @ingroup ie_dev_api_system_conf - * @param[in] numa_node_id numa node id - * @return socket id - */ -using ov::get_socket_by_numa_node; - -/** - * @brief This enum contains definition of each columns in processor type table which bases on cpu core types. Will - * extend to support other CPU core type like ARM. - * - * The following are two example of processor type table. - * 1. Processor table of two socket CPUs XEON server - * - * ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC - * 96 48 0 48 // Total number of two sockets - * 48 24 0 24 // Number of socket one - * 48 24 0 24 // Number of socket two - * - * 2. Processor table of one socket CPU desktop - * - * ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC - * 32 8 16 8 // Total number of one socket - */ -using ov::ColumnOfProcessorTypeTable; - -/** - * @brief This enum contains definition of each columns in CPU mapping table which use processor id as index. - * - * GROUP_ID is generated according to the following rules. - * 1. If one MAIN_CORE_PROC and one HYPER_THREADING_PROC are based on same Performance-cores, they are in one group. - * 2. If some EFFICIENT_CORE_PROC share one L2 cachle, they are in one group. - * 3. There are no duplicate group IDs in the system - * - * The following is the example of CPU mapping table. - * 1. Four processors of two Pcore - * 2. 
Four processors of four Ecores shared L2 cache - * - * PROCESSOR_ID | SOCKET_ID | CORE_ID | CORE_TYPE | GROUP_ID | Used - * 0 0 0 3 0 0 - * 1 0 0 1 0 0 - * 2 0 1 3 1 0 - * 3 0 1 1 1 0 - * 4 0 2 2 2 0 - * 5 0 3 2 2 0 - * 6 0 4 2 2 0 - * 7 0 5 2 2 0 - */ -using ov::ColumnOfCPUMappingTable; - -/** - * @brief definition of CPU_MAP_USED_FLAG column in CPU mapping table. - */ -using ov::ProcessorUseStatus; - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/openvino/runtime/make_tensor.hpp b/src/inference/dev_api/openvino/runtime/make_tensor.hpp index 2e5d771c7d98e7..7433b22a7fc38c 100644 --- a/src/inference/dev_api/openvino/runtime/make_tensor.hpp +++ b/src/inference/dev_api/openvino/runtime/make_tensor.hpp @@ -4,11 +4,16 @@ #pragma once -#include "ie_blob.h" #include "openvino/runtime/common.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/so_ptr.hpp" +namespace InferenceEngine { + +class Blob; + +} // namespace InferenceEngine + namespace ov { /** @@ -65,16 +70,4 @@ OPENVINO_RUNTIME_API ov::Tensor make_tensor(const ov::SoPtr& tensor); */ OPENVINO_RUNTIME_API ov::SoPtr get_tensor_impl(const ov::Tensor& tensor); -IE_SUPPRESS_DEPRECATED_START -/** @cond INTERNAL */ -ov::SoPtr make_tensor(const std::shared_ptr& tensor, bool unwrap = false); -const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob); -InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob); - -OPENVINO_RUNTIME_API std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, - bool unwrap = true, - InferenceEngine::TensorDesc desc = {}); -/** @endcond */ - -IE_SUPPRESS_DEPRECATED_END } // namespace ov diff --git a/src/inference/dev_api/remote_utils.hpp b/src/inference/dev_api/remote_utils.hpp index 95dbeb48191b9a..d37f5ec8612150 100644 --- a/src/inference/dev_api/remote_utils.hpp +++ b/src/inference/dev_api/remote_utils.hpp @@ -5,17 +5,11 @@ #pragma once #include "ie_ngraph_utils.hpp" -#include "ie_remote_blob.hpp" -#include "ie_remote_context.hpp" #include "openvino/runtime/iremote_context.hpp" namespace ov { namespace legacy_convert { -INFERENCE_ENGINE_API_CPP(ov::SoPtr) -convert_remote_context(const std::shared_ptr& context); -INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob*) get_hardware_blob(InferenceEngine::Blob* blob); - class INFERENCE_ENGINE_API_CLASS(TensorHolder) { public: TensorHolder(ov::SoPtr tensor) : _tensor(tensor) {} @@ -29,158 +23,4 @@ class INFERENCE_ENGINE_API_CLASS(TensorHolder) { }; } // namespace legacy_convert - -/** - * @brief Tensor what contains InferenceEngine::RemoteBlob inside - * Blob owns the memory - */ -class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor { - mutable element::Type m_type; - mutable Shape m_shape; - mutable Strides m_strides; - mutable ov::AnyMap m_properties; - mutable std::string m_dev_name; - -public: - std::shared_ptr blob; - - RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} { - OPENVINO_ASSERT(blob); - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - } - - const element::Type& get_element_type() const override { - m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision()); - return m_type; - } - - void set_shape(ov::Shape shape) override { - blob->setShape({shape.begin(), shape.end()}); - } - - const Shape& get_shape() const override { - m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); - return m_shape; - } - - const Strides& get_strides() const override { - 
OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, - "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ", - get_element_type()); - const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides(); - const size_t elem_size = get_element_type().size(); - m_strides.clear(); - m_strides.resize(element_strides.size()); - std::transform(element_strides.begin(), element_strides.end(), m_strides.begin(), [&elem_size](size_t stride) { - return stride * elem_size; - }); - return m_strides; - } - - size_t get_size() const override { - return blob->size(); - } - - size_t get_byte_size() const override { - return blob->byteSize(); - } - - const AnyMap& get_properties() const override { - m_properties = blob->getParams(); - return m_properties; - } - - const std::string& get_device_name() const override { - m_dev_name = blob->getDeviceName(); - return m_dev_name; - } -}; - -/** - * @brief Create InferenceEngine::RemoteBlob from the Tensor - */ -class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) - : public InferenceEngine::RemoteBlob, - public ov::legacy_convert::TensorHolder { -public: - TensorRemoteBlob(const ov::SoPtr& tensor, InferenceEngine::TensorDesc desc) - : InferenceEngine::RemoteBlob{desc}, - ov::legacy_convert::TensorHolder(tensor) { - OPENVINO_ASSERT(this->get_tensor()); - } - std::shared_ptr cast_tensor() const { - auto remote = std::dynamic_pointer_cast(get_tensor()._ptr); - OPENVINO_ASSERT(remote); - return remote; - } - AnyMap getParams() const override { - return cast_tensor()->get_properties(); - } - std::string getDeviceName() const noexcept override { - try { - return cast_tensor()->get_device_name(); - } catch (...) { - return {}; - } - } - std::shared_ptr getContext() const noexcept override { - return {}; - } - - void allocate() noexcept override {} - bool deallocate() noexcept override { - return true; - } - InferenceEngine::LockedMemory buffer() noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory cbuffer() const noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory rwmap() noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory rmap() const noexcept override { - return {nullptr, nullptr, 0}; - } - InferenceEngine::LockedMemory wmap() noexcept override { - return {nullptr, nullptr, 0}; - } - const std::shared_ptr& getAllocator() const noexcept override { - return m_allocator; - } - void* getHandle() const noexcept override { - return nullptr; - } - - using TensorHolder::get_tensor; - -private: - std::shared_ptr m_allocator; -}; - } // namespace ov - -namespace InferenceEngine { - -class INFERENCE_ENGINE_API_CLASS(IRemoteContextWrapper) : public ov::IRemoteContext { -private: - std::shared_ptr m_context; - mutable std::string m_name; - mutable ov::AnyMap m_params; - -public: - IRemoteContextWrapper(const std::shared_ptr& context) : m_context(context) {} - virtual ~IRemoteContextWrapper() = default; - const std::shared_ptr& get_context(); - const std::string& get_device_name() const override; - - const ov::AnyMap& get_property() const override; - - ov::SoPtr create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params = {}) override; - ov::SoPtr create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override; -}; - -} // namespace InferenceEngine diff --git a/src/inference/dev_api/so_ptr.hpp b/src/inference/dev_api/so_ptr.hpp deleted file mode 100644 index 
c9aa52642a6d46..00000000000000 --- a/src/inference/dev_api/so_ptr.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a wrapper class for handling plugin instantiation and releasing resources - * @file so_ptr.hpp - */ -#pragma once - -#include "openvino/runtime/so_ptr.hpp" diff --git a/src/inference/include/ie/cpp/ie_executable_network.hpp b/src/inference/include/ie/cpp/ie_executable_network.hpp index bcdfd013000ad2..80ee0cbab45338 100644 --- a/src/inference/include/ie/cpp/ie_executable_network.hpp +++ b/src/inference/include/ie/cpp/ie_executable_network.hpp @@ -30,7 +30,6 @@ #include "cpp/ie_infer_request.hpp" #include "ie_iexecutable_network.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" namespace ov { class Core; @@ -171,13 +170,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExecutableNetwo */ Parameter GetMetric(const std::string& name) const; - /** - * @brief Returns pointer to plugin-specific shared context - * on remote accelerator device that was used to create this ExecutableNetwork - * @return A context - */ - RemoteContext::Ptr GetContext() const; - /** * @brief Checks if current ExecutableNetwork object is not initialized * @return true if current ExecutableNetwork object is not initialized, false - otherwise diff --git a/src/inference/include/ie/cpp/ie_infer_request.hpp b/src/inference/include/ie/cpp/ie_infer_request.hpp index b42a35779abccb..69702bb5cccd1f 100644 --- a/src/inference/include/ie/cpp/ie_infer_request.hpp +++ b/src/inference/include/ie/cpp/ie_infer_request.hpp @@ -118,13 +118,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) { */ Blob::Ptr GetBlob(const std::string& name); - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. - * @return pointer to pre-process info of blob with name - */ - const PreProcessInfo& GetPreProcess(const std::string& name) const; - /** * @brief Infers specified input(s) in synchronous mode * diff --git a/src/inference/include/ie/details/ie_blob_iterator.hpp b/src/inference/include/ie/details/ie_blob_iterator.hpp deleted file mode 100644 index e6f92e46798561..00000000000000 --- a/src/inference/include/ie/details/ie_blob_iterator.hpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for the BlobIterator class - * - * @file ie_blob_iterator.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# endif
-#endif
-
-#include <utility>
-
-#include "ie_api.h"
-#include "ie_locked_memory.hpp"
-
-IE_SUPPRESS_DEPRECATED_START
-namespace InferenceEngine {
-namespace details {
-/**
- * @brief This class provides range loop support for TBlob objects
- */
-template <class T>
-class INFERENCE_ENGINE_1_0_DEPRECATED BlobIterator {
-    LockedMemory<T> _mem;
-    size_t _offset;
-
-public:
-    /**
-     * @brief A move constructor to create a BlobIterator instance from a LockedMemory instance.
-     * Explicitly rejects implicit conversions.
-     * @param lk Rvalue of the memory instance to move from
-     * @param offset Size of offset in memory
-     */
-    explicit BlobIterator(LockedMemory<T>&& lk, size_t offset = 0) : _mem(std::move(lk)), _offset(offset) {}
-
-    /**
-     * @brief Increments an offset of the current BlobIterator instance
-     * @return The current BlobIterator instance
-     */
-    BlobIterator& operator++() {
-        _offset++;
-        return *this;
-    }
-
-    /**
-     * @brief An overloaded postfix increment operator.
-     * The implementation does not follow the std interface since only move semantics is used
-     */
-    void operator++(int) {
-        _offset++;
-    }
-
-    /**
-     * @brief Checks if the given iterator is not equal to the current one
-     * @param that Iterator to compare with
-     * @return true if the given iterator is not equal to the current one, false - otherwise
-     */
-    bool operator!=(const BlobIterator& that) const {
-        return !operator==(that);
-    }
-
-    /**
-     * @brief Gets a value by the pointer to the current iterator
-     * @return The value stored in memory for the current offset value
-     */
-    const T& operator*() const {
-        return *(_mem.template as<const T*>() + _offset);
-    }
-
-    /**
-     * @brief Gets a value by the pointer to the current iterator
-     * @return The value stored in memory for the current offset value
-     */
-    T& operator*() {
-        return *(_mem.template as<T*>() + _offset);
-    }
-
-    /**
-     * @brief Compares the given iterator with the current one
-     * @param that Iterator to compare with
-     * @return true if the given iterator is equal to the current one, false - otherwise
-     */
-    bool operator==(const BlobIterator& that) const {
-        return &operator*() == &that.operator*();
-    }
-};
-}  // namespace details
-}  // namespace InferenceEngine
-IE_SUPPRESS_DEPRECATED_END
diff --git a/src/inference/include/ie/details/ie_so_loader.h b/src/inference/include/ie/details/ie_so_loader.h
deleted file mode 100644
index d6209ccbd6b0af..00000000000000
--- a/src/inference/include/ie/details/ie_so_loader.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header file for the definition of an abstraction over platform-specific shared objects
- *
- * @file ie_so_loader.h
- */
-#pragma once
-
-#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
-# define IE_LEGACY_HEADER_INCLUDED
-# ifdef _MSC_VER
-#  pragma message( \
-    "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
-# else
-#  warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release.
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" - -namespace InferenceEngine { -namespace details { - -/** - * @deprecated This is internal stuff. Use Inference Engine Plugin API - * @brief This class provides an OS shared module abstraction - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) { - std::shared_ptr _so; - -public: - /** - * @brief Constructs from existing object - */ - SharedObjectLoader(const std::shared_ptr& so); - - /** - * @brief Default constructor - */ - SharedObjectLoader() = default; - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - /** - * @brief Loads a library with the wide char name specified. - * @param pluginName Full or relative path to the plugin library - */ - explicit SharedObjectLoader(const wchar_t* pluginName); -#endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - - /** - * @brief Loads a library with the name specified. - * @param pluginName Full or relative path to the plugin library - */ - explicit SharedObjectLoader(const char* pluginName); - - /** - * @brief A destructor - */ - ~SharedObjectLoader(); - - /** - * @brief Searches for a function symbol in the loaded module - * @param symbolName Name of function to find - * @return A pointer to the function if found - * @throws Exception if the function is not found - */ - void* get_symbol(const char* symbolName) const; - - /** - * @brief Retruns reference to type erased implementation - * @throws Exception if the function is not found - */ - std::shared_ptr get() const; -}; - -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/include/ie/details/ie_so_pointer.hpp b/src/inference/include/ie/details/ie_so_pointer.hpp deleted file mode 100644 index e3cbda07c50d7f..00000000000000 --- a/src/inference/include/ie/details/ie_so_pointer.hpp +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a wrapper class for handling plugin instantiation and releasing resources - * @file ie_so_pointer.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include -#include -#include - -#include "ie_common.h" -#include "ie_so_loader.h" - -namespace InferenceEngine { -namespace details { -/** - * @brief This class is a trait class that provides a creator with a function name corresponding to the templated class - * parameter - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED SOCreatorTrait {}; - -/** - * @brief Enables only `char` or `wchar_t` template specializations - * @tparam C A char type - */ -template -using enableIfSupportedChar = - typename std::enable_if<(std::is_same::value || std::is_same::value)>::type; - -/** - * @deprecated This is internal stuff. 
Use Inference Engine Plugin API - * @brief This class instantiate object using shared library - * @tparam T An type of object SOPointer can hold - */ -template -class INFERENCE_ENGINE_1_0_DEPRECATED SOPointer { - template - friend class SOPointer; - - IE_SUPPRESS_DEPRECATED_START - struct HasRelease { - template - static char test(decltype(&C::Release)); - template - static long test(...); - constexpr static const bool value = sizeof(test(nullptr)) == sizeof(char); - }; - IE_SUPPRESS_DEPRECATED_END - -public: - /** - * @brief Default constructor - */ - SOPointer() = default; - - /** - * @brief The main constructor - * @param name Name of a shared library file - */ - template > - SOPointer(const std::basic_string& name) { - try { - _so = SharedObjectLoader(name.c_str()); - Load(std::integral_constant{}); - } catch (const std::runtime_error& ex) { - IE_THROW() << ex.what(); - } catch (...) { - details::Rethrow(); - } - } - - /** - * @brief Constructs an object with existing reference - * @param so Existing pointer to a library loader - * @param ptr Existing reference to an object - */ - SOPointer(const SharedObjectLoader& so, const std::shared_ptr& ptr) : _so{so}, _ptr{ptr} {} - - /** - * @brief Constructs an object with existing loader - * @param so Existing pointer to a library loader - */ - explicit SOPointer(const SharedObjectLoader& so) : _so(so) { - Load(std::integral_constant{}); - } - - /** - * @brief The copy-like constructor, can create So Pointer that dereferenced into child type if T is derived of U - * @param that copied SOPointer object - */ - template - SOPointer(const SOPointer& that) : _so(that._so), - _ptr(std::dynamic_pointer_cast(that._ptr)) { - IE_ASSERT(_ptr != nullptr); - } - - /** - * @brief Standard pointer operator - * @return underlined interface with disabled Release method - */ - T* operator->() const noexcept { - return _ptr.get(); - } - - explicit operator bool() const noexcept { - return _ptr != nullptr; - } - - friend bool operator==(std::nullptr_t, const SOPointer& ptr) noexcept { - return !ptr; - } - friend bool operator==(const SOPointer& ptr, std::nullptr_t) noexcept { - return !ptr; - } - friend bool operator!=(std::nullptr_t, const SOPointer& ptr) noexcept { - return static_cast(ptr); - } - friend bool operator!=(const SOPointer& ptr, std::nullptr_t) noexcept { - return static_cast(ptr); - } - - operator const SharedObjectLoader&() const noexcept { - return _so; - } - - operator std::shared_ptr&() noexcept { - return _ptr; - } - -protected: - /** - * @brief Implements load of object from library if Release method is presented - */ - void Load(std::true_type) { - try { - void* create = nullptr; - try { - create = _so.get_symbol((SOCreatorTrait::name + std::string("Shared")).c_str()); - } catch (const NotFound&) { - } - if (create == nullptr) { - create = _so.get_symbol(SOCreatorTrait::name); - using CreateF = StatusCode(T*&, ResponseDesc*); - T* object = nullptr; - ResponseDesc desc; - StatusCode sts = reinterpret_cast(create)(object, &desc); - if (sts != OK) { - IE_EXCEPTION_SWITCH(sts, - ExceptionType, - InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM} <<= - std::stringstream{} << desc.msg) - } - IE_SUPPRESS_DEPRECATED_START - _ptr = std::shared_ptr(object, [](T* ptr) { - ptr->Release(); - }); - IE_SUPPRESS_DEPRECATED_END - } else { - using CreateF = void(std::shared_ptr&); - reinterpret_cast(create)(_ptr); - } - } catch (...) 
{ - details::Rethrow(); - } - } - - /** - * @brief Implements load of object from library - */ - void Load(std::false_type) { - try { - using CreateF = void(std::shared_ptr&); - reinterpret_cast(_so.get_symbol(SOCreatorTrait::name))(_ptr); - } catch (...) { - details::Rethrow(); - } - } - - /** - * @brief The DLL - */ - SharedObjectLoader _so; - - /** - * @brief Gets a smart pointer to the custom object - */ - std::shared_ptr _ptr; -}; -} // namespace details -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_blob.h b/src/inference/include/ie/ie_blob.h index f8e116ddfc5b3d..4436136c5832e8 100644 --- a/src/inference/include/ie/ie_blob.h +++ b/src/inference/include/ie/ie_blob.h @@ -29,7 +29,6 @@ #include #include -#include "details/ie_blob_iterator.hpp" #include "details/ie_pre_allocator.hpp" #include "ie_allocator.hpp" #include "ie_common.h" @@ -40,8 +39,6 @@ namespace InferenceEngine { IE_SUPPRESS_DEPRECATED_START -class RemoteBlob; - /** * @brief This class represents a universal container in the Inference Engine * @@ -82,7 +79,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { typename std::enable_if::value && !std::is_reference::value, int>::type = 0, typename std::enable_if::value, int>::type = 0> bool is() noexcept { - return dynamic_cast(getHardwareBlob()) != nullptr; + return dynamic_cast(this) != nullptr; } /** @@ -95,7 +92,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { typename std::enable_if::value && !std::is_reference::value, int>::type = 0, typename std::enable_if::value, int>::type = 0> bool is() const noexcept { - return dynamic_cast(getHardwareBlob()) != nullptr; + return dynamic_cast(this) != nullptr; } /** @@ -106,25 +103,9 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @tparam T Type to cast to. Must represent a class derived from the Blob * @return Raw pointer to the object of the type T or nullptr on error */ - template < - typename T, - typename std::enable_if::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value && !std::is_same::value, int>::type = 0> - T* as() noexcept { - return dynamic_cast(getHardwareBlob()); - } - - /** - * @brief Casts this Blob object to the type RemoteBlob. - * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. Must represent a class derived from the Blob - * @return Raw pointer to the object of the type T or nullptr on error - */ template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> + typename std::enable_if::value, int>::type = 0> T* as() noexcept { return dynamic_cast(this); } @@ -137,27 +118,11 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @tparam T Type to cast to. Must represent a class derived from the Blob * @return Raw pointer to the object of the type const T or nullptr on error */ - template < - typename T, - typename std::enable_if::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value && !std::is_same::value, int>::type = 0> - const T* as() const noexcept { - return dynamic_cast(getHardwareBlob()); - } - - /** - * @brief Casts this Blob object to the type RemoteBlob. - * - * Use InferenceEngine::as() to operate with shared Blob objects instead of raw pointers - * - * @tparam T Type to cast to. 
Must represent a class derived from the Blob - * @return Raw pointer to the object of the type T or nullptr on error - */ template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> + typename std::enable_if::value, int>::type = 0> const T* as() const noexcept { - return dynamic_cast(this); + return dynamic_cast(this); } /** @@ -320,9 +285,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Blob) { * @return The allocator for allocator-based blobs or nullptr if there is none */ virtual const std::shared_ptr& getAllocator() const noexcept = 0; - - const Blob* getHardwareBlob() const; - Blob* getHardwareBlob(); }; /** @@ -713,50 +675,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED TBlob : public MemoryBlob { return Blob::Ptr(new TBlob(*this, begin, end)); } - /** - * @brief Gets BlobIterator for the data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type T - */ - details::BlobIterator begin() { - return details::BlobIterator(data()); - } - - /** - * @brief Gets BlobIterator for the end of data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type T representing end of the data - */ - details::BlobIterator end() { - return details::BlobIterator(data(), size()); - } - - /** - * @brief Gets a const BlobIterator for the read-only data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type const T - */ - details::BlobIterator begin() const { - return details::BlobIterator(readOnly()); - } - - /** - * @brief Gets a const BlobIterator for the end of read-only data. - * - * Enables a ranged loop support for the TBlob object. - * - * @return BlobIterator object of type const T representing end of data - */ - details::BlobIterator end() const { - return details::BlobIterator(readOnly(), size()); - } - protected: /** * @brief Local instance of IAllocator to manipulate memory. diff --git a/src/inference/include/ie/ie_compound_blob.h b/src/inference/include/ie/ie_compound_blob.h deleted file mode 100644 index 5dce8d82eee7b1..00000000000000 --- a/src/inference/include/ie/ie_compound_blob.h +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for CompoundBlob - * - * @file ie_compound_blob.h - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { -/** - * @brief This class represents a blob that contains other blobs - * - * Compound blob is a wrapper blob over references to underlying blobs. These blobs should share - * some properties and can be grouped into a single entity. 
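With TBlob's BlobIterator-based begin()/end() removed in the ie_blob.h hunk above (the CompoundBlob declaration continues below), element access on the remaining deprecated blob API goes through LockedMemory. A short sketch on the legacy API, assuming it is still available at this point of the transition:

```cpp
// Sketch: touching TBlob data without the removed BlobIterator.
// Uses the deprecated 1.0 API that this patch leaves in place.
#include <iostream>

#include "ie_blob.h"

int main() {
    using namespace InferenceEngine;
    TensorDesc desc(Precision::FP32, {1, 4}, Layout::NC);
    auto blob = make_shared_blob<float>(desc);
    blob->allocate();

    // rwmap() yields a LockedMemory handle; as<float*>() exposes the data.
    auto mapped = blob->rwmap();
    float* data = mapped.as<float*>();
    for (size_t i = 0; i < blob->size(); ++i)
        data[i] = static_cast<float>(i);

    std::cout << data[0] << '\n';
    return 0;
}
```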
- */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(CompoundBlob) : public Blob { -public: - /** - * @brief A smart pointer to the CompoundBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const CompoundBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief Constructs a compound blob from a vector of blobs - * - * @param blobs A vector of blobs that is copied to this object - */ - explicit CompoundBlob(const std::vector& blobs); - - /** - * @brief Constructs a compound blob from a vector of blobs - * - * @param blobs A vector of blobs that is moved to this object - */ - explicit CompoundBlob(std::vector&& blobs); - - /** - * @brief Always returns `0` - * @return Returns `0` - */ - size_t byteSize() const override; - - /** - * @brief Always returns `0` - * @return Returns `0` - */ - size_t element_size() const override; - - /** - * @brief No operation is performed. Compound blob does not allocate/deallocate any data - */ - void allocate() noexcept override; - - /** - * @brief No operation is performed. Compound blob does not allocate/deallocate any data - * @return Returns `false` - */ - bool deallocate() noexcept override; - - /** - * @brief Always returns an empty LockedMemory object - * @return Empty locked memory - */ - LockedMemory buffer() noexcept override; - - /** - * @brief Always returns an empty LockedMemory object - * @return Empty locked memory - */ - LockedMemory cbuffer() const noexcept override; - - /** - * @brief Returns the number of underlying blobs in the compound blob - * @return A number of underlying blobs - */ - size_t size() const noexcept override; - - /** - * @brief Returns an underlying blob at index i - * - * @param i the index of the underlying Blob object - * @return A smart pointer to the underlying Blob object or nullptr in case of an error - */ - virtual Blob::Ptr getBlob(size_t i) const noexcept; - - Blob::Ptr createROI(const ROI& roi) const override; - -protected: - /** - * @brief Constructs a compound blob with specified descriptor - * - * @param tensorDesc A tensor descriptor for the compound blob - */ - explicit CompoundBlob(const TensorDesc& tensorDesc); - - /** - * @brief Compound blob container for underlying blobs - */ - std::vector _blobs; - - const std::shared_ptr& getAllocator() const noexcept override; -}; - -/** - * @brief This class represents a blob that contains other blobs - one per batch - * @details Plugin which supports BatchedBlob input should report BATCHED_BLOB - * in the OPTIMIZATION_CAPABILITIES metric. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob { -public: - /** - * @brief A smart pointer to the BatchedBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const BatchedBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief Constructs a batched blob from a vector of blobs - * @details All passed blobs should meet following requirements: - * - all blobs have equal tensor descriptors, - * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW, HWC - * - batch dimensions should be equal to 1 or not defined (C, CHW, HWC). 
- * Resulting blob's tensor descriptor is constructed using tensor descriptors - * of passed blobs by setting batch dimension to blobs.size() - * - * @param blobs A vector of blobs that is copied to this object - */ - explicit BatchedBlob(const std::vector& blobs); - - /** - * @brief Constructs a batched blob from a vector of blobs - * @details All passed blobs should meet following requirements: - * - all blobs have equal tensor descriptors, - * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW, HWC - * - batch dimensions should be equal to 1 or not defined (C, CHW, HWC). - * Resulting blob's tensor descriptor is constructed using tensor descriptors - * of passed blobs by setting batch dimension to blobs.size() - * - * @param blobs A vector of blobs that is moved to this object - */ - explicit BatchedBlob(std::vector&& blobs); -}; -} // namespace InferenceEngine -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/include/ie/ie_core.hpp b/src/inference/include/ie/ie_core.hpp index 5c1608d45c6103..0a25fae4444343 100644 --- a/src/inference/include/ie/ie_core.hpp +++ b/src/inference/include/ie/ie_core.hpp @@ -28,7 +28,6 @@ #include "cpp/ie_executable_network.hpp" #include "ie_extension.h" #include "ie_plugin_config.hpp" -#include "ie_remote_context.hpp" #include "ie_version.hpp" namespace InferenceEngine { @@ -178,18 +177,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { */ void AddExtension(const IExtensionPtr& extension); - /** - * @brief Creates an executable network from a network object within a specified remote context. - * @param network CNNNetwork object acquired from Core::ReadNetwork - * @param context Pointer to RemoteContext object - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network object - */ - ExecutableNetwork LoadNetwork(const CNNNetwork& network, - RemoteContext::Ptr context, - const std::map& config = {}); - /** * @brief Registers extension for the specified plugin * @@ -232,20 +219,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { INFERENCE_ENGINE_DEPRECATED("Use Core::ImportNetwork with explicit device name") ExecutableNetwork ImportNetwork(std::istream& networkModel); - /** - * @brief Creates an executable network from a previously exported network within a specified - * remote context. - * - * @param networkModel Network model stream - * @param context Pointer to RemoteContext object - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load - * operation - * @return An executable network reference - */ - ExecutableNetwork ImportNetwork(std::istream& networkModel, - const RemoteContext::Ptr& context, - const std::map& config = {}); - /** * @brief Query device if it supports specified network with specified configuration * @@ -348,22 +321,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) { * @param xmlConfigFile A path to .xml file with plugins to register. */ void RegisterPlugins(const std::string& xmlConfigFile); - - /** - * @brief Create a new shared context object on specified accelerator device - * using specified plugin-specific low level device API parameters (device handle, pointer, etc.) - * @param deviceName Name of a device to create new shared context on. - * @param params Map of device-specific shared context parameters. - * @return A shared pointer to a created remote context. 
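The BatchedBlob removed in the ie_compound_blob.h hunk above carried one blob per batch element; in the 2.0 API the same intent is expressed by passing one tensor per sample to the infer request, provided the plugin supports it (for example through automatic batching). A hedged sketch for a model whose input batch size is 2 (the batch size is an assumption):

```cpp
// Sketch: per-sample tensors instead of the removed BatchedBlob.
// Assumption: `compiled` has a static input shape with batch dimension 2.
#include <vector>

#include "openvino/runtime/core.hpp"

void infer_batched(ov::CompiledModel& compiled) {
    ov::InferRequest request = compiled.create_infer_request();

    const auto& port = compiled.input();
    ov::Shape sample_shape = port.get_shape();
    sample_shape[0] = 1;  // one sample per tensor

    std::vector<ov::Tensor> samples{ov::Tensor(port.get_element_type(), sample_shape),
                                    ov::Tensor(port.get_element_type(), sample_shape)};
    // Each tensor covers one element of the batch dimension.
    request.set_tensors(port, samples);
    request.infer();
}
```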
- */ - RemoteContext::Ptr CreateContext(const std::string& deviceName, const ParamMap& params); - - /** - * @brief Get a pointer to default(plugin-supplied) shared context object for specified accelerator device. - * @param deviceName - A name of a device to get create shared context from. - * @return A shared pointer to a default remote context. - */ - RemoteContext::Ptr GetDefaultContext(const std::string& deviceName); }; /** diff --git a/src/inference/include/ie/ie_iexecutable_network.hpp b/src/inference/include/ie/ie_iexecutable_network.hpp index be826d107aa602..989167885c5d9d 100644 --- a/src/inference/include/ie/ie_iexecutable_network.hpp +++ b/src/inference/include/ie/ie_iexecutable_network.hpp @@ -30,7 +30,6 @@ #include "ie_iinfer_request.hpp" #include "ie_input_info.hpp" #include "ie_parameter.hpp" -#include "ie_remote_context.hpp" namespace InferenceEngine { @@ -158,15 +157,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IExecutableNetwork : public std::enable_sh */ virtual StatusCode GetMetric(const std::string& name, Parameter& result, ResponseDesc* resp) const noexcept = 0; - /** - * @brief Gets shared context used to create an executable network. - * - * @param pContext Reference to a pointer that will receive resulting shared context object ptr - * @param resp Pointer to the response message that holds a description of an error if any occurred - * @return code of the operation. InferenceEngine::OK if succeeded - */ - virtual StatusCode GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const noexcept = 0; - protected: virtual ~IExecutableNetwork() = default; }; diff --git a/src/inference/include/ie/ie_iinfer_request.hpp b/src/inference/include/ie/ie_iinfer_request.hpp index 896d33aaed12ce..73c7570abf6d76 100644 --- a/src/inference/include/ie/ie_iinfer_request.hpp +++ b/src/inference/include/ie/ie_iinfer_request.hpp @@ -26,7 +26,6 @@ #include "ie_blob.h" #include "ie_common.h" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -85,15 +84,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_ */ virtual StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept = 0; - /** - * @brief Gets pre-process for input data - * @param name Name of input blob. 
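The GetContext accessor dropped from IExecutableNetwork above maps to ov::CompiledModel::get_context() in the 2.0 API. A minimal sketch, assuming the model was compiled by a context-aware plugin such as GPU:

```cpp
// Sketch: querying the context a model was compiled against.
// Assumption: `compiled` came from a plugin that exposes remote contexts.
#include <iostream>

#include "openvino/runtime/core.hpp"

void print_context_device(const ov::CompiledModel& compiled) {
    ov::RemoteContext ctx = compiled.get_context();
    std::cout << "compiled on: " << ctx.get_device_name() << '\n';
}
```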
- * @param info pointer to a pointer to PreProcessInfo structure - * @param resp Optional: pointer to an already allocated object to contain information in case of failure - * @return Status code of the operation: OK (0) for success - */ - virtual StatusCode GetPreProcess(const char* name, const PreProcessInfo** info, ResponseDesc* resp) const - noexcept = 0; /** * @brief Infers specified input(s) in synchronous mode * diff --git a/src/inference/include/ie/ie_input_info.hpp b/src/inference/include/ie/ie_input_info.hpp index 841e3d95316fb4..ec7092d42e5c62 100644 --- a/src/inference/include/ie/ie_input_info.hpp +++ b/src/inference/include/ie/ie_input_info.hpp @@ -28,7 +28,6 @@ #include "ie_common.h" #include "ie_data.h" #include "ie_precision.hpp" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -163,28 +162,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED InputInfo { return _inputData->getTensorDesc(); } - /** - * @brief Gets pre-process info for the input - * @return A reference to the PreProcessInfo instance that contains pre-process info for this input - */ - PreProcessInfo& getPreProcess() { - return _preProcessInfo; - } - - /** - * @brief Gets pre-process info for the input - * @return A reference to the PreProcessInfo instance that contains pre-process info for this input - */ - const PreProcessInfo& getPreProcess() const { - return _preProcessInfo; - } - protected: - /** - * @brief Pre-process info for the input - */ - PreProcessInfo _preProcessInfo; - /** * @brief A smart pointer to the input data */ diff --git a/src/inference/include/ie/ie_parallel.hpp b/src/inference/include/ie/ie_parallel.hpp deleted file mode 100644 index 21dfc6d0e1c91f..00000000000000 --- a/src/inference/include/ie/ie_parallel.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief Contains declarations and definitions for sequential and multi-threading implementations. - * - * Multi-threading support is implemented in two variants: using the Threading Building Blocks library and OpenMP* - * product. To build a particular implementation, use the corresponding identifier: IE_THREAD_TBB, IE_THREAD_TBB_AUTO, - * IE_THREAD_OMP or IE_THREAD_SEQ. - * - * @file ie_parallel.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/core/parallel.hpp" - -#define IE_THREAD_TBB OV_THREAD_TBB -#define IE_THREAD_OMP OV_THREAD_OMP -#define IE_THREAD_SEQ OV_THREAD_SEQ -#define IE_THREAD_TBB_AUTO OV_THREAD_TBB_AUTO - -namespace InferenceEngine { - -using ov::for_1d; -using ov::for_2d; -using ov::for_3d; -using ov::for_4d; -using ov::for_5d; -using ov::for_6d; -using ov::parallel_for; -using ov::parallel_for2d; -using ov::parallel_for3d; -using ov::parallel_for4d; -using ov::parallel_for5d; -using ov::parallel_for6d; -using ov::parallel_it_init; -using ov::parallel_it_step; -using ov::parallel_nt; -using ov::parallel_nt_static; -using ov::parallel_sort; -using ov::parallel_sum; -using ov::parallel_sum2d; -using ov::parallel_sum3d; -using ov::splitter; - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_plugin_config.hpp b/src/inference/include/ie/ie_plugin_config.hpp index c96334a399806a..2d3bf9eafa1985 100644 --- a/src/inference/include/ie/ie_plugin_config.hpp +++ b/src/inference/include/ie/ie_plugin_config.hpp @@ -112,7 +112,6 @@ DECLARE_METRIC_KEY(FULL_DEVICE_NAME, std::string); * - "INT8" - device can support models with INT8 layers * - "BIN" - device can support models with BIN layers * - "WINOGRAD" - device can support models where convolution implemented via Winograd transformations - * - "BATCHED_BLOB" - device can support BatchedBlob */ INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(OPTIMIZATION_CAPABILITIES, std::vector); @@ -129,8 +128,6 @@ INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_VALUE(BIN); INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_VALUE(WINOGRAD); -INFERENCE_ENGINE_1_0_DEPRECATED -DECLARE_METRIC_VALUE(BATCHED_BLOB); /** * @brief Metric to provide information about a range for streams on platforms where streams are supported. diff --git a/src/inference/include/ie/ie_preprocess.hpp b/src/inference/include/ie/ie_preprocess.hpp deleted file mode 100644 index 1b895962f5140f..00000000000000 --- a/src/inference/include/ie/ie_preprocess.hpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This header file provides structures to store info about pre-processing of - * network inputs (scale, mean image, ...) - * - * @file ie_preprocess.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include - -#include "ie_blob.h" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START - -/** - * @brief This structure stores info about pre-processing of network inputs (scale, mean image, ...) 
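The ie_parallel.hpp deleted above merely aliased openvino/core/parallel.hpp, so callers can switch to the ov:: spellings directly. A small sketch; which backend actually runs the loop (TBB, OpenMP, or sequential) depends on how OpenVINO was built:

```cpp
// Sketch: ov::parallel_for from openvino/core/parallel.hpp, the target
// that the deleted ie_parallel.hpp wrapper pointed to.
#include <vector>

#include "openvino/core/parallel.hpp"

int main() {
    std::vector<float> data(1024, 1.0f);
    // The body runs once per index; partitioning across threads is
    // handled by the configured threading backend.
    ov::parallel_for(data.size(), [&](size_t i) {
        data[i] *= 2.0f;
    });
    return 0;
}
```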
- */ -struct INFERENCE_ENGINE_1_0_DEPRECATED PreProcessChannel { - /** @brief Scale parameter for a channel */ - float stdScale = 1; - - /** @brief Mean value for a channel */ - float meanValue = 0; - - /** @brief Mean data for a channel */ - Blob::Ptr meanData; - - /** @brief Smart pointer to an instance */ - using Ptr = std::shared_ptr; -}; - -/** - * @brief Defines available types of mean - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED MeanVariant { - MEAN_IMAGE, /**< mean value is specified for each input pixel */ - MEAN_VALUE, /**< mean value is specified for each input channel */ - NONE, /**< no mean value specified */ -}; - -/** - * @enum ResizeAlgorithm - * @brief Represents the list of supported resize algorithms. - */ -enum INFERENCE_ENGINE_1_0_DEPRECATED ResizeAlgorithm { NO_RESIZE = 0, RESIZE_BILINEAR, RESIZE_AREA }; - -/** - * @brief This class stores pre-process information for the input - */ -class INFERENCE_ENGINE_1_0_DEPRECATED PreProcessInfo { - // Channel data - std::vector _channelsInfo; - MeanVariant _variant = NONE; - - // Resize Algorithm to be applied for input before inference if needed. - ResizeAlgorithm _resizeAlg = NO_RESIZE; - - // Color format to be used in on-demand color conversions applied to input before inference - ColorFormat _colorFormat = ColorFormat::RAW; - -public: - /** - * @brief Overloaded [] operator to safely get the channel by an index - * - * Throws an exception if channels are empty - * - * @param index Index of the channel to get - * @return The pre-process channel instance - */ - PreProcessChannel::Ptr& operator[](size_t index) { - if (_channelsInfo.empty()) { - IE_THROW() << "accessing pre-process when nothing was set."; - } - if (index >= _channelsInfo.size()) { - IE_THROW() << "pre process index " << index << " is out of bounds."; - } - return _channelsInfo[index]; - } - - /** - * @brief operator [] to safely get the channel preprocessing information by index. - * - * Throws exception if channels are empty or index is out of border - * - * @param index Index of the channel to get - * @return The const preprocess channel instance - */ - const PreProcessChannel::Ptr& operator[](size_t index) const { - if (_channelsInfo.empty()) { - IE_THROW() << "accessing pre-process when nothing was set."; - } - if (index >= _channelsInfo.size()) { - IE_THROW() << "pre process index " << index << " is out of bounds."; - } - return _channelsInfo[index]; - } - - /** - * @brief Returns a number of channels to preprocess - * - * @return The number of channels - */ - size_t getNumberOfChannels() const { - return _channelsInfo.size(); - } - - /** - * @brief Initializes with given number of channels - * - * @param numberOfChannels Number of channels to initialize - */ - void init(const size_t numberOfChannels) { - _channelsInfo.resize(numberOfChannels); - for (auto& channelInfo : _channelsInfo) { - channelInfo = std::make_shared(); - } - } - - /** - * @brief Sets mean image values if operation is applicable. 
- * - * Also sets the mean type to MEAN_IMAGE for all channels - * - * @param meanImage Blob with a mean image - */ - void setMeanImage(const Blob::Ptr& meanImage) { - if (meanImage.get() == nullptr) { - IE_THROW() << "Failed to set invalid mean image: nullptr"; - } else if (meanImage.get()->getTensorDesc().getLayout() != Layout::CHW) { - IE_THROW() << "Mean image layout should be CHW"; - } else if (meanImage.get()->getTensorDesc().getDims().size() != 3) { - IE_THROW() << "Failed to set invalid mean image: number of dimensions != 3"; - } else if (meanImage.get()->getTensorDesc().getDims()[0] != getNumberOfChannels()) { - IE_THROW() << "Failed to set invalid mean image: number of channels != " << getNumberOfChannels(); - } - _variant = MEAN_IMAGE; - } - - /** - * @brief Sets mean image values if operation is applicable. - * - * Also sets the mean type to MEAN_IMAGE for a particular channel - * - * @param meanImage Blob with a mean image - * @param channel Index of a particular channel - */ - void setMeanImageForChannel(const Blob::Ptr& meanImage, const size_t channel) { - if (meanImage.get() == nullptr) { - IE_THROW() << "Failed to set invalid mean image for channel: nullptr"; - } else if (meanImage.get()->getTensorDesc().getDims().size() != 2) { - IE_THROW() << "Failed to set invalid mean image for channel: number of dimensions != 2"; - } else if (channel >= _channelsInfo.size()) { - IE_THROW() << "Channel " << channel << " exceed number of PreProcess channels: " << _channelsInfo.size(); - } - _variant = MEAN_IMAGE; - _channelsInfo[channel]->meanData = meanImage; - } - - /** - * @brief Sets a type of mean operation - * - * @param variant Type of mean operation to set - */ - void setVariant(const MeanVariant& variant) { - _variant = variant; - } - - /** - * @brief Gets a type of mean operation - * - * @return The type of mean operation - */ - MeanVariant getMeanVariant() const { - return _variant; - } - - /** - * @brief Sets resize algorithm to be used during pre-processing - * - * @param alg Resize algorithm - */ - void setResizeAlgorithm(const ResizeAlgorithm& alg) { - _resizeAlg = alg; - } - - /** - * @brief Gets preconfigured resize algorithm - * - * @return Resize algorithm - */ - ResizeAlgorithm getResizeAlgorithm() const { - return _resizeAlg; - } - - /** - * @brief Changes the color format of the input data provided by the user - * - * This function should be called before loading the network to the plugin - * Setting color format different from ColorFormat::RAW enables automatic color conversion - * (as a part of built-in preprocessing routine) - * - * @param fmt A new color format associated with the input - */ - void setColorFormat(ColorFormat fmt) { - _colorFormat = fmt; - } - - /** - * @brief Gets a color format associated with the input - * - * @details By default, the color format is ColorFormat::RAW meaning - * there is no particular color format assigned to the input - * @return Color format. 
- */ - ColorFormat getColorFormat() const { - return _colorFormat; - } -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_remote_blob.hpp b/src/inference/include/ie/ie_remote_blob.hpp deleted file mode 100644 index ad241b256ead29..00000000000000 --- a/src/inference/include/ie/ie_remote_blob.hpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the IE RemoteContext and RemoteBlob classes - * - * @file ie_remote_context.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_blob.h" -#include "ie_parameter.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -class RemoteContext; - -/** - * @brief This class represents an Inference Engine abstraction to the memory allocated - * on the remote (non-CPU) accelerator device - */ -class INFERENCE_ENGINE_1_0_DEPRECATED RemoteBlob : public MemoryBlob { -public: - /** - * @brief A smart pointer to the RemoteBlob object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const RemoteBlob object - */ - using CPtr = std::shared_ptr; - - /** - * @brief RemoteBlob virtual destructor - */ - virtual ~RemoteBlob() = default; - - /** - * @brief Constructor. Creates an empty RemoteBlob object with the specified precision. - * @param tensorDesc Defines the layout and dims of the blob - */ - explicit RemoteBlob(const TensorDesc& tensorDesc) : MemoryBlob(tensorDesc) {} - - /** - * @brief Returns a map of device-specific parameters required for low-level - * operations with underlying object. - * Parameters include device/context/surface/buffer handles, access flags, - * etc. Contents of the map returned depend on remote execution context that is - * currently set on the device (working scenario). - * Abstract method. - * @return A map of name/parameter elements. - */ - virtual ParamMap getParams() const = 0; - - /** - * @brief Returns name of the device on which underlying object is allocated. - * Abstract method. - * @return A device name string in the same format as that in plugin metric. - */ - virtual std::string getDeviceName() const noexcept = 0; - - /** - * @brief Returns device context which underlying object belongs to. - * Abstract method. - * @return Pointer to plugin-specific context class object, which is derived from RemoteContext. - * Dynamic casting should be used if it is necessary to retrieve a pointer to original class. 
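
> The `PreProcessInfo` machinery deleted above (per-channel mean/scale, mean image, resize, color format) maps onto `ov::preprocess::PrePostProcessor` from the 2.0 transition guide that the deprecation warnings point to. A minimal sketch assuming a single-input model; the concrete mean/scale values and color formats are placeholders:

```cpp
#include "openvino/core/preprocess/pre_post_process.hpp"

std::shared_ptr<ov::Model> attach_preprocessing(const std::shared_ptr<ov::Model>& model) {
    ov::preprocess::PrePostProcessor ppp(model);
    // Roughly setColorFormat plus the on-demand color conversion.
    ppp.input().tensor().set_color_format(ov::preprocess::ColorFormat::BGR);
    ppp.input().preprocess().convert_color(ov::preprocess::ColorFormat::RGB);
    // Roughly MEAN_VALUE plus per-channel stdScale (placeholder values).
    ppp.input().preprocess().mean({127.5f, 127.5f, 127.5f}).scale({127.5f, 127.5f, 127.5f});
    // Steps are baked into the returned model instead of living in InputInfo.
    return ppp.build();
}
```
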
- */ - virtual std::shared_ptr getContext() const noexcept = 0; -}; -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_remote_context.hpp b/src/inference/include/ie/ie_remote_context.hpp deleted file mode 100644 index 7e74e8c48ecaa6..00000000000000 --- a/src/inference/include/ie/ie_remote_context.hpp +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This is a header file for the IE RemoteContext and RemoteBlob classes - * - * @file ie_remote_context.hpp - */ -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ie_api.h" -#include "ie_parameter.hpp" -#include "ie_remote_blob.hpp" - -namespace InferenceEngine { -IE_SUPPRESS_DEPRECATED_START -/** - * @brief This class represents an Inference Engine abstraction - * for remote (non-CPU) accelerator device-specific execution context. - * Such context represents a scope on the device within which executable - * networks and remote memory blobs can exist, function and exchange data. - */ -class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(RemoteContext) - : public std::enable_shared_from_this { -public: - /** - * @brief A smart pointer to the RemoteContext object - */ - using Ptr = std::shared_ptr; - - /** - * @brief A smart pointer to the const RemoteContext object - */ - using CPtr = std::shared_ptr; - - /** - * @brief RemoteContext virtual destructor - */ - virtual ~RemoteContext() = default; - - /** - * @brief Checks if the RemoteContext object can be cast to the type T* - * - * @tparam T Type to be checked. Must represent a class derived from the RemoteContext - * @return true if this object can be dynamically cast to the type T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() noexcept { - return dynamic_cast(GetHardwareContext().get()) != nullptr; - } - - /** - * @brief Checks if the RemoteContext object can be cast to the type const T* - * - * @tparam T Type to be checked. Must represent a class derived from the RemoteContext - * @return true if this object can be dynamically cast to the type const T*. Otherwise, false - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - bool is() const noexcept { - return dynamic_cast(GetHardwareContext().get()) != nullptr; - } - - /** - * @brief Casts this RemoteContext object to the type T*. - * - * @tparam T Type to cast to. 
Must represent a class derived from the RemoteContext - * @return Raw pointer to the object of the type T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - T* as() noexcept { - return dynamic_cast(GetHardwareContext().get()); - } - - /** - * @brief Casts this RemoteContext object to the type const T*. - * - * @tparam T Type to cast to. Must represent a class derived from the RemoteContext - * @return Raw pointer to the object of the type const T or nullptr on error - */ - template ::value && !std::is_reference::value, int>::type = 0, - typename std::enable_if::value, int>::type = 0> - const T* as() const noexcept { - return dynamic_cast(GetHardwareContext().get()); - } - - /** - * @brief Returns name of the device on which underlying object is allocated. - * Abstract method. - * @return A device name string in the same format as that in plugin metric. - */ - virtual std::string getDeviceName() const noexcept = 0; - - /** - * @brief Allocates memory blob in device memory or wraps user-supplied memory handle - * using the specified tensor description and low-level device-specific parameters. - * Returns a pointer to the object which implements RemoteBlob interface. - * @param tensorDesc Defines the layout and dims of the blob - * @param params Map of the low-level blob object parameters. - * Abstract method. - * @return A pointer to plugin object that implements RemoteBlob interface. - */ - virtual RemoteBlob::Ptr CreateBlob(const TensorDesc& tensorDesc, const ParamMap& params = {}) = 0; - - /** - * @brief Allocates host accessible memory blob friendly for the device in current context - * Returns a pointer to the object which implements MemoryBlob interface. - * @param tensorDesc Defines the layout and dims of the blob - * @return A pointer to host accessible MemoryBlob object - */ - virtual MemoryBlob::Ptr CreateHostBlob(const TensorDesc& tensorDesc); - - /** - * @brief Returns a map of device-specific parameters required for low-level - * operations with underlying object. - * Parameters include device/context handles, access flags, - * etc. Contents of the map returned depend on remote execution context that is - * currently set on the device (working scenario). - * Abstract method. - * @return A map of name/parameter elements. - */ - virtual ParamMap getParams() const = 0; - - /** - * @brief Unwrap hardware remote context - * - * @return shared pointer to plugin specific remote context - */ - const std::shared_ptr GetHardwareContext(); - - /** - * @brief Unwrap hardware remote context - * - * @return shared pointer to plugin specific remote context - */ - const std::shared_ptr GetHardwareContext() const; -}; - -/** - * @brief A wrapper of CreateBlob method of RemoteContext to keep consistency with - * plugin-specific wrappers. - * @param desc Defines the layout and dims of the blob - * @param ctx Pointer to the plugin object derived from RemoteContext. - * @return A pointer to plugin object that implements RemoteBlob interface. 
- */ -inline INFERENCE_ENGINE_1_0_DEPRECATED RemoteBlob::Ptr make_shared_blob(const TensorDesc& desc, - RemoteContext::Ptr ctx) { - return ctx->CreateBlob(desc); -} - -IE_SUPPRESS_DEPRECATED_END -} // namespace InferenceEngine diff --git a/src/inference/include/ie/ie_transformations.hpp b/src/inference/include/ie/ie_transformations.hpp deleted file mode 100644 index 3b3df4c92502f4..00000000000000 --- a/src/inference/include/ie/ie_transformations.hpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief This header file defines the list of public transformations. - * - * @file ie_transformations.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "cpp/ie_cnn_network.h" -#include "ie_api.h" - -namespace InferenceEngine { - -/** - * @brief The transformation finds all TensorIterator/Loop layers in the network, - * processes all back edges that describe a connection between Result and Parameter - * of the TensorIterator/Loop bodies,and inserts ReadValue and Assign layers at the - * input and output corresponding to this back edge. - * Supported platform: CPU. - * - * The example below describes the changes made by the transformation - * [] - TensorIterator body - * () - new layer - * BE - back-edge - * - * before applying the transformation: - * -> input1[BE_1 -> Parameter -> Layers ... -> Result -> BE_1 ]output1-> - * - * after applying the transformation: - * ->(ReadValue)-> input1[BE_1 ->Parameter->Layers ...->Result->BE_1]output1 ->(Assign) - * \ - * ->... - * After applying the transformation, the resulting network can be inferred - * step by step, the states will store between inferences. - * @param network A network to apply LowLatency transformation - * @param use_const_initializer Changes the type of the initializing subgraph for ReadValue operations. - If "true", then the transformation inserts Constant before ReadValue operation. - If "false, then the transformation leaves existed initializing subgraph for ReadValue operation. - * Loop operation by a given number. Does not affect TensorIterators. 
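
> The `RemoteBlob`/`RemoteContext` headers deleted above have 2.0 counterparts in `ov::RemoteTensor`/`ov::RemoteContext`. A rough sketch of the replacement flow; the "GPU" device name is illustrative and depends on the plugin in use:

```cpp
#include "openvino/runtime/core.hpp"

ov::RemoteTensor make_device_tensor(ov::Core& core,
                                    const ov::element::Type& type,
                                    const ov::Shape& shape) {
    // Counterpart of the removed GetDefaultContext plugin call.
    ov::RemoteContext context = core.get_default_context("GPU");
    // Counterpart of RemoteContext::CreateBlob; low-level parameters,
    // when needed, travel in a trailing ov::AnyMap argument.
    return context.create_tensor(type, shape);
}
```
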
- */ -INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CPP(void) - lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer = true); -} // namespace InferenceEngine diff --git a/src/inference/include/ie/inference_engine.hpp b/src/inference/include/ie/inference_engine.hpp index 682dd3fe4b37be..5638013e294af2 100644 --- a/src/inference/include/ie/inference_engine.hpp +++ b/src/inference/include/ie/inference_engine.hpp @@ -18,9 +18,7 @@ # endif #endif -#include "ie_compound_blob.h" #include "ie_core.hpp" -#include "ie_transformations.hpp" // remove in 2022.1 major release #include diff --git a/src/inference/include/ie/vpu/hddl_config.hpp b/src/inference/include/ie/vpu/hddl_config.hpp deleted file mode 100644 index 777c1f5e1d63c0..00000000000000 --- a/src/inference/include/ie/vpu/hddl_config.hpp +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines advanced related properties for HDDL plugin. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file hddl_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "vpu_config.hpp" - -namespace InferenceEngine { - -namespace Metrics { - -/** - * @brief Metric to get a int of the device number, String value is METRIC_HDDL_DEVICE_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NUM, int); - -/** - * @brief Metric to get a std::vector of device names, String value is METRIC_HDDL_DEVICE_NAME - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_NAME, std::vector); - -/** - * @brief Metric to get a std::vector of device thermal, String value is METRIC_HDDL_DEVICE_THERMAL - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_THERMAL, std::vector); - -/** - * @brief Metric to get a std::vector of device ids, String value is METRIC_HDDL_DEVICE_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_ID, std::vector); - -/** - * @brief Metric to get a std::vector of device subclasses, String value is METRIC_HDDL_DEVICE_SUBCLASS - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_SUBCLASS, std::vector); - -/** - * @brief Metric to get a std::vector of device total memory, String value is METRIC_HDDL_MEMORY_TOTAL - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_TOTAL, std::vector); - -/** - * @brief Metric to get a std::vector of device used memory, String value is METRIC_HDDL_DEVICE_MEMORY_USED - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_USED, std::vector); - -/** - * @brief Metric to get a std::vector of device utilization, String value is METRIC_HDDL_DEVICE_UTILIZATION - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_UTILIZATION, std::vector); - -/** - * @brief Metric to get a std::vector of stream ids, String value is 
METRIC_HDDL_DEVICE_STREAM_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_STREAM_ID, std::vector); - -/** - * @brief Metric to get a std::vector of device tags, String value is METRIC_HDDL_DEVICE_TAG - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TAG, std::vector); - -/** - * @brief Metric to get a std::vector of group ids, String value is METRIC_HDDL_GROUP_ID - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_GROUP_ID, std::vector); - -/** - * @brief Metric to get a int number of device be using for group, String value is METRIC_HDDL_DEVICE_GROUP_USING_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_GROUP_USING_NUM, int); - -/** - * @brief Metric to get a int number of total device, String value is METRIC_HDDL_DEVICE_TOTAL_NUM - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int); - -} // namespace Metrics - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This option allows to specify the number of MYX devices used for inference a specific Executable network. - * Note: Only one network would be allocated to one device. - * The number of devices for the tag is specified in the hddl_service.config file. - * Example: - * "service_settings": - * { - * "graph_tag_map": - * { - * "tagA":3 - * } - * } - * It means that an executable network marked with tagA will be executed on 3 devices - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This config makes the executable networks to be allocated on one certain device (instead of multiple devices). - * And all inference through this executable network, will be done on this device. - * Note: Only one network would be allocated to one device. - * The number of devices which will be used for stream-affinity must be specified in hddl_service.config file. - * Example: - * "service_settings": - * { - * "stream_device_number":5 - * } - * It means that 5 device will be used for stream-affinity - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_STREAM_ID); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: Arbitrary non-empty string. If empty (""), equals no set, default: ""; - * This config allows user to control device flexibly. This config gives a "tag" for a certain device while - * allocating a network to it. Afterward, user can allocating/deallocating networks to this device with this "tag". - * Devices used for such use case is controlled by a so-called "Bypass Scheduler" in HDDL backend, and the number - * of such device need to be specified in hddl_service.config file. - * Example: - * "service_settings": - * { - * "bypass_device_number": 5 - * } - * It means that 5 device will be used for Bypass scheduler. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: "YES/NO", default is "NO". - * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set. After a user load a - * network, the user got a handle for the network. - * If "YES", the network allocated is bind to the device (with the specified "DEVICE_TAG"), which means all afterwards - * inference through this network handle will be executed on this device only. 
- * If "NO", the network allocated is not bind to the device (with the specified "DEVICE_TAG"). If the same network - * is allocated on multiple other devices (also set BIND_DEVICE to "False"), then inference through any handle of these - * networks may be executed on any of these devices those have the network loaded. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: A signed int wrapped in a string, default is "0". - * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False". - * When there are multiple devices running a certain network (a same network running on multiple devices in Bypass - * Scheduler), the device with a larger number has a higher priority, and more inference tasks will be fed to it with - * priority. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: "YES/NO", default is "NO". - * SGAD is short for "Single Graph All Device". With this scheduler, once application allocates 1 network, all devices - * (managed by SGAD scheduler) will be loaded with this graph. The number of network that can be loaded to one device - * can exceed one. Once application deallocates 1 network from device, all devices will unload the network from them. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_USE_SGAD); - -/** - * @brief [Only for OpenVINO Intel HDDL device] - * Type: A signed int wrapped in a string, default is "0". - * This config gives a "group id" for a certain device when this device has been reserved for certain client, client - * can use this device grouped by calling this group id while other client can't use this device - * Each device has their own group id. Device in one group shares same group id. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(HDDL_GROUP_DEVICE); - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/myriad_config.hpp b/src/inference/include/ie/vpu/myriad_config.hpp deleted file mode 100644 index 52d490b87d1b86..00000000000000 --- a/src/inference/include/ie/vpu/myriad_config.hpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines advanced related properties for Myriad plugin. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file myriad_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "vpu_config.hpp" - -namespace InferenceEngine { - -/** - * @brief The flag to reset stalled devices. 
- * This is a plugin scope option and must be used with the plugin's SetConfig method - * The only possible values are: - * CONFIG_VALUE(YES) - * CONFIG_VALUE(NO) (default value) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_FORCE_RESET); - -/** - * @brief This option allows to specify device memory type. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_TYPE); - -/** - * @brief Supported keys definition for InferenceEngine::MYRIAD_DDR_TYPE option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_AUTO); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_SAMSUNG_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_HYNIX_2GB); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_1GB); - -/** - * @brief This option allows to specify protocol. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PROTOCOL); - -/** - * @brief Supported keys definition for InferenceEngine::MYRIAD_PROTOCOL option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_PCIE); -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_USB); - -/** - * @brief Optimize MYRIAD plugin execution to maximize throughput. - * This option should be used with integer value which is the requested number of streams. - * The only possible values are: - * 1 - * 2 - * 3 - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS); - -/** - * @brief Default key definition for InferenceEngine::MYRIAD_THROUGHPUT_STREAMS option. - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS_AUTO); - -} // namespace InferenceEngine diff --git a/src/inference/include/ie/vpu/vpu_config.hpp b/src/inference/include/ie/vpu/vpu_config.hpp deleted file mode 100644 index 1755ed4d9fff0c..00000000000000 --- a/src/inference/include/ie/vpu/vpu_config.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header that defines common config subset for VPU plugins. - * Include myriad_config.hpp or hddl_config.hpp directly. - * These properties should be used in SetConfig() and LoadNetwork() methods of plugins - * - * @file vpu_config.hpp - */ - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED) -# define IE_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "ie_api.h" -#include "ie_plugin_config.hpp" - -#define DECLARE_VPU_CONFIG(name) static constexpr auto name = #name - -namespace InferenceEngine { - -// -// Common options -// - -/** - * @brief Turn on HW stages usage (applicable for MyriadX devices only). - * The only possible values are: - * CONFIG_VALUE(YES) (default value) - * CONFIG_VALUE(NO) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION); - -/** - * @brief The flag for adding to the profiling information the time of obtaining a tensor. 
- * The only possible values are: - * CONFIG_VALUE(YES) - * CONFIG_VALUE(NO) (default value) - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_ENABLE_RECEIVING_TENSOR_TIME); - -/** - * @brief This option allows to pass custom layers binding xml. - * If layer is present in such an xml, it would be used during inference even if the layer is natively supported - */ -INFERENCE_ENGINE_1_0_DEPRECATED DECLARE_VPU_CONFIG(MYRIAD_CUSTOM_LAYERS); - -} // namespace InferenceEngine diff --git a/src/inference/src/blob_factory.cpp b/src/inference/src/blob_factory.cpp index 8408158970202a..dcf7181944502e 100644 --- a/src/inference/src/blob_factory.cpp +++ b/src/inference/src/blob_factory.cpp @@ -20,7 +20,3 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::Tenso const std::shared_ptr& alloc) { return make_blob_with_precision(desc.getPrecision(), desc, alloc); } - -InferenceEngine::Blob::Ptr make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVector dims) { - return make_blob_with_precision({prec, dims, InferenceEngine::TensorDesc::getLayoutByDims(dims)}); -} diff --git a/src/inference/src/blob_transform.cpp b/src/inference/src/blob_transform.cpp index 0ccbe03a388a14..321bef03dc997b 100644 --- a/src/inference/src/blob_transform.cpp +++ b/src/inference/src/blob_transform.cpp @@ -4,7 +4,7 @@ #include "blob_transform.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" #ifdef HAVE_SSE # include "cpu_x86_sse42/blob_transform_sse42.hpp" #endif @@ -53,7 +53,7 @@ static void blob_copy_4d_t(Blob::Ptr src, Blob::Ptr dst) { #ifdef HAVE_SSE if (src->getTensorDesc().getLayout() == NHWC && dst->getTensorDesc().getLayout() == NCHW && C == 3 && - C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && with_cpu_x86_sse42()) { + C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_4d_split_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -84,7 +84,7 @@ static void blob_copy_4d_t(Blob::Ptr src, Blob::Ptr dst) { } if (src->getTensorDesc().getLayout() == NCHW && dst->getTensorDesc().getLayout() == NHWC && C == 3 && - C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && with_cpu_x86_sse42()) { + C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_4d_merge_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -214,7 +214,7 @@ static void blob_copy_5d_t(Blob::Ptr src, Blob::Ptr dst) { #ifdef HAVE_SSE if (src->getTensorDesc().getLayout() == NDHWC && dst->getTensorDesc().getLayout() == NCDHW && C == 3 && - C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && with_cpu_x86_sse42()) { + C_src_stride == 1 && W_src_stride == 3 && W_dst_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_5d_split_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), @@ -251,7 +251,7 @@ static void blob_copy_5d_t(Blob::Ptr src, Blob::Ptr dst) { } if (src->getTensorDesc().getLayout() == NCDHW && dst->getTensorDesc().getLayout() == NDHWC && C == 3 && - C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && with_cpu_x86_sse42()) { + C_dst_stride == 1 && W_dst_stride == 3 && W_src_stride == 1 && ov::with_cpu_x86_sse42()) { if (PRC == Precision::U8) { blob_copy_5d_merge_u8c3(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr), diff --git a/src/inference/src/cnn_network_ngraph_impl.cpp 
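
> The `blob_transform.cpp` hunks above swap `ie_system_conf.h` for `openvino/runtime/system_conf.hpp`; the capability checks keep their names under `ov::`. For reference, a tiny standalone probe:

```cpp
#include <cstdio>

#include "openvino/runtime/system_conf.hpp"

int main() {
    // Same runtime CPU-feature checks, now spelled with the ov:: prefix.
    std::printf("SSE4.2: %d\n", static_cast<int>(ov::with_cpu_x86_sse42()));
    std::printf("AVX2:   %d\n", static_cast<int>(ov::with_cpu_x86_avx2()));
    return 0;
}
```
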
b/src/inference/src/cnn_network_ngraph_impl.cpp index 74f80f39e2f641..5a972bd7a347a2 100644 --- a/src/inference/src/cnn_network_ngraph_impl.cpp +++ b/src/inference/src/cnn_network_ngraph_impl.cpp @@ -16,7 +16,6 @@ #include "blob_factory.hpp" #include "cpp/ie_cnn_network.h" #include "ie_common.h" -#include "ie_memcpy.h" #include "ie_ngraph_utils.hpp" #include "itt.hpp" #include "ngraph/graph_util.hpp" @@ -216,7 +215,6 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) { DataPtr input = std::make_shared(name, inData->getTensorDesc()); _data[name] = input; info->setInputData(input); - info->getPreProcess() = inputInfo.second->getPreProcess(); info->setPrecision(inputInfo.second->getPrecision()); info->setLayout(inputInfo.second->getLayout()); _inputData[name] = info; @@ -263,7 +261,7 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, try { for (const auto& layer : _ngraph_function->get_ops()) { // Result can have the same name as previous operation - if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { + if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { // Check that output port exists if (layer->outputs().size() <= outputIndex) { return DescriptionBuffer(OUT_OF_BOUNDS, resp) @@ -277,10 +275,10 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, // Check that we don't have a result for the output port for (const auto& port : layer->output(outputIndex).get_target_inputs()) { - if (dynamic_cast(port.get_node())) + if (dynamic_cast(port.get_node())) return OK; } - auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); + auto result = make_shared<::ov::op::v0::Result>(layer->output(outputIndex)); result->set_friendly_name(outputName); _ngraph_function->add_results({result}); // Check that we cannot add Result to layer with non unique friendly name diff --git a/src/inference/src/compilation_context.cpp b/src/inference/src/compilation_context.cpp index 5c9b789b883518..72c83811b74aa7 100644 --- a/src/inference/src/compilation_context.cpp +++ b/src/inference/src/compilation_context.cpp @@ -111,24 +111,6 @@ std::string ModelCache::compute_hash(const std::shared_ptr& mod if (it != rt_info.end()) { seed = calculate_td(it->second.as(), seed); } - - it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - auto preproc = it->second.as(); - - seed = ov::hash_combine(seed, ov::as_int32_t(preproc.getMeanVariant())); - - if (preproc.getMeanVariant() == InferenceEngine::MeanVariant::MEAN_VALUE) { - seed = ov::hash_combine(seed, preproc.getNumberOfChannels()); - for (size_t c = 0; c < preproc.getNumberOfChannels(); ++c) { - const InferenceEngine::PreProcessChannel::Ptr& channelInfo = preproc[c]; - seed = ov::hash_combine(seed, channelInfo->stdScale); - seed = ov::hash_combine(seed, channelInfo->meanValue); - } - } else if (preproc.getMeanVariant() == InferenceEngine::MeanVariant::MEAN_IMAGE) { - // TODO: think if we need to compute hash for mean image if it exists - } - } } for (auto&& output : model->outputs()) { auto& rt_info = output.get_rt_info(); diff --git a/src/inference/src/cpp/ie_executable_network.cpp b/src/inference/src/cpp/ie_executable_network.cpp index ede5dface96c7e..63b74987546228 100644 --- a/src/inference/src/cpp/ie_executable_network.cpp +++ b/src/inference/src/cpp/ie_executable_network.cpp @@ -10,7 +10,6 @@ #include "ie_common.h" #include "ie_executable_network_base.hpp" #include "ie_plugin_config.hpp" -#include 
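
> The `addOutput` hunk above is a mechanical move from the `ngraph::op` aliases to the canonical `ov::op::v0` types; the same pattern applies wherever `ngraph::op::Result` still appears. A hypothetical standalone version of that pattern:

```cpp
#include "openvino/core/model.hpp"
#include "openvino/op/result.hpp"

// Exposes an extra output port by attaching a Result, as the patched
// addOutput does internally.
std::shared_ptr<ov::op::v0::Result> expose_output(const std::shared_ptr<ov::Model>& model,
                                                  const ov::Output<ov::Node>& port) {
    auto result = std::make_shared<ov::op::v0::Result>(port);
    model->add_results({result});
    return result;
}
```
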
"ie_remote_context.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/compiled_model.hpp" @@ -93,10 +92,6 @@ Parameter ExecutableNetwork::GetMetric(const std::string& name) const { EXEC_NET_CALL_STATEMENT(return {_impl->GetMetric(name), {_so}}); } -RemoteContext::Ptr ExecutableNetwork::GetContext() const { - EXEC_NET_CALL_STATEMENT(return _impl->GetContext()); -} - bool ExecutableNetwork::operator!() const noexcept { return !_impl; } diff --git a/src/inference/src/cpp/ie_executable_network_base.hpp b/src/inference/src/cpp/ie_executable_network_base.hpp index 7afe0b8d901f8c..f1a5cffa38b0f6 100644 --- a/src/inference/src/cpp/ie_executable_network_base.hpp +++ b/src/inference/src/cpp/ie_executable_network_base.hpp @@ -17,7 +17,6 @@ #include "cpp/exception2status.hpp" #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp" -#include "ie_remote_context.hpp" #include "ie_iexecutable_network.hpp" #include "ie_infer_async_request_base.hpp" @@ -81,10 +80,6 @@ class ExecutableNetworkBase : public IExecutableNetwork { TO_STATUS(result = _impl->GetMetric(name)); } - StatusCode GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const noexcept override { - TO_STATUS(pContext = _impl->GetContext()); - } - std::shared_ptr GetImpl() const { return _impl; } diff --git a/src/inference/src/cpp/ie_infer_async_request_base.hpp b/src/inference/src/cpp/ie_infer_async_request_base.hpp index 679aa892b3efa2..79415ba6cb34b5 100644 --- a/src/inference/src/cpp/ie_infer_async_request_base.hpp +++ b/src/inference/src/cpp/ie_infer_async_request_base.hpp @@ -12,7 +12,6 @@ #include "cpp_interfaces/plugin_itt.hpp" #include #include "ie_iinfer_request.hpp" -#include "ie_preprocess.hpp" namespace InferenceEngine { @@ -138,10 +137,6 @@ class InferRequestBase : public IInferRequest { TO_STATUS(data = _impl->GetBlob(name)); } - StatusCode GetPreProcess(const char* name, const PreProcessInfo** info, ResponseDesc *resp) const noexcept override { - TO_STATUS(*info = &(_impl->GetPreProcess(name))); - } - StatusCode StartAsync(ResponseDesc* resp) noexcept override { OV_ITT_SCOPED_TASK(itt::domains::Plugin, "StartAsync"); TO_STATUS(_impl->StartAsync()); diff --git a/src/inference/src/cpp/ie_infer_request.cpp b/src/inference/src/cpp/ie_infer_request.cpp index 374273b8d743d5..4b384b3df69e20 100644 --- a/src/inference/src/cpp/ie_infer_request.cpp +++ b/src/inference/src/cpp/ie_infer_request.cpp @@ -13,7 +13,6 @@ #include "dev/converter_utils.hpp" #include "ie_infer_async_request_base.hpp" #include "ie_ngraph_utils.hpp" -#include "ie_remote_context.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/exception.hpp" #include "openvino/runtime/infer_request.hpp" @@ -52,16 +51,11 @@ Blob::Ptr InferRequest::GetBlob(const std::string& name) { std::string error = "Internal error: blob with name `" + name + "` is not allocated!"; if (blobPtr == nullptr) IE_THROW() << error; - const bool remoteBlobPassed = blobPtr->is(); - if (!remoteBlobPassed && blobPtr->buffer() == nullptr) + if (blobPtr->buffer() == nullptr) IE_THROW() << error; return blobPtr; } -const PreProcessInfo& InferRequest::GetPreProcess(const std::string& name) const { - INFER_REQ_CALL_STATEMENT(return _impl->GetPreProcess(name);) -} - void InferRequest::Infer() { INFER_REQ_CALL_STATEMENT(_impl->Infer();) } diff --git a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp 
b/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp index 965fcf5200c9aa..88d84f6e5fe719 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp @@ -108,10 +108,6 @@ Parameter IExecutableNetworkInternal::GetMetric(const std::string&) const { IE_THROW(NotImplemented); } -std::shared_ptr IExecutableNetworkInternal::GetContext() const { - IE_THROW(NotImplemented); -} - std::shared_ptr IExecutableNetworkInternal::CreateInferRequestImpl( InputsDataMap networkInputs, OutputsDataMap networkOutputs) { diff --git a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp index adf7a0a7dbda04..cf4dd6a59c28aa 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp @@ -4,23 +4,18 @@ #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" -#include #include #include -#include #include #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "cpp_interfaces/plugin_itt.hpp" -#include "debug.h" -#include "ie_algorithm.hpp" #include "ie_blob.h" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_ngraph_utils.hpp" -#include "ie_preprocess.hpp" -#include "ie_remote_context.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/util/common_util.hpp" #include "transformations/utils/utils.hpp" namespace InferenceEngine { @@ -122,9 +117,7 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us const auto input = findInputByNodeName(name); const auto output = findOutputByNodeName(name); - const bool compoundBlobPassed = userBlob->is(); - const bool remoteBlobPassed = userBlob->is(); - if (!compoundBlobPassed && !remoteBlobPassed && userBlob->buffer() == nullptr) + if (userBlob->buffer() == nullptr) IE_THROW(NotAllocated) << "Input data was not allocated. Input name: \'" << name << "\'"; if (userBlob->size() == 0 && !((input && input->get_output_partial_shape(0).is_dynamic()) || (output && output->get_output_partial_shape(0).is_dynamic()))) { @@ -143,12 +136,9 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } auto& devBlob = _deviceInputs[name]; - if (compoundBlobPassed) { - IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; - } size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? InferenceEngine::details::product(foundInput->getTensorDesc().getDims()) + ? ov::util::product(foundInput->getTensorDesc().getDims()) : 1; if (!isInputDynamic && dataSize != inputSize) { IE_THROW() << "Input tensor size is not equal network input size (" << dataSize << "!=" << inputSize @@ -156,13 +146,9 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } _inputs[name] = userBlob; devBlob = userBlob; - _batched_inputs.erase(name); } else { - if (compoundBlobPassed) { - IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; - } size_t outputSize = foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? details::product(foundOutput->getTensorDesc().getDims()) + ? 
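
> `InferenceEngine::details::product` gives way to `ov::util::product` throughout this file (hence the new `openvino/util/common_util.hpp` include); both fold a dims vector into an element count. For reference:

```cpp
#include <cstddef>
#include <vector>

#include "openvino/util/common_util.hpp"

// {1, 3, 224, 224} -> 150528, matching the old details::product behaviour
// for non-SCALAR layouts (SCALAR keeps the explicit size of 1).
size_t element_count(const std::vector<size_t>& dims) {
    return ov::util::product(dims);
}
```
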
ov::util::product(foundOutput->getTensorDesc().getDims()) : 1; if (!isOutputDynamic && dataSize != outputSize) { IE_THROW() << "Output blob size is not equal network output size (" << dataSize << "!=" << outputSize @@ -181,180 +167,6 @@ void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& us } } -void IInferRequestInternal::SetBlobs(const std::string& name, const std::vector& blobs) { - if (blobs.size() == 1) { - SetBlob(name, blobs[0]); - return; - } - - bool all_memory = std::all_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& item) { - return item && item->is() && !item->is(); - }); - OPENVINO_ASSERT(all_memory, - "set_input_tensors/set_tensors error. Default implementation support only local memory tensors"); - - checkBlobsForBatch(name, blobs); - - SetBlobsImpl(name, std::make_shared(blobs)); -} - -void IInferRequestInternal::SetBlobsImpl(const std::string& name, const BatchedBlob::Ptr& batched_blob) { - IE_THROW(NotImplemented) << "set_input_tensors/set_tensors are not supported by this plugin"; -} - -void IInferRequestInternal::checkBlobsForBatch(const std::string& name, const std::vector& blobs) { - OPENVINO_ASSERT(!blobs.empty(), - "set_input_tensors/set_tensors can't be called with empty blobs for input '", - name, - "'"); - OPENVINO_ASSERT(blobs.size() != 1, - "Internal error (plugin): checkBlobsForBatch is not allowed to have only one blob inside batch " - "for input '", - name, - "'"); - - std::shared_ptr param; - const auto& inputs = GetInputs(); - for (const auto& input : inputs) { - if (auto p = std::dynamic_pointer_cast(input)) { - if (name == p->get_friendly_name()) { - param = p; - break; - } - } - } - OPENVINO_ASSERT(param, "set_input_tensors/set_tensors error. Parameter '", name, "' is not found"); - OPENVINO_ASSERT(ov::layout::has_batch(param->get_layout()), - "set_input_tensors/set_tensors can be used only for inputs with N(batch) dimension" - " 'layout' defined. Current layout for '", - name, - "' is ", - param->get_layout().to_string()); - auto batch_idx = ov::layout::batch_idx(param->get_layout()); - if (batch_idx < 0) { - batch_idx += static_cast(blobs[0]->getTensorDesc().getDims().size()); - } - OPENVINO_ASSERT(batch_idx == 0, - "set_input_tensors/set_tensors is not currently supported for batch dimension index ", - batch_idx, - " != 0"); - std::for_each(blobs.begin(), blobs.end(), [&batch_idx](const Blob::Ptr& item) { - OPENVINO_ASSERT(item->getTensorDesc().getDims()[batch_idx] == 1, - "set_input_tensors/set_tensors. Tensors shall represent one item in a batch, ", - item->getTensorDesc().getDims()[batch_idx], - " provided"); - }); - auto blobs_size = static_cast(blobs.size()); - if (param->get_partial_shape().rank().is_static()) { - OPENVINO_ASSERT(batch_idx >= 0 && batch_idx < param->get_partial_shape().rank().get_length(), - "set_input_tensors/set_tensors error. Layout ", - param->get_layout().to_string(), - " is incorrect for operation with name '", - name, - "' with shape ", - param->get_partial_shape()); - auto batch = param->get_partial_shape()[batch_idx]; - - OPENVINO_ASSERT(batch.is_dynamic() || batch.get_length() == blobs_size, - "set_input_tensors/set_tensors error. 
Input shape ", - param->get_partial_shape(), - "batch ", - batch, - "doesn't match with total blobs count: ", - blobs_size); - } - - // In future consider checking if blobs point to contiguous range of memory and use single 'SetBlob' instead - auto tmp_desc = blobs[0]->getTensorDesc(); - tmp_desc.getDims()[batch_idx] = blobs_size; - auto blockingDims = tmp_desc.getBlockingDesc().getBlockDims(); - blockingDims[batch_idx] = blobs_size; - auto blockingDesc = BlockingDesc(blockingDims, tmp_desc.getBlockingDesc().getOrder()); - auto batched_desc = InferenceEngine::TensorDesc(tmp_desc.getPrecision(), tmp_desc.getDims(), blockingDesc); - auto desc_to_string = [](const TensorDesc& desc) { - std::stringstream s; - s << "{ " << desc.getLayout() << " " << desc.getPrecision().name(); - s << "dim=("; - for (const auto& d : desc.getDims()) { - s << " " << d; - } - s << " ) }"; - return s.str(); - }; - for (const auto& item : blobs) { - auto item_desc = item->getTensorDesc(); - item_desc.getDims()[batch_idx] = batched_desc.getDims()[batch_idx]; - OPENVINO_ASSERT(item_desc.getDims() == batched_desc.getDims() && - item_desc.getLayout() == batched_desc.getLayout() && - item_desc.getPrecision() == batched_desc.getPrecision() && - item_desc.getBlockingDesc().getOrder() == batched_desc.getBlockingDesc().getOrder(), - "set_input_tensors/set_tensors error. Blob ", - desc_to_string(item_desc), - " is not compatible with batched blob ", - desc_to_string(batched_desc)); - } -} - -void IInferRequestInternal::convertBatchedInputBlob(const std::string& name, const BatchedBlob::Ptr& batched_blob) { - auto tmp_desc = batched_blob->getBlob(0)->getTensorDesc(); - tmp_desc.getDims()[0] = batched_blob->size(); - auto blockingDims = tmp_desc.getBlockingDesc().getBlockDims(); - blockingDims[0] = batched_blob->size(); - auto blockingDesc = BlockingDesc(blockingDims, tmp_desc.getBlockingDesc().getOrder()); - auto batched_desc = InferenceEngine::TensorDesc(tmp_desc.getPrecision(), tmp_desc.getDims(), blockingDesc); - std::shared_ptr remote_context; - MemoryBlob::Ptr mem_blob; - try { - auto net = getPointerToExecutableNetworkInternal(); - if (net) { - remote_context = net->GetContext(); - } - } catch (const InferenceEngine::NotImplemented&) { - } - if (remote_context) { - mem_blob = remote_context->CreateHostBlob(batched_desc); - } else { - mem_blob = std::dynamic_pointer_cast(make_blob_with_precision(batched_desc)); - } - OPENVINO_ASSERT(mem_blob, "Internal error - can't create host memory blob"); - mem_blob->allocate(); - auto ptr = mem_blob->wmap(); - - // Perform memory copy - InferenceEngine::parallel_for(batched_blob->size(), [&](size_t i) { - const auto& blob = as(batched_blob->getBlob(i)); - OPENVINO_ASSERT(mem_blob, "Internal error - can't cast blob ", i, " to MemoryBlob"); - const auto& blob_desc = blob->getTensorDesc().getBlockingDesc(); - bool offsets_0 = std::all_of(blob_desc.getOffsetPaddingToData().begin(), - blob_desc.getOffsetPaddingToData().end(), - [](size_t dim) { - return dim == 0; - }); - OPENVINO_ASSERT(offsets_0, - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. All tensors offsets shall be 0"); - OPENVINO_ASSERT(mem_blob->getTensorDesc().getBlockingDesc().getOrder() == blob_desc.getOrder(), - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. 
Axis order shall be default"); - OPENVINO_ASSERT(mem_blob->getTensorDesc().getBlockingDesc().getStrides() == blob_desc.getStrides(), - "set_tensors/set_input_tensors - default combining is not supported for " - "ROI tensors. Input blobs shall have default strides set"); - memcpy(ptr.as() + i * blob->byteSize(), - blob->rmap().as() + - blob->getTensorDesc().getBlockingDesc().getOffsetPadding() * blob->element_size(), - blob->byteSize()); - }); - SetBlob(name, mem_blob); -} - -void IInferRequestInternal::convertBatchedInputBlobs() { - auto batched_copy = _batched_inputs; - for (const auto& item : batched_copy) { - convertBatchedInputBlob(item.first, item.second); - } - _batched_inputs = batched_copy; -} - Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { OV_ITT_SCOPED_TASK(itt::domains::Plugin, "GetBlob"); Blob::Ptr data; @@ -383,23 +195,6 @@ Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { return data; } -BatchedBlob::Ptr IInferRequestInternal::GetBlobs(const std::string& name) { - if (_batched_inputs.count(name)) { - return _batched_inputs.at(name); - } - return nullptr; -} - -const PreProcessInfo& IInferRequestInternal::GetPreProcess(const std::string& name) const { - InputInfo::Ptr foundInput; - DataPtr foundOutput; - if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { - return foundInput->getPreProcess(); - } else { - IE_THROW() << "Output blob can't have pre-processing"; - } -} - std::vector> IInferRequestInternal::QueryState() { IE_THROW(NotImplemented); } @@ -483,7 +278,7 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, const auto input = findInputByNodeName(name); isDynamic = input && input->get_output_partial_shape(0).is_dynamic(); dims = foundInputPair->second->getTensorDesc().getDims(); - refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? details::product(dims) : 1; + refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? ov::util::product(dims) : 1; } else { auto foundOutputPair = std::find_if(std::begin(_networkOutputs), std::end(_networkOutputs), @@ -503,17 +298,16 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, // need to immediately throw here dims = foundOutputPair->second->getTensorDesc().getDims(); } - refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? details::product(dims) : 1; + refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? 
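
> The `SetBlobs`/`BatchedBlob` plumbing deleted in this file is superseded by `ov::InferRequest::set_tensors`, the call that the (also deleted) wrapper in `converter_utils.cpp` forwarded to. A usage sketch, assuming the input has a batch ('N') dimension in its layout, as the removed `checkBlobsForBatch` required:

```cpp
#include <vector>

#include "openvino/runtime/infer_request.hpp"

// One tensor per batch item; the runtime combines them, which is what
// convertBatchedInputBlob used to do by copying into a single batched blob.
void infer_batched(ov::InferRequest& request,
                   const ov::Output<const ov::Node>& input,
                   const std::vector<ov::Tensor>& items) {
    request.set_tensors(input, items);
    request.infer();
}
```
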
ov::util::product(dims) : 1; } } else { - refSize = details::product(refDims); + refSize = ov::util::product(refDims); } if (!isDynamic && refSize != blob->size()) { IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize; } - const bool remoteBlobPassed = blob->is(); - if (!remoteBlobPassed && blob->buffer() == nullptr) + if (blob->buffer() == nullptr) IE_THROW() << strNotAllocated; } diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index a4fe56fb58ab9b..5d67f5035b82fa 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -23,12 +23,11 @@ #include "cnn_network_ngraph_impl.hpp" #include "cpp/ie_cnn_network.h" #include "dev/converter_utils.hpp" -#include "exec_graph_info.hpp" -#include "ie_algorithm.hpp" #include "ie_api.h" #include "ie_icore.hpp" #include "ie_iextension.h" #include "ie_input_info.hpp" +#include "ie_memcpy.h" #include "ie_ngraph_utils.hpp" #include "ie_parameter.hpp" #include "openvino/core/deprecated.hpp" @@ -37,33 +36,18 @@ #include "openvino/core/runtime_attribute.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "openvino/runtime/threading/executor_manager.hpp" #include "transformations/utils/utils.hpp" namespace InferenceEngine { -PreProcessInfo copyPreProcess(const PreProcessInfo& from) { - PreProcessInfo to = from; - if (from.getMeanVariant() == MEAN_IMAGE) { - for (size_t i = 0; i < from.getNumberOfChannels(); i++) { - auto& from_blob = from[i]->meanData; - auto to_blob = make_blob_with_precision(from[i]->meanData->getTensorDesc()); - to_blob->allocate(); - ie_memcpy(to_blob->buffer(), to_blob->byteSize(), from_blob->cbuffer(), from_blob->byteSize()); - - to.setMeanImageForChannel(to_blob, i); - } - } - return to; -} - InputsDataMap copyInfo(const InputsDataMap& networkInputs) { InputsDataMap _networkInputs; for (const auto& it : networkInputs) { InputInfo::Ptr newPtr; if (it.second) { newPtr = std::make_shared(); - newPtr->getPreProcess() = it.second->getPreProcess(); newPtr->setInputData(std::make_shared(*it.second->getInputData())); } _networkInputs.emplace(it.first, newPtr); @@ -118,12 +102,6 @@ void IInferencePlugin::SetName(const std::string& pluginName) noexcept { _pluginName = pluginName; } -std::shared_ptr IInferencePlugin::LoadNetwork( - const CNNNetwork& network, - const std::map& config) { - return LoadNetwork(network, config, nullptr); -} - template std::map> const_map_cast(const std::map>& map) { std::map> res; @@ -134,8 +112,7 @@ std::map> const_map_cast(const std::map IInferencePlugin::LoadNetwork( const CNNNetwork& orig_network, - const std::map& config, - const std::shared_ptr& context) { + const std::map& config) { std::shared_ptr impl; // if IR `version` is not set, suppose it's IR v10 for old API @@ -170,7 +147,6 @@ std::shared_ptr IInferencePlugin::LoadNetwork( auto toInfo = network.getInputsInfo().at(inputInfo.first); toInfo->setPrecision(inputInfo.second->getPrecision()); toInfo->setLayout(inputInfo.second->getLayout()); - toInfo->getPreProcess() = inputInfo.second->getPreProcess(); } for (const auto& outputInfo : orig_network.getOutputsInfo()) { auto toInfo = network.getOutputsInfo().at(outputInfo.first); @@ -181,11 +157,7 @@ std::shared_ptr IInferencePlugin::LoadNetwork( } } - if (nullptr == context) { - impl = 
LoadExeNetworkImpl(network, config); - } else { - impl = LoadExeNetworkImpl(network, context, config); - } + impl = LoadExeNetworkImpl(network, config); SetExeNetworkInfo(impl, const_map_cast(network.getInputsInfo()), const_map_cast(network.getOutputsInfo())); if (function) { @@ -221,14 +193,6 @@ Parameter IInferencePlugin::GetMetric(const std::string&, const std::map IInferencePlugin::CreateContext(const ParamMap&) { - IE_THROW(NotImplemented); -} - -std::shared_ptr IInferencePlugin::GetDefaultContext(const ParamMap&) { - IE_THROW(NotImplemented); -} - std::shared_ptr IInferencePlugin::ImportNetwork( const std::string& modelFileName, const std::map& config) { @@ -247,13 +211,6 @@ std::shared_ptr IInferencePlugin::ImportNetwork( IE_THROW(NotImplemented); } -std::shared_ptr IInferencePlugin::ImportNetwork( - std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config) { - IE_THROW(NotImplemented); -} - void IInferencePlugin::SetCore(std::weak_ptr core) { IE_ASSERT(!core.expired()); _core = core; @@ -285,13 +242,6 @@ std::shared_ptr IInferencePlugin::LoadExeNetworkImpl IE_THROW(NotImplemented); } -std::shared_ptr IInferencePlugin::LoadExeNetworkImpl( - const CNNNetwork&, - const std::shared_ptr&, - const std::map&) { - IE_THROW(NotImplemented); -} - void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr& exeNetwork, const ConstInputsDataMap& inputs, const ConstOutputsDataMap& outputs) { @@ -323,7 +273,7 @@ std::unordered_set GetRemovedNodes(const std::shared_ptrget_ops()) { - if (!InferenceEngine::details::contains(transformedNodeNames, originalNode->get_friendly_name())) + if (transformedNodeNames.find(originalNode->get_friendly_name()) == transformedNodeNames.end()) result.emplace(originalNode->get_friendly_name()); } diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp index e5c4e4c332448c..e4824e2d7d6f67 100644 --- a/src/inference/src/dev/converter_utils.cpp +++ b/src/inference/src/dev/converter_utils.cpp @@ -16,7 +16,6 @@ #include "icompiled_model_wrapper.hpp" #include "ie_blob.h" #include "ie_common.h" -#include "ie_compound_blob.h" #include "ie_icore.hpp" #include "ie_input_info.hpp" #include "ie_layouts.h" @@ -31,17 +30,14 @@ #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/iinfer_request.hpp" #include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/ivariable_state.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/profiling_info.hpp" -#include "openvino/runtime/remote_context.hpp" #include "openvino/runtime/so_ptr.hpp" #include "openvino/runtime/tensor.hpp" #include "openvino/runtime/threading/executor_manager.hpp" #include "openvino/runtime/variable_state.hpp" -#include "remote_context_wrapper.hpp" #include "transformations/utils/utils.hpp" #ifdef PROXY_PLUGIN_ENABLED @@ -62,11 +58,7 @@ void fill_input_info(ov::Output& input, InferenceEngine::InputInfo::Pt const ov::Output const_input(input.get_node(), input.get_index()); ov::legacy_convert::fill_input_info(const_input, input_info); auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - rt_info.erase(it); - } - it = rt_info.find("ie_legacy_td"); + auto it = rt_info.find("ie_legacy_td"); if (it != rt_info.end()) { rt_info.erase(it); } @@ -106,11 +98,7 @@ void ov::legacy_convert::fill_input_info(const ov::Output& input 
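The only change in `GetRemovedNodes` is replacing `InferenceEngine::details::contains` with an explicit `find()`/`end()` comparison, part of shedding the IE helper headers. Distilled into a standalone sketch:

```cpp
#include <string>
#include <unordered_set>

// Names present in the original model but absent from the transformed one
// are reported as removed.
std::unordered_set<std::string> removed_names(const std::unordered_set<std::string>& original,
                                              const std::unordered_set<std::string>& transformed) {
    std::unordered_set<std::string> result;
    for (const auto& name : original)
        if (transformed.find(name) == transformed.end())  // the new contains-check
            result.emplace(name);
    return result;
}
```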
input_info->setInputData(data); } auto& rt_info = input.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - input_info->getPreProcess() = it->second.as(); - } - it = rt_info.find("ie_legacy_td"); + auto it = rt_info.find("ie_legacy_td"); if (it != rt_info.end()) { auto td = it->second.as(); input_info->getInputData()->reshape(td.getDims(), td.getLayout()); @@ -176,7 +164,6 @@ std::shared_ptr ov::legacy_convert::convert_model(const Inferen auto input_info = network.getInputsInfo().at(param_name); auto& rt_info = input.get_rt_info(); - rt_info["ie_legacy_preproc"] = input_info->getPreProcess(); rt_info["ie_legacy_td"] = input_info->getTensorDesc(); } for (auto&& result : cloned_model->get_results()) { @@ -253,17 +240,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { m_plugin._so}); } - std::shared_ptr LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::map& config, - const std::shared_ptr& context) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->compile_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()), - ov::any_copy(config), - ov::legacy_convert::convert_remote_context(context)), - m_plugin._so}); - } - ov::SoPtr LoadNetwork( const std::string& modelPath, const std::map& config) override { @@ -297,15 +273,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { return m_plugin->get_property(name, options); } - std::shared_ptr CreateContext(const InferenceEngine::ParamMap& params) override { - return ov::legacy_convert::convert_remote_context(m_plugin->create_context(params)); - } - - std::shared_ptr GetDefaultContext( - const InferenceEngine::ParamMap& params) override { - return ov::legacy_convert::convert_remote_context(m_plugin->get_default_context(params)); - } - std::shared_ptr ImportNetwork( const std::string& modelFileName, const std::map& config) override { @@ -321,17 +288,6 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { {m_plugin->import_model(networkModel, ov::any_copy(config)), m_plugin._so}); } - std::shared_ptr ImportNetwork( - std::istream& networkModel, - const std::shared_ptr& context, - const std::map& config) override { - return ov::legacy_convert::convert_compiled_model( - {m_plugin->import_model(networkModel, - ov::legacy_convert::convert_remote_context(context), - ov::any_copy(config)), - m_plugin._so}); - } - void SetCore(std::weak_ptr core) override { return m_plugin->set_core(std::dynamic_pointer_cast(core.lock())); } @@ -453,10 +409,6 @@ class IExecutableNetworkWrapper : public InferenceEngine::IExecutableNetworkInte return m_model->get_property(name); } - std::shared_ptr GetContext() const override { - return ov::legacy_convert::convert_remote_context(m_model->get_context()); - } - ov::SoPtr get_compiled_model() { return m_model; } @@ -551,18 +503,6 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern } } - void SetBlobs(const std::string& name, const std::vector& blobs) override { - try { - std::vector> tensors; - for (const auto& blob : blobs) { - tensors.emplace_back(ov::make_tensor(blob, true)); - } - m_request->set_tensors(find_port(name), tensors); - } catch (const ov::Exception& ex) { - IE_THROW(GeneralError) << ex.what(); - } - } - InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override { auto port = find_port(name); auto& rt_info = port.get_rt_info(); @@ -574,38 +514,6 @@ class IInferRequestInternalWrapper : public 
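Both hunks above keep only the "ie_legacy_td" entry: the legacy TensorDesc still rides along in the port's rt_info, while the preprocessing metadata ("ie_legacy_preproc") no longer does. A sketch of that round-trip pattern as converter_utils uses it; the function names are hypothetical:

```cpp
#include "ie_layouts.h"                   // InferenceEngine::TensorDesc
#include "openvino/core/node.hpp"         // ov::Node, ov::RTMap
#include "openvino/core/node_output.hpp"  // ov::Output

// Store a legacy TensorDesc on a port (what convert_model does)...
void stash_legacy_td(ov::Output<ov::Node> port, const InferenceEngine::TensorDesc& desc) {
    port.get_rt_info()["ie_legacy_td"] = desc;  // held by value inside an ov::Any
}

// ...and recover it later (what fill_input_info does).
bool recover_legacy_td(ov::Output<ov::Node> port, InferenceEngine::TensorDesc& out) {
    auto& rt_info = port.get_rt_info();
    auto it = rt_info.find("ie_legacy_td");
    if (it == rt_info.end())
        return false;
    out = it->second.as<InferenceEngine::TensorDesc>();  // throws if the Any holds another type
    return true;
}
```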
InferenceEngine::IInferRequestIntern return tensor_to_blob(m_request->get_tensor(port), true, desc); } - InferenceEngine::BatchedBlob::Ptr GetBlobs(const std::string& name) override { - auto port = find_port(name); - auto& rt_info = port.get_rt_info(); - auto it = rt_info.find("ie_legacy_td"); - InferenceEngine::TensorDesc desc; - if (it != rt_info.end()) { - desc = it->second.as(); - } - auto tensors = m_request->get_tensors(port); - std::vector blobs; - for (const auto& tensor : tensors) { - blobs.emplace_back(tensor_to_blob(tensor, true, desc)); - } - return std::make_shared(blobs); - } - - const InferenceEngine::PreProcessInfo& GetPreProcess(const std::string& name) const override { -#ifdef PROXY_PLUGIN_ENABLED - if (auto proxy_request = std::dynamic_pointer_cast(m_request._ptr)) { - return ov::legacy_convert::convert_infer_request(proxy_request->get_hardware_request()) - ->GetPreProcess(name); - } -#endif - auto port = find_port(name); - auto& rt_info = port.get_rt_info(); - auto it = rt_info.find("ie_legacy_preproc"); - if (it != rt_info.end()) { - return it->second.as(); - } - OPENVINO_THROW("Cannot find PreProcess info."); - } - std::vector> QueryState() override { auto res = m_request->query_state(); std::vector> ret; @@ -759,43 +667,17 @@ class IAsyncInferRequestWrapper : public ov::IAsyncInferRequest { ov::SoPtr get_tensor(const ov::Output& port) const override { const auto& name = get_legacy_name_from_port(port); - OPENVINO_ASSERT(!m_request->GetBlobs(name), - "get_tensor shall not be used together with batched " - "set_tensors/set_input_tensors for name '", - name, - "'"); auto blob = m_request->GetBlob(name); ov::SoPtr tensor = ov::make_tensor(blob); if (!tensor._so) tensor._so = m_request->getPointerToSo(); return tensor; } + void set_tensor(const ov::Output& port, const ov::SoPtr& tensor) override { m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor, m_unwrap_tensor)); } - std::vector> get_tensors(const ov::Output& port) const override { - auto blobs = m_request->GetBlobs(get_legacy_name_from_port(port)); - std::vector> ret; - if (!blobs) - return ret; - for (size_t i = 0; i < blobs->size(); i++) { - ov::SoPtr tensor = ov::make_tensor(blobs->getBlob(i)); - if (!tensor._so) - tensor._so = m_request->getPointerToSo(); - ret.emplace_back(tensor); - } - return ret; - } - void set_tensors(const ov::Output& port, - const std::vector>& tensors) override { - std::vector blobs; - for (const auto& tensor : tensors) { - blobs.emplace_back(ov::tensor_to_blob(tensor, m_unwrap_tensor)); - } - m_request->SetBlobs(get_legacy_name_from_port(port), blobs); - } - std::vector> query_state() const override { std::vector> variable_states; for (auto&& state : m_request->QueryState()) { @@ -855,60 +737,6 @@ ov::SoPtr<::ov::IAsyncInferRequest> ov::legacy_convert::convert_infer_request( request->getPointerToSo()}; } -namespace InferenceEngine { -const std::shared_ptr& IRemoteContextWrapper::get_context() { - return m_context; -} - -const std::string& IRemoteContextWrapper::get_device_name() const { - m_name = m_context->getDeviceName(); - return m_name; -} - -const ov::AnyMap& IRemoteContextWrapper::get_property() const { - m_params = m_context->getParams(); - return m_params; -} - -ov::SoPtr IRemoteContextWrapper::create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params) { - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type), - shape, - InferenceEngine::TensorDesc::getLayoutByDims(shape)); - auto blob 
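The `tensor._so = m_request->getPointerToSo()` lines are the load-bearing detail in the wrapper above: a tensor handed out of a plugin must keep that plugin's library mapped for as long as it lives. A minimal sketch of the idea behind ov::SoPtr, not its actual definition:

```cpp
#include <memory>

// Not the real ov::SoPtr, just the idea: pair an object with a handle to
// the shared library that produced it.
template <typename T>
struct SoPtrSketch {
    std::shared_ptr<T> _ptr;    // object allocated inside the plugin
    std::shared_ptr<void> _so;  // keeps the plugin .so/.dll loaded

    // Default member destruction runs in reverse declaration order, which
    // would release _so while ~T still needs the library's code. Resetting
    // _ptr first avoids that.
    ~SoPtrSketch() {
        _ptr = {};
    }
};
```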
= m_context->CreateBlob(desc, params); - blob->allocate(); - auto tensor = ov::make_tensor(blob); - return {std::dynamic_pointer_cast(tensor._ptr), tensor._so}; -} - -ov::SoPtr IRemoteContextWrapper::create_host_tensor(const ov::element::Type type, const ov::Shape& shape) { - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(type), - shape, - InferenceEngine::TensorDesc::getLayoutByDims(shape)); - auto blob = m_context->CreateHostBlob(desc); - blob->allocate(); - return ov::make_tensor(blob); -} - -} // namespace InferenceEngine - -std::shared_ptr ov::legacy_convert::convert_remote_context( - const ov::SoPtr& context) { - if (auto ctx = std::dynamic_pointer_cast(context._ptr)) { - return ctx->get_context(); - } - return std::make_shared(context); -} - -ov::SoPtr ov::legacy_convert::convert_remote_context( - const std::shared_ptr& context) { - if (auto ctx = std::dynamic_pointer_cast(context)) { - return ctx->get_context(); - } - return {std::make_shared(context)}; -} - namespace ov { /* diff --git a/src/inference/src/dev/converter_utils.hpp b/src/inference/src/dev/converter_utils.hpp index d121f5a4fa9ac3..ea2de550a51d10 100644 --- a/src/inference/src/dev/converter_utils.hpp +++ b/src/inference/src/dev/converter_utils.hpp @@ -8,14 +8,12 @@ #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "ie_iextension.h" -#include "ie_remote_blob.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/model.hpp" #include "openvino/runtime/iasync_infer_request.hpp" #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/iremote_context.hpp" -#include "remote_utils.hpp" namespace ov { namespace legacy_convert { @@ -40,8 +38,6 @@ ov::SoPtr<::ov::IAsyncInferRequest> convert_infer_request( const std::shared_ptr<::InferenceEngine::IInferRequestInternal>& request, const std::string& plugin_name = ""); -std::shared_ptr convert_remote_context(const ov::SoPtr& context); - std::vector convert_extension(const std::vector& exts); std::vector convert_extension(const std::vector& exts); diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 6489f7e4af8b32..4d9a5a6b8f6545 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -11,6 +11,7 @@ #include "dev/converter_utils.hpp" #include "dev/icompiled_model_wrapper.hpp" #include "dev/iplugin_wrapper.hpp" +#include "ie_plugin_config.hpp" #include "itt.hpp" #include "model_reader.hpp" #include "openvino/core/any.hpp" @@ -1498,8 +1499,10 @@ ov::SoPtr ov::CoreImpl::load_model_from_cache( throw HeaderException(); } - compiled_model = context ? plugin.import_model(networkStream, context, config) - : plugin.import_model(networkStream, config); + ov::AnyMap update_config = config; + update_config[ov::loaded_from_cache.name()] = true; + compiled_model = context ? plugin.import_model(networkStream, context, update_config) + : plugin.import_model(networkStream, update_config); if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { wrapper->get_executable_network()->loadedFromCache(); } @@ -1523,11 +1526,8 @@ ov::SoPtr ov::CoreImpl::load_model_from_cache( ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& user_config) const { ov::AnyMap property_config; - // 0. 
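The `load_model_from_cache` change above injects `ov::loaded_from_cache = true` into the import config so plugins can tell a cache import from a fresh compile. From the application side, for plugins that expose the property, the effect looks roughly like this; the paths are placeholders:

```cpp
#include <iostream>

#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    core.set_property(ov::cache_dir("model_cache"));  // placeholder directory

    // The first call compiles and populates the cache; later calls import the
    // cached blob, and load_model_from_cache now tells the plugin so.
    auto compiled = core.compile_model("model.xml", "CPU");  // placeholder model
    std::cout << "loaded from cache: " << compiled.get_property(ov::loaded_from_cache) << "\n";
}
```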
Move ov::device::priorities / TARGET_FALLBACK key to property_config - auto device_priorities_it = user_config.find("TARGET_FALLBACK"); - if (device_priorities_it == user_config.end()) { - device_priorities_it = user_config.find(ov::device::priorities.name()); - } + // 0. Move ov::device::priorities key to property_config + auto device_priorities_it = user_config.find(ov::device::priorities.name()); if (device_priorities_it != user_config.end()) { property_config[device_priorities_it->first] = device_priorities_it->second.as(); } diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 2a4415ad941bd4..6aa1db3fa9929b 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ -6,8 +6,6 @@ #include -#include - #include "any_copy.hpp" #include "cache_guard.hpp" #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" @@ -15,7 +13,6 @@ #include "ie_cache_manager.hpp" #include "ie_extension.h" #include "ie_icore.hpp" -#include "multi-device/multi_device_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/so_extension.hpp" @@ -212,8 +209,7 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t ov::SoPtr LoadNetworkImpl( const InferenceEngine::CNNNetwork& model, ov::Plugin& plugin, - const std::map& parsedConfig, - const InferenceEngine::RemoteContext::Ptr& context); + const std::map& parsedConfig); public: CoreImpl(bool _newAPI); @@ -249,13 +245,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t bool isNewAPI() const override; - InferenceEngine::RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) override; - - ov::SoPtr LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config) override; - InferenceEngine::SoExecutableNetworkInternal LoadNetwork(const InferenceEngine::CNNNetwork& network, const std::string& deviceNameOrig, const std::map& config) override; @@ -294,16 +283,6 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t */ std::vector GetAvailableDevices() const override; - /** - * @brief Create a new shared context object on specified accelerator device - * using specified plugin-specific low level device API parameters (device handle, pointer, etc.) - * @param deviceName Name of a device to create new shared context on. - * @param params Map of device-specific shared context parameters. - * @return A shared pointer to a created remote context. 
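The `.as()` call in this hunk lost its template argument in this rendering; a plausible reconstruction of the simplified step 0, which now recognizes only `ov::device::priorities` since the legacy TARGET_FALLBACK alias is gone:

```cpp
#include <string>

#include "openvino/core/any.hpp"
#include "openvino/runtime/properties.hpp"

// Promote ov::device::priorities from the user config into the compile
// config, if present.
void move_device_priorities(const ov::AnyMap& user_config, ov::AnyMap& property_config) {
    auto it = user_config.find(ov::device::priorities.name());
    if (it != user_config.end())
        property_config[it->first] = it->second.as<std::string>();
}
```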
- */ - InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, - const InferenceEngine::ParamMap& params) override; - std::map GetSupportedConfig(const std::string& deviceName, const std::map& configs) override; diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index 88dd55a595f17a..c6dae8eab29e52 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -5,6 +5,7 @@ #include #include "any_copy.hpp" +#include "blob_factory.hpp" #include "compilation_context.hpp" #include "core_impl.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" @@ -14,7 +15,6 @@ #include "ie_network_reader.hpp" #include "iplugin_wrapper.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/pass/constant_folding.hpp" #include "openvino/itt.hpp" #include "openvino/runtime/device_id_parser.hpp" @@ -31,21 +31,13 @@ bool ov::CoreImpl::isNewAPI() const { ov::SoPtr ov::CoreImpl::LoadNetworkImpl( const InferenceEngine::CNNNetwork& network, ov::Plugin& plugin, - const std::map& parsedConfig, - const InferenceEngine::RemoteContext::Ptr& context) { + const std::map& parsedConfig) { OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "CoreImpl::LoadNetworkImpl"); ov::SoPtr execNetwork; auto wrapper = std::dynamic_pointer_cast(plugin.m_ptr); OPENVINO_ASSERT(wrapper); auto old_plugin = wrapper->get_plugin(); - execNetwork = {context ? old_plugin->LoadNetwork(network, parsedConfig, context) - : old_plugin->LoadNetwork(network, parsedConfig), - plugin.m_so}; - return execNetwork; -} - -InferenceEngine::RemoteContext::Ptr ov::CoreImpl::GetDefaultContext(const std::string& deviceName) { - return ov::legacy_convert::convert_remote_context(get_default_context(deviceName)); + return {old_plugin->LoadNetwork(network, parsedConfig), plugin.m_so}; } InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& modelPath, const std::string& binPath) const { @@ -64,27 +56,6 @@ InferenceEngine::CNNNetwork ov::CoreImpl::ReadNetwork(const std::string& model, return InferenceEngine::details::ReadNetwork(model, weights, extensions, isNewAPI(), frontendMode); } -ov::SoPtr ov::CoreImpl::LoadNetwork( - const InferenceEngine::CNNNetwork& network, - const std::shared_ptr& context, - const std::map& config) { - OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::LoadTime, "Core::LoadNetwork::RemoteContext"); - if (network.getFunction()) { - auto ctx = ov::legacy_convert::convert_remote_context(context); - auto compiled_model = - compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), ctx, any_copy(config)); - return {ov::legacy_convert::convert_compiled_model(compiled_model), compiled_model._so}; - } - if (context == nullptr) { - IE_THROW() << "Remote context is null"; - } - // have to deduce the device name/config from the context first - auto parsed = parseDeviceNameIntoConfig(context->getDeviceName(), any_copy(config)); - auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), context); - return res; -} - InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( const InferenceEngine::CNNNetwork& network, const std::string& deviceName, @@ -97,7 +68,7 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( } auto parsed = parseDeviceNameIntoConfig(deviceName, any_copy(config)); auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), 
nullptr); + auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config)); return {res._ptr, res._so}; } @@ -205,11 +176,6 @@ std::vector ov::CoreImpl::GetAvailableDevices() const { return get_available_devices(); } -InferenceEngine::RemoteContext::Ptr ov::CoreImpl::CreateContext(const std::string& deviceName, - const InferenceEngine::ParamMap& params) { - return ov::legacy_convert::convert_remote_context(create_context(deviceName, params)); -} - /** * @brief Registers the extension in a Core object * Such extensions can be used for both CNNNetwork readers and device plugins diff --git a/src/inference/src/dev/icompiled_model.cpp b/src/inference/src/dev/icompiled_model.cpp index 6e81c091719686..49d60e5268cb49 100644 --- a/src/inference/src/dev/icompiled_model.cpp +++ b/src/inference/src/dev/icompiled_model.cpp @@ -137,9 +137,6 @@ void ov::ICompiledModel::set_callback_executor(const std::shared_ptr ov::ICompiledModel::get_context() const { - if (auto wrapper = dynamic_cast(this)) { - return ov::legacy_convert::convert_remote_context(wrapper->get_executable_network()->GetContext()); - } if (m_context) return m_context; return m_plugin->get_default_context({}); diff --git a/src/inference/src/dev/iplugin_wrapper.cpp b/src/inference/src/dev/iplugin_wrapper.cpp index 5b25dcfb6aac1d..d51dbb767c43a9 100644 --- a/src/inference/src/dev/iplugin_wrapper.cpp +++ b/src/inference/src/dev/iplugin_wrapper.cpp @@ -50,11 +50,7 @@ std::shared_ptr IPluginWrapper::compile_model(const std::str std::shared_ptr IPluginWrapper::compile_model(const std::shared_ptr& model, const ov::AnyMap& properties, const ov::SoPtr& context) const { - return ov::legacy_convert::convert_compiled_model( - update_exec_network(m_old_plugin->LoadNetwork(ov::legacy_convert::convert_model(model, is_new_api()), - any_copy(properties), - ov::legacy_convert::convert_remote_context(context)))) - ._ptr; + OPENVINO_NOT_IMPLEMENTED; } void IPluginWrapper::set_property(const ov::AnyMap& properties) { @@ -70,11 +66,11 @@ ov::Any IPluginWrapper::get_property(const std::string& name, const ov::AnyMap& } ov::SoPtr IPluginWrapper::create_context(const ov::AnyMap& remote_properties) const { - return ov::legacy_convert::convert_remote_context(m_old_plugin->CreateContext(remote_properties)); + OPENVINO_NOT_IMPLEMENTED; } ov::SoPtr IPluginWrapper::get_default_context(const ov::AnyMap& remote_properties) const { - return ov::legacy_convert::convert_remote_context(m_old_plugin->GetDefaultContext(remote_properties)); + OPENVINO_NOT_IMPLEMENTED; } std::shared_ptr IPluginWrapper::import_model(std::istream& model, @@ -87,11 +83,7 @@ std::shared_ptr IPluginWrapper::import_model(std::istream& m std::shared_ptr IPluginWrapper::import_model(std::istream& model, const ov::SoPtr& context, const ov::AnyMap& properties) const { - return ov::legacy_convert::convert_compiled_model( - update_exec_network(m_old_plugin->ImportNetwork(model, - ov::legacy_convert::convert_remote_context(context), - any_copy(properties)))) - ._ptr; + OPENVINO_NOT_IMPLEMENTED; } ov::SupportedOpsMap IPluginWrapper::query_model(const std::shared_ptr& model, diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 4f536c6cd24a7e..e91fccdbbf3c8e 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -6,8 +6,8 @@ #include -#include "ie_blob.h" -#include "ie_remote_blob.hpp" +#include "blob_factory.hpp" +#include "ie_ngraph_utils.hpp" #include "openvino/runtime/iremote_tensor.hpp" #include 
"openvino/runtime/properties.hpp" #include "remote_utils.hpp" @@ -17,6 +17,29 @@ namespace ov { +namespace { +Shape make_roi_shape(const Shape& tensor_shape, const Coordinate& begin, const Coordinate& end) { + OPENVINO_ASSERT(tensor_shape.size() == begin.size()); + OPENVINO_ASSERT(begin.size() == end.size()); + + auto roi_shape = Shape(begin.size()); + + auto roi_begin = begin.begin(); + auto roi_end = end.begin(); + auto roi_dim = roi_shape.begin(); + auto max_dim = tensor_shape.begin(); + + for (; max_dim != tensor_shape.end(); ++max_dim, ++roi_begin, ++roi_end, ++roi_dim) { + OPENVINO_ASSERT(*roi_begin <= *max_dim); + OPENVINO_ASSERT(*roi_end <= *max_dim); + *roi_dim = *roi_end - *roi_begin; + OPENVINO_ASSERT(*roi_dim <= *max_dim); + } + + return roi_shape; +} +} // namespace + /** * @brief View tensor to external memory * The tensor doesn't own the external memory @@ -156,7 +179,7 @@ class StridedViewTensor : public ViewTensor { * * @param element_type Tensor element type * @param shape Tensor shape - * @param ptr pointer to external memoty + * @param ptr pointer to external memory * @param byte_strides Tensor strides * * @return Shared pointer to tensor interface @@ -266,22 +289,14 @@ std::shared_ptr make_tensor(const element::Type element_type, const Sha */ class RoiTensor : public ITensor { public: - RoiTensor(const std::shared_ptr& owner, const Coordinate& begin, const Coordinate& end) : m_owner{owner} { - OPENVINO_ASSERT(owner->get_element_type().bitwidth() >= 8, + RoiTensor(const std::shared_ptr& owner, const Coordinate& begin, const Coordinate& end) + : m_owner{owner}, + m_shape{make_roi_shape(owner->get_shape(), begin, end)}, + m_capacity{m_shape}, + m_offset{std::inner_product(begin.begin(), begin.end(), get_strides().begin(), static_cast(0))} { + OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, "ROI Tensor for types with bitwidths less then 8 bit is not implemented. Tensor type: ", - owner->get_element_type()); - auto owner_shape = owner->get_shape(); - OPENVINO_ASSERT(owner_shape.size() == begin.size()); - OPENVINO_ASSERT(begin.size() == end.size()); - m_shape.resize(begin.size()); - for (size_t i = 0; i < begin.size(); ++i) { - OPENVINO_ASSERT(begin[i] <= owner_shape[i]); - OPENVINO_ASSERT(end[i] <= owner_shape[i]); - m_shape[i] = end[i] - begin[i]; - OPENVINO_ASSERT(m_shape[i] <= owner_shape[i]); - } - auto& strides = get_strides(); - m_offset = std::inner_product(begin.begin(), begin.end(), strides.begin(), static_cast(0)); + get_element_type()); } const element::Type& get_element_type() const override { @@ -297,7 +312,18 @@ class RoiTensor : public ITensor { } void set_shape(ov::Shape new_shape) override { - OPENVINO_THROW("Shapes cannot be changed for ROI Tensor"); + OPENVINO_ASSERT(new_shape.size() == m_shape.size()); + for (auto new_dim = new_shape.cbegin(), max_dim = m_capacity.cbegin(); new_dim != new_shape.cend(); + ++max_dim, ++new_dim) { + OPENVINO_ASSERT(*new_dim <= *max_dim, + "Cannot set new shape: ", + new_shape, + " for ROI tensor! 
Dimension: ", + std::distance(new_shape.cbegin(), new_dim), + " is not compatible."); + } + + m_shape = std::move(new_shape); } void* data(const element::Type& element_type) const override { @@ -307,8 +333,9 @@ class RoiTensor : public ITensor { private: std::shared_ptr m_owner; - size_t m_offset; Shape m_shape; + const Shape m_capacity; + const size_t m_offset; }; /** @@ -354,8 +381,6 @@ class BlobTensor : public ITensor { std::shared_ptr blob; BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} { - auto remote_impl = dynamic_cast(blob.get()); - OPENVINO_ASSERT(!remote_impl); OPENVINO_ASSERT(blob); m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); update_strides(); @@ -460,10 +485,6 @@ ov::SoPtr make_tensor(const std::shared_ptr& blo return {}; } else if (unwrap && std::dynamic_pointer_cast(blob) != nullptr) { return std::dynamic_pointer_cast(blob)->get_tensor(); - } else if (auto remote_blob = std::dynamic_pointer_cast(blob)) { - return remote_blob->get_tensor(); - } else if (auto remote_blob = std::dynamic_pointer_cast(blob)) { - return {std::make_shared(remote_blob), nullptr}; } ELSE_IF(float) ELSE_IF(double) @@ -484,36 +505,6 @@ ov::SoPtr make_tensor(const std::shared_ptr& blo #undef IF } -InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob) { -#ifdef PROXY_PLUGIN_ENABLED - if (auto remote_blob = dynamic_cast(blob)) { - const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); - if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } - OPENVINO_NOT_IMPLEMENTED; - } -#endif - return blob; -} - -const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob) { -#ifdef PROXY_PLUGIN_ENABLED - if (auto remote_blob = dynamic_cast(blob)) { - const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); - if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob.get(); - } - OPENVINO_NOT_IMPLEMENTED; - } -#endif - return blob; -} - InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, InferenceEngine::TensorDesc desc) { @@ -559,10 +550,6 @@ InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, return {}; } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { return blob_tensor->blob; - } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { - return blob_tensor->blob; - } else if (std::dynamic_pointer_cast(tensor._ptr)) { - return std::make_shared(tensor, create_desc(tensor, desc)); } else { #define CASE(precision, T) \ case element::precision: \ diff --git a/src/inference/src/dev/preprocessing/mean_image.cpp b/src/inference/src/dev/preprocessing/mean_image.cpp deleted file mode 100644 index 639a31d5bf6bca..00000000000000 --- a/src/inference/src/dev/preprocessing/mean_image.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "mean_image.hpp" - -#include "openvino/cc/pass/itt.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -ov::pass::AddMeanImage::AddMeanImage(const MeanMap& inputInfoMap) { - MATCHER_SCOPE(AddMeanImage); - auto label = 
ov::pass::pattern::wrap_type(); - - ov::matcher_pass_callback callback = [=](pattern::Matcher& m) { - auto param = std::dynamic_pointer_cast(m.get_match_root()); - if (!param) { - return false; - } - - auto it = inputInfoMap.find(param->get_friendly_name()); - if (it == inputInfoMap.end()) { - return false; - } - - auto mean_const = it->second; - OPENVINO_ASSERT(mean_const->get_element_type() == ov::element::f32, - "Mean for ", - param->get_friendly_name(), - " must have f32 type"); - - auto copy_param = param->clone_with_new_inputs({}); - auto sub = std::make_shared(copy_param, mean_const); - - ov::replace_node(param, sub); - sub->set_argument(0, param); - - // Return true as the root node was changed - return true; - }; - - // Register pattern with Parameter operation as a pattern root node - auto m = std::make_shared(label, matcher_name); - // Register Matcher - register_matcher(m, callback); -} diff --git a/src/inference/src/dev/preprocessing/mean_image.hpp b/src/inference/src/dev/preprocessing/mean_image.hpp deleted file mode 100644 index 70c387c7a756b9..00000000000000 --- a/src/inference/src/dev/preprocessing/mean_image.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "openvino/op/constant.hpp" -#include "openvino/pass/graph_rewrite.hpp" - -namespace ov { -namespace pass { - -/** - * @brief Add `meanImage` preprocessing to input nodes - */ -class AddMeanImage : public ov::pass::MatcherPass { -public: - using MeanMap = std::map>; - - OPENVINO_RTTI("AddMeanImage", "0"); - explicit AddMeanImage(const MeanMap& inputInfoMap); -}; - -} // namespace pass -} // namespace ov diff --git a/src/inference/src/dev/preprocessing/preprocessing.cpp b/src/inference/src/dev/preprocessing/preprocessing.cpp index 69fb991da1eb32..ce19eda01efa27 100644 --- a/src/inference/src/dev/preprocessing/preprocessing.cpp +++ b/src/inference/src/dev/preprocessing/preprocessing.cpp @@ -5,8 +5,6 @@ #include "preprocessing.hpp" #include "dev/converter_utils.hpp" -#include "dev/preprocessing/mean_image.hpp" -#include "ie_common.h" #include "ie_ngraph_utils.hpp" #include "openvino/cc/pass/itt.hpp" #include "openvino/core/preprocess/color_format.hpp" @@ -19,7 +17,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& model) { RUN_ON_MODEL_SCOPE(AddPreprocessing); ov::preprocess::PrePostProcessor preproc(model); - ov::pass::AddMeanImage::MeanMap meanMap; for (size_t i = 0; i < model->inputs().size(); i++) { ov::Output const_input(model->input(i).get_node(), model->input(i).get_index()); @@ -29,8 +26,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& ov::legacy_convert::fill_input_info(const_input, input_info); OPENVINO_ASSERT(input_info); - auto& legacy_preproc = input_info->getPreProcess(); - preproc.input(i).tensor().set_element_type( InferenceEngine::details::convertPrecision(input_info->getPrecision())); @@ -41,77 +36,10 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& preproc.input(i).tensor().set_layout(ov::Layout{stream.str()}); } - // Resize - switch (legacy_preproc.getResizeAlgorithm()) { - case InferenceEngine::ResizeAlgorithm::RESIZE_AREA: - preproc.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_NEAREST); - preproc.input(i).tensor().set_spatial_dynamic_shape(); - break; - case InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR: - 
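The deleted MEAN_VALUE/stdScale conversion has a direct counterpart in the 2.0 preprocessing API, which is why it can go. A sketch of the modern spelling, assuming a placeholder model path and arbitrary three-channel mean/scale values:

```cpp
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path

    // Declare (x - mean) / scale per channel directly on the model, the
    // replacement for PreProcessInfo::meanValue / stdScale.
    ov::preprocess::PrePostProcessor preproc(model);
    preproc.input(0).preprocess()
        .mean({123.675f, 116.28f, 103.53f})   // example values only
        .scale({58.395f, 57.12f, 57.375f});
    model = preproc.build();  // preprocessing becomes ordinary model ops
}
```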
preproc.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); - preproc.input(i).tensor().set_spatial_dynamic_shape(); - break; - default: - // nothing to do - break; - } - - switch (legacy_preproc.getMeanVariant()) { - case InferenceEngine::MEAN_IMAGE: { - ov::Shape shape(input_info->getTensorDesc().getDims()); - std::vector scale; - std::vector meanImageData(ov::shape_size(shape)); - for (size_t c = 0, i = 0; c < legacy_preproc.getNumberOfChannels(); ++c) { - auto blob = legacy_preproc[c]->meanData; - - auto lm = blob->buffer(); - const float* data = lm.as(); - - std::memcpy(&meanImageData[i], data, blob->byteSize()); - i += blob->size(); - scale.emplace_back(legacy_preproc[c]->stdScale); - } - meanMap[input_info->name()] = ov::op::v0::Constant::create(ov::element::f32, shape, meanImageData); - preproc.input(i).preprocess().scale(scale); - break; - } - case InferenceEngine::MEAN_VALUE: { - std::vector mean, scale; - for (size_t i = 0; i < legacy_preproc.getNumberOfChannels(); i++) { - mean.emplace_back(legacy_preproc[i]->meanValue); - scale.emplace_back(legacy_preproc[i]->stdScale); - } - preproc.input(i).preprocess().mean(mean).scale(scale); - break; - } - default: - break; - } - - switch (legacy_preproc.getColorFormat()) { - case InferenceEngine::ColorFormat::BGR: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::BGR); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::RGB: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::RGB); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::RGBX: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::RGBX); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - case InferenceEngine::ColorFormat::BGRX: - preproc.input(i).tensor().set_color_format(ov::preprocess::ColorFormat::BGRX); - preproc.input(i).preprocess().convert_color(ov::preprocess::ColorFormat::BGR); - break; - default: - break; - } - if (const_input.get_partial_shape().is_static() && const_input.get_shape().size() == 4) preproc.input(i).model().set_layout("NCHW"); } + std::vector legacy_names(model->get_output_size()); for (size_t i = 0; i < model->get_output_size(); i++) { ov::Output const_output(model->output(i).get_node(), model->output(i).get_index()); @@ -137,13 +65,6 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& } } - ov::pass::Manager manager(get_pass_config()); - auto rewrite = manager.register_pass(); - if (!meanMap.empty()) { - rewrite->add_matcher(meanMap); - } - manager.run_passes(model); - preproc.build(); for (size_t i = 0; i < model->get_output_size(); i++) { diff --git a/src/inference/src/dev/preprocessing/preprocessing.hpp b/src/inference/src/dev/preprocessing/preprocessing.hpp index 15c2675db3e135..c0a8494f744b76 100644 --- a/src/inference/src/dev/preprocessing/preprocessing.hpp +++ b/src/inference/src/dev/preprocessing/preprocessing.hpp @@ -12,9 +12,6 @@ namespace pass { /** * @brief Converts the following preprocessing information to OpenVINO operations: - * - InferenceEngine::PreProcessInfo->PreProcessChannel::meanData -> Subtract - * - InferenceEngine::PreProcessInfo->PreProcessChannel::meanValue -> Subtract - * - InferenceEngine::PreProcessInfo->PreProcessChannel::stdScale -> Divide * * The order of operations is the following: * (x - mean) / stdScale diff --git 
a/src/inference/src/dev/remote_context_wrapper.hpp b/src/inference/src/dev/remote_context_wrapper.hpp deleted file mode 100644 index c061e4d38836c3..00000000000000 --- a/src/inference/src/dev/remote_context_wrapper.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "ie_ngraph_utils.hpp" -#include "ie_remote_context.hpp" -#include "openvino/runtime/iremote_context.hpp" -#include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/so_ptr.hpp" - -namespace ov { - -class RemoteContextWrapper : public InferenceEngine::RemoteContext { -private: - ov::SoPtr m_context; - -public: - RemoteContextWrapper(const ov::SoPtr& context) : m_context(context) {} - - const ov::SoPtr& get_context() const { - return m_context; - } - - std::string getDeviceName() const noexcept override { - return m_context->get_device_name(); - } - - InferenceEngine::RemoteBlob::Ptr CreateBlob(const InferenceEngine::TensorDesc& tensorDesc, - const InferenceEngine::ParamMap& params = {}) override { - return std::dynamic_pointer_cast(ov::tensor_to_blob( - m_context->create_tensor(InferenceEngine::details::convertPrecision(tensorDesc.getPrecision()), - tensorDesc.getBlockingDesc().getBlockDims(), - params), - false)); - } - - InferenceEngine::MemoryBlob::Ptr CreateHostBlob(const InferenceEngine::TensorDesc& tensorDesc) override { - return std::dynamic_pointer_cast(ov::tensor_to_blob( - m_context->create_host_tensor(InferenceEngine::details::convertPrecision(tensorDesc.getPrecision()), - tensorDesc.getBlockingDesc().getBlockDims()), - false)); - } - - InferenceEngine::ParamMap getParams() const override { - return m_context->get_property(); - } -}; - -} // namespace ov diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index eb706a37192143..e3eeafe85930ad 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -186,11 +186,7 @@ struct CPUStreamsExecutor::Impl { int max_threads_per_core; StreamCreateType stream_type; const auto org_proc_type_table = get_org_proc_type_table(); - const auto stream_id = - _impl->_config._streams == 0 - ? 0 - : (_streamId >= _impl->_config._streams ? _impl->_config._streams - 1 : _streamId); - + const auto stream_id = _impl->_config._streams == 0 ? 0 : _streamId % _impl->_config._streams; get_cur_stream_info(stream_id, _impl->_config._cpu_reservation, org_proc_type_table, diff --git a/src/inference/src/ie_blob_common.cpp b/src/inference/src/ie_blob_common.cpp index e7e0d0ab040178..eb26b055a597bd 100644 --- a/src/inference/src/ie_blob_common.cpp +++ b/src/inference/src/ie_blob_common.cpp @@ -13,22 +13,6 @@ namespace InferenceEngine { IE_SUPPRESS_DEPRECATED_START -Blob* Blob::getHardwareBlob() { -#ifdef PROXY_PLUGIN_ENABLED - return ov::get_hardware_blob(this); -#else - return this; -#endif -} - -const Blob* Blob::getHardwareBlob() const { -#ifdef PROXY_PLUGIN_ENABLED - return ov::get_hardware_blob(this); -#else - return this; -#endif -} - void Blob::setShape(const SizeVector& dims) { // we don't want to allow setShape for: // 1. 
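The cpu_streams_executor change just above is easiest to see with numbers: the old expression clamped any `_streamId` beyond the configured stream count to the last stream, while the new modulo wraps round-robin. A tiny standalone demonstration:

```cpp
#include <cstdio>

int main() {
    const int streams = 4;  // stands in for _impl->_config._streams
    for (int stream_id_raw = 0; stream_id_raw < 8; ++stream_id_raw) {
        // New behavior: wrap; old behavior would pin 4..7 all to stream 3.
        const int stream_id = streams == 0 ? 0 : stream_id_raw % streams;
        std::printf("%d -> %d\n", stream_id_raw, stream_id);  // 4 -> 0, 5 -> 1, ...
    }
}
```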
ROI cases diff --git a/src/inference/src/ie_common.cpp b/src/inference/src/ie_common.cpp index a111adaca6a2e9..5e8aa69596c87a 100644 --- a/src/inference/src/ie_common.cpp +++ b/src/inference/src/ie_common.cpp @@ -11,7 +11,6 @@ #include #include -#include "exec_graph_info.hpp" #include "ie_blob.h" #include "ie_extension.h" #include "ie_iextension.h" diff --git a/src/inference/src/ie_compound_blob.cpp b/src/inference/src/ie_compound_blob.cpp deleted file mode 100644 index 8bafacb3d223b5..00000000000000 --- a/src/inference/src/ie_compound_blob.cpp +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief An implementation file for CompoundBlob - * @file ie_compound_blob.cpp - */ - -#include "ie_compound_blob.h" - -#include -#include -#include -#include - -IE_SUPPRESS_DEPRECATED_START -namespace InferenceEngine { - -namespace { - -TensorDesc getBlobTensorDesc(const Blob::Ptr& blob) { - return blob->getTensorDesc(); -} - -TensorDesc verifyBatchedBlobInput(const std::vector& blobs) { - // verify invariants - if (blobs.empty()) { - IE_THROW() << "BatchedBlob cannot be created from empty vector of Blob, Please, make sure vector contains at " - "least one Blob"; - } - - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - const auto subBlobDesc = getBlobTensorDesc(blobs[0]); - - if (std::any_of(blobs.begin(), blobs.end(), [&subBlobDesc](const Blob::Ptr& blob) { - return getBlobTensorDesc(blob) != subBlobDesc; - })) { - IE_THROW() << "All blobs tensors should be equal"; - } - - auto subBlobLayout = subBlobDesc.getLayout(); - - auto blobLayout = Layout::ANY; - SizeVector blobDims = subBlobDesc.getDims(); - switch (subBlobLayout) { - case NCHW: - case NHWC: - case NCDHW: - case NDHWC: - case NC: - case CN: - blobLayout = subBlobLayout; - if (blobDims[0] != 1) { - IE_THROW() << "All blobs should be batch 1"; - } - blobDims[0] = blobs.size(); - break; - case C: - blobLayout = NC; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - case CHW: - blobLayout = NCHW; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - case HWC: - blobLayout = NHWC; - blobDims.insert(blobDims.begin(), blobs.size()); - break; - default: - IE_THROW() << "Unsupported sub-blobs layout - to be one of: [NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW]"; - } - - return TensorDesc{subBlobDesc.getPrecision(), blobDims, blobLayout}; -} - -} // anonymous namespace - -CompoundBlob::CompoundBlob(const TensorDesc& tensorDesc) : Blob(tensorDesc) {} - -CompoundBlob::CompoundBlob(const std::vector& blobs) : CompoundBlob(TensorDesc{}) { - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - // Check that none of the blobs provided is compound. 
If at least one of them is compound, throw - // an exception because recursive behavior is not allowed - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob->is(); - })) { - IE_THROW() << "Cannot create a compound blob from other compound blobs"; - } - - this->_blobs = blobs; -} - -CompoundBlob::CompoundBlob(std::vector&& blobs) : CompoundBlob(TensorDesc{}) { - // Cannot create a compound blob from nullptr Blob objects - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob == nullptr; - })) { - IE_THROW() << "Cannot create a compound blob from nullptr Blob objects"; - } - - // Check that none of the blobs provided is compound. If at least one of them is compound, throw - // an exception because recursive behavior is not allowed - if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) { - return blob->is(); - })) { - IE_THROW() << "Cannot create a compound blob from other compound blobs"; - } - - this->_blobs = std::move(blobs); -} - -size_t CompoundBlob::byteSize() const { - return 0; -} - -size_t CompoundBlob::element_size() const { - return 0; -} - -void CompoundBlob::allocate() noexcept {} - -bool CompoundBlob::deallocate() noexcept { - return false; -} - -LockedMemory CompoundBlob::buffer() noexcept { - return LockedMemory(nullptr, nullptr, 0); -} - -LockedMemory CompoundBlob::cbuffer() const noexcept { - return LockedMemory(nullptr, nullptr, 0); -} - -size_t CompoundBlob::size() const noexcept { - return _blobs.size(); -} - -Blob::Ptr CompoundBlob::getBlob(size_t i) const noexcept { - if (i >= _blobs.size()) { - return nullptr; - } - return _blobs[i]; -} - -Blob::Ptr CompoundBlob::createROI(const ROI& roi) const { - std::vector roiBlobs; - roiBlobs.reserve(_blobs.size()); - - for (const auto& blob : _blobs) { - roiBlobs.push_back(blob->createROI(roi)); - } - - return std::make_shared(std::move(roiBlobs)); -} - -const std::shared_ptr& CompoundBlob::getAllocator() const noexcept { - static std::shared_ptr _allocator = nullptr; - return _allocator; -}; - -BatchedBlob::BatchedBlob(const std::vector& blobs) : CompoundBlob(verifyBatchedBlobInput(blobs)) { - this->_blobs = blobs; -} - -BatchedBlob::BatchedBlob(std::vector&& blobs) : CompoundBlob(verifyBatchedBlobInput(blobs)) { - this->_blobs = std::move(blobs); -} - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 5da90bde36e353..f81f49218b2711 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -28,7 +28,6 @@ #include "ie_network_reader.hpp" #include "ie_ngraph_utils.hpp" #include "ie_plugin_config.hpp" -#include "ie_remote_context.hpp" #include "itt.hpp" #include "openvino/core/except.hpp" #include "openvino/core/so_extension.hpp" @@ -150,22 +149,6 @@ ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, } } -ExecutableNetwork Core::LoadNetwork(const CNNNetwork& network, - RemoteContext::Ptr context, - const std::map& config) { - auto valid = ::CheckStatic(network); - try { - OPENVINO_ASSERT(std::get<0>(valid), - "InferenceEngine::Core::LoadNetwork doesn't support inputs having dynamic shapes. ", - "Use ov::Core::compile_model API instead. 
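With CompoundBlob/BatchedBlob deleted, the batched-input use case they served maps onto `set_tensors` in the 2.0 API: one tensor per batch item, combined by the runtime. A sketch assuming a model compiled with batch size 4 and a placeholder path:

```cpp
#include <vector>

#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");  // placeholder; assume batch size 4
    auto request = compiled.create_infer_request();

    const auto& input = compiled.input();
    ov::Shape piece_shape = input.get_shape();
    piece_shape[0] = 1;  // each piece carries one batch item, like the old batch-1 sub-blobs

    std::vector<ov::Tensor> pieces;
    for (int i = 0; i < 4; ++i)
        pieces.emplace_back(input.get_element_type(), piece_shape);

    request.set_tensors(input, pieces);  // the runtime combines the pieces
    request.infer();
}
```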
Dynamic inputs are :", - std::get<1>(valid)); - auto exec = _impl->LoadNetwork(network, std::dynamic_pointer_cast(context), config); - return {exec._ptr, exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, const std::string& deviceName, const std::map& config) { @@ -191,31 +174,6 @@ ExecutableNetwork Core::LoadNetwork(const std::string& modelPath, const std::map } } -RemoteContext::Ptr Core::CreateContext(const std::string& deviceName, const ParamMap& params) { - try { - return _impl->CreateContext(deviceName, params); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - -RemoteContext::Ptr Core::GetDefaultContext(const std::string& deviceName) { - if (deviceName.find("HETERO") == 0) { - IE_THROW() << "HETERO device does not support remote context"; - } - if (deviceName.find("MULTI") == 0) { - IE_THROW() << "MULTI device does not support remote context"; - } - if (deviceName.find("AUTO") == 0) { - IE_THROW() << "AUTO device does not support remote context"; - } - try { - return _impl->GetDefaultContext(deviceName); - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - void Core::AddExtension(IExtensionPtr extension, const std::string& deviceName_) { if (deviceName_.find("HETERO") == 0) { IE_THROW() << "HETERO device does not support extensions. Please, set extensions directly to fallback devices"; @@ -296,30 +254,6 @@ ExecutableNetwork Core::ImportNetwork(std::istream& networkModel) { } } -ExecutableNetwork Core::ImportNetwork(std::istream& networkModel, - const RemoteContext::Ptr& context, - const std::map& config) { - OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::ImportNetwork"); - - if (context == nullptr) { - IE_THROW() << "Remote context is null"; - } - - std::string deviceName_ = context->getDeviceName(); - ov::DeviceIDParser device(deviceName_); - std::string deviceName = device.get_device_name(); - - try { - auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config)); - auto exec = - _impl->get_plugin(deviceName) - .import_model(networkModel, ov::legacy_convert::convert_remote_context(context), parsed._config); - return {ov::legacy_convert::convert_compiled_model(exec), exec._so}; - } catch (const ov::Exception& ex) { - IE_THROW() << ex.what(); - } -} - QueryNetworkResult Core::QueryNetwork(const CNNNetwork& network, const std::string& deviceName, const std::map& config) const { diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index b9543193a58a78..80343432e4335c 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -29,8 +29,8 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/runtime/shared_buffer.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "openvino/util/shared_object.hpp" -#include "so_ptr.hpp" #include "transformations/rt_info/old_api_map_order_attribute.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/inference/src/ie_remote_context.cpp b/src/inference/src/ie_remote_context.cpp deleted file mode 100644 index 2bab079fbaf279..00000000000000 --- a/src/inference/src/ie_remote_context.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_remote_context.hpp" - -#include -#include - -#include "blob_factory.hpp" -#include "dev/converter_utils.hpp" -#include 
"dev/remote_context_wrapper.hpp" -#include "openvino/runtime/remote_context.hpp" -#ifdef PROXY_PLUGIN_ENABLED -# include "openvino/proxy/plugin.hpp" -#endif - -namespace InferenceEngine { - -IE_SUPPRESS_DEPRECATED_START -MemoryBlob::Ptr RemoteContext::CreateHostBlob(const TensorDesc& tensorDesc) { - auto blob = std::dynamic_pointer_cast(make_blob_with_precision(tensorDesc)); - if (!blob) - IE_THROW(NotAllocated) << "Failed to create host blob in remote context for " << getDeviceName() << " device"; - - return blob; -} - -const std::shared_ptr RemoteContext::GetHardwareContext() { -#ifdef PROXY_PLUGIN_ENABLED - if (auto wrapper = dynamic_cast(this)) { - auto ov_context = wrapper->get_context(); - auto hw_context = ov::proxy::get_hardware_context(ov_context); - return ov::legacy_convert::convert_remote_context(hw_context._ptr); - } -#endif - return shared_from_this(); -} - -const std::shared_ptr RemoteContext::GetHardwareContext() const { -#ifdef PROXY_PLUGIN_ENABLED - if (auto wrapper = dynamic_cast(this)) { - auto ov_context = wrapper->get_context(); - auto hw_context = ov::proxy::get_hardware_context(ov_context); - return ov::legacy_convert::convert_remote_context(hw_context._ptr); - } -#endif - return shared_from_this(); -} -IE_SUPPRESS_DEPRECATED_END - -} // namespace InferenceEngine diff --git a/src/inference/src/ie_transformations.cpp b/src/inference/src/ie_transformations.cpp deleted file mode 100644 index 1b1ba8212ebc6b..00000000000000 --- a/src/inference/src/ie_transformations.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_transformations.hpp" - -#include "ngraph/pass/low_latency.hpp" -#include "ngraph/pass/manager.hpp" - -using namespace InferenceEngine; - -void InferenceEngine::lowLatency2(InferenceEngine::CNNNetwork& network, bool use_const_initializer) { - auto function = network.getFunction(); - ngraph::pass::Manager manager; - manager.register_pass(use_const_initializer); - manager.run_passes(function); -} diff --git a/src/inference/src/cpp/ie_remote_context.cpp b/src/inference/src/remote_context.cpp similarity index 98% rename from src/inference/src/cpp/ie_remote_context.cpp rename to src/inference/src/remote_context.cpp index 10dde33bb6158b..e2a2bc61a0c731 100644 --- a/src/inference/src/cpp/ie_remote_context.cpp +++ b/src/inference/src/remote_context.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ie_remote_context.hpp" +#include "openvino/runtime/remote_context.hpp" #include #include "any_copy.hpp" -#include "ie_remote_blob.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/make_tensor.hpp" -#include "openvino/runtime/remote_context.hpp" #define OV_REMOTE_CONTEXT_STATEMENT(...) 
\ OPENVINO_ASSERT(_impl != nullptr, "RemoteContext was not initialized."); \ diff --git a/src/inference/src/shared_object_loader.cpp b/src/inference/src/shared_object_loader.cpp deleted file mode 100644 index 748b0b4864290c..00000000000000 --- a/src/inference/src/shared_object_loader.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "details/ie_so_loader.h" -#include "ie_common.h" -#include "openvino/util/file_util.hpp" -#include "openvino/util/shared_object.hpp" - -IE_SUPPRESS_DEPRECATED_START - -namespace InferenceEngine { -namespace details { - -SharedObjectLoader::SharedObjectLoader(const std::shared_ptr& so) : _so(so) {} - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -SharedObjectLoader::SharedObjectLoader(const wchar_t* pluginName) - : SharedObjectLoader(ov::util::wstring_to_string(pluginName).c_str()) {} -#endif - -SharedObjectLoader::SharedObjectLoader(const char* pluginName) : _so{nullptr} { - try { - _so = ov::util::load_shared_object(pluginName); - } catch (const std::runtime_error& ex) { - IE_THROW(GeneralError) << ex.what(); - } -} - -SharedObjectLoader::~SharedObjectLoader() {} - -void* SharedObjectLoader::get_symbol(const char* symbolName) const { - try { - return ov::util::get_symbol(_so, symbolName); - } catch (const std::runtime_error& ex) { - IE_THROW(NotFound) << ex.what(); - } -} - -std::shared_ptr SharedObjectLoader::get() const { - return _so; -} - -} // namespace details -} // namespace InferenceEngine - -IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/tests/functional/CMakeLists.txt b/src/inference/tests/functional/CMakeLists.txt index 187f48599746e5..7a5f43ed6a7f97 100644 --- a/src/inference/tests/functional/CMakeLists.txt +++ b/src/inference/tests/functional/CMakeLists.txt @@ -10,7 +10,6 @@ endif() set(DEPENDENCIES mock_engine - template_extension openvino_template_extension ) diff --git a/src/inference/tests/functional/async_infer_request_test.cpp b/src/inference/tests/functional/async_infer_request_test.cpp index b6f4f4143766da..68b97e5d69bb6d 100644 --- a/src/inference/tests/functional/async_infer_request_test.cpp +++ b/src/inference/tests/functional/async_infer_request_test.cpp @@ -23,11 +23,6 @@ TEST(InferRequestCPPTests, throwsOnUninitializedGetBlob) { ASSERT_THROW(req.GetBlob({}), InferenceEngine::NotAllocated); } -TEST(InferRequestCPPTests, throwsOnUninitializedGetPreProcess) { - InferRequest req; - ASSERT_THROW(req.GetPreProcess({}), InferenceEngine::NotAllocated); -} - TEST(InferRequestCPPTests, throwsOnUninitializedInfer) { InferRequest req; ASSERT_THROW(req.Infer(), InferenceEngine::NotAllocated); diff --git a/src/inference/tests/functional/blob_copy_test.cpp b/src/inference/tests/functional/blob_copy_test.cpp deleted file mode 100644 index 0807d5c24d9e79..00000000000000 --- a/src/inference/tests/functional/blob_copy_test.cpp +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include -#include - -using namespace ::testing; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -using ChannelNum = size_t; -using BatchNum = size_t; -using PrecisionType = InferenceEngine::Precision::ePrecision; -using IsInterleaved = bool; // true = interleaved, false = deinterleaved. -using Dims = - std::vector; // dimensions are in the form of (N x C x D1 x D2 ... Dn), so Dims is vector (D1 x D2 ... 
Dn) - -namespace { - -InferenceEngine::Layout setLayout(IsInterleaved isInterleaved, int dimsSize) { - if (dimsSize == 3) { - return (isInterleaved) ? InferenceEngine::Layout::NDHWC : InferenceEngine::Layout::NCDHW; - } else if (dimsSize == 2) { - return (isInterleaved) ? InferenceEngine::Layout::NHWC : InferenceEngine::Layout::NCHW; - } - IE_THROW() << "Can't set layout"; -} - -// Support only for 4d and 5d blobs -SizeVector SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) { - if (dims.size() == 2) { - return SizeVector{batchNum, channelNum, dims[0], dims[1]}; - } else if (dims.size() == 3) { - return SizeVector{batchNum, channelNum, dims[0], dims[1], dims[2]}; - } - IE_THROW() << "Can't set dimVector"; -} - -// For FP16 and Q78 precision we use int16_t type -InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, - SizeVector dimsVector, - InferenceEngine::Layout layout) { - InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout); - switch (precision) { - case InferenceEngine::Precision::FP32: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::FP64: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::FP16: - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::Q78: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::I32: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::U32: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::I64: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::U64: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::U16: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::I4: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::BIN: - return make_shared_blob(tensorDesc); - case InferenceEngine::Precision::U4: - case InferenceEngine::Precision::U8: - return make_shared_blob(tensorDesc); - default: - IE_THROW() << "Unsupported precision"; - } -} - -// returns a random value in the range [0 , elem) -size_t GenerateRandom(size_t elem) { - size_t result; - do { - result = static_cast(std::floor(std::rand() / static_cast(RAND_MAX * elem))); - } while (result >= elem); - return result; -} - -// returns index of random element of the blob: -// dims is the blob shape, e.g. {1, 3, 640, 480} -// random index[i] lays between 0 and dims[i]-1 -SizeVector GenerateRandomVector(SizeVector dims) { - SizeVector idx(dims.size()); - - for (size_t i = 0; i < dims.size(); ++i) { - idx[i] = GenerateRandom(dims[i]); - } - return idx; -} - -void PrintParams(InferenceEngine::Layout layout, - SizeVector dims, - std::string blobType, - InferenceEngine::Precision precision) { - std::cout << blobType << "Blob params: " << layout << ", precision: " << precision << ", dims: {"; - for (size_t i = 0; i < dims.size(); i++) { - std::cout << (i > 0 ? 
", " : "") << dims[i]; - } - std::cout << "}" << std::endl; -} - -// For FP16 and Q78 precision we use int16_t type -template -void FillBlobRandom(Blob::Ptr& inputBlob) { - srand(1); - auto inputBlobData = inputBlob->buffer().as(); - for (size_t i = 0; i < inputBlob->size(); i++) { - inputBlobData[i] = (T)(GenerateRandom(RAND_MAX) / static_cast(RAND_MAX) * 100); - } -} - -// For FP16 and Q78 precision we use int16_t type -void FillBlob(Blob::Ptr& inputBlob) { - auto precision = inputBlob->getTensorDesc().getPrecision(); - switch (precision) { - case InferenceEngine::Precision::FP32: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::FP64: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::FP16: - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::Q78: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::I32: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::U32: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::I64: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::U64: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::U16: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::I4: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::BIN: - return FillBlobRandom(inputBlob); - case InferenceEngine::Precision::U4: - case InferenceEngine::Precision::U8: - return FillBlobRandom(inputBlob); - default: - IE_THROW() << "Cant fill blob with \"" << precision << "\" precision\n"; - } -} - -template -T GetElem(Blob::Ptr& blob, SizeVector idx) { - T* src = blob->buffer().as() + blob->getTensorDesc().getBlockingDesc().getOffsetPadding(); - - auto blobLayout = blob->getTensorDesc().getLayout(); - - SizeVector strides = blob->getTensorDesc().getBlockingDesc().getStrides(); - if (blobLayout == NHWC || blobLayout == NDHWC) { - for (size_t i = 2; i < strides.size(); i++) { - std::swap(strides[1], strides[i]); - } - } - - size_t offset = 0; - - for (size_t i = 0; i < idx.size(); i++) { - offset += idx[i] * strides[i]; - } - - return src[offset]; -} - -int SetExperimentsNum(int blobSize) { - if (blobSize < 1000) { - return blobSize; - } else if (blobSize < 10000) { - return 1000; - } else if (blobSize < 100000) { - return blobSize / 10; - } else { - return blobSize / 100; - } -} - -template -bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { - EXPECT_TRUE(srcBlob->size() == dstBlob->size()); - int experimentsNum = SetExperimentsNum(static_cast(srcBlob->size())); - int errorsCount = 0; - for (; experimentsNum > 0; --experimentsNum) { - SizeVector randomElemIdx = GenerateRandomVector(srcBlob->getTensorDesc().getDims()); - auto srcElem = GetElem(srcBlob, randomElemIdx); - auto dstElem = GetElem(dstBlob, randomElemIdx); - if (srcElem != dstElem) { - if (errorsCount < 10) { - errorsCount++; - std::cout << "ERROR: srcElem = " << srcElem << ", dstElem = " << dstElem << std::endl; - } else { - errorsCount++; - } - } - } - if (errorsCount > 0) { - std::cout << "errorsCount = " << errorsCount << std::endl; - } - return errorsCount == 0; -} - -bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { - switch (srcBlob->getTensorDesc().getPrecision()) { - case InferenceEngine::Precision::FP32: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::FP64: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::FP16: - case 
InferenceEngine::Precision::I16: - case InferenceEngine::Precision::Q78: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I32: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U32: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I64: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U64: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U16: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I4: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::BIN: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U4: - case InferenceEngine::Precision::U8: - return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); - default: - return false; - } -} - -} // namespace - -using BlobCopyTest = - ::testing::TestWithParam>; - -TEST_P(BlobCopyTest, BlobCopy) { - IsInterleaved srcIsInterleaved = get<0>(GetParam()); - IsInterleaved dstIsInterleaved = get<1>(GetParam()); - BatchNum batchNum = get<2>(GetParam()); - ChannelNum channelNum = get<3>(GetParam()); - Dims dims = get<4>(GetParam()); - PrecisionType precisionType = get<5>(GetParam()); - - SizeVector srcDims = SetDimVector(batchNum, channelNum, dims); - SizeVector dstDims = SetDimVector(batchNum, channelNum, dims); - - InferenceEngine::Layout srcLayout = setLayout(srcIsInterleaved, static_cast(dims.size())); - InferenceEngine::Layout dstLayout = setLayout(dstIsInterleaved, static_cast(dims.size())); - - PrintParams(srcLayout, srcDims, "src", precisionType); - PrintParams(dstLayout, dstDims, "dst", precisionType); - - Blob::Ptr srcBlob = createBlob(precisionType, srcDims, srcLayout); - Blob::Ptr dstBlob = createBlob(precisionType, dstDims, dstLayout); - - srcBlob->allocate(); - dstBlob->allocate(); - - FillBlob(srcBlob); - - auto start = std::chrono::high_resolution_clock::now(); - blob_copy(srcBlob, dstBlob); - auto finish = std::chrono::high_resolution_clock::now(); - - std::cout << "Blob_copy execution time : " - << std::chrono::duration_cast(finish - start).count() << " micros" - << std::endl; - - ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is not correct"; -} - -namespace { - -// is interleaved srcBlob? -std::vector BlobCopy_srcLayoutParam = { - true, - false, -}; -// is interleaved dstBlob? 
-std::vector BlobCopy_dstLayoutParam = { - false, - true, -}; - -std::vector BlobCopy_BatchNum = { - 1, - 3, -}; - -std::vector BlobCopy_ChannelNum = { - 3, - 7, -}; - -std::vector BlobCopy_Dims = { - {{10, 20, 30}}, - {{60, 80}}, -}; - -// The 'blob_copy(4/5)_d' function is a template with the parameter-list -// FP32 is used for cases with the following accuracy: FP32, I32, U32 -// FP16 is used for cases with the following accuracy: FP16, U16, I16 -// U8 is used for cases with the following accuracy: U8, I8 -// Cases with other precision are not supported -std::vector BlobCopy_PrecisionParams = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U16, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U32, - InferenceEngine::Precision::I32, -}; - -} // namespace - -INSTANTIATE_TEST_SUITE_P(accuracy, - BlobCopyTest, - ::testing::Combine(::testing::ValuesIn(BlobCopy_srcLayoutParam), - ::testing::ValuesIn(BlobCopy_dstLayoutParam), - ::testing::ValuesIn(BlobCopy_BatchNum), - ::testing::ValuesIn(BlobCopy_ChannelNum), - ::testing::ValuesIn(BlobCopy_Dims), - ::testing::ValuesIn(BlobCopy_PrecisionParams))); - -namespace { - -template -bool IsEqualBlobCopy_Impl(Blob::Ptr& ref, Blob::Ptr& dst) { - EXPECT_TRUE(ref->size() == dst->size()); - auto refData = ref->buffer().as(); - auto dstData = dst->buffer().as(); - return (std::equal(dstData, dstData + dst->size(), refData, [](T left, T right) { - return left == right; - })); -} - -bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { - switch (srcBlob->getTensorDesc().getPrecision()) { - case InferenceEngine::Precision::FP32: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::FP64: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::FP16: - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::Q78: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U32: - IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I32: - IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U64: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I64: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::I4: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::BIN: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U4: - case InferenceEngine::Precision::U8: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U16: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - default: - return false; - } -} - -template -void copy3DBlobsAllBytesWithReLayout(const Blob::Ptr& srcLayoutBlob, Blob::Ptr& trgLayoutBlob) { - auto srcData = srcLayoutBlob->buffer().as(); - auto dstData = trgLayoutBlob->buffer().as(); - auto& dims = srcLayoutBlob->getTensorDesc().getDims(); - size_t C = dims[1]; - size_t H = dims[2]; - size_t W = dims[3]; - for (size_t c = 0; c < C; ++c) { - for (size_t h = 0; h < H; ++h) { - for (size_t w = 0; w < W; ++w) { - size_t src_idx = c * H * W + h * W + w; - size_t dst_idx = h * W * C + w * C + c; - dstData[dst_idx] = srcData[src_idx]; - } - } - } -} - -// For FP16 and Q78 precision we use int16_t type -void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob::Ptr& trgLayoutBlob) { - auto precision = 
srcLayoutBlob->getTensorDesc().getPrecision(); - switch (precision) { - case InferenceEngine::Precision::FP32: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::FP64: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::FP16: - case InferenceEngine::Precision::I16: - case InferenceEngine::Precision::Q78: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::I32: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::U32: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::U64: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::I64: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::U16: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::I4: - case InferenceEngine::Precision::I8: - case InferenceEngine::Precision::BIN: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - case InferenceEngine::Precision::U4: - case InferenceEngine::Precision::U8: - return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); - default: - IE_THROW() << "Cant copy blob with \"" << precision << "\" precision\n"; - } -} - -std::vector BlobCopySetLayout_Dims = { - {{1, 10, 10}}, - {{2, 100, 100}}, - {{3, 224, 224}}, -}; - -std::vector BlobCopySetLayout_Precisions = { - Precision::U8, - Precision::U16, - InferenceEngine::Precision::FP32, -}; - -} // namespace - -using BlobCopySetLayoutTest = ::testing::TestWithParam>; - -// test after [IE] Fix TensorDesc::setLayout method, 735d275b47c4fd0c7b0db5c8f9fe8705967270f0 -TEST_P(BlobCopySetLayoutTest, BlobCopyWithNCHW_To_NHWC_After_setLayout) { - const size_t C_sz = get<0>(GetParam())[0]; - const size_t H_sz = get<0>(GetParam())[1]; - const size_t W_sz = get<0>(GetParam())[2]; - const Precision precision = get<1>(GetParam()); - const Layout src_layout = Layout::NCHW, dst_layout = Layout::NHWC; - - auto src = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout); - src->allocate(); - src->getTensorDesc().setLayout(src_layout); - - FillBlob(src); - - auto dst = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout); - dst->allocate(); - - blob_copy(src, dst); - - auto ref = createBlob(precision, {1, C_sz, H_sz, W_sz}, dst_layout); - ref->allocate(); - - copy3DBlobsAllBytesWithReLayoutWrapper(src, ref); - - ASSERT_TRUE(IsEqualBlobCopy(ref, dst)) << "'blob_copy' after setLayout function is not correct"; -} - -INSTANTIATE_TEST_SUITE_P(accuracy, - BlobCopySetLayoutTest, - ::testing::Combine(::testing::ValuesIn(BlobCopySetLayout_Dims), - ::testing::ValuesIn(BlobCopySetLayout_Precisions))); diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index a8f10175218da5..8956d5dfa2eddf 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -1883,7 +1883,7 @@ TEST_P(CachingTest, LoadHetero_TargetFallbackFromCore) { }); testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock"}}); m_testFunction(core); }); // 
Ensure that only 1 blob (for Hetero) is created @@ -1900,7 +1900,7 @@ TEST_P(CachingTest, LoadHetero_TargetFallbackFromCore) { } testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock"}}); m_testFunction(core); comp_models.clear(); }); @@ -2020,7 +2020,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { }); testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.1"}}); m_testFunction(core); }); } @@ -2034,7 +2034,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { EXPECT_CALL(*net, export_model(_)).Times(0); } testLoad([&](ov::Core& core) { - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.1"}}); core.set_property(ov::cache_dir(m_cacheDir)); m_testFunction(core); }); @@ -2048,7 +2048,7 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { EXPECT_CALL(net, export_model(_)).Times(1); }); testLoad([&](ov::Core& core) { - core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.51"}}); + core.set_property(ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), "mock.51"}}); core.set_property(ov::cache_dir(m_cacheDir)); m_testFunction(core); comp_models.clear(); diff --git a/src/inference/tests/functional/caseless_tests.cpp b/src/inference/tests/functional/caseless_tests.cpp deleted file mode 100644 index 0ba008f4798373..00000000000000 --- a/src/inference/tests/functional/caseless_tests.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "caseless.hpp" -#include "debug.h" - -using namespace std; -using namespace InferenceEngine::details; - -using CaselessTests = ::testing::Test; - -TEST_F(CaselessTests, emptyAreEqual) { - ASSERT_TRUE(InferenceEngine::details::equal("", "")); -} - -TEST_F(CaselessTests, canIgnoreCase) { - ASSERT_TRUE(InferenceEngine::details::equal("abc", "ABC")); -} - -TEST_F(CaselessTests, emptyIsNotEqualNotEmpty) { - ASSERT_FALSE(InferenceEngine::details::equal("", "abc")); -} - -TEST_F(CaselessTests, canFindCaslessInMap) { - caseless_map storage = { - {"Abc", 1}, - {"bC", 2}, - {"AbcD", 3}, - }; - ASSERT_EQ(storage["abc"], 1); - ASSERT_EQ(storage["ABC"], 1); - ASSERT_EQ(storage["BC"], 2); - ASSERT_EQ(storage["aBCd"], 3); - ASSERT_EQ(storage.find("aBd"), storage.end()); - ASSERT_EQ(storage.find(""), storage.end()); -} - -TEST_F(CaselessTests, canFindCaslessInUnordered) { - caseless_unordered_map storage = { - {"Abc", 1}, - {"bC", 2}, - {"AbcD", 3}, - }; - ASSERT_EQ(storage["abc"], 1); - ASSERT_EQ(storage["ABC"], 1); - ASSERT_EQ(storage["BC"], 2); - ASSERT_EQ(storage["aBCd"], 3); - ASSERT_EQ(storage.find("aBd"), storage.end()); - ASSERT_EQ(storage.find(""), storage.end()); -} diff --git a/src/inference/tests/functional/cnn_network_test.cpp b/src/inference/tests/functional/cnn_network_test.cpp index 497051aa92a88e..171a6ee0845253 100644 --- a/src/inference/tests/functional/cnn_network_test.cpp +++ b/src/inference/tests/functional/cnn_network_test.cpp @@ -112,24 +112,6 
@@ TEST_F(CNNNetworkTests, throwsHasDynamicInputs) { } } -TEST_F(CNNNetworkTests, throwsHasDynamicInputs_remoteContext) { - auto model = CNNNetworkTests_create_model(); - CNNNetwork network(model); - InferenceEngine::Core core; - try { - core.LoadNetwork(network, InferenceEngine::RemoteContext::Ptr()); - FAIL() << "LoadNetwork with dynamic inputs shall throw"; - } catch (const InferenceEngine::Exception& e) { - EXPECT_TRUE(std::string(e.what()).find("InferenceEngine::Core::LoadNetwork") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_1") == std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_2") == std::string::npos) << e.what(); - } -} - TEST_F(CNNNetworkTests, throwsHasDynamicInputs_queryNetwork) { auto model = CNNNetworkTests_create_model(); CNNNetwork network(model); diff --git a/src/inference/tests/functional/core_threading.cpp b/src/inference/tests/functional/core_threading.cpp index aaa85c90c66f6b..4b2d19731ff322 100644 --- a/src/inference/tests/functional/core_threading.cpp +++ b/src/inference/tests/functional/core_threading.cpp @@ -58,17 +58,6 @@ class IECoreThreadingTests : public ::testing::Test { thread.join(); } } - - void safeAddExtension(InferenceEngine::Core& ie) { - try { - auto extension = std::make_shared( - ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + OV_BUILD_POSTFIX)); - ie.AddExtension(extension); - } catch (const InferenceEngine::Exception& ex) { - ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. 
Opset"); - } - } }; // tested function: SetConfig @@ -167,19 +156,3 @@ TEST_F(IECoreThreadingTests, GetAvailableDevices) { }, 30); } - -#if defined(ENABLE_OV_IR_FRONTEND) -// tested function: ReadNetwork, AddExtension -TEST_F(IECoreThreadingTests, ReadNetwork) { - InferenceEngine::Core ie; - auto network = ie.ReadNetwork(modelName, weightsName); - - runParallel( - [&]() { - safeAddExtension(ie); - (void)ie.ReadNetwork(modelName, weightsName); - }, - 100, - 12); -} -#endif // defined(ENABLE_OV_IR_FRONTEND) diff --git a/src/inference/tests/functional/debug_tests.cpp b/src/inference/tests/functional/debug_tests.cpp deleted file mode 100644 index 6f72c8d88d768a..00000000000000 --- a/src/inference/tests/functional/debug_tests.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "debug.h" - -using DebugTests = ::testing::Test; - -TEST_F(DebugTests, tolowerWorksWithEmptyString) { - std::string str = ""; - ASSERT_STREQ("", InferenceEngine::details::tolower(str).c_str()); -} - -TEST_F(DebugTests, shouldConvertToLowerCase) { - std::string str = "Hello, World!1"; - ASSERT_STREQ("hello, world!1", InferenceEngine::details::tolower(str).c_str()); -} diff --git a/src/inference/tests/functional/executable_network.cpp b/src/inference/tests/functional/executable_network.cpp index 84b77740826caf..5d741e876749c4 100644 --- a/src/inference/tests/functional/executable_network.cpp +++ b/src/inference/tests/functional/executable_network.cpp @@ -50,8 +50,3 @@ TEST(ExecutableNetworkTests, throwsOnUninitializedGetMetric) { ExecutableNetwork exec; ASSERT_THROW(exec.GetMetric({}), InferenceEngine::NotAllocated); } - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetContext) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetContext(), InferenceEngine::NotAllocated); -} diff --git a/src/inference/tests/functional/ngraph_reshape_tests.cpp b/src/inference/tests/functional/ngraph_reshape_tests.cpp deleted file mode 100644 index caca91383fc846..00000000000000 --- a/src/inference/tests/functional/ngraph_reshape_tests.cpp +++ /dev/null @@ -1,1282 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/data_utils.hpp" -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_common.hpp" -#include "ie_common.h" -#include "openvino/core/partial_shape.hpp" -#include "openvino/core/shape.hpp" - -using namespace testing; -using namespace InferenceEngine; - -using NGraphReshapeTests = ov::test::TestsCommon; - -TEST_F(NGraphReshapeTests, getBatchSize) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - CNNNetwork cnnNetwork(ngraph); - ASSERT_EQ(1, cnnNetwork.getBatchSize()); -} - -TEST_F(NGraphReshapeTests, ReshapedDynamicShapeLayout) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({-1, 3, 22, 22}); - ngraph::element::Type 
type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - param->set_friendly_name("A"); - auto relu = std::make_shared(param); - - ngraph::ParameterVector params = {param}; - - ngraph = std::make_shared(relu, params); - } - - CNNNetwork cnnNetwork(ngraph); - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{0, 3, 22, 22})); - - ICNNNetwork::InputShapes new_shape; - new_shape["A"] = {1, 3, 22, 22}; - cnnNetwork.reshape(new_shape); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{1, 3, 22, 22})); -} - -TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLU) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - param->set_friendly_name("data"); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - CNNNetwork cnnNetwork(ngraph::clone_function(*ngraph)); - std::map shapes; - shapes["data"] = {1, 3, 25, 25}; - - ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); - - auto changedFunction = cnnNetwork.getFunction(); - ASSERT_NE(nullptr, changedFunction); - ASSERT_EQ(changedFunction->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(changedFunction->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["data"]->getLayout()); - ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25})); -} - -TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUWithoutCloneFunction) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - param->set_friendly_name("data"); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - CNNNetwork cnnNetwork(ngraph); - std::map shapes; - shapes["data"] = {1, 3, 25, 25}; - - ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); - - auto changedFunction = cnnNetwork.getFunction(); - ASSERT_NE(nullptr, changedFunction); - ASSERT_EQ(changedFunction->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(changedFunction->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - - ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["data"]->getLayout()); - 
ASSERT_EQ(cnnNetwork.getInputsInfo()["data"]->getInputData()->getDims(), (SizeVector{1, 3, 25, 25})); -} - -class CustomTestOp : public ngraph::op::Op { -public: - OPENVINO_OP("CustomTestLayer", "test_extension"); - - CustomTestOp() = default; - CustomTestOp(const ngraph::Output& arg, bool test1, int64_t test2) - : Op({arg}), - test1(test1), - test2(test2) { - constructor_validate_and_infer_types(); - } - - void validate_and_infer_types() override { - auto input_pshape = get_input_partial_shape(0); - if (input_pshape.is_static()) { - auto input_shape = input_pshape.to_shape(); - ngraph::Shape output_shape(input_shape); - for (size_t i = 0; i < input_shape.size(); ++i) { - output_shape[i] = input_shape[i] * test2 + (test1 ? 0 : 1); - } - set_output_type(0, get_input_element_type(0), ngraph::PartialShape(output_shape)); - } else { - set_output_type(0, get_input_element_type(0), ngraph::PartialShape::dynamic()); - } - } - - std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& new_args) const override { - if (new_args.size() != 1) { - OPENVINO_THROW("Incorrect number of new arguments"); - } - - return std::make_shared(new_args.at(0), test1, test2); - } - - bool visit_attributes(ngraph::AttributeVisitor& visitor) override { - visitor.on_attribute("test1", test1); - visitor.on_attribute("test2", test2); - return true; - } - -private: - bool test1; - int64_t test2; -}; - -class TestInPlaceExtension : public InferenceEngine::IExtension { -public: - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {} - - void Unload() noexcept override {} - - std::map getOpSets() override { - static std::map opsets; - if (opsets.empty()) { - ngraph::OpSet opset; - opset.insert(); - opsets[CustomTestOp::get_type_info_static().version_id] = opset; - } - return opsets; - } - -private: -}; - -#if defined(ENABLE_OV_IR_FRONTEND) -TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension1) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared()); - Blob::Ptr weights; - SizeVector refBeforeReshape = {1, 3, 22, 22}; - SizeVector refAfterReshape = {4, 6, 44, 44}; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 3, 22, 22}; - - ASSERT_NO_THROW(network.reshape(newShapes)); - auto output = network.getOutputsInfo(); - SizeVector outDims = output["activation"]->getTensorDesc().getDims(); - ASSERT_EQ(outDims, refAfterReshape); -} - -TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension2) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared()); - Blob::Ptr weights; - SizeVector refBeforeReshape = {1, 3, 22, 22}; - SizeVector refAfterReshape = {7, 10, 67, 67}; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 3, 22, 22}; - - ASSERT_NO_THROW(network.reshape(newShapes)); - auto output = network.getOutputsInfo(); - SizeVector outDims = output["activation"]->getTensorDesc().getDims(); - ASSERT_EQ(outDims, refAfterReshape); -} -#endif // defined(ENABLE_OV_IR_FRONTEND) - -class BadExtension : public InferenceEngine::IExtension { -public: - BadExtension() {} - 
- void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override{}; - - void Unload() noexcept override{}; - - std::map getOpSets() override { - static std::map opsets; - if (opsets.empty()) { - ngraph::OpSet opset; - opset.insert(); - opsets["opset1"] = opset; - } - return opsets; - } -}; - -TEST_F(NGraphReshapeTests, LoadBadNewExtension) { - InferenceEngine::Core ie; - ASSERT_THROW(ie.AddExtension(std::make_shared()), InferenceEngine::Exception); -} - -TEST_F(NGraphReshapeTests, TestInterpParameters) { - auto inp = std::make_shared(ngraph::element::f32, ngraph::Shape{2, 3, 4, 5}); - inp->set_friendly_name("test"); - - ngraph::op::v0::InterpolateAttrs attrs; - attrs.pads_begin.push_back(0); - attrs.pads_end.push_back(0); - attrs.axes = ngraph::AxisSet{2, 3}; - attrs.align_corners = false; - attrs.mode = "nearest"; - attrs.antialias = false; - - std::vector shape = {8, 10}; - auto out_shape = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, shape); - auto interp = std::make_shared(inp, out_shape, attrs); - - auto output = std::make_shared(interp); - auto ngraph_function = - std::make_shared(ngraph::ResultVector{output}, ngraph::ParameterVector{inp}); - - CNNNetwork cnn(ngraph_function); - std::map inShape; - inShape["test"] = {1, 3, 4, 5}; - cnn.reshape(inShape); -} - -#ifdef ENABLE_OV_IR_FRONTEND -TEST_F(NGraphReshapeTests, ReshapeWithDefaultGenericOps) { - // the RNNCEll was initially marked as "experimental" operation but later was added to opset - // the test checks that IR reader properly instantiate the "experimental" RNNCell as "opset6" RNNCell - std::string model = R"V0G0N( - - - - - - - 1 - 16 - - - - - - - - 1 - 128 - - - - - - - - 128 - 16 - - - - - - - - 128 - 128 - - - - - - - - 128 - - - - - - - - 1 - 16 - - - 1 - 128 - - - 128 - 16 - - - 128 - 128 - - - 128 - - - - - 1 - 128 - - - - - - - 1 - 128 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 16}; - newShapes["in2"] = {2, 128}; - - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDDetectionOutput) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1000 - 324 - - - - - - - - 1000 - 81 - - - - - - - - 1 - 3 - - - - - - - - 1000 - 4 - - - 1000 - 324 - - - 1000 - 81 - - - 1 - 3 - - - - - 100 - 4 - - - 100 - - - 100 - - - - - - - 100 - 4 - - - - - - - 100 - - - - - - - 100 - - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {2000, 4}; - newShapes["in1"] = {2000, 324}; - newShapes["in2"] = {2000, 81}; - - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDPriorGridGenerator) { - std::string model = R"V0G0N( - - - - - - - 3 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 81 - - - - - - - - 3 - 4 - - - 1 - 256 - 200 - 336 - - - 1 - 3 - 800 - 1344 - - - - - 201600 - 4 - - - - - - - 201600 - 4 - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {2, 256, 200, 336}; - newShapes["in2"] = {2, 3, 800, 1344}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDGenerateProposalsSingleImage) { - 
std::string model = R"V0G0N( - - - - - - - 3 - - - - - - - - 201600 - 4 - - - - - - - - 12 - 200 - 336 - - - - - - - - 3 - 200 - 336 - - - - - - - - 3 - - - 201600 - 4 - - - 12 - 200 - 336 - - - 3 - 200 - 336 - - - - - 1000 - 4 - - - 1000 - - - - - - - 1000 - 4 - - - - - - - 1000 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in2"] = {12, 200, 300}; - newShapes["in3"] = {2, 200, 300}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDGenerateProposalsSingleImage_opset6) { - std::string model = R"V0G0N( - - - - - - - 3 - - - - - - - - 201600 - 4 - - - - - - - - 12 - 200 - 336 - - - - - - - - 3 - 200 - 336 - - - - - - - - 3 - - - 201600 - 4 - - - 12 - 200 - 336 - - - 3 - 200 - 336 - - - - - 1000 - 4 - - - 1000 - - - - - - - 1000 - 4 - - - - - - - 1000 - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in2"] = {12, 200, 300}; - newShapes["in3"] = {2, 200, 300}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeGenerateProposals) { - std::string model = R"V0G0N( - - - - - - - 8 - 3 - - - - - - - - 50 - 84 - 3 - 4 - - - - - - - - 8 - 12 - 50 - 84 - - - - - - - - 8 - 3 - 50 - 84 - - - - - - - - 8 - 3 - - - 50 - 84 - 3 - 4 - - - 8 - 12 - 50 - 84 - - - 8 - 3 - 50 - 84 - - - - - -1 - 4 - - - -1 - - - 8 - - - - - - - -1 - 4 - - - - - - - -1 - - - - - - - 8 - - - - - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in1"] = {100, 100, 4, 4}; - newShapes["in2"] = {8, 16, 100, 100}; - newShapes["in3"] = {8, 4, 100, 100}; - ASSERT_NO_THROW(network.reshape(newShapes)); - - InferenceEngine::ICNNNetwork::InputShapes newShapes2; - newShapes2["in0"] = {2, 4}; - newShapes2["in1"] = {100, 100, 4, 4}; - newShapes2["in2"] = {2, 16, 100, 100}; - newShapes2["in3"] = {2, 4, 100, 100}; - ASSERT_NO_THROW(network.reshape(newShapes2)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDROIFeatureExtractor) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 4 - - - 1 - 256 - 200 - 336 - - - - - 1000 - 256 - 7 - 7 - - - - - - - 1000 - 256 - 7 - 7 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {1256, 4}; - newShapes["in1"] = {1, 256, 7, 7}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDROIFeatureExtractorOpset6) { - std::string model = R"V0G0N( - - - - - - - 1000 - 4 - - - - - - - - 1 - 256 - 200 - 336 - - - - - - - - 1000 - 4 - - - 1 - 256 - 200 - 336 - - - - - 1000 - 256 - 7 - 7 - - - - - - - 1000 - 256 - 7 - 7 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {1256, 4}; - newShapes["in1"] = {1, 256, 7, 7}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} - -TEST_F(NGraphReshapeTests, ReshapeEDTopKROIs) { - std::string model = R"V0G0N( - - - - - - - 5000 - 4 - - - - - - - - 5000 - - - - - - - - 5000 - 
4 - - - 5000 - - - - - 1000 - 4 - - - - - - - 1000 - 4 - - - - - - - - - - -)V0G0N"; - InferenceEngine::Core ie; - Blob::Ptr weights; - auto network = ie.ReadNetwork(model, weights); - InferenceEngine::ICNNNetwork::InputShapes newShapes; - newShapes["in0"] = {10000, 4}; - newShapes["in1"] = {10000}; - ASSERT_NO_THROW(network.reshape(newShapes)); -} -#endif diff --git a/src/inference/tests/functional/ov_core_threading.cpp b/src/inference/tests/functional/ov_core_threading.cpp index 96f954b5dcbd6c..09c88809084663 100644 --- a/src/inference/tests/functional/ov_core_threading.cpp +++ b/src/inference/tests/functional/ov_core_threading.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -15,6 +16,7 @@ #include "common_test_utils/test_assertions.hpp" #include "functional_test_utils/test_model/test_model.hpp" #include "ie_extension.h" +#include "openvino/core/so_extension.hpp" #include "openvino/runtime/core.hpp" #include "openvino/util/file_util.hpp" #ifdef __GLIBC__ @@ -61,12 +63,10 @@ class CoreThreadingTests : public ::testing::Test { void safeAddExtension(ov::Core& core) { try { - OPENVINO_SUPPRESS_DEPRECATED_START - auto extension = std::make_shared( + auto extension = ov::detail::load_extensions( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + OV_BUILD_POSTFIX)); + std::string("openvino_template_extension") + OV_BUILD_POSTFIX)); core.add_extension(extension); - OPENVINO_SUPPRESS_DEPRECATED_END } catch (const ov::Exception& ex) { ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. Opset"); } @@ -169,17 +169,63 @@ TEST_F(CoreThreadingTests, GetAvailableDevices) { } #if defined(ENABLE_OV_IR_FRONTEND) -// tested function: read_model and add_legacy_extension + +namespace ov { +namespace test { +namespace util { +class Barrier { +private: + std::mutex m_mutex; + std::condition_variable m_cv; + size_t m_count; + const size_t m_expected; + size_t m_wait_id; + +public: + explicit Barrier(std::size_t count) : m_count{count}, m_expected{count}, m_wait_id{} {} + + void arrive_and_wait() { + std::unique_lock lock(m_mutex); + + if (--m_count == 0) { + ++m_wait_id; + m_count = m_expected; + m_cv.notify_all(); + } else { + const auto wait_id = m_wait_id; + m_cv.wait(lock, [this, wait_id] { + return wait_id != m_wait_id; + }); + } + } +}; +} // namespace util +} // namespace test +} // namespace ov + +// tested function: read_model and add_extension TEST_F(CoreThreadingTests, ReadModel) { ov::Core core; auto model = core.read_model(modelName, weightsName); + constexpr size_t threads_num = 12; + ov::test::util::Barrier sync_point(threads_num); + runParallel( [&]() { safeAddExtension(core); - (void)core.read_model(modelName, weightsName); + // add_extension and read_model are each thread-safe when used on their own. + // The barrier is required here so that all threads add their extensions to the core before any thread calls read_model. + // read_model loads a frontend that iterates over the extension vector and assumes it will not change. If the extension vector + // is expanded concurrently, all iterators into it are invalidated, which can result in a segfault while the frontend inspects + // the extensions to be added. 
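+ // (Every one of the threads_num workers must reach the arrive_and_wait() call below before any of them continues, + // so no thread can be inside read_model while another thread is still registering an extension.)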
+ sync_point.arrive_and_wait(); + std::ignore = core.read_model(modelName, weightsName); + + // sync before next iteration (modification of extensions vector) + sync_point.arrive_and_wait(); }, 100, - 12); + threads_num); } #endif // defined(ENABLE_OV_IR_FRONTEND) diff --git a/src/inference/tests/functional/ov_extension_test.cpp b/src/inference/tests/functional/ov_extension_test.cpp index 394526c1bc47da..08a60b81e44e08 100644 --- a/src/inference/tests/functional/ov_extension_test.cpp +++ b/src/inference/tests/functional/ov_extension_test.cpp @@ -18,11 +18,6 @@ std::string getOVExtensionPath() { std::string("openvino_template_extension") + OV_BUILD_POSTFIX); } -std::string getOldExtensionPath() { - return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + OV_BUILD_POSTFIX); -} - std::string getIncorrectExtensionPath() { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), std::string("incorrect") + OV_BUILD_POSTFIX); @@ -142,10 +137,6 @@ TEST_F(OVExtensionTests, load_new_extension) { EXPECT_NO_THROW(core.add_extension(getOVExtensionPath())); } -TEST_F(OVExtensionTests, load_old_extension) { - EXPECT_NO_THROW(core.add_extension(getOldExtensionPath())); -} - TEST_F(OVExtensionTests, load_incorrect_extension) { EXPECT_THROW(core.add_extension(getIncorrectExtensionPath()), ov::Exception); } diff --git a/src/inference/tests/functional/preprocess_test.cpp b/src/inference/tests/functional/preprocess_test.cpp deleted file mode 100644 index 8b5045a5b7bc18..00000000000000 --- a/src/inference/tests/functional/preprocess_test.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -using namespace std; - -IE_SUPPRESS_DEPRECATED_START -using PreProcessTests = ::testing::Test; - -TEST_F(PreProcessTests, throwsOnSettingNullMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - ASSERT_THROW(info.setMeanImage(InferenceEngine::Blob::Ptr(nullptr)), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, throwsOnSetting2DMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - InferenceEngine::Blob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {1, 1}, InferenceEngine::Layout::HW})); - ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, throwsOnSettingWrongSizeMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(1); - InferenceEngine::TBlob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW})); - blob->allocate(); - ASSERT_THROW(info.setMeanImage(blob), InferenceEngine::Exception); -} - -TEST_F(PreProcessTests, noThrowWithCorrectSizeMeanImage) { - InferenceEngine::PreProcessInfo info; - info.init(2); - InferenceEngine::TBlob::Ptr blob( - new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, {2, 1, 1}, InferenceEngine::Layout::CHW})); - blob->allocate(); - ASSERT_NO_THROW(info.setMeanImage(blob)); -} diff --git a/src/inference/tests/functional/response_buffer_test.cpp b/src/inference/tests/functional/response_buffer_test.cpp deleted file mode 100644 index 31a63304ceee46..00000000000000 --- a/src/inference/tests/functional/response_buffer_test.cpp +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "description_buffer.hpp" - -using namespace 
std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -using ResponseBufferTests = ::testing::Test; - -TEST_F(ResponseBufferTests, canCreateResponseMessage) { - ResponseDesc desc; - DescriptionBuffer(&desc) << "make error: " << 1; - ASSERT_STREQ("make error: 1", desc.msg); -} - -TEST_F(ResponseBufferTests, canReportError) { - ResponseDesc desc; - DescriptionBuffer d(NETWORK_NOT_LOADED, &desc); - d << "make error: "; - ASSERT_EQ(NETWORK_NOT_LOADED, (StatusCode)d); -} - -TEST_F(ResponseBufferTests, savePreviosMessage) { - ResponseDesc desc; - desc.msg[0] = 'T'; - desc.msg[1] = 'e'; - desc.msg[2] = 's'; - desc.msg[3] = 't'; - desc.msg[4] = '\0'; - DescriptionBuffer d(&desc); - ASSERT_EQ(GENERAL_ERROR, (StatusCode)d); - ASSERT_EQ(std::string("Test"), desc.msg); -} - -TEST_F(ResponseBufferTests, canHandleBigMessage) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - - buf << bigVal; - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandleNotNullTerminatedInput) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - desc.msg[size - 1] = 'B'; - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - - buf << bigVal; - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandlePredefined) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - buf << bigVal; - - DescriptionBuffer buf2(&desc); - std::string bigVal2(size, 'B'); - buf2 << bigVal2; - - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} - -TEST_F(ResponseBufferTests, canHandleNotNullTerminatedPredefined) { - ResponseDesc desc; - int size = sizeof(desc.msg) / sizeof(desc.msg[0]); - - DescriptionBuffer buf(&desc); - std::string bigVal(size, 'A'); - buf << bigVal; - - desc.msg[size - 1] = 'B'; - - DescriptionBuffer buf2(&desc); - std::string bigVal2(size, 'B'); - buf2 << bigVal2; - - ASSERT_EQ(desc.msg[0], 'A'); - ASSERT_EQ(desc.msg[size - 2], 'A'); - ASSERT_EQ(desc.msg[size - 1], 0); -} diff --git a/src/inference/tests/functional/task_executor_tests.cpp b/src/inference/tests/functional/task_executor_tests.cpp index 0d2396855497dc..18df500dd59ccb 100644 --- a/src/inference/tests/functional/task_executor_tests.cpp +++ b/src/inference/tests/functional/task_executor_tests.cpp @@ -3,12 +3,11 @@ // #include -#include #include -#include #include +#include "openvino/core/parallel.hpp" #include "openvino/runtime/threading/cpu_streams_executor.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" diff --git a/src/inference/tests/unit/CMakeLists.txt b/src/inference/tests/unit/CMakeLists.txt index 8030fe0ddb7a15..1d4c466878203b 100644 --- a/src/inference/tests/unit/CMakeLists.txt +++ b/src/inference/tests/unit/CMakeLists.txt @@ -12,7 +12,7 @@ ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} DEPENDENCIES - template_extension + openvino_template_extension LINK_LIBRARIES unit_test_utils ADD_CLANG_FORMAT diff --git a/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp b/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp index 613124cf040004..6b13290c3e7250 100644 --- 
a/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp +++ b/src/inference/tests/unit/cpp_interfaces/ie_infer_async_request_base_test.cpp @@ -10,7 +10,7 @@ #include #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index cacd96813a824d..1ac5278fc4f4e9 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp index 0609798e9669a4..36c30d67108eb8 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp index 9b550ee9a04f97..47e91b93c26f12 100644 --- a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp +++ b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp b/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp index 5dac715f1550be..1d99069338f761 100644 --- a/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp +++ b/src/inference/tests/unit/cpu_map_parser/parser_windows.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp b/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp index 11a0481e7c1218..fbd498129db801 100644 --- a/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp +++ b/src/inference/tests/unit/cpu_map_parser/valid_proc_check.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_reserve_test.cpp b/src/inference/tests/unit/cpu_reserve_test.cpp index e5fe6b40abdf7b..68b686dd1d0eab 100644 --- a/src/inference/tests/unit/cpu_reserve_test.cpp +++ b/src/inference/tests/unit/cpu_reserve_test.cpp @@ -4,9 +4,8 @@ #include -#include - -#include "ie_system_conf.h" +#include 
"common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor_internal.hpp" using namespace testing; diff --git a/src/inference/tests/unit/cpu_stream_info_test.cpp b/src/inference/tests/unit/cpu_stream_info_test.cpp index a11d0544d0b221..4df93f8e1e6bb7 100644 --- a/src/inference/tests/unit/cpu_stream_info_test.cpp +++ b/src/inference/tests/unit/cpu_stream_info_test.cpp @@ -4,10 +4,10 @@ #include -#include - -#include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/threading/cpu_streams_executor_internal.hpp" +#include "openvino/runtime/threading/cpu_streams_info.hpp" #include "os/cpu_map_info.hpp" using namespace testing; diff --git a/src/inference/tests/unit/ie_blob_test.cpp b/src/inference/tests/unit/ie_blob_test.cpp index ecc8792d7b5f97..ae7dd7e7a8d5c4 100644 --- a/src/inference/tests/unit/ie_blob_test.cpp +++ b/src/inference/tests/unit/ie_blob_test.cpp @@ -6,6 +6,7 @@ #include #include +#include "blob_factory.hpp" #include "openvino/runtime/make_tensor.hpp" #include "unit_test_utils/mocks/mock_allocator.hpp" @@ -278,63 +279,6 @@ TEST_F(BlobTests, cannotCreateBlobWithIncorrectPrecision) { ASSERT_THROW(InferenceEngine::make_shared_blob(desc), InferenceEngine::Exception); } -TEST_F(BlobTests, canUseBlobInMoveSemantics) { - InferenceEngine::TBlob b(InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::C)); - - b.getTensorDesc().setDims({3}); - b.allocate(); - b.data()[0] = 1.0f; - b.data()[1] = 2.0f; - b.data()[2] = 3.0f; - - std::vector dump; - - for (const auto& e : b) { - dump.push_back(e); - } - - ASSERT_EQ(dump.size(), 3); - - ASSERT_EQ(dump[0], 1.0f); - ASSERT_EQ(dump[1], 2.0f); - ASSERT_EQ(dump[2], 3.0f); -} - -TEST_F(BlobTests, DISABLED_canUseLockedMemoryAsRvalueReference) { - std::vector dump; - std::vector v({1.0f, 2.0f, 3.0f}); - auto blob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, InferenceEngine::C), - &v[0], - v.size()); - for (auto e : *blob) { - dump.push_back(e); - } - - ASSERT_EQ(dump.size(), 3); - - ASSERT_EQ(dump[0], 1.0f); - ASSERT_EQ(dump[1], 2.0f); - ASSERT_EQ(dump[2], 3.0f); -} - -TEST_F(BlobTests, canCreateBlobOnExistedMemory) { - float input[] = {0.1f, 0.2f, 0.3f}; - { - auto b = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 2}, InferenceEngine::HW), - input); - auto i = b->begin(); - ASSERT_NEAR(*i, 0.1, 0.00001); - i++; - ASSERT_NEAR(*i, 0.2, 0.00001); - i++; - ASSERT_EQ(i, b->end()); - - ASSERT_EQ(&*b->begin(), input); - } -} - // SetShape TEST_F(BlobTests, canSetShape) { auto b = InferenceEngine::make_shared_blob( @@ -350,20 +294,6 @@ TEST_F(BlobTests, canSetShape) { ASSERT_EQ(newDims[2], 6); } -TEST_F(BlobTests, canModifyDataInRangedFor) { - InferenceEngine::SizeVector v = {1, 2, 3}; - InferenceEngine::TBlob blob({InferenceEngine::Precision::I32, v, InferenceEngine::CHW}); - blob.allocate(); - - for (auto& data : blob) { - data = 5; - } - - for (size_t i = 0; i < v.size(); i++) { - ASSERT_EQ(5, blob.data()[i]) << "Mismatch at" << i; - } -} - TEST_F(BlobTests, makeRoiBlobNchw) { // we create main blob with NCHW layout. We will crop ROI from this blob. 
InferenceEngine::SizeVector dims = {1, 3, 6, 5}; // RGB picture of size (WxH) = 5x6 diff --git a/src/inference/tests/unit/ie_compound_blob_test.cpp b/src/inference/tests/unit/ie_compound_blob_test.cpp deleted file mode 100644 index c6521386f22648..00000000000000 --- a/src/inference/tests/unit/ie_compound_blob_test.cpp +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -class CompoundBlobTests : public ::testing::Test { -protected: - Blob::Ptr _test_blob; - using BlobPtrs = std::vector; - using MemoryBlobPtrs = std::vector; - -public: - void verifyCompoundBlob(const Blob::Ptr& blob) { - // verify basic assumptions about a compound blob - ASSERT_NE(nullptr, blob); - ASSERT_TRUE(blob->is()); - CompoundBlob::Ptr compound_blob = as(blob); - ASSERT_NE(nullptr, compound_blob); - EXPECT_EQ(compound_blob.get(), blob->as()); // shared object == raw ptr - EXPECT_EQ(0, compound_blob->element_size()); - EXPECT_EQ(nullptr, compound_blob->buffer()); - EXPECT_EQ(nullptr, compound_blob->cbuffer()); - EXPECT_GT(compound_blob->size(), 0); - EXPECT_NE(nullptr, compound_blob->getBlob(0)); - } - - void verifyCompoundBlob(Blob::Ptr blob, const BlobPtrs& underlying_blobs) { - verifyCompoundBlob(blob); - - // check that the compound blob contains a vector of provided underlying blobs - CompoundBlob::Ptr compound_blob = as(blob); - EXPECT_EQ(compound_blob.get(), blob->as()); // shared object == raw ptr - ASSERT_EQ(underlying_blobs.size(), compound_blob->size()); - for (size_t i = 0; i < underlying_blobs.size(); ++i) { - EXPECT_EQ(underlying_blobs[i], compound_blob->getBlob(i)); - } - } -}; - -TEST(BlobConversionTests, canWorkWithMemoryBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as(blob).get(), blob->as()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstMemoryBlob) { - Blob::CPtr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as(blob).get(), blob->as()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithTBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is>()); - ASSERT_FALSE(blob->is>()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstTBlob) { - Blob::CPtr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - ASSERT_TRUE(blob->is>()); - ASSERT_FALSE(blob->is>()); - ASSERT_FALSE(blob->is()); - ASSERT_NE(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as>(blob)); - ASSERT_EQ(nullptr, as(blob)); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as>(blob).get(), blob->as>()); - ASSERT_EQ(as(blob).get(), blob->as()); -} - -TEST(BlobConversionTests, canWorkWithCompoundBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); 
- Blob::Ptr cblob = make_shared_blob(std::vector({blob})); - ASSERT_TRUE(cblob->is()); - ASSERT_FALSE(cblob->is()); - ASSERT_NE(nullptr, as(cblob)); - ASSERT_EQ(nullptr, as(cblob)); - ASSERT_EQ(as(cblob).get(), cblob->as()); - ASSERT_EQ(as(cblob).get(), cblob->as()); -} - -TEST(BlobConversionTests, canWorkWithConstCompoundBlob) { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - Blob::CPtr cblob = make_shared_blob(std::vector({blob})); - ASSERT_TRUE(cblob->is()); - ASSERT_FALSE(cblob->is()); - ASSERT_NE(nullptr, as(cblob)); - ASSERT_EQ(nullptr, as(cblob)); - ASSERT_EQ(as(cblob).get(), cblob->as()); - ASSERT_EQ(as(cblob).get(), cblob->as()); -} - -TEST(BlobConversionTests, blobSharesOwnershipOnCast) { - static constexpr const uint8_t stored_value = 123; - TBlob::Ptr tblob; - { - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - ASSERT_EQ(1, blob.use_count()); - ASSERT_TRUE(blob->is>()); - tblob = as>(blob); - ASSERT_NE(nullptr, tblob); - ASSERT_EQ(2, blob.use_count()); - ASSERT_EQ(2, tblob.use_count()); - tblob->allocate(); - tblob->data()[0] = stored_value; - ASSERT_EQ(stored_value, tblob->data()[0]); - } - ASSERT_EQ(1, tblob.use_count()); - ASSERT_NE(nullptr, tblob); - ASSERT_EQ(stored_value, tblob->data()[0]); -} - -TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromNullptr) { - Blob::Ptr valid = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - EXPECT_THROW(make_shared_blob(std::vector({valid, nullptr})), InferenceEngine::Exception); -} - -TEST_F(CompoundBlobTests, canCreateEmptyCompoundBlob) { - _test_blob = make_shared_blob(std::vector()); - - ASSERT_NE(nullptr, _test_blob); - EXPECT_EQ(0, _test_blob->element_size()); - EXPECT_EQ(nullptr, _test_blob->buffer()); - EXPECT_EQ(nullptr, _test_blob->cbuffer()); - ASSERT_TRUE(_test_blob->is()); - CompoundBlob::Ptr compound_blob = as(_test_blob); - ASSERT_NE(nullptr, compound_blob); - EXPECT_EQ(0, compound_blob->size()); - EXPECT_EQ(nullptr, compound_blob->getBlob(0)); -} - -TEST_F(CompoundBlobTests, canCreateCompoundBlob) { - // Create a blob with NCHW layout and pass it to compound for test - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - BlobPtrs blobs = {blob}; - - _test_blob = make_shared_blob(blobs); - verifyCompoundBlob(_test_blob, blobs); -} - -TEST_F(CompoundBlobTests, cannotCreateCompoundBlobFromCompoundBlob) { - // Create a blob with NCHW layout and pass it to compound for test. The created compound blob - // cannot be used to construct another compound blob. 
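The `blobSharesOwnershipOnCast` test above pins down a subtle point worth keeping in mind while reading these deletions: casting through `as<T>()` bumps the shared reference count, so the typed pointer keeps the allocation alive after the original handle goes away. Roughly (template arguments again restored by hand):

```cpp
#include <ie_blob.h>

using namespace InferenceEngine;

void ownership_on_cast_sketch() {
    TBlob<uint8_t>::Ptr typed;
    {
        Blob::Ptr blob =
            make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 1}, Layout::HW));
        typed = as<TBlob<uint8_t>>(blob);  // shared state now has use_count == 2
        typed->allocate();
        typed->data()[0] = 123;
    }  // blob released; typed still owns the data
    // typed->data()[0] is still 123 here
}
```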
Recursive behavior is rejected - Blob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 3, 4, 4}, NCHW)); - - _test_blob = make_shared_blob(std::vector({blob})); - verifyCompoundBlob(_test_blob); - - EXPECT_THROW(make_shared_blob(std::vector({blob, _test_blob})), - InferenceEngine::Exception); -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsCorrectDataInCorrectOrder) { - // Create a vector of blobs with HW layout and pass it to a compound blob to test if the vector - // is stored correctly - static constexpr const uint8_t MAGIC_NUMBER = 23; - BlobPtrs blobs(5); - for (size_t i = 0; i < blobs.size(); ++i) { - blobs[i] = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blobs[i]->allocate(); - MemoryBlob::Ptr mb = as(blobs[i]); - auto lm = mb->rwmap(); - lm.as()[0] = static_cast(i + MAGIC_NUMBER); - } - - _test_blob = make_shared_blob(blobs); - - verifyCompoundBlob(_test_blob, blobs); - - CompoundBlob::Ptr compound_blob = as(_test_blob); - EXPECT_EQ(blobs.size(), compound_blob->size()); - for (size_t i = 0; i < compound_blob->size(); ++i) { - auto blob = compound_blob->getBlob(i); - ASSERT_NE(nullptr, blob); - MemoryBlob::Ptr mb = as(blob); - ASSERT_NE(nullptr, mb); - auto lm = mb->rwmap(); - EXPECT_EQ(static_cast(i + MAGIC_NUMBER), lm.as()[0]); - } -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsReferencesToBlobs) { - // Create a blob with HW layout and pass it to a compound blob to check that the compound blob - // holds references to the blob and not a copy of it - MemoryBlob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blob->allocate(); - // here is quite self to dereference address since LockedMemory would be destroyed only after assignemnt - blob->rwmap().as()[0] = 12; - _test_blob = make_shared_blob(std::vector({blob})); - - verifyCompoundBlob(_test_blob); - - CompoundBlob::Ptr compound_blob = as(_test_blob); - Blob::Ptr b0 = compound_blob->getBlob(0); - MemoryBlob::CPtr mb0 = as(b0); - EXPECT_EQ(12, mb0->rmap().as()[0]); - blob->rwmap().as()[0] = 34; - EXPECT_EQ(34, mb0->rmap().as()[0]); -} - -TEST_F(CompoundBlobTests, compoundBlobHoldsValidDataWhenUnderlyingBlobIsDestroyed) { - // Create a scoped blob with HW layout, pass it to compound, and destroy the original scoped - // blob. 
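These compound-blob tests have, as far as I can tell, no one-to-one counterpart in the 2.0 API; the closest idiom is simply a `std::vector<ov::Tensor>`, and `ov::Tensor` copies share the underlying memory much like the reference-holding behavior verified below. A sketch under that assumption:

```cpp
#include <openvino/runtime/tensor.hpp>
#include <vector>

void tensor_group_sketch() {
    std::vector<ov::Tensor> group;
    {
        ov::Tensor t(ov::element::u8, ov::Shape{1, 1});
        t.data<uint8_t>()[0] = 123;
        group.push_back(t);  // copies the handle, not the bytes
    }                        // t destroyed; group[0] still owns the memory
    uint8_t v = group[0].data<uint8_t>()[0];  // still 123
    (void)v;
}
```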
Check that the compound blob, which holds a reference to the destroyed blob, still has - // a valid object - static constexpr const uint8_t stored_value = 123; - { - MemoryBlob::Ptr blob = make_shared_blob(TensorDesc(Precision::U8, {1, 1}, HW)); - blob->allocate(); - blob->rwmap().as()[0] = stored_value; - _test_blob = make_shared_blob(std::vector({blob})); - } - - verifyCompoundBlob(_test_blob); - CompoundBlob::Ptr compound_blob = as(_test_blob); - ASSERT_NE(nullptr, compound_blob->getBlob(0)); - MemoryBlob::CPtr mb0 = as(compound_blob->getBlob(0)); - ASSERT_NE(nullptr, mb0); - EXPECT_EQ(stored_value, mb0->rmap().as()[0]); -} diff --git a/src/inference/tests/unit/ie_executable_network_test.cpp b/src/inference/tests/unit/ie_executable_network_test.cpp index 3db791e60a9aaa..142214c3ef8e15 100644 --- a/src/inference/tests/unit/ie_executable_network_test.cpp +++ b/src/inference/tests/unit/ie_executable_network_test.cpp @@ -12,7 +12,7 @@ #include "cpp/ie_executable_network_base.hpp" #include "openvino/runtime/compiled_model.hpp" -#include "so_ptr.hpp" +#include "openvino/runtime/so_ptr.hpp" #include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" @@ -34,7 +34,6 @@ using testing::Throw; // 5. void SetConfig(const std::map& config) // 6. Parameter GetConfig(const std::string& name) const // 7. Parameter GetMetric(const std::string& name) const -// 8. RemoteContext::Ptr GetContext() class ExecutableNetworkTests : public ::testing::Test { protected: diff --git a/src/inference/tests/unit/ie_extension_test.cpp b/src/inference/tests/unit/ie_extension_test.cpp deleted file mode 100644 index 336bf5da71c9b7..00000000000000 --- a/src/inference/tests/unit/ie_extension_test.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include -#include -#include - -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_common.hpp" - -using namespace InferenceEngine; - -using ExtensionTests = ::testing::Test; - -#ifndef OPENVINO_STATIC_LIBRARY - -OPENVINO_SUPPRESS_DEPRECATED_START - -static std::string getExtensionPath() { - return FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + OV_BUILD_POSTFIX); -} - -TEST(ExtensionTests, testGetOpSets) { - IExtensionPtr extension = std::make_shared(getExtensionPath()); - auto opsets = extension->getOpSets(); - ASSERT_FALSE(opsets.empty()); - opsets.clear(); -} - -TEST(ExtensionTests, testGetImplTypes) { - IExtensionPtr extension = std::make_shared(getExtensionPath()); - auto opset = extension->getOpSets().begin()->second; - std::shared_ptr op(opset.create(opset.get_types_info().begin()->name)); - ASSERT_FALSE(extension->getImplTypes(op).empty()); -} - -TEST(ExtensionTests, testGetImplTypesThrowsIfNgraphNodeIsNullPtr) { - IExtensionPtr extension = std::make_shared(getExtensionPath()); - ASSERT_THROW(extension->getImplTypes(std::shared_ptr()), InferenceEngine::Exception); -} - -TEST(ExtensionTests, testGetImplementation) { - IExtensionPtr extension = std::make_shared(getExtensionPath()); - auto opset = extension->getOpSets().begin()->second; - std::shared_ptr op(opset.create("Template")); - ASSERT_NE(nullptr, extension->getImplementation(op, extension->getImplTypes(op)[0])); -} - 
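The extension test file deleted here covered the legacy `IExtension` entry points (`getOpSets`, `getImplTypes`, `getImplementation`). For orientation, the 2.0 way to load the same template extension goes through `ov::Core::add_extension`; a sketch, with the library path purely illustrative:

```cpp
#include <openvino/runtime/core.hpp>

void load_extension_sketch() {
    ov::Core core;
    // Path is illustrative; the deleted test composed it from the executable
    // directory plus a platform-specific library name and build postfix.
    core.add_extension("libopenvino_template_extension.so");
}
```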
-TEST(ExtensionTests, testGetImplementationThrowsIfNgraphNodeIsNullPtr) { - IExtensionPtr extension = std::make_shared(getExtensionPath()); - ASSERT_THROW(extension->getImplementation(std::shared_ptr(), ""), InferenceEngine::Exception); -} - -OPENVINO_SUPPRESS_DEPRECATED_END - -#endif // OPENVINO_STATIC_LIBRARY diff --git a/src/inference/tests/unit/update_executor_config_test.cpp b/src/inference/tests/unit/update_executor_config_test.cpp index a660dfff0597ae..abb3612eb8750d 100644 --- a/src/inference/tests/unit/update_executor_config_test.cpp +++ b/src/inference/tests/unit/update_executor_config_test.cpp @@ -4,9 +4,7 @@ #include -#include - -// #include "ie_system_conf.h" +#include "common_test_utils/test_common.hpp" #include "openvino/runtime/threading/istreams_executor.hpp" #include "os/cpu_map_info.hpp" diff --git a/src/plugins/auto/src/common.hpp b/src/plugins/auto/src/common.hpp index dbb833e34f7909..e0827181aab86b 100644 --- a/src/plugins/auto/src/common.hpp +++ b/src/plugins/auto/src/common.hpp @@ -8,8 +8,6 @@ #include #include #include "ie_icore.hpp" -#include "ie_metric_helpers.hpp" -#include #include "openvino/runtime/icompiled_model.hpp" #include "openvino/runtime/isync_infer_request.hpp" #include "openvino/runtime/iasync_infer_request.hpp" diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp index 123a41e3524744..b51f2b7a7ae6b5 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -9,7 +9,6 @@ #include #include "common_test_utils/file_utils.hpp" -#include "ie_plugin_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/opsets/opset11.hpp" @@ -560,7 +559,7 @@ class MockPluginSupportBatchAndContext : public MockPluginBase { return decltype(ov::optimal_batch_size)::value_type(4); } else if (name == ov::device::capabilities.name()) { return decltype(ov::device::capabilities)::value_type( - {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); + {"FP32", "FP16", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); } else if (name == ov::device::type.name()) { return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); } else if (name == ov::loaded_from_cache.name()) { @@ -571,18 +570,6 @@ class MockPluginSupportBatchAndContext : public MockPluginBase { return decltype(ov::streams::num)::value_type{2}; } else if (name == ov::compilation_num_threads.name()) { return decltype(ov::compilation_num_threads)::value_type{4}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::internal::supported_properties) { return decltype(ov::internal::supported_properties)::value_type( {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); @@ -690,18 +677,6 @@ class MockPlugin : public MockPluginBase { return decltype(ov::enable_profiling)::value_type{false}; } else if (name == ov::streams::num.name()) { return decltype(ov::streams::num)::value_type{2}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector 
configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::internal::supported_properties) { return decltype(ov::internal::supported_properties)::value_type( {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp deleted file mode 100644 index 845f63ee4cee8c..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/exec_network_base.hpp" - -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> auto_configs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - ExecutableNetworkBaseTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_configs)), - ExecutableNetworkBaseTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - ExecutableNetworkBaseTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_configs)), - ExecutableNetworkBaseTest::getTestCaseName); - -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - ExecNetSetPrecision, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_configs)), - ExecNetSetPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - ExecNetSetPrecision, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_configs)), - ExecNetSetPrecision::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp deleted file mode 100644 index ca702dc66db4bc..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/get_metric.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -namespace { - -// -// Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - 
IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, - IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp deleted file mode 100644 index c02a5c44c30e35..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/callback.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> multiConfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestCallbackTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestCallbackTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp deleted file mode 100644 index 483067a521c1b0..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/io_blob.hpp" - -#include - -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp deleted file mode 100644 index 5cbda535d8b2df..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ /dev/null @@ -1,29 +0,0 @@ 
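All of the infer-request suites removed in this stretch were parameterized with the legacy string pair `MULTI_CONFIG_KEY(DEVICE_PRIORITIES)`. The typed 2.0 spelling of the same configuration, for reference (model creation elided; the TEMPLATE device name is the one used in these tests):

```cpp
#include <openvino/core/model.hpp>
#include <openvino/runtime/core.hpp>
#include <openvino/runtime/properties.hpp>

void compile_on_auto_sketch(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    // Equivalent of {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), "TEMPLATE"}}:
    auto compiled =
        core.compile_model(model, "AUTO", ov::device::priorities("TEMPLATE"));
    (void)compiled;
}
```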
-// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/memory_states.hpp" - -#include - -#include "functional_test_utils/plugin_cache.hpp" -#include "ov_models/builders.hpp" - -using namespace BehaviorTestsDefinitions; - -namespace { -std::vector memoryStateTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}), - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}})}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestVariableStateTest, - ::testing::ValuesIn(memoryStateTestCases), - InferRequestVariableStateTest::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp deleted file mode 100644 index 27a82693f28ff6..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/multithreading.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Multiconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestMultithreadingTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestMultithreadingTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp deleted file mode 100644 index baa0c4fe978c29..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/perf_counters.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -} // namespace diff --git 
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp deleted file mode 100644 index c1037519a72f8e..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/set_blob_by_type.hpp" - -#include "common_test_utils/test_constants.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace InferenceEngine; - -const std::vector BlobTypes = { - FuncTestUtils::BlobType::Compound, - FuncTestUtils::BlobType::Batched, - FuncTestUtils::BlobType::Memory, -}; - -const std::map autoConfig{ - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, - InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, - InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp deleted file mode 100644 index e1307f5092f6a5..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/wait.hpp" - -#include - -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -const std::vector> Autoconfigs = { - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp index 20cb407b3ee694..208c136c9681d2 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -7,8 +7,6 @@ using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; - namespace { // // Executable Network GetMetric diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp index 60d1c7b6a90e3b..b22383c2193543 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -3,7 +3,6 @@ // #include "behavior/compiled_model/compiled_model_base.hpp" -#include "ie_plugin_config.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp index f264a55c667a9f..290f7ede951130 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -1,10 +1,8 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include - #include "behavior/compiled_model/import_export.hpp" -#include "ie_plugin_config.hpp" +#include "common_test_utils/test_constants.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 981e8d66aa48b3..982e8cac50981b 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -4,8 +4,9 @@ #include "behavior/compiled_model/properties.hpp" -#include "ie_system_conf.h" +#include "ie_plugin_config.hpp" #include "openvino/runtime/properties.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace ov::test::behavior; @@ -23,8 +24,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, #if (defined(__APPLE__) || defined(_WIN32)) auto default_affinity = [] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else if (numaNodes.size() > 1) { @@ -35,7 +36,7 @@ auto default_affinity = [] { }(); #else auto default_affinity = [] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else { @@ -46,9 +47,6 @@ auto default_affinity = [] { const std::vector multi_properties = { {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::num_streams(ov::streams::AUTO)}, - {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, }; INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index 9ab07c0fce5e2b..32ab0eb2de956c 100644 --- 
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -4,9 +4,9 @@ #include "behavior/ov_plugin/caching_tests.hpp" -#include -#include -#include +#include "ov_ops/multiclass_nms_ie_internal.hpp" +#include "ov_ops/nms_ie_internal.hpp" +#include "ov_ops/nms_static_shape_ie.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index c5afda521a5ca1..0fe3abe27364f3 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -4,15 +4,13 @@ #include "behavior/ov_plugin/core_integration.hpp" -#include - #include "behavior/ov_plugin/core_integration_sw.hpp" #include "behavior/ov_plugin/query_model.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/core.hpp" +#include "openvino/runtime/properties.hpp" using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; // defined in plugin_name.cpp extern const char* cpu_plugin_file_name; diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 90cfa56407ca6f..50dcc1b0d4fc7f 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -4,10 +4,10 @@ #include "behavior/ov_plugin/properties_tests.hpp" -#include +#include "ie_plugin_config.hpp" +#include "openvino/runtime/auto/properties.hpp" using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; namespace { const std::vector multi_Auto_properties = { @@ -129,11 +129,11 @@ INSTANTIATE_TEST_SUITE_P( OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P( - OVCheckSetSupportedRWMandatoryMetricsPropsTests, + smoke_OVCheckSetSupportedRWMetricsPropsTests, OVCheckSetSupportedRWMetricsPropsTests, ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( - {ov::hint::model_priority.name(), ov::log::level.name()}))), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWOptionalPropertiesValues( + {ov::log::level.name()}))), OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; @@ -161,7 +161,7 @@ INSTANTIATE_TEST_SUITE_P( smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests, OVCheckSetSupportedRWMetricsPropsTests, ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( - {ov::hint::model_priority.name(), ov::log::level.name()}))), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWOptionalPropertiesValues( + {ov::log::level.name()}))), OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); } // namespace diff --git 
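The default-affinity lambda rewritten a few hunks back (in ov_executable_network/properties.cpp) swaps the 1.0 system-introspection helpers for their 2.0 twins; both live in `openvino/runtime/system_conf.hpp`. A minimal sketch of the replacement calls:

```cpp
#include <openvino/runtime/system_conf.hpp>

void affinity_inputs_sketch() {
    auto numa_nodes = ov::get_available_numa_nodes();   // was InferenceEngine::getAvailableNUMANodes()
    auto core_types = ov::get_available_cores_types();  // was InferenceEngine::getAvailableCoresTypes()
    (void)numa_nodes; (void)core_types;
}
```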
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp deleted file mode 100644 index bad8c61b42cf27..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/configuration_tests.hpp" - -#include "ie_plugin_config.hpp" -#include "ie_system_conf.h" - -using namespace BehaviorTestsDefinitions; - -namespace { -#if (defined(__APPLE__) || defined(_WIN32)) -auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else if (numaNodes.size() > 1) { - return std::string{CONFIG_VALUE(NUMA)}; - } else { - return std::string{CONFIG_VALUE(NO)}; - } -}()}; -#else -auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else { - return std::string{CONFIG_VALUE(YES)}; - } -}()}; -#endif - -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; - -const std::vector> conf = {{}}; - -const std::vector> MultiConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}}; - -const std::vector> AutoConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - 
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_WARNING}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - CorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs)), - CorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - CorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - CorrectConfigTests::getTestCaseName); - -const std::vector> multiinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - 
{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, -}; - -const std::vector> autoinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}}; - -const std::vector> multiconf = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - IncorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - IncorrectConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - IncorrectConfigAPITests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - 
IncorrectConfigAPITests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - IncorrectConfigAPITests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - -const std::vector> auto_multi_prop_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - -const std::vector> auto_multi_loadNetWork_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); -} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp deleted file mode 100644 index 29097f845f876d..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/core_integration.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -// defined in plugin_name.cpp -extern const char* cpu_plugin_file_name; - -namespace { -// -// IE Class Common tests with -// -// -// IE Class GetMetric -// - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, - IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("MULTI", "AUTO")); - -INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetConfigTest, - IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("MULTI", "AUTO")); -////////////////////////////////////////////////////////////////////////////////////////// -} // namespace diff --git 
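The configuration test file deleted just above enumerates 1.0 config keys whose 2.0 property counterparts already have coverage in the OV-API suites. The rough key-to-property correspondence, as I read the migration (not an exhaustive table):

```cpp
#include <openvino/runtime/properties.hpp>

ov::AnyMap modern_equivalents_sketch() {
    return {
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),  // KEY_PERFORMANCE_HINT
        ov::hint::num_requests(1),                             // KEY_PERFORMANCE_HINT_NUM_REQUESTS
        ov::enable_profiling(true),                            // KEY_PERF_COUNT
        ov::log::level(ov::log::Level::WARNING),               // KEY_LOG_LEVEL
        ov::hint::model_priority(ov::hint::Priority::MEDIUM),  // KEY_MODEL_PRIORITY
    };
}
```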
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp deleted file mode 100644 index 12553dbab98b03..00000000000000 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#ifdef __GLIBC__ -# include -# if __GLIBC_MINOR__ >= 34 -# define ENABLETESTMULTI -# endif -#else -# define ENABLETESTMULTI -#endif - -namespace { - -const Params params[] = { - std::tuple{ov::test::utils::DEVICE_TEMPLATE, {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}}, -#ifdef ENABLETESTMULTI - std::tuple{ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, - std::tuple{ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, -#endif -}; -} // namespace -/* -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTests, testing::ValuesIn(params), -CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(params), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); -*/ diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp index bf32bfb031b4b2..f85ece7c65e192 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -4,40 +4,18 @@ #include "functional_test_utils/skip_tests_config.hpp" -#include - #include #include -#include "ie_parallel.hpp" +#include "openvino/core/visibility.hpp" std::vector disabledTestPatterns() { std::vector retVector{ - // TODO: Issue: 43793 - R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)", - R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)", - - // Not expected behavior - R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", - R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", - // Not implemented yet: - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)", R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", - // TODO: CVS-104942 - R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", - R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", - - // CPU does not support dynamic rank - // Issue: CVS-66778 - R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", // unsupported metrics 
R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", @@ -49,22 +27,8 @@ std::vector disabledTestPatterns() { // AUTO does not support import / export R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", - - // New plugin API doesn't support changes of pre-processing - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", - // New plugin work with tensors, so it means that blob in old API can have different pointers - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)", - R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)", - // TODO Issue 100145 - R"(.*Behavior.*InferRequestIOBBlobTest.*canReallocateExternalBlobViaGet.*)", R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", - // Not expected behavior - R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)", - R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", // template plugin doesn't support this case R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"}; @@ -72,9 +36,5 @@ std::vector disabledTestPatterns() { // very time-consuming test retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); #endif - -#if defined(_WIN32) - retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); -#endif return retVector; } diff --git a/src/plugins/auto/tests/unit/CMakeLists.txt b/src/plugins/auto/tests/unit/CMakeLists.txt index ab9f4731fd86bf..895929c0e41975 100644 --- a/src/plugins/auto/tests/unit/CMakeLists.txt +++ b/src/plugins/auto/tests/unit/CMakeLists.txt @@ -22,7 +22,7 @@ ov_add_test_target( unit_test_utils ADD_CPPLINT DEPENDENCIES - template_extension + openvino_template_extension mock_engine ov_models LABELS diff --git a/src/plugins/auto/tests/unit/auto_unit_test.cpp b/src/plugins/auto/tests/unit/auto_unit_test.cpp index 139533bc378bba..e4c7e8135774cc 100644 --- a/src/plugins/auto/tests/unit/auto_unit_test.cpp +++ b/src/plugins/auto/tests/unit/auto_unit_test.cpp @@ -131,7 +131,7 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() { .WillByDefault(RETURN_MOCK_VALUE(supportedProps)); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(12)); std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; + std::vector gpuCability = {"FP32", "FP16", "BIN", "INT8"}; std::vector othersCability = {"FP32", "FP16"}; std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0"; std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1"; diff --git a/src/plugins/auto/tests/unit/key_network_priority_test.cpp b/src/plugins/auto/tests/unit/key_network_priority_test.cpp index 616f14040486b6..595731fe49ee52 100644 --- a/src/plugins/auto/tests/unit/key_network_priority_test.cpp +++ b/src/plugins/auto/tests/unit/key_network_priority_test.cpp @@ -44,7 +44,7 @@ class 
KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit void SetUp() override { std::tie(netPrecision, enableDevicePriority, PriorityConfigs) = GetParam(); sizeOfConfigs = static_cast(PriorityConfigs.size()); - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; + std::vector gpuCability = {"FP32", "FP16", "BIN"}; ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); diff --git a/src/plugins/auto/tests/unit/select_device_test.cpp b/src/plugins/auto/tests/unit/select_device_test.cpp index baef090b32459c..bfcd5e92fc7d38 100644 --- a/src/plugins/auto/tests/unit/select_device_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_test.cpp @@ -21,15 +21,13 @@ const std::vector fp32DeviceVector = {DGPU_INFO, IGPU_INFO, O const std::vector fp16DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector int8DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; std::map> devicesMap = {{"FP32", fp32DeviceVector}, {"FP16", fp16DeviceVector}, {"INT8", int8DeviceVector}, - {"BIN", binDeviceVector}, - {"BATCHED_BLOB", batchedblobDeviceVector}}; + {"BIN", binDeviceVector}}; const std::vector totalDevices = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector reverseTotalDevices = {CPU_INFO, OTHERS_INFO, IGPU_INFO, DGPU_INFO}; -const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN", "BATCHED_BLOB"}; +const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN"}; std::vector testConfigs; class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam { diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index 646d7403df47e7..412a5abc2c1d96 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -2,31 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "async_infer_request.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" +#include "ov_models/subgraph_builders.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; using AutoBatchRequestTestParams = std::tuple>(m_model, m_hardware_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout.name(), "200"}}; m_device_info = {"CPU", {}, m_batch_size}; @@ -211,13 +196,13 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam(workerRequestPtr->_tasks.size()); if (sz == workerRequestPtr->_batch_size) { - std::pair t; + std::pair t; 
for (int n = 0; n < sz; n++) { OPENVINO_ASSERT(workerRequestPtr->_tasks.try_pop(t)); workerRequestPtr->_completion_tasks[n] = std::move(t.second); t.first->m_sync_request->copy_inputs_if_needed(); t.first->m_sync_request->m_batched_request_status = - ov::autobatch_plugin::SyncInferRequest::eExecutionFlavor::BATCH_EXECUTED; + SyncInferRequest::eExecutionFlavor::BATCH_EXECUTED; } workerRequestPtr->_infer_request_batched->start_async(); } else if ((status == std::cv_status::timeout) && sz) { diff --git a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 39094d161393ca..71db54be4fe8ed 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -2,31 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using CreateInferRequestTestParams = std::tuple; // inferReq number @@ -94,7 +74,7 @@ class CompileModelCreateInferRequestTest : public ::testing::TestWithParam>(m_model, m_auto_batch_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout(static_cast(200))}}; m_device_info = {"CPU", {}, m_batch_size}; m_batched_inputs = {"Parameter_0"}; diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index b3fc8497c9f052..883cca1dcd5d5d 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -2,30 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using get_property_param = std::tuple; // Throw exception @@ -106,7 +86,7 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); std::string network_name = m_model.get()->get_name(); @@ -132,8 +112,8 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam 
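The hunk above exercises the auto-batch worker loop: requests accumulate on a queue, and once `_batch_size` of them are pending the worker fires a single batched `start_async()`, while a timeout flushes whatever has been collected so far. A condensed sketch of that gather-then-flush pattern (Task and BatchWorker are illustrative names, not the plugin's real types):

```cpp
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

using Task = std::function<void()>;

struct BatchWorker {
    size_t batch_size = 4;
    std::chrono::milliseconds timeout{200};
    std::mutex mtx;
    std::condition_variable cv;
    std::queue<Task> tasks;  // producers push here and notify cv

    void run_once(const std::function<void(std::vector<Task>&)>& start_batched) {
        std::unique_lock<std::mutex> lock(mtx);
        // Wait until a full batch is queued or the timeout elapses.
        bool full = cv.wait_for(lock, timeout, [&] { return tasks.size() >= batch_size; });
        std::vector<Task> batch;
        while (!tasks.empty() && batch.size() < batch_size) {
            batch.push_back(std::move(tasks.front()));
            tasks.pop();
        }
        if (full || !batch.empty())  // full batch, or a partial batch on timeout
            start_batched(batch);    // one batched submission for all collected tasks
    }
};
```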
res_config; - res_config.emplace_back("CACHE_DIR"); - res_config.emplace_back("OPTIMAL_BATCH_SIZE"); + res_config.emplace_back(ov::cache_dir.name()); + res_config.emplace_back(ov::optimal_batch_size.name()); return res_config; }); @@ -151,16 +131,15 @@ TEST_P(CompileModelGetPropertyTest, CompileModelGetPropertyTestCase) { } const std::vector compile_model_get_property_param_test = { - get_property_param{METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS), false}, - get_property_param{METRIC_KEY(NETWORK_NAME), false}, - get_property_param{METRIC_KEY(SUPPORTED_METRICS), false}, - get_property_param{METRIC_KEY(SUPPORTED_CONFIG_KEYS), false}, + get_property_param{ov::optimal_number_of_infer_requests.name(), false}, + get_property_param{ov::model_name.name(), false}, + get_property_param{ov::supported_properties.name(), false}, get_property_param{ov::execution_devices.name(), false}, - get_property_param{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), false}, + get_property_param{ov::device::priorities.name(), false}, get_property_param{ov::auto_batch_timeout.name(), false}, get_property_param{ov::cache_dir.name(), false}, // Config in dependent m_plugin - get_property_param{"OPTIMAL_BATCH_SIZE", false}, + get_property_param{ov::optimal_batch_size.name(), false}, // Incorrect Property get_property_param{"INCORRECT_METRIC", true}, get_property_param{"INCORRECT_CONFIG", true}, diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index f338e6dd3e610a..a8e83a4f5bd5ca 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" -#include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - class CompileModelGetRuntimeModelTest : public ::testing::Test { public: std::shared_ptr> m_core; @@ -89,7 +70,7 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { ON_CALL(*m_mock_i_compile_model.get(), get_runtime_model()).WillByDefault(Return(m_model)); - const ov::AnyMap configs = {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(16)"}}; + const ov::AnyMap configs = {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index ee03043a162c93..29801f05924ae1 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" -#include 
"openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using set_property_param = std::tuple; // Throw exception @@ -109,7 +90,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } @@ -123,7 +104,7 @@ TEST_P(CompileModelSetPropertyTest, CompileModelSetPropertyTestCase) { } const std::vector compile_model_set_property_param_test = { - set_property_param{{{CONFIG_KEY(AUTO_BATCH_TIMEOUT), std::uint32_t(100)}}, false}, + set_property_param{{{ov::auto_batch_timeout(static_cast(100))}}, false}, set_property_param{{{"INCORRECT_CONFIG", 2}}, true}, }; diff --git a/src/plugins/auto_batch/tests/unit/mock_common.hpp b/src/plugins/auto_batch/tests/unit/mock_common.hpp index 7ab113c06544d5..2a9f0230fb43e7 100644 --- a/src/plugins/auto_batch/tests/unit/mock_common.hpp +++ b/src/plugins/auto_batch/tests/unit/mock_common.hpp @@ -7,12 +7,15 @@ #include -#include "async_infer_request.hpp" #include "compiled_model.hpp" -#include "ie_icore.hpp" #include "openvino/runtime/make_tensor.hpp" #include "plugin.hpp" -#include "sync_infer_request.hpp" + +using ::testing::_; +using ::testing::MatcherCast; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::StrEq; using namespace ov::mock_autobatch_plugin; diff --git a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp index 565ac80a13643b..b6696f8d08c4f8 100644 --- a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp +++ b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp @@ -2,24 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using batch_device_config_params = std::tuple -#include - #include "mock_common.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using meta_device_params = std::tuple; // Throw exception const std::vector cpu_supported_properties = { - "CACHE_DIR", + ov::cache_dir.name(), }; const std::vector gpu_supported_properties = { - "CACHE_DIR", - "OPTIMAL_BATCH_SIZE", + ov::cache_dir.name(), + ov::optimal_batch_size.name(), }; class ParseMetaDeviceTest : public ::testing::TestWithParam { @@ -124,16 +108,16 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDeviceTestCase) { const std::vector meta_device_test_configs = { meta_device_params{"CPU(4)", {}, 
DeviceInformation{"CPU", {}, 4}, false}, meta_device_params{"CPU(4)", {{}}, DeviceInformation{"CPU", {{}}, 4}, true}, - meta_device_params{"CPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4}, false}, - meta_device_params{"GPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"GPU", {{"CACHE_DIR", "./"}}, 4}, false}, + meta_device_params{"CPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, false}, + meta_device_params{"GPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"GPU", {{ov::cache_dir("./")}}, 4}, false}, meta_device_params{"GPU(8)", - {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, - DeviceInformation{"GPU", {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, 8}, + {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, + DeviceInformation{"GPU", {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, 8}, false}, - meta_device_params{"CPU(4)", {{"OPTIMAL_BATCH_SIZE", "16"}}, DeviceInformation{"CPU", {{}}, 4}, true}, + meta_device_params{"CPU(4)", {{ov::optimal_batch_size.name(), "16"}}, DeviceInformation{"CPU", {{}}, 4}, true}, meta_device_params{"CPU(4)", - {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, - DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4}, + {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, + DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, true}, }; diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index ebc52426bfe504..9235bd62f73114 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -2,30 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - +#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; using plugin_compile_model_param = std::tuple&>(_), @@ -149,82 +131,82 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTe const std::vector plugin_compile_model_param_test = { // Case 1: explict apply batch size by config of AUTO_BATCH_DEVICE_CONFIG - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + 
{ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, 32}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU(32)")}}, 32}, // Case 2: CPU batch size is figured out by min of opt_batch_size and infReq_num // If config contains "PERFORMANCE_HINT_NUM_REQUESTS" - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 12}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(8)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(16)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(8)}, + {ov::hint::num_requests(16)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 8}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(8)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(2)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(8)}, + {ov::hint::num_requests(2)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU")}}, 1}, // Case 3: GPU batch size is figured out by // 1) min of opt_batch_size and infReq_num // 2) 
available_mem/one_graph_mem_footprint with power 2 // Final m_batch_size is the min of 1) and 2) - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "5000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(5000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 4}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "40960000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(40960000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 12}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(32)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(24)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "18000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(32)}, + {ov::hint::num_requests(24)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(18000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 16}, - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::THROUGHPUT}, - {"OPTIMAL_BATCH_SIZE", static_cast(32)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(48)}, - {"GPU_MEMORY_STATISTICS", "1000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "180000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, + {ov::optimal_batch_size.name(), static_cast(32)}, + {ov::hint::num_requests(48)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(180000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU")}}, 32}, // Case 4: - plugin_compile_model_param{{{"PERFORMANCE_HINT", ov::hint::PerformanceMode::LATENCY}, - {"OPTIMAL_BATCH_SIZE", static_cast(16)}, - {"PERFORMANCE_HINT_NUM_REQUESTS", static_cast(12)}, - {"GPU_MEMORY_STATISTICS", "1024000"}, - {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}}, - {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}}, + plugin_compile_model_param{{{ov::hint::performance_mode.name(), 
ov::hint::PerformanceMode::LATENCY}, + {ov::optimal_batch_size.name(), static_cast(16)}, + {ov::hint::num_requests(12)}, + {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, + {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, + {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, 32}, }; diff --git a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp index 5d259789333310..850bb4ee11f8a2 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp @@ -2,30 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using get_property_params = std::tuple; // Throw exception -const char supported_metric[] = "SUPPORTED_METRICS FULL_DEVICE_NAME SUPPORTED_CONFIG_KEYS"; -const char supported_config_keys[] = "AUTO_BATCH_DEVICE_CONFIG MULTI_DEVICE_PRIORITIES AUTO_BATCH_TIMEOUT CACHE_DIR"; - class GetPropertyTest : public ::testing::TestWithParam { public: std::string m_property_name; @@ -70,29 +51,18 @@ TEST_P(GetPropertyTest, GetPropertyTestCase) { } else { ov::Any value; ASSERT_NO_THROW(value = m_plugin->get_property(m_property_name, options)); - if (m_property_name == METRIC_KEY(SUPPORTED_METRICS)) { - EXPECT_EQ(value.as(), supported_metric); - return; - } if (m_property_name == ov::device::full_name.name()) { EXPECT_EQ(value.as(), "BATCH"); return; } - if (m_property_name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - EXPECT_EQ(value.as(), supported_config_keys); - return; - } } } const std::vector get_property_params_test = { - get_property_params{"AUTO_BATCH_TIMEOUT", false}, - get_property_params{"AUTO_BATCH_DEVICE_CONFIG", true}, - get_property_params{"CACHE_DIR", true}, - get_property_params{METRIC_KEY(SUPPORTED_METRICS), false}, - get_property_params{METRIC_KEY(SUPPORTED_CONFIG_KEYS), false}, - get_property_params{"CPU_THREADS_NUM", true}, - get_property_params{"PERFORMANCE_HINT", true}, + get_property_params{ov::auto_batch_timeout.name(), false}, + get_property_params{ov::device::priorities.name(), true}, + get_property_params{ov::cache_dir.name(), true}, + get_property_params{ov::hint::performance_mode.name(), true}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index d36945693bd51c..619c8ba7e8f65f 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -2,27 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" #include "common_test_utils/subgraph_builders/multi_single_conv.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using 
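The expected batch sizes in plugin_compile_model_test encode the deduction rule spelled out in the Case 2/Case 3 comments: take the min of the optimal batch size and the performance-hint request count, and on GPU cap it further by the largest power of two that fits in available memory divided by one graph's footprint. The final CPU case (opt 8, requests 2, result 1) hints at an extra small-batch fallback that the sketch below does not model; this is my reading of the test arithmetic, not the plugin's actual code:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

uint32_t deduce_batch(uint32_t optimal, uint32_t num_requests,
                      uint64_t total_mem, uint64_t footprint) {
    uint32_t batch = std::min(optimal, num_requests);
    if (footprint != 0) {  // GPU path: memory-derived cap
        uint32_t mem_cap = 1;
        while (mem_cap * 2 <= total_mem / footprint)
            mem_cap *= 2;  // largest power of two <= total_mem / footprint
        batch = std::min(batch, mem_cap);
    }
    return batch;
}

int main() {
    std::cout << deduce_batch(16, 12, 5000, 1000) << "\n";    // 4,  as in Case 3
    std::cout << deduce_batch(32, 24, 18000, 1000) << "\n";   // 16, as in Case 3
    std::cout << deduce_batch(32, 48, 180000, 1000) << "\n";  // 32, as in Case 3
    return 0;
}
```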
namespace ov::mock_autobatch_plugin; - using query_model_params = std::tuple; @@ -81,9 +65,9 @@ TEST_P(QueryModelTest, QueryModelTestCase) { const std::vector query_model_params_test = { query_model_params{{{}}, true}, - query_model_params{{{"AUTO_BATCH_TIMEOUT", "200"}}, true}, - query_model_params{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, - query_model_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, + query_model_params{{{ov::auto_batch_timeout(static_cast(200))}}, true}, + query_model_params{{{ov::device::priorities("CPU(4)")}}, false}, + query_model_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}}, false}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp index 28cc8e4dcf9e99..ef67def84bf216 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp @@ -2,24 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "mock_common.hpp" -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; - -using namespace ov::mock_autobatch_plugin; - using set_property_params = std::tuple; @@ -72,14 +56,14 @@ TEST_P(SetPropertyTest, SetPropertyTestCase) { } const std::vector plugin_set_property_params_test = { - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}}, false}, - set_property_params{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}}, false}, + set_property_params{{{ov::device::priorities("CPU(4)")}}, false}, set_property_params{{{"CACHE_DIR", "./xyz"}}, false}, - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, false}, - set_property_params{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}}, false}, + set_property_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}, {"CACHE_DIR", "./xyz"}}, false}, set_property_params{{{"XYZ", "200"}}, true}, - set_property_params{{{"XYZ", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}}, true}, + set_property_params{{{"XYZ", "200"}, {ov::device::priorities("CPU(4)")}, {"CACHE_DIR", "./xyz"}}, true}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index 6d2b0a32a2b5ac..3a7148459a78b9 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -2,31 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "sync_infer_request.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" #include "mock_common.hpp" -#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" +#include 
"ov_models/subgraph_builders.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" -#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" - -using ::testing::_; -using ::testing::AnyNumber; -using ::testing::AtLeast; -using ::testing::Eq; -using ::testing::MatcherCast; -using ::testing::Matches; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::ReturnRef; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Throw; using AutoBatchRequestTestParams = std::tuple; // data type @@ -108,7 +93,7 @@ class AutoBatchRequestTest : public ::testing::TestWithParam>(m_model, m_auto_batch_plugin); m_compile_model_without_batch = {m_i_compile_model_without_batch, {}}; - m_config = {{"AUTO_BATCH_TIMEOUT", "200"}}; + m_config = {{ov::auto_batch_timeout(static_cast(200))}}; m_device_info = {"CPU", {}, m_batch_size}; m_batched_inputs = {"Parameter_0"}; diff --git a/src/plugins/hetero/src/compiled_model.cpp b/src/plugins/hetero/src/compiled_model.cpp index 0edddab6c5cfbc..35e587e85f4d75 100644 --- a/src/plugins/hetero/src/compiled_model.cpp +++ b/src/plugins/hetero/src/compiled_model.cpp @@ -8,7 +8,6 @@ #include "async_infer_request.hpp" #include "graph_debug_dump.hpp" -#include "ie_plugin_config.hpp" #include "itt.hpp" #include "op/device_subgraph.hpp" #include "openvino/op/util/op_types.hpp" @@ -136,11 +135,12 @@ void ov::hetero::CompiledModel::compile_model(const std::shared_ptr& ov::hetero::CompiledModel::CompiledModel(std::istream& model, const std::shared_ptr& plugin, - const Configuration& cfg) + const Configuration& cfg, + const bool loaded_from_cache) : ov::ICompiledModel(nullptr, plugin), m_cfg(cfg), m_name(), - m_loaded_from_cache(true) { + m_loaded_from_cache(loaded_from_cache) { std::string heteroXmlStr; std::getline(model, heteroXmlStr); @@ -267,7 +267,6 @@ std::shared_ptr ov::hetero::CompiledModel::get_hetero_ } ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { - OPENVINO_SUPPRESS_DEPRECATED_START const auto& add_ro_properties = [](const std::string& name, std::vector& properties) { properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO}); }; @@ -279,13 +278,6 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { ov::hetero::number_of_submodels}; return ro_properties; }; - const auto& to_string_vector = [](const std::vector& properties) { - std::vector ret; - for (const auto& property : properties) { - ret.emplace_back(property); - } - return ret; - }; if (ov::supported_properties == name) { auto supported_properties = default_ro_properties(); @@ -293,13 +285,6 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { add_ro_properties(ov::device::properties.name(), supported_properties); add_ro_properties(ov::device::priorities.name(), supported_properties); return decltype(ov::supported_properties)::value_type(supported_properties); - } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { - auto metrics = default_ro_properties(); - add_ro_properties(METRIC_KEY(SUPPORTED_METRICS), metrics); - add_ro_properties(METRIC_KEY(SUPPORTED_CONFIG_KEYS), metrics); - return to_string_vector(metrics); - } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - return to_string_vector(m_cfg.get_supported()); } else if (ov::device::properties == name) { ov::AnyMap all_devices = {}; for (const auto& comp_model_desc : m_compiled_submodels) { @@ -339,7 +324,6 @@ ov::Any 
ov::hetero::CompiledModel::get_property(const std::string& name) const { return decltype(ov::hetero::number_of_submodels)::value_type{m_compiled_submodels.size()}; } return m_cfg.get(name); - OPENVINO_SUPPRESS_DEPRECATED_END } const std::vector>& ov::hetero::CompiledModel::inputs() const { diff --git a/src/plugins/hetero/src/compiled_model.hpp b/src/plugins/hetero/src/compiled_model.hpp index 934459f157b31c..09e6ce948bdae4 100644 --- a/src/plugins/hetero/src/compiled_model.hpp +++ b/src/plugins/hetero/src/compiled_model.hpp @@ -21,7 +21,10 @@ class CompiledModel : public ov::ICompiledModel { const std::shared_ptr& plugin, const Configuration& cfg); - CompiledModel(std::istream& model, const std::shared_ptr& plugin, const Configuration& cfg); + CompiledModel(std::istream& model, + const std::shared_ptr& plugin, + const Configuration& cfg, + const bool loaded_from_cache); void export_model(std::ostream& model) const override; diff --git a/src/plugins/hetero/src/config.cpp b/src/plugins/hetero/src/config.cpp index 35cae2b56d87da..d182a684d9e4c1 100644 --- a/src/plugins/hetero/src/config.cpp +++ b/src/plugins/hetero/src/config.cpp @@ -4,25 +4,21 @@ #include "config.hpp" -#include "ie/ie_plugin_config.hpp" #include "openvino/runtime/internal_properties.hpp" #include "openvino/runtime/properties.hpp" using namespace ov::hetero; -Configuration::Configuration() : dump_graph(false) {} +Configuration::Configuration() {} Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) { - OPENVINO_SUPPRESS_DEPRECATED_START *this = defaultCfg; for (const auto& it : config) { const auto& key = it.first; const auto& value = it.second; - if (HETERO_CONFIG_KEY(DUMP_GRAPH_DOT) == key) { - dump_graph = value.as(); - } else if ("TARGET_FALLBACK" == key || ov::device::priorities == key) { + if (ov::device::priorities == key) { device_priorities = value.as(); } else { if (throwOnUnsupported) @@ -30,36 +26,23 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defa device_properties.emplace(key, value); } } - OPENVINO_SUPPRESS_DEPRECATED_END } ov::Any Configuration::get(const std::string& name) const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)) { - return {dump_graph}; - } else if (name == "TARGET_FALLBACK" || name == ov::device::priorities) { + if (name == ov::device::priorities) { return {device_priorities}; } else { OPENVINO_THROW("Property was not found: ", name); } - OPENVINO_SUPPRESS_DEPRECATED_END } std::vector Configuration::get_supported() const { - OPENVINO_SUPPRESS_DEPRECATED_START - static const std::vector names = {HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), - "TARGET_FALLBACK", - ov::device::priorities}; + static const std::vector names = {ov::device::priorities}; return names; - OPENVINO_SUPPRESS_DEPRECATED_END } ov::AnyMap Configuration::get_hetero_properties() const { - OPENVINO_SUPPRESS_DEPRECATED_START - return {{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), dump_graph}, - {"TARGET_FALLBACK", device_priorities}, - {ov::device::priorities.name(), device_priorities}}; - OPENVINO_SUPPRESS_DEPRECATED_END + return {{ov::device::priorities.name(), device_priorities}}; } ov::AnyMap Configuration::get_device_properties() const { @@ -67,8 +50,5 @@ ov::AnyMap Configuration::get_device_properties() const { } bool Configuration::dump_dot_files() const { - bool res = dump_graph; - if (std::getenv("OPENVINO_HETERO_VISUALIZE")) - res = true; - return res; + return std::getenv("OPENVINO_HETERO_VISUALIZE") != 
NULL; } \ No newline at end of file diff --git a/src/plugins/hetero/src/config.hpp b/src/plugins/hetero/src/config.hpp index 65606011976cb3..92878e9c785782 100644 --- a/src/plugins/hetero/src/config.hpp +++ b/src/plugins/hetero/src/config.hpp @@ -35,9 +35,6 @@ struct Configuration { std::string device_priorities; ov::AnyMap device_properties; - -private: - bool dump_graph; }; } // namespace hetero } // namespace ov \ No newline at end of file diff --git a/src/plugins/hetero/src/graph_debug_dump.hpp b/src/plugins/hetero/src/graph_debug_dump.hpp index 388d2906a21f17..14e153290a3362 100644 --- a/src/plugins/hetero/src/graph_debug_dump.hpp +++ b/src/plugins/hetero/src/graph_debug_dump.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/openvino.hpp" namespace ov { namespace hetero { diff --git a/src/plugins/hetero/src/plugin.cpp b/src/plugins/hetero/src/plugin.cpp index 7c9a63265e8979..91eceb66da4a81 100644 --- a/src/plugins/hetero/src/plugin.cpp +++ b/src/plugins/hetero/src/plugin.cpp @@ -13,7 +13,6 @@ #include #include "compiled_model.hpp" -#include "ie/ie_plugin_config.hpp" #include "itt.hpp" #include "openvino/runtime/device_id_parser.hpp" #include "openvino/runtime/internal_properties.hpp" @@ -51,8 +50,17 @@ std::shared_ptr<ov::ICompiledModel> ov::hetero::Plugin::import_model(std::istream& model, const ov::AnyMap& properties) const { OV_ITT_SCOPED_TASK(itt::domains::Hetero, "Plugin::import_model"); - auto config = Configuration{properties, m_cfg}; - auto compiled_model = std::make_shared<CompiledModel>(model, shared_from_this(), config); + // Check the ov::loaded_from_cache property and erase it, since it is no longer needed. + auto _properties = properties; + const auto& it = _properties.find(ov::loaded_from_cache.name()); + bool loaded_from_cache = false; + if (it != _properties.end()) { + loaded_from_cache = it->second.as<bool>(); + _properties.erase(it); + } + + auto config = Configuration{_properties, m_cfg}; + auto compiled_model = std::make_shared<CompiledModel>(model, shared_from_this(), config, loaded_from_cache); return compiled_model; } @@ -120,11 +128,6 @@ void ov::hetero::Plugin::set_property(const ov::AnyMap& properties) { } ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyMap& properties) const { - OPENVINO_SUPPRESS_DEPRECATED_START - const auto& add_ro_properties = [](const std::string& name, std::vector<ov::PropertyName>& properties) { - properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO}); - }; - const auto& default_ro_properties = []() { std::vector<ov::PropertyName> ro_properties{ov::supported_properties, ov::device::full_name, @@ -135,25 +138,9 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyMap& properties) const { std::vector<ov::PropertyName> rw_properties{ov::device::priorities}; return rw_properties; }; - const auto& to_string_vector = [](const std::vector<ov::PropertyName>& properties) { - std::vector<std::string> ret; - for (const auto& property : properties) { - ret.emplace_back(property); - } - return ret; - }; Configuration full_config{properties, m_cfg}; - if (METRIC_KEY(SUPPORTED_METRICS) == name) { - auto metrics = default_ro_properties(); - - add_ro_properties(METRIC_KEY(SUPPORTED_METRICS), metrics); - add_ro_properties(METRIC_KEY(SUPPORTED_CONFIG_KEYS), metrics); - add_ro_properties(METRIC_KEY(IMPORT_EXPORT_SUPPORT), metrics); - return to_string_vector(metrics); - } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - return to_string_vector(full_config.get_supported()); - } else if (ov::supported_properties == name) { auto ro_properties = 
default_ro_properties(); auto rw_properties = default_rw_properties(); @@ -167,8 +154,6 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyM ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}; } else if (ov::device::full_name == name) { return decltype(ov::device::full_name)::value_type{get_device_name()}; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { return decltype(ov::internal::caching_properties)::value_type{ov::hetero::caching_device_properties.name()}; } else if (ov::hetero::caching_device_properties == name) { @@ -178,7 +163,6 @@ ov::Any ov::hetero::Plugin::get_property(const std::string& name, const ov::AnyM } else { return full_config.get(name); } - OPENVINO_SUPPRESS_DEPRECATED_END } ov::Any ov::hetero::Plugin::caching_device_properties(const std::string& device_priorities) const { diff --git a/src/plugins/hetero/tests/functional/hetero_tests.cpp b/src/plugins/hetero/tests/functional/hetero_tests.cpp index 55440556f0552b..3e2a64ee103584 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.cpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.cpp @@ -8,7 +8,6 @@ #include #include "common_test_utils/file_utils.hpp" -#include "ie_plugin_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/opsets/opset11.hpp" @@ -593,7 +592,6 @@ class MockPluginReshape : public MockPluginBase { RO_property(ov::available_devices.name()), RO_property(ov::loaded_from_cache.name()), RO_property(ov::device::uuid.name()), - RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)), }; // the whole config is RW before network is loaded. const static std::vector rwProperties{ @@ -635,23 +633,9 @@ class MockPluginReshape : public MockPluginBase { std::vector capabilities; capabilities.push_back(ov::device::capability::EXPORT_IMPORT); return decltype(ov::device::capabilities)::value_type(capabilities); - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { std::vector caching_properties = {ov::device::uuid}; return decltype(ov::internal::caching_properties)::value_type(caching_properties); - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } else if (name == ov::loaded_from_cache.name()) { return m_loaded_from_cache; } else if (name == ov::enable_profiling.name()) { @@ -695,7 +679,6 @@ class MockPluginSubtract : public MockPluginBase { RO_property(ov::available_devices.name()), RO_property(ov::loaded_from_cache.name()), RO_property(ov::device::uuid.name()), - RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)), }; // the whole config is RW before network is loaded. 
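The hetero import_model change above is a consume-on-read pattern: the core injects ov::loaded_from_cache into the property map, so the plugin reads the flag, forwards it to the CompiledModel constructor, and erases the key before the remaining properties reach Configuration parsing. A stripped-down sketch of just that step ("SOME_PLUGIN_KEY" is a hypothetical placeholder):

```cpp
#include <openvino/core/any.hpp>
#include <openvino/runtime/properties.hpp>

#include <cassert>
#include <string>

int main() {
    // "SOME_PLUGIN_KEY" stands in for whatever real keys the plugin would receive.
    ov::AnyMap properties = {{ov::loaded_from_cache.name(), true},
                             {"SOME_PLUGIN_KEY", std::string("value")}};

    bool loaded_from_cache = false;
    auto it = properties.find(ov::loaded_from_cache.name());
    if (it != properties.end()) {
        loaded_from_cache = it->second.as<bool>();
        properties.erase(it);  // consumed: must not leak into the plugin config
    }

    assert(loaded_from_cache);
    assert(properties.count(ov::loaded_from_cache.name()) == 0);
    return 0;
}
```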
const static std::vector rwProperties{ @@ -737,23 +720,9 @@ class MockPluginSubtract : public MockPluginBase { return m_loaded_from_cache; } else if (name == ov::enable_profiling.name()) { return decltype(ov::enable_profiling)::value_type{m_profiling}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) { - return true; } else if (ov::internal::caching_properties == name) { std::vector caching_properties = {ov::device::uuid}; return decltype(ov::internal::caching_properties)::value_type(caching_properties); - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; } OPENVINO_THROW("Unsupported property: ", name); } diff --git a/src/plugins/hetero/tests/functional/properties_tests.cpp b/src/plugins/hetero/tests/functional/properties_tests.cpp index d72a4237134cdd..b4518d5707218f 100644 --- a/src/plugins/hetero/tests/functional/properties_tests.cpp +++ b/src/plugins/hetero/tests/functional/properties_tests.cpp @@ -21,35 +21,6 @@ TEST_F(HeteroTests, get_property_supported_properties) { } } -TEST_F(HeteroTests, get_property_supported_metrics) { - const std::vector supported_metrics = {ov::supported_properties.name(), - ov::device::full_name.name(), - ov::device::capabilities.name(), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(IMPORT_EXPORT_SUPPORT)}; - auto actual_supported_metrics = - core.get_property("HETERO", METRIC_KEY(SUPPORTED_METRICS)).as>(); - EXPECT_EQ(supported_metrics.size(), actual_supported_metrics.size()); - for (auto& supported_metric : supported_metrics) { - ASSERT_TRUE(std::find(actual_supported_metrics.begin(), actual_supported_metrics.end(), supported_metric) != - actual_supported_metrics.end()); - } -} - -TEST_F(HeteroTests, get_property_supported_configs) { - const std::vector supported_configs = {"HETERO_DUMP_GRAPH_DOT", - "TARGET_FALLBACK", - ov::device::priorities.name()}; - auto actual_supported_configs = - core.get_property("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); - EXPECT_EQ(supported_configs.size(), actual_supported_configs.size()); - for (auto& supported_config : supported_configs) { - ASSERT_TRUE(std::find(actual_supported_configs.begin(), actual_supported_configs.end(), supported_config) != - actual_supported_configs.end()); - } -} - TEST_F(HeteroTests, get_property_internal_supported_properties) { const std::vector supported_properties = {ov::internal::caching_properties}; auto actual_supported_properties = core.get_property("HETERO", ov::internal::supported_properties); @@ -71,8 +42,4 @@ TEST_F(HeteroTests, set_property_device_priorities) { EXPECT_EQ("", core.get_property("HETERO", ov::device::priorities)); core.set_property("HETERO", ov::device::priorities("MOCK0,MOCK1")); EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", ov::device::priorities)); - EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", "TARGET_FALLBACK").as()); - core.set_property("HETERO", {{"TARGET_FALLBACK", "MOCK1,MOCK0"}}); - EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", ov::device::priorities)); - EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", "TARGET_FALLBACK").as()); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp 
b/src/plugins/intel_cpu/src/compiled_model.cpp index f81f59f94ae418..e11445781e34e8 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -1,7 +1,6 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "ie_metric_helpers.hpp" // must be included first #include "compiled_model.h" #include "async_infer_request.h" @@ -164,35 +163,6 @@ std::shared_ptr CompiledModel::get_runtime_model() const { return get_graph()._graph.dump(); } -ov::Any CompiledModel::get_metric_legacy(const std::string& name, const GraphGuard& graph) const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (name == METRIC_KEY(NETWORK_NAME)) { - IE_SET_METRIC_RETURN(NETWORK_NAME, graph.dump()->get_friendly_name()); - } else if (name == METRIC_KEY(SUPPORTED_METRICS)) { - std::vector metrics; - metrics.push_back(METRIC_KEY(NETWORK_NAME)); - metrics.push_back(METRIC_KEY(SUPPORTED_METRICS)); - metrics.push_back(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - metrics.push_back(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); - IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics); - } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - std::vector configKeys; - for (auto&& key : graph.getConfig()._config) { - configKeys.push_back(key.first); - } - IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys); - } else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) { - Config engConfig = graph.getConfig(); - auto option = engConfig._config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)); - OPENVINO_ASSERT(option != engConfig._config.end()); - auto streams = std::stoi(option->second); - IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast(streams ? streams : 1)); - } else { - OPENVINO_THROW("Unsupported property: ", name); - } - OPENVINO_SUPPRESS_DEPRECATED_END -} - ov::Any CompiledModel::get_property(const std::string& name) const { if (m_graphs.empty()) OPENVINO_THROW("No graph was found"); @@ -298,9 +268,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type( config.fcSparseWeiDecompressionRate); } - /* Internally legacy parameters are used with new API as part of migration procedure. 
- * This fallback can be removed as soon as migration completed */ - return get_metric_legacy(name, graph); + OPENVINO_THROW("Unsupported property: ", name); } void CompiledModel::export_model(std::ostream& modelStream) const { diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index d11ece0e8c2aea..d1527ef9202603 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -25,7 +25,7 @@ class CompiledModel : public ov::ICompiledModel { CompiledModel(const std::shared_ptr& model, const std::shared_ptr& plugin, const Config& cfg, - const bool loaded_from_cache = false); + const bool loaded_from_cache); std::shared_ptr create_infer_request() const override; @@ -73,8 +73,6 @@ class CompiledModel : public ov::ICompiledModel { * even from main thread */ GraphGuard::Lock get_graph() const; - - ov::Any get_metric_legacy(const std::string& name, const GraphGuard& graph) const; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 8064339682160c..6b84bb5a3283e1 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -197,11 +197,6 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { ov::internal::exclusive_async_requests.name(), ". Expected only true/false"); } - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key.compare(InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT) == 0) { - // empty string means that dumping is switched off - dumpToDot = val.as(); - OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::intel_cpu::lp_transforms_mode.name()) { try { lpTransformsMode = val.as() ? LPTransformsMode::On : LPTransformsMode::Off; @@ -217,29 +212,6 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (!device_id.empty()) { OPENVINO_THROW("CPU plugin supports only '' as device id"); } - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key == InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16) { - bool enable; - try { - enable = val.as(); - } catch (ov::Exception&) { - OPENVINO_THROW("Wrong value ", - val.as(), - " for property key ", - key, - ". 
Expected only true/false"); - } - if (enable) { - if (hasHardwareSupport(ov::element::bf16)) { - inferencePrecision = ov::element::bf16; - } else { - OPENVINO_THROW("Platform doesn't support BF16 format"); - } - } else { - inferencePrecision = ov::element::f32; - } - inferencePrecisionSetExplicitly = true; - OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::hint::inference_precision.name()) { try { auto const prec = val.as(); @@ -391,21 +363,6 @@ void Config::updateProperties() { _config.insert({ov::hint::performance_mode.name(), ov::util::to_string(hintPerfMode)}); _config.insert({ov::hint::num_requests.name(), std::to_string(hintNumRequests)}); - - OPENVINO_SUPPRESS_DEPRECATED_START - if (inferencePrecision == ov::element::bf16) { - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}); - } else { - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}); - } - _config.insert({InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - std::to_string(streamExecutorConfig._streams)}); - _config.insert( - {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads)}); - _config.insert({InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot}); - OPENVINO_SUPPRESS_DEPRECATED_END } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/cpu_memory.h b/src/plugins/intel_cpu/src/cpu_memory.h index 613b49976b9ce2..a0e8889852502a 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.h +++ b/src/plugins/intel_cpu/src/cpu_memory.h @@ -5,6 +5,7 @@ #pragma once #include "memory_desc/dnnl_memory_desc.h" +#include "openvino/core/type/element_type_traits.hpp" /** * @file contains a concept classes to work with memory/tensor/blob abstractions on plugin level. diff --git a/src/plugins/intel_cpu/src/cpu_shape.cpp b/src/plugins/intel_cpu/src/cpu_shape.cpp index 3c87d6668d84d2..b130902db00c87 100644 --- a/src/plugins/intel_cpu/src/cpu_shape.cpp +++ b/src/plugins/intel_cpu/src/cpu_shape.cpp @@ -49,5 +49,28 @@ std::string Shape::toString() const { return output.str(); } +Shape mergeShapes(const Shape& lhs, const Shape& rhs) { + OPENVINO_ASSERT(lhs.getRank() == rhs.getRank(), + "Couldn't merge shapes of different ranks: shape 1:", + lhs.toString(), + " shape 2: ", + rhs.toString()); + + const auto& lhsMinDims = lhs.getMinDims(); + const auto& lhsMaxDims = lhs.getMaxDims(); + const auto& rhsMinDims = rhs.getMinDims(); + const auto& rhsMaxDims = rhs.getMaxDims(); + + VectorDims resultMinDims(lhsMinDims.size()); + VectorDims resultMaxDims(lhsMaxDims.size()); + + for (size_t i = 0; i < resultMinDims.size(); ++i) { + resultMinDims[i] = std::max(lhsMinDims[i], rhsMinDims[i]); + resultMaxDims[i] = std::min(lhsMaxDims[i], rhsMaxDims[i]); + OPENVINO_ASSERT(resultMinDims[i] <= resultMaxDims[i], "Couldn't merge shapes as the dims intervals are not overlapping."); + } + return Shape{resultMinDims, resultMaxDims}; +} + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/cpu_shape.h b/src/plugins/intel_cpu/src/cpu_shape.h index 1c5b48f7c458c6..7623aa12757663 100644 --- a/src/plugins/intel_cpu/src/cpu_shape.h +++ b/src/plugins/intel_cpu/src/cpu_shape.h @@ -218,5 +218,17 @@ class Shape { VectorDims dims; }; +/** + * @brief Merges two shapes overlapping their dims intervals. + * @note When one of the dims intervals are not overlapped an exception is thrown. 
+ * @param lhs + * first shape + * @param rhs + * second shape + * @return resulting shape + */ + +Shape mergeShapes(const Shape& lhs, const Shape& rhs); + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp index 4cc31959224581..243ce573811d3d 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp @@ -12,6 +12,10 @@ #include +#ifdef SNIPPETS_DEBUG_CAPS +#include "emitters/snippets/x64/verbose.hpp" +#endif + namespace ov { namespace intel_cpu { @@ -50,6 +54,14 @@ class jit_emitter : public ov::snippets::Emitter { */ static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); +#ifdef SNIPPETS_DEBUG_CAPS + const char *info() const { + if (!info_.is_initialized()) + info_.init(this); + return info_.c_str(); + } +#endif + protected: virtual size_t aux_gprs_count() const; @@ -138,6 +150,11 @@ class jit_emitter : public ov::snippets::Emitter { void internal_call_rsp_align() const; void internal_call_rsp_restore() const; +#ifdef SNIPPETS_DEBUG_CAPS + mutable jit_emitter_info_t info_; + friend class jit_debug_emitter; +#endif + private: mutable std::vector preserved_vec_idxs; mutable std::vector preserved_gpr_idxs; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp new file mode 100644 index 00000000000000..b7c51539861ff8 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#ifdef SNIPPETS_DEBUG_CAPS + +#include "debug_caps_config.hpp" + +namespace ov { +namespace intel_cpu { + +void SnippetsDebugCapsConfig::readProperties() { + auto readEnv = [](const char* envVar) { + const char* env = std::getenv(envVar); + if (env && *env) + return env; + + return (const char*)nullptr; + }; + + enable_segfault_detector = readEnv("OV_CPU_SNIPPETS_SEGFAULT_DETECTOR") ? 
true : false; +} + +} // namespace intel_cpu +} // namespace ov + +#endif // SNIPPETS_DEBUG_CAPS diff --git a/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp new file mode 100644 index 00000000000000..14dcae0ddf0c69 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/utils/debug_caps_config.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#ifdef SNIPPETS_DEBUG_CAPS + +#pragma once + +#include +#include + +namespace ov { +namespace intel_cpu { + +class SnippetsDebugCapsConfig { +public: + SnippetsDebugCapsConfig() { + readProperties(); + } + + bool enable_segfault_detector; + +private: + void readProperties(); +}; + +} // namespace intel_cpu +} // namespace ov + +#endif // SNIPPETS_DEBUG_CAPS diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp index 85061d906c05af..4a9f158d1e701e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp @@ -34,10 +34,62 @@ #include "emitters/snippets/x64/jit_perf_count_chrono_emitters.hpp" #include "emitters/snippets/x64/jit_perf_count_rdtsc_emitters.hpp" #include "transformations/snippets/x64/op/perf_count_rdtsc.hpp" +#include "emitters/snippets/x64/jit_debug_emitter.hpp" +#include "emitters/snippets/x64/jit_segfault_detector_emitter.hpp" +#include "emitters/snippets/x64/verbose.hpp" #endif namespace ov { +#ifdef SNIPPETS_DEBUG_CAPS +static bool is_load_emitter(const intel_cpu::jit_emitter *emitter) { + bool ret = false; + if (dynamic_cast(emitter) || + dynamic_cast(emitter) || + dynamic_cast(emitter)) { + return true; + } + return ret; +} + +static bool is_store_emitter(const intel_cpu::jit_emitter *emitter) { + bool ret = false; + if (dynamic_cast(emitter) || + dynamic_cast(emitter)) { + return true; + } + return ret; +} + +static bool is_segfault_detector_emitter(const intel_cpu::jit_emitter *emitter) { + // default active for typical tensor memory access emitters + bool ret = false; + ret = is_load_emitter(emitter) || + is_store_emitter(emitter) || + dynamic_cast(emitter) || + dynamic_cast(emitter) || + dynamic_cast(emitter); + return ret; + // use below code to active all emitters for extend usage + // return !dynamic_cast(emitter); +} + +#define CREATE_SNIPPETS_EMITTER(e_type) { \ + [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr { \ + auto emitter = std::make_shared(h.get(), isa, expr); \ + if (debug_config.enable_segfault_detector && is_segfault_detector_emitter(emitter.get())) { \ + auto segfault_emitter = std::make_shared(h.get(), isa, emitter.get(), \ + is_load_emitter(emitter.get()), is_store_emitter(emitter.get()), expr->get_node()->get_friendly_name()); \ + return std::make_shared(emitter, segfault_emitter, jit_debug_emitter::EmissionLocation::preamble); \ + } else { \ + return emitter; \ + } \ + }, \ + [](const std::shared_ptr& n) -> std::set> { \ + return e_type::get_supported_precisions(n); \ + } \ +} +#else #define CREATE_SNIPPETS_EMITTER(e_type) { \ [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr { \ return std::make_shared(h.get(), isa, expr); \ @@ -46,6 +98,7 @@ namespace ov { return e_type::get_supported_precisions(n); \ } \ } +#endif #define CREATE_CPU_EMITTER(e_type) { \ [this](const snippets::lowered::ExpressionPtr& expr) -> 
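When SNIPPETS_DEBUG_CAPS is enabled, the CREATE_SNIPPETS_EMITTER branch above decorates qualifying emitters: the real emitter keeps doing the work, while a jit_debug_emitter wrapper injects segfault-detector code into the preamble. A simplified stand-in for that decorator arrangement (the real classes live in jit_debug_emitter.hpp and jit_segfault_detector_emitter.hpp; these types are illustrative):

```cpp
#include <iostream>
#include <memory>

struct Emitter {
    virtual ~Emitter() = default;
    virtual void emit() const = 0;
};

struct LoadEmitter : Emitter {
    void emit() const override { std::cout << "emit load\n"; }
};

struct DebugEmitter : Emitter {
    explicit DebugEmitter(std::shared_ptr<Emitter> inner) : inner_(std::move(inner)) {}
    void emit() const override {
        std::cout << "emit segfault-detector preamble\n";  // guard code first
        inner_->emit();                                    // then the wrapped emitter
    }
    std::shared_ptr<Emitter> inner_;
};

std::shared_ptr<Emitter> make_emitter(bool segfault_detector) {
    auto emitter = std::make_shared<LoadEmitter>();
    if (segfault_detector)
        return std::make_shared<DebugEmitter>(emitter);  // wrap only when enabled
    return emitter;
}

int main() { make_emitter(true)->emit(); }
```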
std::shared_ptr { \ @@ -212,22 +265,25 @@ std::shared_ptr intel_cpu::CPUGenerator::clone() const { return std::make_shared(cpu_target_machine->get_isa()); } -snippets::Generator::opRegType intel_cpu::CPUGenerator::get_specific_op_reg_type(const std::shared_ptr& op) const { +ov::snippets::RegType intel_cpu::CPUGenerator::get_specific_op_out_reg_type(const ov::Output& out) const { + const auto op = out.get_node_shared_ptr(); if (std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) - return gpr2gpr; + return ov::snippets::RegType::gpr; else if ( std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) - return vec2vec; + return ov::snippets::RegType::vec; else OPENVINO_THROW("Register type of the operation " + std::string(op->get_type_name()) + " isn't determined!"); } + bool intel_cpu::CPUGenerator::uses_precompiled_kernel(const std::shared_ptr& e) const { bool need = std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e); #ifdef SNIPPETS_DEBUG_CAPS - need = need || + const auto cpu_target_machine = std::dynamic_pointer_cast(target); + need = need || (cpu_target_machine && cpu_target_machine->debug_config.enable_segfault_detector) || std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e) || std::dynamic_pointer_cast(e) || diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp index 41131f1d4eb640..6eafd3cb04771c 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp @@ -9,6 +9,10 @@ #include "snippets/target_machine.hpp" #include "snippets/generator.hpp" +#ifdef SNIPPETS_DEBUG_CAPS +#include "emitters/snippets/utils/debug_caps_config.hpp" +#endif + namespace ov { namespace intel_cpu { @@ -29,6 +33,9 @@ class CPUTargetMachine : public snippets::TargetMachine { snippets::CompiledSnippetPtr get_snippet() override; size_t get_lanes() const override; dnnl::impl::cpu::x64::cpu_isa_t get_isa() const; +#ifdef SNIPPETS_DEBUG_CAPS + SnippetsDebugCapsConfig debug_config; +#endif private: std::unique_ptr h; @@ -41,8 +48,8 @@ class CPUGenerator : public snippets::Generator { std::shared_ptr clone() const override; protected: + ov::snippets::RegType get_specific_op_out_reg_type(const ov::Output& out) const override; bool uses_precompiled_kernel(const std::shared_ptr& emitter) const override; - opRegType get_specific_op_reg_type(const std::shared_ptr& op) const override; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp index 28e13f6fc33ee3..f11c1e84c29733 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp @@ -47,6 +47,10 @@ class jit_brgemm_copy_b_emitter : public jit_emitter { size_t m_in_offset = 0lu; size_t m_out_offset = 0lu; size_t m_comp_offset = 0lu; + +#ifdef SNIPPETS_DEBUG_CAPS + friend std::string init_info_jit_brgemm_copy_b_emitter(const jit_brgemm_copy_b_emitter *emitter); +#endif }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp index 34214e31ad4ca8..7e05ffa43a8cc4 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp +++ 
b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp @@ -55,6 +55,10 @@ class jit_brgemm_emitter : public jit_emitter { size_t m_store_offset_c = 0lu; std::vector io_data_size {}; + +#ifdef SNIPPETS_DEBUG_CAPS + friend std::string init_info_jit_brgemm_emitter(const jit_brgemm_emitter *emitter); +#endif }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp index 7181270f5a56aa..c39f7a7fb493ed 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_container_emitter.cpp @@ -21,51 +21,36 @@ void jit_container_emitter::map_abstract_registers(mapping_info& gpr_map_pool, m if (expressions.empty()) OPENVINO_THROW("Cannot map registers when there is no allocated_emitters provided"); - auto map_regs = [](const std::vector& abstract_regs, mapping_info& mapping) { - auto& abstract_to_physical = mapping.first; - auto& regs_pool = mapping.second; - std::vector physical_regs(abstract_regs.size()); - for (size_t i = 0; i < abstract_regs.size(); i++) { - const auto abstract = abstract_regs[i]; + auto map_regs = [&](const std::vector& abstract_regs) { + std::vector physical_regs = abstract_regs; + for (size_t i = 0; i < abstract_regs.size(); ++i) { + const auto& abstract_reg = abstract_regs[i]; + const auto& type = abstract_reg.type; + const auto& abstract = abstract_reg.idx; + OPENVINO_ASSERT(one_of(type, snippets::RegType::gpr, snippets::RegType::vec), "Incorrect reg type detected!"); + auto& mapping = type == snippets::RegType::gpr ? gpr_map_pool : vec_map_pool; + auto& abstract_to_physical = mapping.first; + auto& regs_pool = mapping.second; auto& physical = physical_regs[i]; if (abstract_to_physical.count(abstract) == 0) { if (regs_pool.empty()) OPENVINO_THROW("Cannot map registers for jit_container_emitter: not enough regs in the pool"); - physical = regs_pool.back(); + physical.idx = regs_pool.back(); regs_pool.pop_back(); - abstract_to_physical[abstract] = physical; + abstract_to_physical[abstract] = physical.idx; } else { - physical = abstract_to_physical[abstract]; + physical.idx = abstract_to_physical[abstract]; } } return physical_regs; }; for (const auto& expression : expressions) { - const auto& emitter = expression->get_emitter(); - std::vector in_physical_regs, out_physical_regs; - std::vector in_abstract_regs, out_abstract_regs; + std::vector in_physical_regs, out_physical_regs; + std::vector in_abstract_regs, out_abstract_regs; std::tie(in_abstract_regs, out_abstract_regs) = expression->get_reg_info(); - switch (std::dynamic_pointer_cast(emitter)->get_in_out_type()) { - case gpr_to_gpr: - in_physical_regs = map_regs(in_abstract_regs, gpr_map_pool); - out_physical_regs = map_regs(out_abstract_regs, gpr_map_pool); - break; - case gpr_to_vec: - in_physical_regs = map_regs(in_abstract_regs, gpr_map_pool); - out_physical_regs = map_regs(out_abstract_regs, vec_map_pool); - break; - case vec_to_gpr: - in_physical_regs = map_regs(in_abstract_regs, vec_map_pool); - out_physical_regs = map_regs(out_abstract_regs, gpr_map_pool); - break; - case vec_to_vec: - in_physical_regs = map_regs(in_abstract_regs, vec_map_pool); - out_physical_regs = map_regs(out_abstract_regs, vec_map_pool); - break; - default: - OPENVINO_THROW("Unsupported type of jit emitter!"); - } + in_physical_regs = map_regs(in_abstract_regs); + out_physical_regs = map_regs(out_abstract_regs); 
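+        // Illustrative trace (a sketch, not part of the change): with a gpr pool of {10, 11}
+        // and abstract regs [0, 1, 0], abstract 0 maps to physical 11 (taken from the back of
+        // the pool), abstract 1 maps to 10, and the second occurrence of abstract 0 reuses 11
+        // via the abstract_to_physical cache.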
expression->set_reg_info({in_physical_regs, out_physical_regs}); if (auto container = std::dynamic_pointer_cast(expression->get_emitter())) container->map_abstract_registers(gpr_map_pool, vec_map_pool, expressions); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp new file mode 100644 index 00000000000000..0125ac69b0b525 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef SNIPPETS_DEBUG_CAPS + +#include "jit_debug_emitter.hpp" +#include +#include "utils/general_utils.h" + +using namespace dnnl::impl::cpu; +using namespace dnnl::impl; +using namespace Xbyak; + +namespace ov { +namespace intel_cpu { + +size_t jit_debug_emitter::get_inputs_num() const { + return m_target_emitter->get_inputs_num(); +} + +size_t jit_debug_emitter::aux_vecs_count() const { + return m_target_emitter->aux_vecs_count(); +} + +size_t jit_debug_emitter::aux_gprs_count() const { + return m_target_emitter->aux_gprs_count(); +} + +void jit_debug_emitter::emitter_preamble(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs, const std::vector &pool_gpr_idxs) const { + m_target_emitter->emitter_preamble(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); +} + +void jit_debug_emitter::emitter_postamble() const { + m_target_emitter->emitter_postamble(); +} + +void jit_debug_emitter::validate_arguments(const std::vector& arg0, const std::vector& arg1) const { + m_target_emitter->validate_arguments(arg0, arg1); +} + +void jit_debug_emitter::emit_data() const { + m_target_emitter->emit_data(); +} + +void jit_debug_emitter::prepare_table() { + m_target_emitter->prepare_table(); +} + +void jit_debug_emitter::register_table_entries() { + m_target_emitter->register_table_entries(); +} + +void jit_debug_emitter::emit_impl(const std::vector &in_idxs, const std::vector &out_idxs) const { + m_target_emitter->emit_impl(in_idxs, out_idxs); +} + +void jit_debug_emitter::emit_code(const std::vector &in_idxs, const std::vector &out_idxs, + const std::vector &pool_vec_idxs, const std::vector &pool_gpr_idxs) const { + if (m_decorator_emit_loc == EmissionLocation::preamble || m_decorator_emit_loc == EmissionLocation::both) + m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); + + m_target_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); + + if (m_decorator_emit_loc == EmissionLocation::postamble || m_decorator_emit_loc == EmissionLocation::both) + m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); +} + +} // namespace intel_cpu +} // namespace ov + +#endif \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp new file mode 100644 index 00000000000000..116e17dcf4c1df --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef SNIPPETS_DEBUG_CAPS + +#pragma once + +#include "emitters/plugin/x64/jit_emitter.hpp" + + +namespace ov { +namespace intel_cpu { + +class jit_debug_emitter : public jit_emitter { +public: + enum class EmissionLocation { + preamble, + postamble, + both + }; + jit_debug_emitter(const 
std::shared_ptr<jit_emitter>& target_emitter, const std::shared_ptr<jit_emitter>& decorator_emitter, const EmissionLocation& loc)
+        : jit_emitter(target_emitter->h, target_emitter->host_isa_, target_emitter->exec_prc_, target_emitter->in_out_type_),
+        m_target_emitter(target_emitter), m_decorator_emitter(decorator_emitter), m_decorator_emit_loc(loc) {
+        prepare_table();
+    }
+
+    void emit_code(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs,
+        const std::vector<size_t> &pool_vec_idxs = {}, const std::vector<size_t> &pool_gpr_idxs = {}) const override;
+    void emit_data() const override;
+
+    size_t get_inputs_num() const override;
+    size_t aux_vecs_count() const override;
+
+protected:
+    size_t aux_gprs_count() const override;
+
+    void prepare_table() override;
+    void register_table_entries() override;
+
+    void emit_impl(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs) const override;
+
+    void emitter_preamble(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs,
+        const std::vector<size_t> &pool_vec_idxs, const std::vector<size_t> &pool_gpr_idxs) const override;
+    void emitter_postamble() const override;
+
+private:
+    void validate_arguments(const std::vector<size_t>& arg0, const std::vector<size_t>& arg1) const override;
+    // the wrapped production emitter, i.e. the one that does the real work
+    const std::shared_ptr<jit_emitter> m_target_emitter;
+    // the decorating debug-capability emitter
+    const std::shared_ptr<jit_emitter> m_decorator_emitter;
+
+    EmissionLocation m_decorator_emit_loc;
+};
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
index 244beb5c3a6758..459dc158c7b54a 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
@@ -12,9 +12,16 @@ using namespace dnnl::impl::cpu::x64;

 namespace ov {
 namespace intel_cpu {

-inline static void transform_idxs_to_regs(const std::vector<size_t>& idxs, std::vector<Reg64>& regs) {
-    regs.resize(idxs.size());
+inline static std::vector<Reg64> transform_idxs_to_regs(const std::vector<size_t>& idxs) {
+    std::vector<Reg64> regs(idxs.size());
     std::transform(idxs.begin(), idxs.end(), regs.begin(), [](size_t idx){return Reg64(static_cast<int>(idx));});
+    return regs;
+}
+
+inline static std::vector<size_t> transform_snippets_regs_to_idxs(const std::vector<snippets::Reg>& regs) {
+    std::vector<size_t> idxs(regs.size());
+    std::transform(regs.cbegin(), regs.cend(), idxs.begin(), [](const snippets::Reg& reg) { return reg.idx; });
+    return idxs;
 }

 jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr)
@@ -228,16 +235,16 @@ void jit_kernel_emitter::init_data_pointers(const Xbyak::Reg64& reg_indexes, con
 void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const {
     h->preamble();

-    Reg64 reg_indexes = Reg64(static_cast<int>(reg_indexes_idx));
-    Reg64 reg_const_params = Reg64(static_cast<int>(reg_const_params_idx));
-    std::vector<Reg64> data_ptr_regs;
-    transform_idxs_to_regs(data_ptr_regs_idx, data_ptr_regs);
+    auto reg_indexes = Reg64(static_cast<int>(reg_indexes_idx));
+    auto reg_const_params = Reg64(static_cast<int>(reg_const_params_idx));
+    auto data_ptr_regs = transform_idxs_to_regs(data_ptr_regs_idx);

     init_data_pointers(reg_indexes, reg_const_params, data_ptr_regs);
     for (const auto& expression : body) {
+        const auto reg_info = expression->get_reg_info();
+        const auto in_regs = transform_snippets_regs_to_idxs(reg_info.first);
+        const auto out_regs =
transform_snippets_regs_to_idxs(reg_info.second); const auto& emitter = expression->get_emitter(); - std::vector in_regs, out_regs; - std::tie(in_regs, out_regs) = expression->get_reg_info(); emitter->emit_code(in_regs, out_regs, vec_regs_pool, gp_regs_pool); } h->postamble(); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp index 230bee0152f225..43d0d3dc9ca901 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp @@ -77,6 +77,10 @@ class jit_kernel_emitter : public jit_container_emitter { const size_t reg_indexes_idx; const size_t reg_const_params_idx; + +#ifdef SNIPPETS_DEBUG_CAPS + friend std::string init_info_jit_kernel_emitter(const jit_kernel_emitter *emitter); +#endif }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp index c7a37a71c9c8c3..054f78cb88b42e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp @@ -12,11 +12,6 @@ using namespace dnnl::impl::cpu::x64; namespace ov { namespace intel_cpu { -inline static void transform_idxs_to_regs(const std::vector& idxs, std::vector& regs) { - regs.resize(idxs.size()); - std::transform(idxs.begin(), idxs.end(), regs.begin(), [](size_t idx){return Reg64(static_cast(idx));}); -} - jit_loop_begin_emitter::jit_loop_begin_emitter(jit_generator* h, cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) : jit_emitter(h, isa) { loop_begin = ov::as_type_ptr(expr->get_node()); @@ -71,6 +66,7 @@ jit_loop_end_emitter::jit_loop_end_emitter(jit_generator* h, cpu_isa_t isa, cons num_outputs = expr->get_output_count(); wa_increment = static_cast(loop_end->get_increment()); work_amount = static_cast(loop_end->get_work_amount()); + is_incremented = loop_end->get_is_incremented(); ptr_increments = loop_end->get_ptr_increments(); finalization_offsets = loop_end->get_finalization_offsets(); evaluate_once = loop_end->get_evaluate_once(); @@ -98,22 +94,25 @@ void jit_loop_end_emitter::emit_impl(const std::vector& in, const std::v // the last input is actually a work_amount reg data_ptr_reg_idxs.reserve(num_inputs - 1); std::copy(in.begin(), in.end() - 1, std::back_inserter(data_ptr_reg_idxs)); - std::vector data_ptr_regs; - transform_idxs_to_regs(data_ptr_reg_idxs, data_ptr_regs); + Reg64 reg_work_amount = Reg64(in.back()); if (!evaluate_once) { - for (size_t idx = 0; idx < data_ptr_regs.size(); idx++) { - if (ptr_increments[idx] != 0) - h->add(data_ptr_regs[idx], ptr_increments[idx] * wa_increment * io_data_size[idx]); + for (size_t idx = 0; idx < data_ptr_reg_idxs.size(); idx++) { + if (!is_incremented[idx] || ptr_increments[idx] == 0) + continue; + Reg64 data_reg = Reg64(static_cast(data_ptr_reg_idxs[idx])); + h->add(data_reg, ptr_increments[idx] * wa_increment * io_data_size[idx]); } h->sub(reg_work_amount, wa_increment); h->cmp(reg_work_amount, wa_increment); h->jge(loop_begin->begin_address); } - for (size_t idx = 0; idx < data_ptr_regs.size(); idx++) { - if (finalization_offsets[idx] != 0) - h->add(data_ptr_regs[idx], finalization_offsets[idx] * io_data_size[idx]); + for (size_t idx = 0; idx < data_ptr_reg_idxs.size(); idx++) { + if (!is_incremented[idx] || finalization_offsets[idx] == 0) + continue; 
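+        // Note: finalization offsets are applied exactly once, after the loop completes,
+        // to rewind or advance each data pointer; entries with is_incremented == false
+        // or a zero offset are skipped.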
+        Reg64 data_reg = Reg64(static_cast<int>(data_ptr_reg_idxs[idx]));
+        h->add(data_reg, finalization_offsets[idx] * io_data_size[idx]);
     }
 }
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
index ac87436c3030f6..a71d253cdd286e 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.hpp
@@ -55,6 +55,7 @@ class jit_loop_end_emitter : public jit_emitter {
     int64_t wa_increment = 0;
     int64_t work_amount = 0;
     bool evaluate_once = false;
+    std::vector<bool> is_incremented;
     std::vector<int64_t> ptr_increments;
     std::vector<int64_t> finalization_offsets;
 };
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
index 5a49af108561d4..50276d9d9e2f1b 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_memory_emitters.hpp
@@ -22,6 +22,9 @@ class jit_memory_emitter : public jit_emitter {
     size_t count = 0;
     size_t byte_offset = 0;
+#ifdef SNIPPETS_DEBUG_CAPS
+    friend std::string init_info_jit_memory_emitter(const jit_memory_emitter *emitter);
+#endif
 };

 class jit_load_memory_emitter : public jit_memory_emitter {
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_rdtsc_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_rdtsc_emitters.cpp
index 65180b8a2b6699..703bb82d446fea 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_rdtsc_emitters.cpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_rdtsc_emitters.cpp
@@ -73,9 +73,7 @@ void jit_perf_count_rdtsc_end_emitter::emit_impl(const std::vector<size_t> &in_i
     // iteration++
     h->mov(h->rax, reinterpret_cast<size_t>(&m_end_node->iteration));
-    h->mov(h->rdx, qword[h->rax]);
-    h->add(h->rdx, 0x01);
-    h->mov(qword[h->rax], h->rdx);
+    h->inc(qword[h->rax]);

     h->pop(h->rdx);
     h->pop(h->rax);
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp
new file mode 100644
index 00000000000000..109950dd3a668e
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp
@@ -0,0 +1,88 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#include "jit_segfault_detector_emitter.hpp"
+
+using namespace dnnl::impl::utils;
+using namespace dnnl::impl;
+using namespace dnnl::impl::cpu::x64;
+using namespace Xbyak;
+
+namespace ov {
+namespace intel_cpu {
+
+std::shared_ptr<ThreadLocal<jit_uni_segfault_detector_emitter*>> g_custom_segfault_handler =
+    std::make_shared<ThreadLocal<jit_uni_segfault_detector_emitter*>>();
+
+jit_uni_segfault_detector_emitter::jit_uni_segfault_detector_emitter(dnnl::impl::cpu::x64::jit_generator* host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
+    jit_emitter* target_emitter, bool is_load, bool is_store, std::string target_node_name) :
+    jit_emitter(host, host_isa),
+    m_target_emitter(target_emitter),
+    is_target_use_load_emitter(is_load),
+    is_target_use_store_emitter(is_store),
+    m_target_node_name(target_node_name) {
+}
+
+size_t jit_uni_segfault_detector_emitter::get_inputs_num() const { return 1; }
+
+const jit_emitter* jit_uni_segfault_detector_emitter::get_target_emitter() const {
+    return m_target_emitter;
+}
+
+void jit_uni_segfault_detector_emitter::emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const {
+    save_target_emitter();
+    if (is_target_use_load_emitter) {
+        memory_track(in_vec_idxs[0]);
+    } else if (is_target_use_store_emitter) {
+        memory_track(out_vec_idxs[0]);
+    }
+}
+
+void jit_uni_segfault_detector_emitter::save_target_emitter() const {
+    // use an internal call so that "->local" is evaluated on the execution thread;
+    // otherwise it would always resolve to the compilation thread.
+    internal_call_preamble();
+
+    const auto &set_local_handler_overload = static_cast<void (*)(jit_uni_segfault_detector_emitter*)>(set_local_handler);
+    h->mov(h->rax, reinterpret_cast<size_t>(set_local_handler_overload));
+    h->mov(abi_param1, reinterpret_cast<size_t>(this));
+    internal_call_rsp_align();
+    h->call(h->rax);
+    internal_call_rsp_restore();
+
+    internal_call_postamble();
+}
+
+void jit_uni_segfault_detector_emitter::set_local_handler(jit_uni_segfault_detector_emitter* emitter_address) {
+    g_custom_segfault_handler->local() = emitter_address;
+}
+
+void jit_uni_segfault_detector_emitter::memory_track(size_t gpr_idx_for_mem_address) const {
+    h->push(h->r15);
+    Xbyak::Label label_set_address_current;
+    Xbyak::Label label_set_address_end;
+    h->mov(h->r15, reinterpret_cast<size_t>(&start_address));
+    h->cmp(h->qword[h->r15], 0);
+    h->jne(label_set_address_current);
+    h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    h->mov(h->r15, reinterpret_cast<size_t>(&current_address));
+    h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    h->jmp(label_set_address_end);
+    h->L(label_set_address_current);
+    {
+        h->mov(h->r15, reinterpret_cast<size_t>(&current_address));
+        h->mov(h->qword[h->r15], Xbyak::Reg64(gpr_idx_for_mem_address));
+    }
+    h->L(label_set_address_end);
+    // iteration++, 1 means first access
+    h->mov(h->r15, reinterpret_cast<size_t>(&iteration));
+    h->add(h->qword[h->r15], 0x01);
+    h->pop(h->r15);
+}
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp
new file mode 100644
index 00000000000000..68849e5a21563e
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.hpp
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#pragma once
+
+#include
+#include "emitters/plugin/x64/jit_emitter.hpp"
+#include "openvino/runtime/threading/thread_local.hpp"
+
+using namespace ov::threading;
+
+namespace ov {
+namespace intel_cpu {
+
+class jit_uni_segfault_detector_emitter;
+extern std::shared_ptr<ThreadLocal<jit_uni_segfault_detector_emitter*>> g_custom_segfault_handler;
+
+class jit_uni_segfault_detector_emitter : public jit_emitter {
+public:
+    jit_uni_segfault_detector_emitter(dnnl::impl::cpu::x64::jit_generator* host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
+        jit_emitter* target_emitter, bool is_load, bool is_store, std::string target_node_name);
+
+    size_t get_inputs_num() const override;
+
+    const jit_emitter* get_target_emitter() const;
+
+private:
+    // emit_impl saves the "this" pointer (jit_uni_segfault_detector_emitter) to the global
+    // handler, so the signal handler can print info via its target_emitter.
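+    // (g_custom_segfault_handler is per-thread ThreadLocal state, so concurrent inference
+    // threads each record the emitter they were executing.)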
+    // It also records the tracked memory address, iteration count, etc. for printing.
+    void emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const override;
+    jit_emitter *m_target_emitter = nullptr;
+    bool is_target_use_load_emitter = false;
+    bool is_target_use_store_emitter = false;
+    std::string m_target_node_name = "";
+
+    void save_target_emitter() const;
+    static void set_local_handler(jit_uni_segfault_detector_emitter* emitter_address);
+    void memory_track(size_t gpr_idx_for_mem_address) const;
+
+    mutable size_t start_address = 0;
+    mutable size_t current_address = 0;
+    mutable size_t iteration = 0;
+
+    friend std::string init_info_jit_uni_segfault_detector_emitter(const jit_uni_segfault_detector_emitter *emitter);
+};
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp
new file mode 100644
index 00000000000000..d73502825050ca
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp
@@ -0,0 +1,222 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#include "verbose.hpp"
+#include "jit_segfault_detector_emitter.hpp"
+#include "jit_memory_emitters.hpp"
+#include "jit_brgemm_emitter.hpp"
+#include "jit_brgemm_copy_b_emitter.hpp"
+#include "jit_kernel_emitter.hpp"
+#include "jit_snippets_emitters.hpp"
+
+#ifndef _WIN32
+#include <cxxabi.h>
+#endif
+
+namespace ov {
+namespace intel_cpu {
+
+template <typename T>
+std::string join(const T& v, const std::string& sep = ", ") {
+    std::ostringstream ss;
+    size_t count = 0;
+    for (const auto& x : v) {
+        if (count++ > 0) {
+            ss << sep;
+        }
+        ss << x;
+    }
+    return ss.str();
+}
+
+template <typename T>
+std::string vector_to_string(const T& v) {
+    std::ostringstream os;
+    os << "[ " << ov::util::join(v) << " ]";
+    return os.str();
+}
+
+std::string get_emitter_type_name(const jit_emitter* emitter) {
+    std::string name = typeid(*emitter).name();
+#ifndef _WIN32
+    int status;
+    std::unique_ptr<char, void (*)(void*)> demangled_name(
+            abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status),
+            std::free);
+    name = demangled_name.get();
+#endif
+    return name;
+}
+
+std::string init_info_jit_memory_emitter(const jit_memory_emitter *emitter) {
+    std::stringstream ss;
+    ss << " src_precision:" << emitter->src_prc
+       << " dst_precision:" << emitter->dst_prc
+       << " load/store_element_number:" << emitter->count
+       << " byte_offset:" << emitter->byte_offset;
+    return ss.str();
+}
+
+static std::string init_info_jit_load_memory_emitter(const jit_load_memory_emitter *emitter) {
+    std::stringstream ss;
+    std::string memory_emitter_info = init_info_jit_memory_emitter(emitter);
+    ss << "Emitter_type_name:jit_load_memory_emitter"
+       << memory_emitter_info;
+    return ss.str();
+}
+
+static std::string init_info_jit_load_broadcast_emitter(const jit_load_broadcast_emitter *emitter) {
+    std::stringstream ss;
+    std::string memory_emitter_info = init_info_jit_memory_emitter(emitter);
+    ss << "Emitter_type_name:jit_load_broadcast_emitter"
+       << memory_emitter_info;
+    return ss.str();
+}
+
+static std::string init_info_jit_load_convert_emitter(const jit_load_convert_emitter *emitter) {
+    std::stringstream ss;
+    std::string memory_emitter_info = init_info_jit_memory_emitter(emitter);
+    ss << "Emitter_type_name:jit_load_convert_emitter"
+       << memory_emitter_info;
+    return ss.str();
+}
+
+static std::string init_info_jit_store_memory_emitter(const
jit_store_memory_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_store_memory_emitter" + << memory_emitter_info; + return ss.str(); +} + +static std::string init_info_jit_store_convert_emitter(const jit_store_convert_emitter *emitter) { + std::stringstream ss; + std::string memory_emitter_info = init_info_jit_memory_emitter(emitter); + ss << "Emitter_type_name:jit_store_convert_emitter" + << memory_emitter_info; + return ss.str(); +} + +std::string init_info_jit_brgemm_emitter(const jit_brgemm_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_brgemm_emitter" + << " m_ctx.M:" << emitter->m_ctx.M + << " m_ctx.K:" << emitter->m_ctx.K + << " m_ctx.N:" << emitter->m_ctx.N + << " m_ctx.LDA:" << emitter->m_ctx.LDA + << " m_ctx.LDB:" << emitter->m_ctx.LDB + << " m_ctx.LDC:" << emitter->m_ctx.LDC + << " m_ctx.dt_in0:" << emitter->m_ctx.dt_in0 + << " m_ctx.dt_in1:" << emitter->m_ctx.dt_in1 + << " m_ctx.palette:" << emitter->m_ctx.palette + << " m_ctx.is_with_amx:" << emitter->m_ctx.is_with_amx + << " m_ctx.is_with_comp:" << emitter->m_ctx.is_with_comp + << " m_ctx.beta:" << emitter->m_ctx.beta + << " m_load_offset_a:" << emitter->m_load_offset_a + << " m_load_offset_b:" << emitter->m_load_offset_b + << " m_load_offset_scratch:" << emitter->m_load_offset_scratch + << " m_store_offset_c:" << emitter->m_store_offset_c + << " m_with_scratch:" << emitter->m_with_scratch + << " m_with_comp:" << emitter->m_with_comp; + + return ss.str(); +} + +std::string init_info_jit_brgemm_copy_b_emitter(const jit_brgemm_copy_b_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_brgemm_copy_b_emitter" + << " m_LDB:" << emitter->m_LDB + << " m_K:" << emitter->m_K + << " m_K_blk:" << emitter->m_K_blk + << " m_K_tail:" << emitter->m_K_tail + << " m_N:" << emitter->m_N + << " m_N_blk:" << emitter->m_N_blk + << " m_N_tail:" << emitter->m_N_tail + << " m_brgemm_prc_in0:" << emitter->m_brgemm_prc_in0 + << " m_brgemm_prc_in1:" << emitter->m_brgemm_prc_in1 + << " m_brgemmVNNIFactor:" << emitter->m_brgemmVNNIFactor + << " m_with_comp:" << emitter->m_with_comp + << " m_in_offset:" << emitter->m_in_offset + << " m_out_offset:" << emitter->m_out_offset + << ",m_comp_offset:" << emitter->m_comp_offset; + + return ss.str(); +} + +std::string init_info_jit_kernel_emitter(const jit_kernel_emitter *emitter) { + std::stringstream ss; + ss << "Emitter_type_name:jit_kernel_emitter" + << " jcp.parallel_executor_ndims:" << emitter->jcp.parallel_executor_ndims + << " gp_regs_pool:"<< vector_to_string(emitter->gp_regs_pool) + << " master_shape:" << vector_to_string(emitter->master_shape) + << " num_inputs:" << emitter->num_inputs + << " num_outputs:" << emitter->num_outputs + << " num_unique_buffers:" << emitter->num_unique_buffers + << " io_data_sizes:" << vector_to_string(emitter->io_data_sizes) + << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx) + << " vec_regs_pool:" << vector_to_string(emitter->vec_regs_pool) + << " reg_indexes_idx:" << emitter->reg_indexes_idx + << " reg_const_params_idx:" << emitter->reg_const_params_idx; + for (size_t i = 0; i < emitter->io_data_layouts.size(); ++i) + ss << " io_data_layouts for " << i << " is:" << vector_to_string(emitter->io_data_layouts[i]); + for (size_t i = 0; i < emitter->io_shapes.size(); ++i) + ss << " io_shapes for " << i << " is: "<< vector_to_string(emitter->io_shapes[i]); + return ss.str(); +} + +std::string 
init_info_jit_uni_segfault_detector_emitter(const jit_uni_segfault_detector_emitter *emitter) {
+    std::stringstream ss;
+    ss << "Node_name:" << emitter->m_target_node_name
+       << " use_load_emitter:" << emitter->is_target_use_load_emitter
+       << " use_store_emitter:" << emitter->is_target_use_store_emitter;
+    if (emitter->is_target_use_load_emitter || emitter->is_target_use_store_emitter) {
+        ss << " start_address:" << emitter->start_address
+           << " current_address:" << emitter->current_address
+           << " iteration:" << emitter->iteration << " ";
+    }
+    // target emitter info
+    if (auto target_e = emitter->get_target_emitter()) {
+        ss << target_e->info();
+    }
+    return ss.str();
+}
+
+static std::string init_info_jit_emitter_general(const jit_emitter *emitter) {
+    std::stringstream ss;
+    ss << "Emitter_type_name:" << get_emitter_type_name(emitter);
+    return ss.str();
+}
+
+void jit_emitter_info_t::init(const jit_emitter *emitter) {
+    if (is_initialized_) return;
+    if (auto e_type = dynamic_cast<const jit_load_memory_emitter*>(emitter)) {
+        str_ = init_info_jit_load_memory_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_load_broadcast_emitter*>(emitter)) {
+        str_ = init_info_jit_load_broadcast_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_load_convert_emitter*>(emitter)) {
+        str_ = init_info_jit_load_convert_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_store_memory_emitter*>(emitter)) {
+        str_ = init_info_jit_store_memory_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_store_convert_emitter*>(emitter)) {
+        str_ = init_info_jit_store_convert_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_brgemm_emitter*>(emitter)) {
+        str_ = init_info_jit_brgemm_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_brgemm_copy_b_emitter*>(emitter)) {
+        str_ = init_info_jit_brgemm_copy_b_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_kernel_emitter*>(emitter)) {
+        str_ = init_info_jit_kernel_emitter(e_type);
+    } else if (auto e_type = dynamic_cast<const jit_uni_segfault_detector_emitter*>(emitter)) {
+        str_ = init_info_jit_uni_segfault_detector_emitter(e_type);
+    } else {
+        str_ = init_info_jit_emitter_general(emitter);
+    }
+    is_initialized_ = true;
+}
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp
new file mode 100644
index 00000000000000..a81364039b98a7
--- /dev/null
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef SNIPPETS_DEBUG_CAPS
+
+#pragma once
+
+#include
+
+namespace ov {
+namespace intel_cpu {
+class jit_emitter;
+struct jit_emitter_info_t {
+    jit_emitter_info_t() = default;
+    jit_emitter_info_t(const jit_emitter_info_t &rhs)
+        : str_(rhs.str_), is_initialized_(rhs.is_initialized_) {}
+    jit_emitter_info_t &operator=(const jit_emitter_info_t &rhs) {
+        is_initialized_ = rhs.is_initialized_;
+        str_ = rhs.str_;
+        return *this;
+    }
+
+    const char *c_str() const { return str_.c_str(); }
+    bool is_initialized() const { return is_initialized_; }
+
+    void init(const jit_emitter *emitter);
+
+private:
+    std::string str_;
+    bool is_initialized_ = false;
+};
+
+std::string get_emitter_type_name(const jit_emitter* emitter);
+
+} // namespace intel_cpu
+} // namespace ov
+
+#endif
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp
index d254f36a3efbac..1520512aa2a870 100644
--- a/src/plugins/intel_cpu/src/graph.cpp
+++ b/src/plugins/intel_cpu/src/graph.cpp
@@ -1299,54 +1299,42 @@ void Graph::Infer(SyncInferRequest* request) {
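A minimal standalone sketch of the scheme this hunk adopts (placeholder Node type, not the plugin's real interfaces): a post-order DFS over a visited set lists children before their parents, so a single final reverse yields the parents-first topological order.

#include <algorithm>
#include <functional>
#include <unordered_set>
#include <vector>

struct Node { std::vector<Node*> children; };  // placeholder, not the plugin's Node

std::vector<Node*> sort_topologically(const std::vector<Node*>& nodes) {
    std::unordered_set<Node*> visited;
    std::vector<Node*> sorted;
    sorted.reserve(nodes.size());
    std::function<void(Node*)> visit = [&](Node* node) {
        if (!visited.insert(node).second)
            return;  // already visited
        for (auto* child : node->children)
            visit(child);
        sorted.push_back(node);  // post-order: children land before their parents
    };
    for (auto* node : nodes)
        visit(node);
    std::reverse(sorted.begin(), sorted.end());  // parents before children
    return sorted;
}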
if (infer_count != -1) infer_count++; } -void Graph::VisitNode(NodePtr node, std::vector& sortedNodes) { - if (node->temporary) { - return; - } - - if (node->permanent) { - return; - } - - node->temporary = true; - - for (size_t i = 0; i < node->getChildEdges().size(); i++) { - VisitNode(node->getChildEdgeAt(i)->getChild(), sortedNodes); - } - - node->permanent = true; - node->temporary = false; - - sortedNodes.insert(sortedNodes.begin(), node); -} - void Graph::SortTopologically() { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::intel_cpu_LT, "Graph::SortTopologically"); - std::vector unsorted; - std::vector sorted; + auto sort = [](const std::vector& nodes) { + std::unordered_set visited; + visited.reserve(nodes.size()); + std::vector sorted; + sorted.reserve(nodes.size()); - for (size_t i = 0; i < graphNodes.size(); i++) { - NodePtr node = graphNodes[i]; + std::function visit; + visit = [&visited, &sorted, &visit](const NodePtr node) { + const bool inserted = visited.insert(node).second; + if (!inserted) + return; // already visited - node->permanent = false; - node->temporary = false; - - unsorted.push_back(node); - } + for (size_t i = 0; i < node->getChildEdges().size(); i++) { + visit(node->getChildEdgeAt(i)->getChild()); + } - while (!unsorted.empty()) { - NodePtr node = unsorted.at(0); - unsorted.erase(unsorted.begin()); + sorted.push_back(node); + }; - VisitNode(node, sorted); - } + for (const auto& node : nodes) { + visit(node); + } - for (size_t i = 0; i < sorted.size(); i++) - sorted[i]->execIndex = static_cast(i); + return sorted; + }; - graphNodes.erase(graphNodes.begin(), graphNodes.end()); - graphNodes.assign(sorted.begin(), sorted.end()); + // as a first step sort in reversed topological order to avoid an insertion into the front of the vector + graphNodes = sort(graphNodes); + // reverse to the actual topological order + std::reverse(graphNodes.begin(), graphNodes.end()); + // number the nodes based on topological order + for (size_t i = 0; i < graphNodes.size(); i++) + graphNodes[i]->execIndex = static_cast(i); // TODO: Sort in/out edges by port index because of backward compatibility // A lot of plugin logic are build on top of assumption that index in diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h index 029c33ca12dfde..035c1b817e9129 100644 --- a/src/plugins/intel_cpu/src/graph.h +++ b/src/plugins/intel_cpu/src/graph.h @@ -191,8 +191,6 @@ class Graph { } protected: - void VisitNode(NodePtr node, std::vector& sortedNodes); - void ForgetGraphData() { status = Status::NotReady; diff --git a/src/plugins/intel_cpu/src/graph_dumper.cpp b/src/plugins/intel_cpu/src/graph_dumper.cpp index 0afa7c2ada5ce4..9c16c9ab73271d 100644 --- a/src/plugins/intel_cpu/src/graph_dumper.cpp +++ b/src/plugins/intel_cpu/src/graph_dumper.cpp @@ -4,16 +4,16 @@ #include "graph_dumper.h" -#include "utils/debug_capabilities.h" -#include "exec_graph_info.hpp" +#include "dnnl_debug.h" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" -#include +#include "openvino/runtime/exec_model_info.hpp" +#include "utils/debug_capabilities.h" -#include -#include -#include #include +#include +#include +#include namespace ov { namespace intel_cpu { @@ -28,16 +28,16 @@ std::map extract_node_metadata(const NodePtr &node) { if (node->getType() == Type::Input && node->isConstant()) { // We need to separate Input and Const layers - serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = "Const"; + serialization_info[ov::exec_model_info::LAYER_TYPE] = "Const"; 
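+        // (ov::exec_model_info provides the same serialization keys as the deprecated
+        // ExecGraphInfoSerialization namespace; only the header and namespace change)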
} else { - serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = NameFromType(node->getType()); + serialization_info[ov::exec_model_info::LAYER_TYPE] = NameFromType(node->getType()); } // Original layers - serialization_info[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers(); + serialization_info[ov::exec_model_info::ORIGINAL_NAMES] = node->getOriginalLayers(); // Implementation type name - serialization_info[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType(); + serialization_info[ov::exec_model_info::IMPL_TYPE] = node->getPrimitiveDescriptorType(); std::string outputPrecisionsStr; if (!node->getChildEdges().empty()) { @@ -62,7 +62,7 @@ std::map extract_node_metadata(const NodePtr &node) { outputPrecisionsStr = node->getParentEdgeAt(0)->getMemory().getDesc().getPrecision().get_type_name(); } } - serialization_info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr; + serialization_info[ov::exec_model_info::OUTPUT_PRECISIONS] = outputPrecisionsStr; std::string outputLayoutsStr; auto outDescs = node->getSelectedPrimitiveDescriptor()->getConfig().outConfs; @@ -87,18 +87,18 @@ std::map extract_node_metadata(const NodePtr &node) { } else { outputLayoutsStr = dnnl::utils::fmt2str(dnnl::memory::format_tag::undef); } - serialization_info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr; + serialization_info[ov::exec_model_info::OUTPUT_LAYOUTS] = outputLayoutsStr; // Performance if (node->PerfCounter().avg() != 0) { - serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg()); + serialization_info[ov::exec_model_info::PERF_COUNTER] = std::to_string(node->PerfCounter().avg()); } else { - serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet + serialization_info[ov::exec_model_info::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet } - serialization_info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex()); + serialization_info[ov::exec_model_info::EXECUTION_ORDER] = std::to_string(node->getExecIndex()); - serialization_info[ExecGraphInfoSerialization::RUNTIME_PRECISION] = node->getRuntimePrecision().get_type_name(); + serialization_info[ov::exec_model_info::RUNTIME_PRECISION] = node->getRuntimePrecision().get_type_name(); return serialization_info; } @@ -164,7 +164,7 @@ std::shared_ptr dump_graph_as_ie_ngraph_net(const Graph &graph) { results.emplace_back(std::make_shared(get_inputs(node).back())); return_node = results.back(); } else { - return_node = std::make_shared( + return_node = std::make_shared( get_inputs(node), node->getSelectedPrimitiveDescriptor()->getConfig().outConfs.size()); for (size_t port = 0; port < return_node->get_output_size(); ++port) { diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp index bf54bab2e917ad..7537c0f1f915d7 100644 --- a/src/plugins/intel_cpu/src/infer_request.cpp +++ b/src/plugins/intel_cpu/src/infer_request.cpp @@ -6,7 +6,6 @@ #include "async_infer_request.h" #include "compiled_model.h" -#include "debug.h" #include "dnnl_extension_utils.h" #include "itt.h" #include "memory_desc/dnnl_blocked_memory_desc.h" diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index 660d2b8d6bd7b2..85e6817c5a112b 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -76,8 +76,6 @@ Node::Node(const std::shared_ptr& op, 
const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory) : selectedPrimitiveDescriptorIndex(-1), - permanent(false), - temporary(false), constant(ConstantType::NoConst), context(ctx), algorithm(Algorithm::Default), @@ -182,8 +180,6 @@ Node::Node(const std::shared_ptr& op, Node::Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx) : selectedPrimitiveDescriptorIndex(-1), - permanent(false), - temporary(false), constant(ConstantType::NoConst), context(ctx), fusingPort(-1), diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index da529fbefacde7..1a8cecf9b112f1 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -609,8 +609,6 @@ class Node { Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx); int selectedPrimitiveDescriptorIndex = -1; - bool permanent = false; - bool temporary = false; enum class InPlaceType { Unknown, diff --git a/src/plugins/intel_cpu/src/nodes/concat.cpp b/src/plugins/intel_cpu/src/nodes/concat.cpp index 1d4d75a40bd5d9..b50cfb13949bcd 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.cpp +++ b/src/plugins/intel_cpu/src/nodes/concat.cpp @@ -342,6 +342,26 @@ void Concat::prepareParams() { hasOuterLoop = true; } } + + canOptimize1DCase = false; + if (outputShape.size() == 1 && outputStrides[0] == 1 && outputShape[0] <= 64 && elemSize == 4) { + // output is small 1d vector (which is typical in shape inference subgraph), + // in this case, inputs are also small 1d vector and single thread naive impl is faster + canOptimize1DCase = true; + for (size_t i = 0; i < getParentEdges().size(); i++) { + const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr(); + const auto srcMemDesc = srcMemPtr->getDescPtr()->as(); + const auto& inputShape = srcMemDesc->getBlockDims(); + const auto& strides = srcMemDesc->getStrides(); + if (inputShape.size() != 1 || strides.size() != 1) { + canOptimize1DCase = false; + break; + } + } + if (canOptimize1DCase) + return; + } + std::vector srcs_d; for (size_t i = 0; i < getParentEdges().size(); i++) { const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr(); @@ -451,6 +471,11 @@ void Concat::execute(dnnl::stream strm) { return; } + if (canOptimize1DCase) { + exec1DCase(); + return; + } + if (canOptimizeNspc) { execNspcSpecCase(); return; @@ -479,6 +504,19 @@ ov::element::Type Concat::getRuntimePrecision() const { return getMaxPrecision(getInputPrecisions()); } +void Concat::exec1DCase() { + DEBUG_LOG(getName(), " exec1DCase"); + auto* dst = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->getData()); + for (size_t i = 0; i < getParentEdges().size(); i++) { + const auto& srcMemPtr = getParentEdgeAt(i)->getMemoryPtr(); + const auto& srcShape = srcMemPtr->getStaticDims(); + const auto* src = reinterpret_cast(srcMemPtr->getData()); + for (size_t i = 0; i < srcShape[0]; i++) { + *dst++ = src[i]; + } + } +} + void Concat::execNspcSpecCase() { const auto& dst_memory = getChildEdgeAt(0)->getMemory(); const size_t num_src = getParentEdges().size(); diff --git a/src/plugins/intel_cpu/src/nodes/concat.h b/src/plugins/intel_cpu/src/nodes/concat.h index d751e3649b04dd..9236c8420e8d00 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.h +++ b/src/plugins/intel_cpu/src/nodes/concat.h @@ -36,9 +36,11 @@ class Concat : public Node { size_t reorderedAxis = 0; bool canBeInPlace = false; bool canOptimizeNspc = false; + bool canOptimize1DCase = false; void execRef(); size_t inverseOrder(const VectorDims& 
order, size_t axis); void execNspcSpecCase(); + void exec1DCase(); std::vector inputStrides; std::vector nelemToCopy; // byte moved in each iter std::vector dstOffset; // dst offset for each input diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index 6a9949365ced87..102dbf509d00ea 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -268,6 +268,18 @@ void Gather::prepareParams() { if (getSelectedPrimitiveDescriptor() == nullptr) THROW_ERROR(" has unidentified preferable primitive descriptor."); + // short 1D vector fast execution impl (typical in shape infer subgraph) + canOptimize1DCase = false; + if (dataSrcRank <= 1 && dataMemPtr->getDesc().getPrecision() == ov::element::i32) { + const auto& dataDims = dataMemPtr->getStaticDims(); + const auto& idxDims = idxMemPtr->getStaticDims(); + if ((dataDims.size() == 0 || (dataDims.size() == 1 && dataDims[0] <= 64)) && + (idxDims.size() == 0 || (idxDims.size() == 1 && idxDims[0] <= 64))) { + canOptimize1DCase = true; + return; + } + } + if (!isAxisInputConst) { axis = (reinterpret_cast(getParentEdgeAt(GATHER_AXIS)->getMemoryPtr()->getData()))[0]; if (axis < 0) @@ -317,6 +329,11 @@ void Gather::execute(dnnl::stream strm) { if (isInPlace()) { return; } + + if (canOptimize1DCase) { + exec1DCase(); + return; + } #if defined(OPENVINO_ARCH_X86_64) if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) { const void* srcIndices = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr()->getData(); @@ -376,6 +393,10 @@ void Gather::executeDynamicImpl(dnnl::stream strm) { if (isInPlace()) { return; } + if (canOptimize1DCase) { + exec1DCase(); + return; + } #if defined(OPENVINO_ARCH_X86_64) if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) { const void* srcIndices = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr()->getData(); @@ -536,6 +557,29 @@ void Gather::execReference() { }); } +void Gather::exec1DCase() { + DEBUG_LOG(getName(), " exec1DCase"); + auto* pdst = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->getData()); + auto srcMemPtr = getParentEdgeAt(GATHER_DATA)->getMemoryPtr(); + auto idxMemPtr = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr(); + const auto* psrc = reinterpret_cast(srcMemPtr->getData()); + const auto* pidx = reinterpret_cast(idxMemPtr->getData()); + + const auto& idxDims = idxMemPtr->getStaticDims(); + const auto idxCnt = (idxDims.size() == 0) ? 
1 : idxDims[0]; + auto axisDim = srcMemPtr->getStaticDims()[0]; + for (size_t i = 0; i < idxCnt; i++) { + auto ii = pidx[i]; + if (ii < 0) { + if (reverseIndexing) + ii += axisDim; + else + ii = axisDim; + } + pdst[i] = psrc[ii]; + } +} + bool Gather::created() const { return getType() == Type::Gather; } diff --git a/src/plugins/intel_cpu/src/nodes/gather.h b/src/plugins/intel_cpu/src/nodes/gather.h index bc91b106cf573a..87f4f3a09ce5be 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.h +++ b/src/plugins/intel_cpu/src/nodes/gather.h @@ -56,6 +56,9 @@ class Gather : public Node { void initShortParams(threadExecParams& p, uint64_t start); void execReference(); + bool canOptimize1DCase = false; + void exec1DCase(); + bool isDataShapeStat = false; bool isIdxShapeStat = false; bool isAxisInputConst = false; diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.cpp b/src/plugins/intel_cpu/src/nodes/mathematics.cpp index 734869737154d7..d22ed520ca78b9 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.cpp +++ b/src/plugins/intel_cpu/src/nodes/mathematics.cpp @@ -159,6 +159,8 @@ void Math::execute(dnnl::stream strm) { dst_data[i] = 1.0f; else if (src_data[i] < 0.0f) dst_data[i] = -1.0f; + else if (std::isnan(src_data[i])) + dst_data[i] = src_data[i]; else dst_data[i] = 0.0f; }); diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp index 7061bf60cb8218..9de62ae2ada01c 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.cpp +++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp @@ -207,7 +207,11 @@ Node::AttrPtr MatMul::initPrimitiveAttr(const VectorDims &dims) { } Node::AttrPtr MatMul::initPrimitiveAttr() { - auto dummyShape = MemoryDescUtils::makeDummyShape(getOutputShapeAtPort(0)); + auto outputShape = getOutputShapeAtPort(0); + for (auto&& node : fusedWith) { + outputShape = mergeShapes(outputShape, node->getOutputShapeAtPort(0)); + } + auto dummyShape = MemoryDescUtils::makeDummyShape(outputShape); return initPrimitiveAttr(dummyShape.getStaticDims()); } @@ -293,7 +297,7 @@ void MatMul::getSupportedDescriptors() { const auto& inputShape0 = getInputShapeAtPort(0); const auto& inputShape1 = getInputShapeAtPort(1); - const auto& outputShape = getOutputShapeAtPort(0); + auto outputShape = getOutputShapeAtPort(0); if (inputShape0.getRank() != inputShape1.getRank() || inputShape0.getRank() != outputShape.getRank()) OPENVINO_THROW(errorPrefix, " has invalid dims count"); @@ -325,9 +329,14 @@ void MatMul::getSupportedDescriptors() { } } + for (auto&& node : fusedWith) { + outputShape = mergeShapes(outputShape, node->getOutputShapeAtPort(0)); + } + std::vector staticInputShapes{inputShape0, inputShape1}; if (inputShape0.isDynamic() || inputShape1.isDynamic()) { - std::tie(staticInputShapes[0], staticInputShapes[1]) = makeDummyInputShapes(inputShape0, inputShape1); + std::tie(staticInputShapes[0], staticInputShapes[1]) = + makeDummyInputShapes(inputShape0, inputShape1, outputShape); } auto staticOutputShape = outputShape.isStatic() ? 
outputShape : Shape(shapeInferGeneric(staticInputShapes).front()); @@ -342,14 +351,13 @@ void MatMul::getSupportedDescriptors() { createDescriptor({inDataDesc[0], inDataDesc[1]}, {outDataDesc}); } -std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Shape& in1) const { +std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const { if (in0.getRank() < 2 || in1.getRank() < 2) { OPENVINO_THROW("Can't create dummy inputs with rank less 2"); } - if (in0.getRank() != in1.getRank()) { - OPENVINO_THROW("Can't create dummy inputs if input's rank not equal"); - } + OPENVINO_ASSERT((in0.getRank() == in1.getRank()) && (in1.getRank() == out.getRank()), + "Can't create dummy inputs if argument shapes ranks are not equal"); auto swapTranspDims = [&](VectorDims& in0, VectorDims& in1) { if (transposeIn[0]) { @@ -362,6 +370,7 @@ std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Sha auto inDims0 = in0.getDims(); auto inDims1 = in1.getDims(); + auto outDims = out.getDims(); auto minDims0 = in0.getMinDims(); auto maxDims0 = in0.getMaxDims(); @@ -397,18 +406,28 @@ std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Sha fillDummy(inDims0.size() - 1, inDims1.size() - 2); // fill m, n - if (inDims0[inDims0.size() - 2] == Shape::UNDEFINED_DIM) { + if (outDims[outDims.size() - 2] != Shape::UNDEFINED_DIM) { + inDims0[inDims0.size() - 2] = outDims[outDims.size() - 2]; + } else if (inDims0[inDims0.size() - 2] == Shape::UNDEFINED_DIM) { inDims0[inDims0.size() - 2] = std::min(maxDims0[inDims0.size() - 2], std::max(minDims0[inDims0.size() - 2], static_cast(MemoryDescUtils::DEFAULT_DUMMY_VAL))); } - if (inDims1[inDims1.size() - 1] == Shape::UNDEFINED_DIM) { + + if (outDims[outDims.size() - 1] != Shape::UNDEFINED_DIM) { + inDims1[inDims1.size() - 1] = outDims[outDims.size() - 1]; + } else if (inDims1[inDims1.size() - 1] == Shape::UNDEFINED_DIM) { inDims1[inDims1.size() - 1] = std::min(maxDims1[inDims1.size() - 1], std::max(minDims1[inDims1.size() - 1], static_cast(MemoryDescUtils::DEFAULT_DUMMY_VAL))); } // fill batches for (size_t i = 0; i < inDims0.size() - 2; i++) { - fillDummy(i, i); + if (outDims[i] != Shape::UNDEFINED_DIM) { + inDims0[i] = outDims[i]; + inDims1[i] = outDims[i]; + } else { + fillDummy(i, i); + } } swapTranspDims(inDims0, inDims1); diff --git a/src/plugins/intel_cpu/src/nodes/matmul.h b/src/plugins/intel_cpu/src/nodes/matmul.h index 697de0a4f4fcdc..8b1eec8797b40c 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.h +++ b/src/plugins/intel_cpu/src/nodes/matmul.h @@ -51,7 +51,8 @@ class MatMul : public Node { using executorPtr = std::shared_ptr; executorPtr execPtr = nullptr; dnnl::memory::desc getBiasDescFrom(const DnnlMemoryDescCPtr outMemDesc); - std::pair makeDummyInputShapes(const Shape& in0, const Shape& in1) const; + std::pair + makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const; bool withBiases; diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index 092ff83119a4e1..cd3e3c41aeb503 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -277,6 +277,29 @@ void ScatterUpdate::execute(dnnl::stream strm) { const auto& srcDataDim = getParentEdgeAt(DATA_ID)->getMemory().getStaticDims(); const auto& indicesDim = getParentEdgeAt(INDICES_ID)->getMemory().getStaticDims(); size_t srcRank = srcDataDim.size(); + + // 1d short vector scatter update optimized for shape 
inference subgraph + if (scatterUpdateMode == ScatterUpdateMode::ScatterUpdate && srcDataDim.size() == 1 && indicesDim.size() <= 1 && + indicesPrec == ov::element::i32 && dataPrec == ov::element::i32 && srcDataDim[0] <= 64) { + auto updateDims = updateMemPtr->getStaticDims(); + if (updateDims.size() <= 1) { + DEBUG_LOG(getName(), " exec1DCase"); + auto updateCnt = (updateDims.size() == 0) ? 1 : updateDims[0]; + auto srcLength = srcMemPtr->getStaticDims()[0]; + auto* psrc = reinterpret_cast(srcPtr); + auto* pdst = reinterpret_cast(dstPtr); + for (size_t i = 0; i < srcLength; i++) { + pdst[i] = psrc[i]; + } + auto* pindices = reinterpret_cast(indicesPtr); + auto* pupdate = reinterpret_cast(updatePtr); + for (size_t i = 0; i < updateCnt; i++) { + pdst[pindices[i]] = pupdate[i]; + } + return; + } + } + int axis = 0; if (axisRelaxed) { auto axisMemPtr = getParentEdgeAt(AXIS_ID)->getMemoryPtr(); diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 100be163699022..d5d2fc2e3620c3 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -93,16 +93,7 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte attrs.endMask = createMask(ss->get_end_mask(), 1, true); attrs.newAxisMask = createMask(ss->get_new_axis_mask()); attrs.shrinkAxisMask = createMask(ss->get_shrink_axis_mask()); - - auto origEllipsisMask = ss->get_ellipsis_mask(); - bool isEllipsis = false; - for (const auto &o : origEllipsisMask) { - isEllipsis = isEllipsis || o != 0; - attrs.ellipsisMask.push_back(o); - } - if (attrs.ellipsisMask.size() == 0 || !isEllipsis) { - for (size_t i = attrs.ellipsisMask.size(); i < nDims; ++i) attrs.ellipsisMask.push_back(0); - } + attrs.ellipsisMask = createMask(ss->get_ellipsis_mask()); } else { const size_t length = outputShapes[0].getRank(); if (inputShapes.size() > AXES_ID) { diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index 4e98c9ef2029a2..b63def7f19d641 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -36,6 +36,12 @@ #include #include +#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS) +#include "emitters/snippets/x64/jit_segfault_detector_emitter.hpp" +#include +std::mutex err_print_lock; +#endif + using namespace dnnl::impl::utils; using namespace dnnl::impl::cpu; using namespace dnnl::impl::cpu::x64; @@ -395,7 +401,7 @@ void Snippet::prepareParams() { auto builder = [this](const SnippetKey& key) -> std::shared_ptr { std::shared_ptr executor = - std::make_shared(key.attrs, is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16); + std::make_shared(key.attrs, is_dynamic); return executor; }; @@ -416,7 +422,7 @@ void Snippet::prepareParams() { getOrCreateExecutor(); } else { // in case perf count is enabled, disable executor cache by default to not mix up perf counters for different subgraphs. 
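For reference, a minimal sketch of the SIGSEGV-hook pattern that segfault_detector() above relies on; g_active_context is a simplified stand-in for the ThreadLocal emitter registry and its info() dump, and the real handler throws rather than exiting.

#include <signal.h>   // POSIX sigaction, as in the guarded code above
#include <cstdio>
#include <cstdlib>

static thread_local const char* g_active_context = nullptr;  // set before risky JIT code runs

static void segv_handler(int) {
    // async-signal-safety is deliberately ignored here, as in the debug-only code above
    if (g_active_context)
        std::fprintf(stderr, "SIGSEGV while executing: %s\n", g_active_context);
    std::_Exit(1);
}

static void install_segv_handler() {
    struct sigaction sa{};  // zero-initialized; no SA_* flags needed for this sketch
    sa.sa_handler = segv_handler;
    sigaction(SIGSEGV, &sa, nullptr);
}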
diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
index 100be163699022..d5d2fc2e3620c3 100644
--- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
+++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
@@ -93,16 +93,7 @@ StridedSlice::StridedSlice(const std::shared_ptr<ov::Node>& op, const GraphConte
         attrs.endMask = createMask(ss->get_end_mask(), 1, true);
         attrs.newAxisMask = createMask(ss->get_new_axis_mask());
         attrs.shrinkAxisMask = createMask(ss->get_shrink_axis_mask());
-
-        auto origEllipsisMask = ss->get_ellipsis_mask();
-        bool isEllipsis = false;
-        for (const auto &o : origEllipsisMask) {
-            isEllipsis = isEllipsis || o != 0;
-            attrs.ellipsisMask.push_back(o);
-        }
-        if (attrs.ellipsisMask.size() == 0 || !isEllipsis) {
-            for (size_t i = attrs.ellipsisMask.size(); i < nDims; ++i) attrs.ellipsisMask.push_back(0);
-        }
+        attrs.ellipsisMask = createMask(ss->get_ellipsis_mask());
     } else {
         const size_t length = outputShapes[0].getRank();
         if (inputShapes.size() > AXES_ID) {
diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
index 4e98c9ef2029a2..b63def7f19d641 100644
--- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp
+++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
@@ -36,6 +36,12 @@
 #include
 #include
 
+#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS)
+#include "emitters/snippets/x64/jit_segfault_detector_emitter.hpp"
+#include <csignal>
+std::mutex err_print_lock;
+#endif
+
 using namespace dnnl::impl::utils;
 using namespace dnnl::impl::cpu;
 using namespace dnnl::impl::cpu::x64;
@@ -395,7 +401,7 @@ void Snippet::prepareParams() {
     auto builder = [this](const SnippetKey& key) -> std::shared_ptr<SnippetExecutor> {
         std::shared_ptr<SnippetExecutor> executor =
-            std::make_shared<SnippetJitExecutor>(key.attrs, is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16);
+            std::make_shared<SnippetJitExecutor>(key.attrs, is_dynamic);
         return executor;
     };
 
@@ -416,7 +422,7 @@ void Snippet::prepareParams() {
         getOrCreateExecutor();
     } else {
         // in case perf count is enabled, disable executor cache by default to not mix up perf counters for different subgraphs.
-        execPtr = std::make_shared<SnippetJitExecutor>(key.attrs, is_dynamic, context->getConfig().inferencePrecision == ov::element::bf16);
+        execPtr = std::make_shared<SnippetJitExecutor>(key.attrs, is_dynamic);
     }
 #endif
 }
@@ -510,10 +516,31 @@ void Snippet::SnippetJitExecutor::update_ptrs(jit_snippets_call_args& call_args,
     }
 }
 
+#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS)
+void Snippet::SnippetJitExecutor::segfault_detector() {
+    const auto target = std::dynamic_pointer_cast<const CPUTargetMachine>(snippetAttrs.snippet->get_generator()->get_target_machine());
+    if (target && target->debug_config.enable_segfault_detector) {
+        __sighandler_t signal_handler = [](int signal) {
+            std::lock_guard<std::mutex> guard(err_print_lock);
+            if (auto segfault_detector_emitter = ov::intel_cpu::g_custom_segfault_handler->local())
+                std::cout << segfault_detector_emitter->info() << std::endl;
+            auto tid = parallel_get_thread_num();
+            OPENVINO_THROW("Segfault was caught by the signal handler in subgraph node execution on thread " + std::to_string(tid));
+        };
+        struct sigaction new_handler{};
+        new_handler.sa_handler = signal_handler;
+        sigaction(SIGSEGV, &new_handler, nullptr);
+    }
+}
+#endif
+
 void Snippet::SnippetJitExecutor::schedule_6d(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) {
     const auto& dom = parallel_exec_domain;
     // < N, C, H, W > < 1, 1, N, C*H*W>
     const auto& callable = schedule.get_callable<kernel>();
+#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS)
+    segfault_detector();
+#endif
     parallel_for5d(dom[0], dom[1], dom[2], dom[3], dom[4],
         [&](int64_t d0, int64_t d1, int64_t d2, int64_t d3, int64_t d4) {
             int64_t indexes[] = {d0, d1, d2, d3, d4};
@@ -525,6 +552,9 @@ void Snippet::SnippetJitExecutor::schedule_nt(const std::vector<MemoryPtr>& inMe
     const auto& work_size = parallel_exec_domain;
+#if defined(__linux__) && defined(SNIPPETS_DEBUG_CAPS)
+    segfault_detector();
+#endif
     parallel_nt(0, [&](const int ithr, const int nthr) {
         jit_snippets_call_args call_args;
         update_ptrs(call_args, inMemPtrs, outMemPtrs);
@@ -545,11 +575,11 @@ void Snippet::SnippetJitExecutor::schedule_nt(const std::vector<MemoryPtr>& inMe
     });
 }
 
-Snippet::SnippetExecutor::SnippetExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16)
-    : snippetAttrs(std::move(attrs)), is_dynamic(is_dynamic), enforceBF16(enforceBF16) {}
+Snippet::SnippetExecutor::SnippetExecutor(SnippetAttrs attrs, bool is_dynamic)
+    : snippetAttrs(std::move(attrs)), is_dynamic(is_dynamic) {}
 
-Snippet::SnippetJitExecutor::SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16) :
-    SnippetExecutor(std::move(attrs), is_dynamic, enforceBF16) {
+Snippet::SnippetJitExecutor::SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic) :
+    SnippetExecutor(std::move(attrs), is_dynamic) {
     numInput = snippetAttrs.inMemBlockedDims.size();
     numOutput = snippetAttrs.outMemBlockedDims.size();
     start_offset_in.resize(numInput);
diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.h b/src/plugins/intel_cpu/src/nodes/subgraph.h
index 8abc00bbc16f02..9ce3a3b71b760b 100644
--- a/src/plugins/intel_cpu/src/nodes/subgraph.h
+++ b/src/plugins/intel_cpu/src/nodes/subgraph.h
@@ -73,7 +73,7 @@ class Snippet : public Node {
     class SnippetExecutor {
     public:
-        SnippetExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16);
+        SnippetExecutor(SnippetAttrs attrs, bool is_dynamic);
         virtual void exec(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) = 0;
         virtual ~SnippetExecutor() = default;
         std::shared_ptr shapeInference = nullptr;
@@ -81,14 +81,13 @@ class Snippet : public Node {
     protected:
         SnippetAttrs snippetAttrs;
         bool is_dynamic = false;
-        bool enforceBF16 = false;
     };
 
     std::shared_ptr<SnippetExecutor> execPtr = nullptr;
 
     class SnippetJitExecutor : public SnippetExecutor {
     public:
-        SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic, bool enforceBF16);
+        SnippetJitExecutor(SnippetAttrs attrs, bool is_dynamic);
         void exec(const std::vector<MemoryPtr>& inMemPtrs, const std::vector<MemoryPtr>& outMemPtrs) override;
         bool schedule_created();
@@ -126,6 +125,10 @@
     // Buffer scratchpad
     std::vector<uint8_t> buffer_scratchpad = {};
     size_t buffer_scratchpad_size = 0;
+
+#ifdef SNIPPETS_DEBUG_CAPS
+    inline void segfault_detector();
+#endif
 };
 };
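The segfault detector above arms a SIGSEGV handler before the JIT-ed snippet kernels run, so a crash inside generated code can be reported with the emitter info stashed in a thread-local slot. A self-contained sketch of the sigaction pattern (Linux-only, illustrative message; unlike the handler above, which throws and relies on implementation-specific behaviour, this one restricts itself to async-signal-safe calls and terminates):

#include <csignal>
#include <unistd.h>

// Install a process-wide SIGSEGV handler via sigaction. A captureless lambda
// converts to the plain function pointer that sa_handler expects.
static void installSegfaultReporter() {
    struct sigaction handler {};
    handler.sa_handler = [](int) {
        static const char msg[] = "SIGSEGV caught during snippet execution\n";
        (void)write(STDERR_FILENO, msg, sizeof(msg) - 1);  // async-signal-safe
        _exit(1);                                          // do not return to the faulting code
    };
    sigaction(SIGSEGV, &handler, nullptr);
}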
diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp
index 0313e5ed3c4a8b..12b29a5cbb3192 100644
--- a/src/plugins/intel_cpu/src/plugin.cpp
+++ b/src/plugins/intel_cpu/src/plugin.cpp
@@ -183,10 +183,6 @@ Engine::~Engine() {
 }
 
 static bool streamsSet(const ov::AnyMap& config) {
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    if (config.count(InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS))
-        return true;
-    OPENVINO_SUPPRESS_DEPRECATED_END
     return config.count(ov::num_streams.name());
 }
 
@@ -297,14 +293,8 @@ void Engine::apply_performance_hints(ov::AnyMap& config, const std::shared_ptr& model, bool imported) const {
@@ -611,7 +597,7 @@ Engine::compile_model(const std::shared_ptr& model, const ov::A
             denormals_as_zero(false);
         }
     }
-    return std::make_shared<CompiledModel>(cloned_model, shared_from_this(), conf);
+    return std::make_shared<CompiledModel>(cloned_model, shared_from_this(), conf, false);
 }
 
 void Engine::set_property(const ov::AnyMap &config) {
@@ -625,24 +611,7 @@ bool Engine::is_legacy_api() const {
     return !get_core()->is_new_api();
 }
 
-ov::Any Engine::get_property_legacy(const std::string& name, const ov::AnyMap& options) const {
-    ov::Any result;
-    auto option = engConfig._config.find(name);
-    if (option != engConfig._config.end()) {
-        result = option->second;
-    } else {
-        return get_metric_legacy(name, options);
-    }
-    return result;
-}
-
 ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) const {
-    if (is_legacy_api()) {
-        auto ret = get_property_legacy(name, options);
-        if (!ret.empty())
-            return ret;
-    }
-
     if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = engConfig.streamExecutorConfig._streams;
         return decltype(ov::optimal_number_of_infer_requests)::value_type(
@@ -711,75 +680,13 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options)
             }
         }
         return res;
+    } else if (name == ov::internal::exclusive_async_requests.name()) {
+        return engConfig.exclusiveAsyncRequests;
     }
     return get_ro_property(name, options);
 }
 
-ov::Any Engine::get_metric_legacy(const std::string& name, const ov::AnyMap& options) const {
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    if (name == METRIC_KEY(SUPPORTED_METRICS)) {
-        std::vector<std::string> metrics = {
-            METRIC_KEY(AVAILABLE_DEVICES),
-            METRIC_KEY(SUPPORTED_METRICS),
-            METRIC_KEY(FULL_DEVICE_NAME),
-            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
-            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
-            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS),
-            METRIC_KEY(RANGE_FOR_STREAMS),
-            METRIC_KEY(IMPORT_EXPORT_SUPPORT),
-        };
-        return metrics;
-    } else if (name == ov::device::full_name.name()) {
-        return decltype(ov::device::full_name)::value_type(deviceFullName);
-    } else if (name == ov::available_devices.name()) {
-        std::vector<std::string> availableDevices = {""};
-        return 
decltype(ov::available_devices)::value_type(std::move(availableDevices)); - } else if (name == ov::device::capabilities.name()) { - std::vector capabilities; - if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16)) - capabilities.push_back(METRIC_VALUE(BF16)); - if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) - capabilities.push_back(METRIC_VALUE(WINOGRAD)); - capabilities.push_back(METRIC_VALUE(FP32)); - capabilities.push_back(METRIC_VALUE(FP16)); - capabilities.push_back(METRIC_VALUE(INT8)); - capabilities.push_back(METRIC_VALUE(BIN)); - return decltype(ov::device::capabilities)::value_type(std::move(capabilities)); - } else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { - std::vector configKeys; - for (auto&& opt : engConfig._config) - configKeys.push_back(opt.first); - return configKeys; - } else if (name == ov::range_for_async_infer_requests.name()) { - std::tuple range = std::make_tuple(1, 1, 1); - return decltype(ov::range_for_async_infer_requests)::value_type(range); - } else if (name == ov::range_for_streams.name()) { - std::tuple range = std::make_tuple(1, parallel_get_max_threads()); - return decltype(ov::range_for_streams)::value_type(range); - } else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) { - return true; - } else if (ov::internal::supported_properties.name() == name) { - return decltype(ov::internal::supported_properties)::value_type{ - ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}, - ov::PropertyName{ov::internal::exclusive_async_requests.name(), ov::PropertyMutability::RW}, - ov::PropertyName{ov::internal::compiled_model_runtime_properties.name(), ov::PropertyMutability::RO}, - ov::PropertyName{ov::internal::compiled_model_runtime_properties_supported.name(), ov::PropertyMutability::RO}}; - } else if (name == ov::internal::caching_properties) { - std::vector cachingProperties = {ov::device::full_name.name()}; - return decltype(ov::internal::caching_properties)::value_type(std::move(cachingProperties)); - } - - return {}; - OPENVINO_SUPPRESS_DEPRECATED_END -} - ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& options) const { - if (is_legacy_api()) { - ov::Any ret = get_metric_legacy(name, options); - if (!ret.empty()) - return ret; - } - auto RO_property = [](const std::string& propertyName) { return ov::PropertyName(propertyName, ov::PropertyMutability::RO); }; @@ -856,11 +763,6 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio } else if (name == ov::intel_cpu::sparse_weights_decompression_rate) { return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type(engConfig.fcSparseWeiDecompressionRate); } - /* Internally legacy parameters are used with new API as part of migration procedure. - * This fallback can be removed as soon as migration completed */ - auto ret = get_metric_legacy(name, options); - if(!ret.empty()) - return ret; OPENVINO_THROW("Cannot get unsupported property: ", name); } @@ -931,12 +833,20 @@ std::shared_ptr Engine::import_model(std::istream& networkMo Config conf = engConfig; Config::ModelType modelType = getModelType(model); - conf.readProperties(config, modelType); + + // check ov::loaded_from_cache property and erase it to avoid exception in readProperties. 
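+    // (ov::loaded_from_cache is injected by the OpenVINO core when the blob comes
+    // from the model cache; it is not a CPU plugin config key, so readProperties
+    // would reject it. Pop it from a local copy of the map and forward the flag
+    // to the compiled model instead.)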
+ auto _config = config; + const auto& it = _config.find(ov::loaded_from_cache.name()); + bool loaded_from_cache = false; + if (it != _config.end()) { + loaded_from_cache = it->second.as(); + _config.erase(it); + } + conf.readProperties(_config, modelType); // import config props from caching model calculate_streams(conf, model, true); - - auto compiled_model = std::make_shared(model, shared_from_this(), conf, true); + auto compiled_model = std::make_shared(model, shared_from_this(), conf, loaded_from_cache); return compiled_model; } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 756387aa48a13d..53f52706f3c0fd 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -47,9 +47,7 @@ class Engine : public ov::IPlugin { bool is_legacy_api() const; ov::Any get_ro_property(const std::string& name, const ov::AnyMap& options) const; - ov::Any get_metric_legacy(const std::string& name, const ov::AnyMap& options) const; - ov::Any get_property_legacy(const std::string& name, const ov::AnyMap& options) const; void apply_performance_hints(ov::AnyMap &config, const std::shared_ptr& model) const; void get_performance_streams(Config &config, const std::shared_ptr& model) const; StreamCfg get_streams_num(ov::threading::IStreamsExecutor::ThreadBindingType thread_binding_type, diff --git a/src/plugins/intel_cpu/src/serialize.cpp b/src/plugins/intel_cpu/src/serialize.cpp index 777d7ea8a04ecc..0b91061684e741 100644 --- a/src/plugins/intel_cpu/src/serialize.cpp +++ b/src/plugins/intel_cpu/src/serialize.cpp @@ -27,7 +27,6 @@ static void setInfo(pugi::xml_node& root, std::shared_ptr& model) { ModelSerializer::ModelSerializer(std::ostream& ostream) : _ostream(ostream) {} void ModelSerializer::operator<<(const std::shared_ptr& model) { - OPENVINO_SUPPRESS_DEPRECATED_START auto serializeInfo = [&](std::ostream& stream) { const std::string name = "cnndata"; pugi::xml_document xml_doc; @@ -41,9 +40,7 @@ void ModelSerializer::operator<<(const std::shared_ptr& model) { xml_doc.save(stream); }; - // Serialize to old representation in case of old API ov::pass::StreamSerialize serializer(_ostream, serializeInfo); - OPENVINO_SUPPRESS_DEPRECATED_END serializer.run_on_model(std::const_pointer_cast(model->clone())); } @@ -64,7 +61,6 @@ void ModelDeserializer::operator>>(std::shared_ptr& model) { // read model input/output precisions _istream.seekg(hdr.custom_data_offset); - OPENVINO_SUPPRESS_DEPRECATED_START pugi::xml_document xmlInOutDoc; if (hdr.custom_data_size > 0) { std::string xmlInOutString; @@ -75,7 +71,6 @@ void ModelDeserializer::operator>>(std::shared_ptr& model) { OPENVINO_THROW("NetworkNotRead: The inputs and outputs information is invalid."); } } - OPENVINO_SUPPRESS_DEPRECATED_END // read blob content _istream.seekg(hdr.consts_offset); diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index feeb728d0e2de3..fcf365e54b58c5 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -1,18 +1,11 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "shape_inference.hpp" - -#include #include -#include "openvino/opsets/opset1.hpp" #include -#include "openvino/opsets/opset11.hpp" #include #include #include -#include "openvino/opsets/opset3.hpp" -#include "openvino/opsets/opset4.hpp" #include #include 
#include @@ -75,6 +68,10 @@ #include "nms_shape_inference.hpp" #include "nv12_shape_inference.hpp" #include "one_hot_shape_inference.hpp" +#include "openvino/opsets/opset1.hpp" +#include "openvino/opsets/opset11.hpp" +#include "openvino/opsets/opset3.hpp" +#include "openvino/opsets/opset4.hpp" #include "pad_shape_inference.hpp" #include "prior_box_clustered_shape_inference.hpp" #include "prior_box_shape_inference.hpp" @@ -98,6 +95,7 @@ #include "scatter_elements_update_shape_inference.hpp" #include "scatter_nd_base_shape_inference.hpp" #include "select_shape_inference.hpp" +#include "shape_inference.hpp" #include "shape_nodes.hpp" #include "shuffle_channels_shape_inference.hpp" #include "slice_shape_inference.hpp" diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp index 590f1bf225d8c1..4536698f6354c4 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp @@ -4,7 +4,6 @@ #pragma once -#include #include #include diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp index 3781e59bc788ad..fc44c6cffbce48 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp @@ -24,17 +24,25 @@ bool ov::intel_cpu::NgramNode::visit_attributes(ov::AttributeVisitor &visitor) { void ov::intel_cpu::NgramNode::validate_and_infer_types() { INTERNAL_OP_SCOPE(NgramNode_validate_and_infer_types); - NGRAPH_CHECK(m_k > 0, "k attribute must be greater than zero"); + OPENVINO_ASSERT(m_k > 0, "k attribute must be greater than zero"); const auto& idces_et = get_input_element_type(1); const auto& idces_shape = get_input_partial_shape(1); - NGRAPH_CHECK(idces_shape.rank() == 2, "'batch_idces' input must have 2D shape whereas current shape is", idces_shape); - NGRAPH_CHECK(idces_et.is_integral_number(), "'batch_idces' input must be integer whereas current element type is", idces_et); + OPENVINO_ASSERT(idces_shape.rank() == 2, + "'batch_idces' input must have 2D shape whereas current shape is", + idces_shape); + OPENVINO_ASSERT(idces_et.is_integral_number(), + "'batch_idces' input must be integer whereas current element type is", + idces_et); const auto& embeddings_et = get_input_element_type(0); const auto& embeddings_shape = get_input_partial_shape(0); - NGRAPH_CHECK(embeddings_et.is_real(), "'embeddings' input must be real whereas current element type is", embeddings_et); - NGRAPH_CHECK(embeddings_shape.rank() == 2, "'embeddings' input must have 2D shape whereas current shape is", embeddings_shape); + OPENVINO_ASSERT(embeddings_et.is_real(), + "'embeddings' input must be real whereas current element type is", + embeddings_et); + OPENVINO_ASSERT(embeddings_shape.rank() == 2, + "'embeddings' input must have 2D shape whereas current shape is", + embeddings_shape); auto out_shape = embeddings_shape; out_shape[1] *= m_k; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp index aeccde65a25513..2d9fdc6228ac8f 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/rope.hpp @@ -5,7 +5,8 @@ #pragma once #include -#include + +#include "openvino/op/op.hpp" 
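The ngram and RoPE changes above replace the deprecated ngraph entry points (NGRAPH_CHECK, ngraph::op::Op) with their ov:: equivalents. For reference, a minimal skeleton of a custom op on the non-deprecated API (the op name and opset here are illustrative, not part of this patch):

#include <memory>

#include "openvino/op/op.hpp"

// Illustrative skeleton only: a custom op written against the current ov:: API,
// with OPENVINO_ASSERT in place of the retired NGRAPH_CHECK.
class MyCustomOp : public ov::op::Op {
public:
    OPENVINO_OP("MyCustomOp", "custom_opset");

    MyCustomOp() = default;
    explicit MyCustomOp(const ov::Output<ov::Node>& arg) : ov::op::Op({arg}) {
        constructor_validate_and_infer_types();
    }

    void validate_and_infer_types() override {
        // OPENVINO_ASSERT takes the condition first, then message fragments
        // that are concatenated into the exception text on failure.
        OPENVINO_ASSERT(get_input_partial_shape(0).rank().is_static(),
                        "input rank must be static, got ", get_input_partial_shape(0));
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }

    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override {
        return std::make_shared<MyCustomOp>(new_args.at(0));
    }
};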
namespace ov { namespace intel_cpu { @@ -58,7 +59,7 @@ namespace intel_cpu { * T2 - FP32 * T3 - I32 */ -class RoPENode : public ngraph::op::Op { +class RoPENode : public ov::op::Op { public: OPENVINO_OP("RoPE", "cpu_plugin_opset"); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp index b4fdda2c4cde40..f979d79a827452 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp @@ -47,7 +47,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { auto shape_a = fc_input_a.get_partial_shape(); auto shape_b = fc_input_b.get_partial_shape(); - NGRAPH_CHECK(shape_b.is_static()); + OPENVINO_ASSERT(shape_b.is_static()); auto rank_a = shape_a.rank().get_length(); auto rank_b = shape_b.rank().get_length(); @@ -140,7 +140,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { if (rank_b != 2) { ov::Dimension K = *(shape_b_aligned.rbegin() + 1); - NGRAPH_CHECK(K.is_static()); + OPENVINO_ASSERT(K.is_static()); auto k_len = K.get_length(); auto reshape_shape_values = matmul->get_transpose_b() ? std::vector{-1, k_len} : std::vector{k_len, -1}; auto reshape_shape = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ 2 }, reshape_shape_values); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp index b4fded221efcbe..8552013de64294 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rope_fusion.cpp @@ -6,13 +6,15 @@ #include #include -#include + +#include "openvino/core/rt_info.hpp" #include "openvino/opsets/opset1.hpp" -#include -#include -#include -#include -#include +#include "openvino/opsets/opset6.hpp" +#include "openvino/opsets/opset8.hpp" +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "transformations/utils/utils.hpp" #include "itt.hpp" #include "ov_ops/type_relaxed.hpp" @@ -55,7 +57,7 @@ ov::intel_cpu::RoPEFusionGPTNEOX::RoPEFusionGPTNEOX() { // [x1, x2]*cos + [-x2, x1]*sin auto result = makePattern({mul_cos, mul_sin}, {{"auto_broadcast", "numpy"}}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { PatternValidator validator(m); if (!validator) { return false; @@ -94,7 +96,7 @@ ov::intel_cpu::RoPEFusionGPTNEOX::RoPEFusionGPTNEOX() { return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -154,7 +156,7 @@ ov::intel_cpu::RoPEFusionCosSinPreprocess::RoPEFusionCosSinPreprocess() { auto x = makePattern(ov::Rank(4)); auto rope = makePattern({x, cos_tab, sin_tab}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { PatternValidator validator(m); if (!validator) { return false; @@ -186,7 +188,7 @@ ov::intel_cpu::RoPEFusionCosSinPreprocess::RoPEFusionCosSinPreprocess() { register_new_node(rope_node); return true; }; - auto m = std::make_shared(rope, matcher_name); + auto m = std::make_shared(rope, 
matcher_name); this->register_matcher(m, callback); } @@ -202,7 +204,7 @@ ov::intel_cpu::RoPEFusionIOSlicing::RoPEFusionIOSlicing() { auto x_emb = makePattern({x, {}, {}}) | makePattern({x, {}, {}, {}}); auto result = makePattern({x_emb, y}, {{"axis", -1}}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); auto root = m.get_match_root(); @@ -229,7 +231,7 @@ ov::intel_cpu::RoPEFusionIOSlicing::RoPEFusionIOSlicing() { register_new_node(rope_node); return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -250,7 +252,7 @@ ov::intel_cpu::RoPEFusionPreprocess::RoPEFusionPreprocess() { auto x = makePattern({input_slice | input_to_trans, {0, 2, 1, 3}}); auto result = makePattern({x, {}, {}}) | makePattern({x, {}, {}, {}}); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { PatternValidator validator(m); if (!validator) { return false; @@ -279,17 +281,17 @@ ov::intel_cpu::RoPEFusionPreprocess::RoPEFusionPreprocess() { register_new_node(rope_node); return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } // remove stridedslice from 0 to int32_max with stride 1 ov::intel_cpu::EliminateStridedSlice::EliminateStridedSlice() { MATCHER_SCOPE(EliminateStridedSlice); - auto data = ov::pass::pattern::any_input(ngraph::pattern::has_static_rank()); - auto begin = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32)); - auto end = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32)); - auto stride = ov::pass::pattern::wrap_type(ngraph::pattern::type_matches(ov::element::i32)); + auto data = ov::pass::pattern::any_input(ov::pass::pattern::has_static_rank()); + auto begin = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32)); + auto end = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32)); + auto stride = ov::pass::pattern::wrap_type(ov::pass::pattern::type_matches(ov::element::i32)); auto strided_slice = ov::pass::pattern::wrap_type({data, begin, end, stride}, [](const Output& value) { @@ -351,12 +353,12 @@ ov::intel_cpu::EliminateStridedSlice::EliminateStridedSlice() { return true; }); - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { auto root = m.get_match_root(); return replace_output_update_name(root->output(0), root->input_value(0)); }; - auto m = std::make_shared(strided_slice, matcher_name); + auto m = std::make_shared(strided_slice, matcher_name); this->register_matcher(m, callback); } @@ -423,7 +425,7 @@ ov::intel_cpu::RoPEFusionGPTJ::RoPEFusionGPTJ() { auto result = permute_Transpose_1213; - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); auto root = m.get_match_root(); PatternValidator validator(m); @@ -451,7 +453,7 @@ ov::intel_cpu::RoPEFusionGPTJ::RoPEFusionGPTJ() { return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -539,7 
+541,7 @@ ov::intel_cpu::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id) { auto result = cat_Concat_505; - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); auto root = m.get_match_root(); PatternValidator validator(m); @@ -576,7 +578,7 @@ ov::intel_cpu::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id) { return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } @@ -679,7 +681,7 @@ ov::intel_cpu::RoPEFusionQwen::RoPEFusionQwen(int split_output_id) { auto result = add_Add_597; - matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); auto root = m.get_match_root(); PatternValidator validator(m); @@ -715,6 +717,6 @@ ov::intel_cpu::RoPEFusionQwen::RoPEFusionQwen(int split_output_id) { return true; }; - auto m = std::make_shared(result, matcher_name); + auto m = std::make_shared(result, matcher_name); this->register_matcher(m, callback); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/fused_mul_add.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/fused_mul_add.cpp index a80d582672ed59..6c3cd868b7aaba 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/fused_mul_add.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/fused_mul_add.cpp @@ -7,8 +7,6 @@ #include "snippets/itt.hpp" #include "openvino/op/util/elementwise_args.hpp" -#include - using namespace ov; using namespace ov::intel_cpu; @@ -28,8 +26,8 @@ std::shared_ptr FusedMulAdd::clone_with_new_inputs(const OutputVector& new void FusedMulAdd::validate_and_infer_types() { const auto input_size = get_input_size(); - NGRAPH_CHECK(input_size == 3, "FusedMulAdd must have 3 inputs"); - NGRAPH_CHECK(get_output_size() == 1, "FusedMulAdd must have only 1 output"); + OPENVINO_ASSERT(input_size == 3, "FusedMulAdd must have 3 inputs"); + OPENVINO_ASSERT(get_output_size() == 1, "FusedMulAdd must have only 1 output"); const auto element_type = get_input_element_type(0); auto pshape = get_input_partial_shape(0); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp index e9c0f593cfddeb..5bd27172aea92d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/perf_count_rdtsc.hpp @@ -7,6 +7,7 @@ #include "openvino/op/op.hpp" #include "snippets/op/perf_count.hpp" +#include using namespace ov::snippets::op; @@ -38,8 +39,16 @@ class PerfCountRdtscEnd : public PerfCountEndBase { PerfCountRdtscEnd(const Output& pc_begin); PerfCountRdtscEnd() = default; ~PerfCountRdtscEnd() { - uint64_t avg = iteration == 0 ? 0 : accumulation / iteration; - std::cout << "accumulation:" << accumulation << " iteration:" << iteration << " avg:" << avg << std::endl; + double avg = 0; + if (iteration != 0) { + // Note: theoretically accumulation could be larger than 2^53, however + // iteration is unlikely to exceed this threshold. 
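+            // (An IEEE-754 double has a 53-bit mantissa, so integers are represented
+            // exactly only up to 2^53, roughly 9.0e15; dividing a larger cycle count
+            // directly in double would silently drop the low-order bits.)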
So here we derive an integral part first + // and cast only the remainder to double + const uint64_t integral = accumulation / iteration; + avg = integral + static_cast(accumulation - integral * iteration) / iteration; + } + std::cerr << "name : " << get_friendly_name() << " : acc : " << accumulation << " : num_hit : " << iteration + << std::fixed << std::setprecision(4) << " : avg : " << avg << std::endl; } std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; @@ -49,7 +58,7 @@ class PerfCountRdtscEnd : public PerfCountEndBase { // in destructor of PerfCountRdtscEnd, output the perf info // accumulation is cycle count uint64_t accumulation = 0ul; - uint32_t iteration = 0u; + uint64_t iteration = 0ul; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp index 319b17d3e6cb07..165f9626014290 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp @@ -32,36 +32,24 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert(snippets::lowe if (consumers.size() != 1) return false; + OPENVINO_ASSERT(convert_expr->get_loop_ids() == load_expr->get_loop_ids(), + "The pair of Load and Convert expressions must be in the same loops!"); + + const auto& parent_source = load_expr->get_input_port_connector(0)->get_source(); + const auto parent_output = parent_source.get_expr()->get_node()->output(parent_source.get_index()); std::shared_ptr load_convert = nullptr; if (ov::is_type(convert)) { - load_convert = std::make_shared(load->input_value(0), - convert->get_destination_type(), + load_convert = std::make_shared(parent_output, convert->get_destination_type(), load->get_count(), load->get_offset()); } else if (ov::is_type(convert)) { - load_convert = std::make_shared(load->input_value(0), - convert->get_destination_type(), + load_convert = std::make_shared(parent_output, convert->get_destination_type(), load->get_count(), load->get_offset()); } else { OPENVINO_THROW("Type of Convert op is undefined. 
Supports only fusing Load and ConvertTruncation or ConvertSaturation ops"); } - const auto out_port = convert_expr->get_output_port(0); - const auto convert_consumers = out_port.get_connected_ports(); - snippets::lowered::PortDescriptorUtils::set_port_descriptor_ptr(load_convert->output(0), out_port.get_descriptor_ptr()->clone()); - const auto load_convert_expr = linear_ir.create_expression(load_convert, { load_expr->get_input_port_connector(0) }); - const auto convert_expr_it = convert_it; - const auto insertion_pos = std::next(convert_it); - convert_it = linear_ir.insert(insertion_pos, load_convert_expr); - - const auto& load_loop_ids = load_expr->get_loop_ids(); - load_convert_expr->set_loop_ids(load_loop_ids); - const auto& loop_manager = linear_ir.get_loop_manager(); - loop_manager->update_loops_port(load_loop_ids, load_expr->get_input_port(0), {load_convert_expr->get_input_port(0)}, true); - loop_manager->update_loops_port(load_loop_ids, convert_expr->get_output_port(0), {load_convert_expr->get_output_port(0)}, false); - - linear_ir.erase(std::find(linear_ir.cbegin(), convert_expr_it, load_expr)); - linear_ir.erase(convert_expr_it); - linear_ir.replace_input(convert_consumers, load_convert_expr->get_output_port_connector(0)); + convert_it = linear_ir.replace_with_node({load_expr, convert_expr}, load_convert); + return true; } @@ -69,7 +57,6 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low snippets::lowered::LinearIR::constExprIt& convert_it) { const auto& convert_expr = *convert_it; const auto& convert = ov::as_type_ptr(convert_expr->get_node()); - const auto& input_connector = convert_expr->get_input_port_connector(0); const auto& output_connector = convert_expr->get_output_port_connector(0); if (convert->get_input_element_type(0) != ov::element::f32 && convert->get_input_element_type(0) != ov::element::i32) return false; @@ -84,36 +71,24 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low if (!store) return false; + OPENVINO_ASSERT(convert_expr->get_loop_ids() == store_expr->get_loop_ids(), + "The pair of Convert and Store expressions must be in the same loops!"); + + const auto& parent_source = convert_expr->get_input_port_connector(0)->get_source(); + const auto parent_output = parent_source.get_expr()->get_node()->output(parent_source.get_index()); std::shared_ptr store_convert = nullptr; if (ov::is_type(convert)) { - store_convert = std::make_shared(convert->input_value(0), - convert->get_destination_type(), + store_convert = std::make_shared(parent_output, convert->get_destination_type(), store->get_count(), store->get_offset()); } else if (ov::is_type(convert)) { - store_convert = std::make_shared(convert->input_value(0), - convert->get_destination_type(), + store_convert = std::make_shared(parent_output, convert->get_destination_type(), store->get_count(), store->get_offset()); } else { OPENVINO_THROW("Type of Convert op is undefined. 
Supports only fusing Store and ConvertTruncation or ConvertSaturation ops"); } - const auto out_port = store_expr->get_output_port(0); - const auto store_consumers = out_port.get_connected_ports(); - snippets::lowered::PortDescriptorUtils::set_port_descriptor_ptr(store_convert->output(0), out_port.get_descriptor_ptr()->clone()); - const auto store_convert_expr = linear_ir.create_expression(store_convert, { input_connector }); - const auto convert_expr_it = convert_it; - const auto insertion_pos = std::next(convert_it); - convert_it = linear_ir.insert(insertion_pos, store_convert_expr); - - const auto& convert_loop_ids = convert_expr->get_loop_ids(); - store_convert_expr->set_loop_ids(convert_loop_ids); - const auto& loop_manager = linear_ir.get_loop_manager(); - loop_manager->update_loops_port(convert_loop_ids, convert_expr->get_input_port(0), {store_convert_expr->get_input_port(0)}, true); - loop_manager->update_loops_port(convert_loop_ids, store_expr->get_output_port(0), {store_convert_expr->get_output_port(0)}, false); - - linear_ir.erase(std::find(convert_expr_it, linear_ir.cend(), store_expr)); - linear_ir.erase(convert_expr_it); - linear_ir.replace_input(store_consumers, store_convert_expr->get_output_port_connector(0)); + convert_it = linear_ir.replace_with_node({convert_expr, store_expr}, store_convert); + return true; } diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 50c6cdb8fb6cac..8c1b6c194aa653 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -10,6 +10,7 @@ #include #ifdef CPU_DEBUG_CAPS +#include "cpu_memory.h" #include "debug_capabilities.h" #include "node.h" #include "edge.h" @@ -313,7 +314,7 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { if (shape_size(shape) <= 8) { auto type = pmem->getDesc().getPrecision(); - auto tensor = std::make_shared(type, shape, data); + auto tensor = ov::Tensor(type, shape, data); auto constop = std::make_shared(tensor); comma = ""; for (auto & v : constop->get_value_strings()) { @@ -608,6 +609,43 @@ std::ostream & operator<<(std::ostream & os, const dnnl::memory::format_tag form return os; } +template +std::string to_string(const T* values, size_t N, size_t maxsize) { + std::stringstream ss; + for (size_t i = 0; i < N; i++) { + if (i > 0) + ss << ","; + if (ss.tellp() > static_cast(maxsize)) { + ss << "..." << N << "in total"; + break; + } + if (std::is_same::value || std::is_same::value) + ss << static_cast(values[i]); + else + ss << values[i]; + } + return ss.str(); +} + +std::ostream& operator<<(std::ostream& os, const IMemory& mem) { + const auto& desc = mem.getDesc(); + os << desc; + if (mem.isAllocated()) { + os << " ["; + if (desc.getPrecision() == ov::element::i32) { + os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(int32_t), 256); + } else if (desc.getPrecision() == ov::element::f32) { + os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(float), 256); + } else if (desc.getPrecision() == ov::element::i64) { + os << to_string(reinterpret_cast(mem.getData()), mem.getSize() / sizeof(int64_t), 256); + } else { + os << " ? 
"; + } + os << "]"; + } + return os; +} + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.h b/src/plugins/intel_cpu/src/utils/debug_capabilities.h index c8494c6e06d303..f593bfa36dc3d7 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.h +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.h @@ -44,7 +44,7 @@ class NodeDesc; class MemoryDesc; class Node; class Edge; - +class IMemory; class PrintableModel { public: PrintableModel(const ov::Model& model, std::string tag = "", std::string prefix = "") : model(model), tag(tag), prefix(prefix) {} @@ -92,6 +92,7 @@ class PrintableTimer { std::ostream & operator<<(std::ostream & os, const NodeDesc& desc); std::ostream & operator<<(std::ostream & os, const Node& node); std::ostream & operator<<(std::ostream & os, const MemoryDesc& desc); +std::ostream & operator<<(std::ostream & os, const IMemory& mem); std::ostream & operator<<(std::ostream & os, const Edge& edge); std::ostream & operator<<(std::ostream & os, const PrintableModel& model); std::ostream & operator<<(std::ostream & os, const PrintableDelta& us); diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index 2d837abb25eef3..b753edd33bb3f8 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -10,7 +10,7 @@ add_library(cpuSpecificRtInfo STATIC target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) -set(DEPENDENCIES openvino_intel_cpu_plugin template_extension) +set(DEPENDENCIES openvino_intel_cpu_plugin openvino_template_extension) set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo openvino::snippets ov_snippets_models) if(ENABLE_OV_ONNX_FRONTEND) diff --git a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp index 1b9c0158e51bd3..46b62c605e5065 100644 --- a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp +++ b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp @@ -3,146 +3,65 @@ // #include -#include -#include -#include -#include -#include "common_test_utils/file_utils.hpp" - -class CustomAbsKernel : public InferenceEngine::ILayerExecImpl { -public: - explicit CustomAbsKernel(const std::shared_ptr& node): node(node) {} - - InferenceEngine::StatusCode - init(InferenceEngine::LayerConfig& /*config*/, InferenceEngine::ResponseDesc* /*resp*/) noexcept override { - return InferenceEngine::StatusCode::OK; - } - - InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, - InferenceEngine::ResponseDesc* /*resp*/) noexcept override { - InferenceEngine::LayerConfig layerConfig; - - if (node->outputs().size() != 1 && node->inputs().size() != 1) - return InferenceEngine::GENERAL_ERROR; - - InferenceEngine::DataConfig cfg; - cfg.constant = false; - cfg.inPlace = 0; - - InferenceEngine::SizeVector order; - auto partialShape = node->get_output_partial_shape(0); - if (partialShape.is_dynamic()) - return InferenceEngine::GENERAL_ERROR; - auto shape = node->get_output_shape(0); - for (size_t i = 0; i < shape.size(); i++) { - order.push_back(i); - } - cfg.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, - shape, {shape, order}); - layerConfig.outConfs.push_back(cfg); - layerConfig.inConfs.push_back(cfg); - conf.push_back(layerConfig); - return InferenceEngine::OK; - } - - 
InferenceEngine::StatusCode - execute(std::vector& inputs, std::vector& outputs, - InferenceEngine::ResponseDesc* /*resp*/) noexcept override { - for (size_t i = 0; i < inputs.size(); i++) { - InferenceEngine::MemoryBlob::CPtr minput = InferenceEngine::as(inputs[i]); - InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as(outputs[i]); - if (!moutput || !minput) { - return InferenceEngine::StatusCode::PARAMETER_MISMATCH; - } - // locked memory holder should be alive all time while access to its buffer happens - auto minputHolder = minput->rmap(); - auto moutputHolder = moutput->wmap(); - - auto inputData = minputHolder.as(); - auto outputData = moutputHolder.as(); - for (size_t j = 0; j < minput->size(); j++) { - outputData[j] = inputData[j] < 0 ? (-inputData[j] * 2) : inputData[j]; - } - } - return InferenceEngine::StatusCode::OK; - } +#include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_assertions.hpp" +#include "file_utils.h" +#include "openvino/frontend/extension.hpp" +#include "openvino/runtime/core.hpp" -private: - const std::shared_ptr node; -}; +using testing::ElementsAreArray; -class CustomAbs : public ngraph::op::Op { +class CustomAbs : public ov::op::Op { public: - OPENVINO_RTTI("CustomAbs", "custom_opset"); + OPENVINO_OP("CustomAbs", "custom_opset") CustomAbs() = default; - CustomAbs(const ngraph::Output& arg): ngraph::op::Op({arg}) { + CustomAbs(const ov::Output& arg) : ov::op::Op({arg}) { constructor_validate_and_infer_types(); } void validate_and_infer_types() override { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } - std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& new_args) const override { + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override { return std::make_shared(new_args.at(0)); } - bool visit_attributes(ngraph::AttributeVisitor&) override { + bool visit_attributes(ov::AttributeVisitor&) override { return true; } -}; - -class CustomAbsExtension : public InferenceEngine::IExtension { -public: - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {} - void Unload() noexcept override {} - - std::map getOpSets() override { - std::map opsets; - ngraph::OpSet opset; - opset.insert(); - opsets["custom_opset"] = opset; - return opsets; + bool has_evaluate() const override { + return get_input_element_type(0) == ov::element::f32; } - std::vector getImplTypes(const std::shared_ptr& node) override { - if (node->description() != CustomAbs::get_type_info_static().name) - return {}; - return {"CPU"}; - } + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override { + if (inputs[0].get_element_type() == ov::element::f32) { + outputs[0].set_shape(inputs[0].get_shape()); - InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, const std::string& implType) override { - return std::make_shared(node); + auto first = inputs[0].data(); + + std::transform(first, first + inputs[0].get_size(), outputs[0].data(), [](float v) { + return v < 0 ? 
-v * 2.0f : v; + }); + return true; + } else { + return false; + } } }; -static void infer_model(InferenceEngine::Core& ie, InferenceEngine::CNNNetwork& network, - const std::vector& input_values, const std::vector& expected) { - auto function = network.getFunction(); - - auto network_inputs = network.getInputsInfo(); - auto network_outputs = network.getOutputsInfo(); - auto exe_network = ie.LoadNetwork(network, "CPU"); - auto inference_req = exe_network.CreateInferRequest(); - const auto& input = network_inputs.begin(); - const auto& input_info = input->second; - - auto blob = std::make_shared>(input_info->getTensorDesc()); - blob->allocate(); - ASSERT_EQ(input_values.size(), blob->size()); - float* blob_buffer = blob->wmap().template as(); - std::copy(input_values.begin(), input_values.end(), blob_buffer); - inference_req.SetBlob(input->first, blob); +static void infer_model(ov::Core& core, + ov::CompiledModel& model, + std::vector& input_values, + const std::vector& expected) { + auto input_tensor = ov::Tensor(ov::element::f32, model.input(0).get_shape(), input_values.data()); - inference_req.Infer(); + auto infer_req = model.create_infer_request(); + infer_req.set_input_tensor(input_tensor); + infer_req.infer(); - auto output = network_outputs.begin(); - InferenceEngine::MemoryBlob::CPtr computed = InferenceEngine::as(inference_req.GetBlob(output->first)); - const auto computed_data = computed->rmap(); - const auto* computed_data_buffer = computed_data.template as(); - std::vector computed_values(computed_data_buffer, - computed_data_buffer + computed->size()); - ASSERT_EQ(expected, computed_values); + auto computed = infer_req.get_output_tensor(0); + EXPECT_THAT(expected, ElementsAreArray(computed.data(), computed.get_size())); } static std::string model_full_path(const char* path) { @@ -191,20 +110,22 @@ TEST(Extension, XmlModelWithCustomAbs) { std::vector input_values{1, -2, 3, -4, 5, -6, 7, -8, 9, -10}; std::vector expected{1, 4, 3, 8, 5, 12, 7, 16, 9, 20}; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared()); - InferenceEngine::Blob::CPtr weights; - auto network = ie.ReadNetwork(model, weights); - infer_model(ie, network, input_values, expected); + + ov::Core core; + core.add_extension(std::make_shared>()); + auto weights = ov::Tensor(); + auto ov_model = core.read_model(model, weights); + auto compiled_model = core.compile_model(ov_model); + + infer_model(core, compiled_model, input_values, expected); } static std::string get_extension_path() { return FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + OV_BUILD_POSTFIX); + std::string("openvino_template_extension") + OV_BUILD_POSTFIX); } - TEST(Extension, smoke_XmlModelWithExtensionFromDSO) { std::string model = R"V0G0N( @@ -220,7 +141,7 @@ TEST(Extension, smoke_XmlModelWithExtensionFromDSO) { - + @@ -258,21 +179,26 @@ TEST(Extension, smoke_XmlModelWithExtensionFromDSO) { )V0G0N"; std::vector input_values{1, 2, 3, 4, 5, 6, 7, 8}; - std::vector expected{12, 13, 14, 15, 16, 17, 18, 19}; - InferenceEngine::Core ie; - ie.SetConfig({ { ov::hint::inference_precision.name(), ov::element::f32.get_type_name() } }, "CPU"); - ie.AddExtension(std::make_shared(get_extension_path())); - InferenceEngine::Blob::CPtr weights; - auto network = ie.ReadNetwork(model, weights); - infer_model(ie, network, input_values, expected); -} + std::vector expected{1, 2, 3, 4, 5, 6, 7, 8}; + ov::Core core; + core.set_property("CPU", {{ov::hint::inference_precision.name(), 
ov::element::f32.get_type_name()}}); + core.add_extension(get_extension_path()); + auto weights = ov::Tensor(); + auto ov_model = core.read_model(model, weights); + auto compiled_model = core.compile_model(ov_model); + + infer_model(core, compiled_model, input_values, expected); +} TEST(Extension, OnnxModelWithExtensionFromDSO) { std::vector input_values{1, 2, 3, 4, 5, 6, 7, 8}; - std::vector expected{12, 13, 14, 15, 16, 17, 18, 19}; - InferenceEngine::Core ie; - ie.AddExtension(std::make_shared(get_extension_path())); - auto network = ie.ReadNetwork(model_full_path("func_tests/models/custom_template_op.onnx")); - infer_model(ie, network, input_values, expected); + std::vector expected{1, 2, 3, 4, 5, 6, 7, 8}; + + ov::Core core; + core.add_extension(get_extension_path()); + auto ov_model = core.read_model(model_full_path("func_tests/models/custom_template_op.onnx")); + auto compiled_model = core.compile_model(ov_model); + + infer_model(core, compiled_model, input_values, expected); } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp similarity index 96% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp index 0f71d3e80c30ad..f5b80651b56c3e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/core_integration.cpp @@ -8,8 +8,6 @@ using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; - namespace { // // Executable Network GetMetric diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp new file mode 100644 index 00000000000000..f1284f706cca13 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/import_export.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/test_constants.hpp" +#include "behavior/compiled_model/import_export.hpp" + +namespace { + +using namespace ov::test::behavior; + +const std::vector netPrecisions = { + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, + ov::element::f16, + ov::element::f32, +}; +const ov::AnyMap empty_property = {}; + +INSTANTIATE_TEST_SUITE_P(smoke_serialization, + OVCompiledGraphImportExportTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(empty_property)), + OVCompiledGraphImportExportTest::getTestCaseName); + +TEST_P(OVCompiledModelGraphUniqueNodeNamesTest, CheckUniqueNodeNames) { + std::shared_ptr core = ov::test::utils::PluginCache::get().core(); + auto compiled_model = core->compile_model(model, target_device); + auto exec_graph = compiled_model.get_runtime_model(); + + int numReorders = 0; + int expectedReorders = 2; + std::unordered_set names; + ASSERT_NE(exec_graph, nullptr); + + for 
(const auto& op : exec_graph->get_ops()) { + const auto& rtInfo = op->get_rt_info(); + auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE); + ASSERT_NE(rtInfo.end(), it); + auto opType = it->second.as(); + + if (opType == "Reorder") { + numReorders++; + } + } + + ASSERT_EQ(numReorders, expectedReorders) + << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders; +}; + +const std::vector netPrc = { + ov::element::f32, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, + OVCompiledModelGraphUniqueNodeNamesTest, + ::testing::Combine(::testing::ValuesIn(netPrc), + ::testing::Values(ov::Shape{1, 2, 5, 5}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + OVCompiledModelGraphUniqueNodeNamesTest::getTestCaseName); + +} // namespace + diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp similarity index 73% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp index 913315542db85b..669d7efa4c64d5 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp @@ -3,8 +3,10 @@ // #include "behavior/compiled_model/properties.hpp" -#include "ie_system_conf.h" + +#include "behavior/ov_executable_network/get_metric.hpp" #include "openvino/runtime/properties.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace ov::test::behavior; @@ -33,8 +35,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, #if (defined(__APPLE__) || defined(_WIN32)) auto default_affinity = [] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto numaNodes = ov::get_available_numa_nodes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else if (numaNodes.size() > 1) { @@ -45,7 +47,7 @@ auto default_affinity = [] { }(); #else auto default_affinity = [] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + auto coreTypes = ov::get_available_cores_types(); if (coreTypes.size() > 1) { return ov::Affinity::HYBRID_AWARE; } else { @@ -72,23 +74,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, const std::vector properties = {{ov::num_streams(ov::streams::NUMA)}, {ov::num_streams(ov::streams::AUTO)}, {ov::num_streams(0), ov::inference_num_threads(1)}, - {ov::num_streams(1), ov::inference_num_threads(1)}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}}; + {ov::num_streams(1), ov::inference_num_threads(1)}}; const std::vector hetero_properties = { {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::num_streams(ov::streams::AUTO)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, }; const std::vector auto_batch_properties = { - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}}, - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + 
"(4)"}, - {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "1"}}, - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}, - {ov::auto_batch_timeout(10)}}, + {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)")}, + {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(1)}, + {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(10)}, }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, @@ -162,4 +157,60 @@ INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectPropertiesTe OVClassCompileModelWithCorrectPropertiesTest, ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(heteroConfigsWithSecondaryProperties))); + +// +// OV CompiledModel Get RO Property +// + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("CPU", "HETERO:CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("CPU", "HETERO:CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_NETWORK_NAME, + ::testing::Values("CPU", "HETERO:CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, + ::testing::Values("CPU", "HETERO:CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported, + ::testing::Values("CPU", "HETERO:CPU")); + +// +// OV CompiledModel GetProperty / SetProperty +// + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkGetConfigTest, OVClassExecutableNetworkGetConfigTest, + ::testing::Values("CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkSetConfigTest, OVClassExecutableNetworkSetConfigTest, + ::testing::Values("CPU")); + +// +// Hetero OV CompiledModel Get RO Property +// + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, + ::testing::Values("CPU")); + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, + ::testing::Values("CPU")); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp deleted file mode 100644 index 165c26385e959a..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "behavior/executable_network/exec_graph_info.hpp" - -namespace { - -using namespace ExecutionGraphTests; - -INSTANTIATE_TEST_SUITE_P(smoke_serialization, 
ExecGraphSerializationTest, - ::testing::Values(ov::test::utils::DEVICE_CPU), - ExecGraphSerializationTest::getTestCaseName); - -TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) { -InferenceEngine::CNNNetwork cnnNet(fnPtr); - -auto ie = PluginCache::get().ie(); -auto execNet = ie->LoadNetwork(cnnNet, target_device); - -InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo(); - -int numReorders = 0; -int expectedReorders = 2; -std::unordered_set<std::string> names; - -auto function = execGraphInfo.getFunction(); -ASSERT_NE(function, nullptr); - -for (const auto & op : function->get_ops()) { -const auto & rtInfo = op->get_rt_info(); -auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE); -ASSERT_NE(rtInfo.end(), it); -auto opType = it->second.as<std::string>(); - -if (opType == "Reorder") { -numReorders++; -} -} - -ASSERT_EQ(numReorders, expectedReorders) << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders; -}; - -const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ExecGraphUniqueNodeNames, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({1, 2, 5, 5})), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ExecGraphUniqueNodeNames::getTestCaseName); - -} // namespace - diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp deleted file mode 100644 index 52a4bee4fbc720..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/exec_network_base.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - - const std::vector<std::map<std::string, std::string>> configs = { - {}, - }; - - const std::vector<std::map<std::string, std::string>> heteroConfigs = { - {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - ExecutableNetworkBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - - const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16 - }; - - const std::vector<std::map<std::string, std::string>> configSetPrc = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configSetPrc)), - ExecNetSetPrecision::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp deleted file mode 100644 index 
1e5badc668ffb0..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/get_metric.hpp" - -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -namespace { -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassImportExportTestP, IEClassImportExportTestP, - ::testing::Values("HETERO:CPU")); - -// -// Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("CPU", "HETERO:CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("CPU", "HETERO:CPU")); - -// -// Executable Network GetConfig / SetConfig -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest, - ::testing::Values("CPU")); - -// -// Hetero Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, - ::testing::Values("CPU")); - -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp deleted file mode 100644 index 255a87b07229c9..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/compiled_model/compiled_model_base.hpp" -#include "ie_plugin_config.hpp" - -using namespace ov::test::behavior; -namespace { - - const std::vector<ov::AnyMap> configs = { - {}, - }; - - const std::vector<ov::AnyMap> heteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - 
::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16 - }; - - const std::vector<ov::AnyMap> configSetPrc = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp deleted file mode 100644 index 16f4c82c74be24..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "behavior/compiled_model/import_export.hpp" - -#include "ie_plugin_config.hpp" -#include - -using namespace ov::test::behavior; -namespace { -const std::vector<ov::element::Type> netPrecisions = { - ov::element::i8, - ov::element::i16, - ov::element::i32, - ov::element::i64, - ov::element::u8, - ov::element::u16, - ov::element::u32, - ov::element::u64, - ov::element::f16, - ov::element::f32, -}; -const std::vector<ov::AnyMap> configs = { - {}, -}; - -const std::vector<ov::AnyMap> heteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs)), - OVCompiledGraphImportExportTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(heteroConfigs)), - OVCompiledGraphImportExportTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - smoke_OVClassImportExportTestP, OVClassCompiledModelImportExportTestP, - ::testing::Values("HETERO:CPU")); - -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp new file mode 100644 index 00000000000000..57533fdc278211 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/memory_states.hpp" + +using namespace ov::test::behavior; +using namespace ov; + +namespace { +std::vector<memoryStateParams> memoryStateTestCases = { + memoryStateParams(OVInferRequestVariableStateTest::get_network(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_CPU, + {}), + memoryStateParams(OVInferRequestVariableStateTest::get_network(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_HETERO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; + +INSTANTIATE_TEST_SUITE_P(smoke_VariableState, + OVInferRequestVariableStateTest, + ::testing::ValuesIn(memoryStateTestCases), + OVInferRequestVariableStateTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp similarity index 93% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp index 6d05ac79e467f7..d848a5a45a21a6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp @@ -1,13 +1,13 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include +#include "behavior/ov_plugin/auto_batching_tests.hpp" const std::vector<bool> get_vs_set{ true, false }; const std::vector<size_t> num_streams{ 1, 2 }; const std::vector<size_t> num_requests{ 1, 3, 8, 9, 16, 64 }; const std::vector<size_t> num_batch{ 1, 4, 8, 16, 32, 64, 128, 256 }; -using namespace AutoBatchingTests; +using namespace ov::test::behavior; namespace { INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_CPU, AutoBatching_Test, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index a4d3119ae52234..da54ed76e2dada 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -3,43 +3,44 @@ // #include "behavior/ov_plugin/caching_tests.hpp" -#include -#include -#include +#include "ov_ops/multiclass_nms_ie_internal.hpp" +#include "ov_ops/nms_ie_internal.hpp" +#include "ov_ops/nms_static_shape_ie.hpp" + +using namespace ov; using namespace ov::test::behavior; -using namespace ngraph; namespace { - static const std::vector<ngraph::element::Type> precisionsCPU = { - ngraph::element::f32, - ngraph::element::f16, - ngraph::element::i32, - ngraph::element::i64, - ngraph::element::i8, - ngraph::element::u8, - ngraph::element::i16, - ngraph::element::u16, + static const std::vector<ov::element::Type> precisionsCPU = { + ov::element::f32, + ov::element::f16, + ov::element::i32, + ov::element::i64, + ov::element::i8, + ov::element::u8, + ov::element::i16, + ov::element::u16, }; - static const std::vector<ngraph::element::Type> floatPrecisionsCPU = { - ngraph::element::f32, - ngraph::element::f16, + static const std::vector<ov::element::Type> floatPrecisionsCPU = { + ov::element::f32, + ov::element::f16, }; static const std::vector<size_t> batchSizesCPU = { 1, 2 }; - static const std::vector<ngraph::element::Type> precisionsCPUInternal = { - 
ngraph::element::f32 + static const std::vector<ov::element::Type> precisionsCPUInternal = { + ov::element::f32 }; static const std::vector<size_t> batchSizesCPUInternal = { 1 }; - static std::shared_ptr<ngraph::Function> simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) { + static std::shared_ptr<ov::Model> simple_function_non_max_suppression_internal(element::Type, size_t) { auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); auto max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10}); @@ -48,11 +49,11 @@ namespace { auto nms = std::make_shared<ov::op::internal::NonMaxSuppressionIEInternal>(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, 0, true, element::i32); auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); + auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } - static std::shared_ptr<ngraph::Function> simple_function_matrix_nms_internal(ngraph::element::Type, size_t) { + static std::shared_ptr<ov::Model> simple_function_matrix_nms_internal(element::Type, size_t) { auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); ov::op::v8::MatrixNms::Attributes attr; @@ -60,18 +61,18 @@ namespace { attr.output_type = element::i32; auto nms = std::make_shared<ov::op::internal::NmsStaticShapeIE<ov::op::v8::MatrixNms>>(boxes, scores, attr); auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); + auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } - static std::shared_ptr<ngraph::Function> simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) { + static std::shared_ptr<ov::Model> simple_function_multiclass_nms_internal(element::Type, size_t) { auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); ov::op::util::MulticlassNmsBase::Attributes attr; attr.output_type = element::i32; auto nms = std::make_shared<ov::op::internal::MulticlassNmsIEInternal>(boxes, scores, attr); auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); + auto func = std::make_shared<ov::Model>(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } @@ -147,4 +148,8 @@ namespace { CompileModelCacheRuntimePropertiesTestBase, ::testing::Combine(::testing::ValuesIn(TestCpuTargets), ::testing::ValuesIn(CpuConfigs)), CompileModelCacheRuntimePropertiesTestBase::getTestCaseName); + INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU, + CompileModelLoadFromCacheTest, + ::testing::Combine(::testing::ValuesIn(TestCpuTargets), ::testing::ValuesIn(CpuConfigs)), + CompileModelLoadFromCacheTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 85295589ceeb99..934b044a8f752f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -2,36 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/ov_plugin/core_integration.hpp" - -#include - -#include "behavior/ov_plugin/core_integration_sw.hpp" #include "behavior/ov_plugin/query_model.hpp" -#include "openvino/core/type/element_type.hpp" #include "openvino/runtime/core.hpp" using 
namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; - -// defined in plugin_name.cpp -extern const char * cpu_plugin_file_name; namespace { -// -// IE Class Common tests with -// - -INSTANTIATE_TEST_SUITE_P( - smoke_OVClassImportExportTestP, OVClassImportExportTestP, - ::testing::Values("HETERO:CPU")); // IE Class Query model -INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest, ::testing::Values("CPU")); - -// OV Class Load network -INSTANTIATE_TEST_SUITE_P( - smoke_OVClassLoadNetworkTest, OVClassLoadNetworkTestWithThrow, - ::testing::Values("")); +INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest, +::testing::Values("CPU")); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp index 7e3ab594a90e50..0139db99ac116d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp @@ -7,7 +7,7 @@ namespace { const Params params[] = { std::tuple{ov::test::utils::DEVICE_CPU, {{ov::enable_profiling(true)}}}, - std::tuple{ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}, + std::tuple{ov::test::utils::DEVICE_HETERO, {{ov::device::priorities.name(), ov::test::utils::DEVICE_CPU}}}, }; const Params paramsStreams[] = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/plugin_name.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/plugin_name.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/plugin_name.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/plugin_name.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index b736a5ce7b6be2..bc7aae3c0efe6b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -3,11 +3,9 @@ // #include "behavior/ov_plugin/properties_tests.hpp" - -#include +#include "openvino/runtime/auto/properties.hpp" using namespace ov::test::behavior; -using namespace InferenceEngine::PluginConfigParams; namespace { @@ -15,17 +13,53 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassCommon, OVBasicPropertiesTestsP, ::testing::Values(std::make_pair("openvino_intel_cpu_plugin", "CPU"))); -const std::vector<ov::AnyMap> cpu_properties = { - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, +auto cpu_properties = []() -> std::vector<ov::AnyMap> { + std::vector<ov::AnyMap> properties = { + {}, + {ov::hint::enable_cpu_pinning(true)}, + {ov::hint::enable_cpu_pinning(false)}, + {ov::enable_profiling(true)}, + {ov::enable_profiling(false)}, + {ov::internal::exclusive_async_requests(true)}, + {ov::internal::exclusive_async_requests(false)}, + {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, + 
{{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, {ov::hint::num_requests(1)}}, + {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, + {ov::num_streams(ov::streams::AUTO)}, + {ov::num_streams(8)}, + // check that hints doesn't override customer value (now for streams and later for other config opts) + {{ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, {ov::hint::num_requests(3)}}, + {{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, {ov::hint::num_requests(3)}}, + }; + + auto numa_nodes = ov::get_available_numa_nodes(); + if (numa_nodes.size() > 1) { + properties.push_back({ov::num_streams(ov::streams::NUMA)}); + } + return properties; }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVPropertiesTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(cpu_properties)), + ::testing::ValuesIn(cpu_properties())), OVPropertiesTests::getTestCaseName); +const std::vector<ov::AnyMap> cpu_inproperties = { + {{ov::hint::performance_mode.name(), "DOESN'T EXIST"}}, + {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), {ov::hint::num_requests(-1)}}, + {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + {ov::hint::num_requests.name(), "should be int"}}, + {{ov::num_streams.name(), "OFF"}}, + {{ov::hint::enable_cpu_pinning.name(), "OFF"}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + OVPropertiesIncorrectTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(cpu_inproperties)), + OVPropertiesIncorrectTests::getTestCaseName); + const std::vector<ov::AnyMap> cpu_setcore_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), ov::hint::num_requests(2), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp deleted file mode 100644 index 7bc6f7b10d3512..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/caching_tests.hpp" -#include -#include -#include - -using namespace LayerTestsDefinitions; -using namespace ngraph; - -namespace { - static const std::vector<ngraph::element::Type> precisionsCPU = { - ngraph::element::f32, - ngraph::element::f16, - ngraph::element::i32, - ngraph::element::i64, - ngraph::element::i8, - ngraph::element::u8, - ngraph::element::i16, - ngraph::element::u16, - }; - - static const std::vector<ngraph::element::Type> floatPrecisionsCPU = { - ngraph::element::f32, - ngraph::element::f16 - }; - - static const std::vector<size_t> batchSizesCPU = { - 1, 2 - }; - - static const std::vector<ngraph::element::Type> precisionsCPUInternal = { - ngraph::element::f32 - }; - - static const std::vector<size_t> batchSizesCPUInternal = { - 1 - }; - - static std::shared_ptr<ngraph::Function> simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); - auto max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10}); - auto iou_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.75}); - auto score_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.7}); - auto nms = std::make_shared<ov::op::internal::NonMaxSuppressionIEInternal>(boxes, scores, max_output_boxes_per_class, - 
iou_threshold, score_threshold, 0, true, element::i32); - auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::shared_ptr<ngraph::Function> simple_function_matrix_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); - ov::op::v8::MatrixNms::Attributes attr; - // convert_precision does not support internal op 'NmsStaticShapeIE' - attr.output_type = element::i32; - auto nms = std::make_shared<ov::op::internal::NmsStaticShapeIE<ov::op::v8::MatrixNms>>(boxes, scores, attr); - auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::shared_ptr<ngraph::Function> simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 1, 1000}); - ov::op::util::MulticlassNmsBase::Attributes attr; - attr.output_type = element::i32; - auto nms = std::make_shared<ov::op::internal::MulticlassNmsIEInternal>(boxes, scores, attr); - auto res = std::make_shared<ov::op::v0::Result>(nms); - auto func = std::make_shared<ngraph::Function>(NodeVector{nms}, ParameterVector{boxes, scores}); - return func; - } - - static std::vector<nGraphFunctionWithName> internal_functions_cpu() { - std::vector<nGraphFunctionWithName> funcs = { - nGraphFunctionWithName { simple_function_non_max_suppression_internal, "NonMaxSuppressionIEInternal"}, - nGraphFunctionWithName { simple_function_matrix_nms_internal, "NmsStaticShapeIE_MatrixNms"}, - nGraphFunctionWithName { simple_function_multiclass_nms_internal, "MulticlassNmsIEInternal"}, - }; - return funcs; - } - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU_Float, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU_Internal, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(internal_functions_cpu()), - ::testing::ValuesIn(precisionsCPUInternal), - ::testing::ValuesIn(batchSizesCPUInternal), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoadNetworkCacheTestBase::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp deleted file mode 100644 index 5cdbb8fbd7285b..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_plugin_config.hpp" -#include "ie_system_conf.h" -#include "behavior/plugin/configuration_tests.hpp" - -using namespace BehaviorTestsDefinitions; - -namespace { - #if (defined(__APPLE__) || defined(_WIN32)) - auto defaultBindThreadParameter = 
InferenceEngine::Parameter{[] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else if (numaNodes.size() > 1) { - return std::string{CONFIG_VALUE(NUMA)}; - } else { - return std::string{CONFIG_VALUE(NO)}; - } - }()}; - #else - auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto coreTypes = InferenceEngine::getAvailableCoresTypes(); - if (coreTypes.size() > 1) { - return std::string{CONFIG_VALUE(HYBRID_AWARE)}; - } else { - return std::string{CONFIG_VALUE(YES)}; - } - }()}; - #endif - - INSTANTIATE_TEST_SUITE_P( - smoke_Basic, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::Values("CPU"), - ::testing::Values(DefaultParameter{CONFIG_KEY(CPU_BIND_THREAD), defaultBindThreadParameter})), - DefaultConfigurationTest::getTestCaseName); - - const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 - }; - - const std::vector<std::map<std::string, std::string>> conf = { - {} - }; - - const std::vector<std::map<std::string, std::string>> Configs = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, - // check that hints doesn't override customer value (now for streams and later for other config opts) - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(Configs)), - CorrectConfigTests::getTestCaseName); - - const std::vector<std::map<std::string, std::string>> inconfigs = { - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigTests, - 
::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(inconfigs)), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(inconfigs)), - IncorrectConfigAPITests::getTestCaseName); - - const std::vector<std::map<std::string, std::string>> ConfigsCheck = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}}, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigCheck, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(ConfigsCheck)), - CorrectConfigCheck::getTestCaseName); - - const std::vector<std::map<std::string, std::string>> cpu_prop_config = {{ - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "2"}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}, - }}; - - const std::vector<std::map<std::string, std::string>> cpu_loadNetWork_config = {{ - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::NO}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "10"}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, - }}; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(cpu_prop_config), - ::testing::ValuesIn(cpu_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp deleted file mode 100644 index 6934ffaa19f78c..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/core_integration.hpp" 
- -using namespace BehaviorTestsDefinitions; - -using namespace InferenceEngine::PluginConfigParams; - -// defined in plugin_name.cpp -extern const char * cpu_plugin_file_name; - -namespace { -// -// IE Class Common tests with -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassCommon, IEClassBasicTestP, - ::testing::Values(std::make_pair(cpu_plugin_file_name, "CPU"))); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassNetworkTestP, IEClassNetworkTestP, - ::testing::Values("CPU")); - -// -// IE Class GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_STREAMS, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("CPU", "HETERO")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, - ::testing::Values("CPU")); - -// -// IE Class GetConfig -// - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassGetConfigTest, IEClassGetConfigTest, - ::testing::Values("CPU")); - -////////////////////////////////////////////////////////////////////////////////////////// - -TEST(IEClassBasicTest, smoke_SetConfigAfterCreatedThrow) { - InferenceEngine::Core ie; - std::string value = {}; - - ASSERT_NO_THROW(ie.SetConfig({{KEY_CPU_THREADS_NUM, "1"}}, "CPU")); - ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as<std::string>()); - ASSERT_EQ("1", value); - - ASSERT_NO_THROW(ie.SetConfig({{KEY_CPU_THREADS_NUM, "4"}}, "CPU")); - ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as<std::string>()); - ASSERT_EQ("4", value); -} - -// IE Class Query network - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, - ::testing::Values("CPU")); - -// IE Class Load network - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, - ::testing::Values("CPU")); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTestWithThrow, - ::testing::Values("")); -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp deleted file mode 100644 index 5f1ada306d367a..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#ifdef __GLIBC__ -#include -#endif - -namespace { - 
-const Params params[] = { - std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}}, - std::tuple{ ov::test::utils::DEVICE_HETERO, {{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}}, -}; - -const Params paramsStreams[] = { - std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(CPU_THROUGHPUT_STREAMS), CONFIG_VALUE(CPU_THROUGHPUT_AUTO) }}}, -}; -} // namespace - -INSTANTIATE_TEST_SUITE_P(CPU, CoreThreadingTests, testing::ValuesIn(params), CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(CPU, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(params), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(CPU_Streams, CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(paramsStreams), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp deleted file mode 100644 index 43b40e4688cdad..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/life_time.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - const std::vector<std::vector<int>> orders = { - // 0 - plugin - // 1 - executable_network - // 2 - infer_request - // 3 - variable state - {3, 0, 1, 2}, - {3, 0, 2, 1}, - {3, 1, 0, 2}, - {3, 1, 2, 0}, - {3, 2, 0, 1}, - {3, 2, 1, 0}, - {0, 3, 1, 2}, - {0, 1, 3, 2} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, HoldersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(orders)), - HoldersTest::getTestCaseName); - -} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp index 673c0f82460c92..0348ac2354db8d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp @@ -8,62 +8,60 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector<AddTestValues> params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 
1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + {ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + {ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { {}, {}, false }, { {}, {}, true }, }; @@ -71,7 +69,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, AddTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), 
::testing::ValuesIn(params)), AddTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp index 9304b63da83d9d..3fe2ca1394b398 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector<size_t> opsetVersions = { @@ -30,22 +29,22 @@ const std::vector trasform const std::vector params{ // u8 { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u16 { - { 65536ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 65536ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u32 { - { 4294967296ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 4294967296ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, AssignAndReadValueTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::ValuesIn(opsetVersions), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp index 4adea1767af5a7..420bf2a22961dc 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector params = { @@ -20,7 +19,7 @@ { { 4, 3, 50, 86 }, { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, "BatchToSpace", "u8" }, @@ -30,7 +29,7 @@ { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp index 7e32d5ddf5363c..64fc05849b0793 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector<ov::pass::low_precision::LayerTransformation::Params> trasformationParamValues = { @@ -27,7 +26,7 @@ const std::vector params{ // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -38,7 +37,7 @@ }, // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -50,7 +49,7 @@ // per-channel quantization with the same values { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -64,7 +63,7 @@ { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -79,7 +78,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ClampTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index 43068e06ead893..b97120a378193f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -8,83 +8,81 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector<ngraph::element::Type> precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> precisions = { + ov::element::f32 }; const std::vector testValues = { // U8 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, // I8 { {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // mixed: U8 + I8 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, 
{1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // mixed: I8 + U8 { {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, // FQ with unexpected quantizationLevels { {}, - { 14ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 14ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 14ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 14ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {} }, // FQ with INT4 quantizationLevels { {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {} }, // FQ with INT4+INT8 quantizationLevels { {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, }; -const std::vector<ngraph::Shape> shapes = { - ngraph::Shape({ 1, 3, 16, 16 }), - ngraph::Shape({ 4, 3, 16, 16 }) +const std::vector<ov::Shape> shapes = { + ov::Shape({ 1, 3, 16, 16 }), + ov::Shape({ 4, 3, 16, 16 }) }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, @@ -98,15 +96,15 @@ namespace concat_transformation_mixed { -const std::vector<ngraph::element::Type> precisions = { - ngraph::element::f16 +const std::vector<ov::element::Type> precisions = { + ov::element::f16 }; const std::vector testValues = { // mixed dequantization: FP32 & FP16 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, std::make_shared<ov::op::v0::Constant>(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector<float>(3 * 16 * 16, 1.0)), {}, @@ -121,7 +119,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues)), ConcatTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp index ceceaddea51ae6..3a66bac15d8b97 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector<ov::pass::low_precision::LayerTransformation::Params> trasformationParamValues = { @@ -22,30 +21,30 @@ const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, 
{0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithChildAndOutputTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp index 28cd434791e9f0..41131e1a9e362a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector<ov::pass::low_precision::LayerTransformation::Params> trasformationParamValues = { @@ -23,39 +22,39 @@ const std::vector testValues = { // U8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // U8 and unsupported concat axis { 2, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, 
ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp index 4f57f15313c5d0..7bdfb00b6b6061 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -8,12 +8,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -26,7 +24,7 @@ const std::vector trasform const std::vector transparentIntermediateValues = { true, false }; const std::vector multiChannelValues = { /*true,*/ false }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp index 691509a2536ea3..f8ccfff534ecb0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_transformation.cpp @@ -8,12 +8,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -23,7 +21,7 @@ const std::vector trasform // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8() }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp index b8fe75b2975c21..324734790730f4 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -25,30 +24,30 @@ const std::vector trasform const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithSplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp index f8309d5e29939c..266aa0fe34c977 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -10,8 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -21,90 +21,90 @@ const std::vector trasform const std::vector params = { // FQ on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "u8" }, // FQ on weights { - {256ul, ngraph::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 
12.7f }}, "convolutionBackpropData_original", "u8" }, // FQ on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, "", "" }, // with incorrect zero point on activations { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, "", "" }, // with incorrect zero point on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, "", "" }, // QDq on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {{ngraph::element::f32}, {}, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {{ov::element::f32}, {}, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on activations { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {{ov::element::f32}, { {1000.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // issue #56886: with incorrect dequantization on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {{ngraph::element::f32}, {}, { {4.f, 2.f, 4.f, 2.f, 4.f, 2.f, 4.f, 2.f}, ngraph::element::f32, {8, 1, 1, 1}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {{ov::element::f32}, {}, { {4.f, 2.f, 4.f, 2.f, 4.f, 2.f, 4.f, 2.f}, ov::element::f32, {8, 1, 1, 1}, false }}, "", "" } }; -const std::vector<std::pair<ngraph::PartialShape, bool>> inputShapes = { +const std::vector<std::pair<ov::PartialShape, bool>> inputShapes = { {{ 1, 8, 16, 16 }, true} }; -const std::vector<ngraph::Shape> outputShapes = { +const std::vector<ov::Shape> outputShapes = { { 16, 16 } };
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp index 1ac1eb2b3e65ad..6944b93f6b4efd 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp @@ -11,9 +11,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -57,20 +56,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector<float>{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector<float>{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "f32" @@ -111,20 +110,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector<float>{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector<float>{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" @@ -162,20 +161,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { 
-12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "f32" @@ -213,47 +212,47 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" }, { - { 16ul, {{ 1, 1, 1, 1 }}, { -0.8f }, { 0.f }, { 0.f }, { 15.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 16ul, {{ 1, 1, 1, 1 }}, { -0.8f }, { 0.f }, { 0.f }, { 15.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "u8" }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 4, 4 }, { 4, 3, 4, 4 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp index fdf00c2753ff04..066a46e0dbb520 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp @@ -11,9 +11,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16, +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,7 +21,7 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 
255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, {}, false, @@ -32,72 +31,72 @@ const std::vector params { {}, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 14ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 14ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "f32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, true, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, - { 255ul, ngraph::Shape { 1 }, { 0.f }, { 254.f }, { -18.7f }, { 18.7f } }, + { 255ul, ov::Shape { 1 }, { 0.f }, { 254.f }, { -18.7f }, { 18.7f } }, false, "Convolution", "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -0.2f, -0.3f, -0.3f, -0.2f, -0.1f }, { 1.52806e-39f, 0.2f, 0.3f, 0.3f, 0.2f, 0.1f } }, false, @@ -105,10 +104,10 @@ const std::vector params "u8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f 
}, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f }, { 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f } }, @@ -118,34 +117,34 @@ const std::vector params }, // not supported quantization level on data { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 255ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "f32" }, // not supported quantization level on data & weights { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "f32" }, // not supported quantization level on weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, false, - { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "f32" } }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; @@ -162,14 +161,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionTransformation, const std::vector incorrectWeightsParams = { // incorrect weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, false }, // correct weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true } }; @@ -177,7 +176,7 @@ const std::vector i INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionWIthIncorrectWeightsTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::Shape({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(incorrectWeightsParams)), @@ -186,16 +185,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionWIthIncorrectWeightsTransformatio namespace convolution3D { const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "u8" }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 16 }, { 4, 3, 16 } }; diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp index df4bccfc05931e..363b11f62c6e42 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp @@ -8,12 +8,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; const std::vector modes = { @@ -21,7 +19,7 @@ const std::vector modes = { ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; -const std::vector inputShapesBS2 = { +const std::vector inputShapesBS2 = { {1, 4, 3, 3}, {2, 16, 5, 4} }; @@ -35,7 +33,7 @@ const auto DepthToSpaceBS2 = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(LPT_BS2, DepthToSpaceTransformation, DepthToSpaceBS2, DepthToSpaceTransformation::getTestCaseName); -const std::vector inputShapesBS3 = { +const std::vector inputShapesBS3 = { {1, 9, 3, 3}, {2, 27, 5, 4} }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp index 0f5227dfa69c7b..ace6b989f1c99c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -8,11 +8,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector elementwiseTypes = { @@ -23,24 +22,24 @@ const std::vector elementwiseTypes = { const std::vector params = { { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, - 
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {"fakeQuantizeBefore1", "convolution1"}, {"fakeQuantizeBefore2", "convolution2"}, @@ -54,24 +53,24 @@ const std::vector p }, { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {"fakeQuantizeBefore1", "convolution1"}, {"fakeQuantizeBefore2", "convolution2"}, @@ -88,7 +87,7 @@ const std::vector p INSTANTIATE_TEST_SUITE_P(smoke_LPT, ElementwiseBranchSelectionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(params), ::testing::ValuesIn(elementwiseTypes)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp index c7fdc231cff28b..c4392c4426d098 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp @@ -8,7 +8,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { @@ -17,9 +16,9 @@ const std::vector testValues = { {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 }, - { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 } + ov::element::f32, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 }, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 } }, { { "fakeQuantize1" }, @@ -31,9 +30,9 @@ const std::vector testValues = { {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { 256ul, {}, { 0.f }, { 255.f / 
2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 }, - { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ngraph::element::f32 } + ov::element::f32, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 }, + { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ov::element::f32 } }, { { "fakeQuantize1", "fakeQuantize2" }, // not fused diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp index 42e3b27fbbd71c..83532f091dc4c1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp @@ -9,11 +9,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32 +const std::vector precisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -29,7 +28,7 @@ const std::vector fakeQuantizes = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndAvgPoolTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizes)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp index 4116af8a5cca86..5016feaa761214 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp @@ -9,11 +9,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32 +const std::vector precisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -29,7 +28,7 @@ const std::vector fakeQuantizes = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndMaxPoolTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizes)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp index ee4b7ce29b47ef..5f873a1057f7e8 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp @@ -11,9 +11,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -31,7 +30,7 @@ const std::vector testValues = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(testValues)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp index 80d790854d7b36..5a399e3b6ea464 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector<ngraph::element::Type> netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -23,22 +22,22 @@ const std::vector testValues = { { - { ngraph::element::u8, ngraph::element::i8 }, - { ngraph::element::u8 }, + { ov::element::u8, ov::element::i8 }, + { ov::element::u8 }, true, { { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, { 255ul, { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } } }, { - ngraph::element::u8, + ov::element::u8, { 256ul, { }, { 0.f }, { 2.55f }, { 0.f }, { 255.f } }, { } }, }, { - { ngraph::element::u8, ngraph::element::i8 }, - { ngraph::element::i8 }, // Convolution on CPU doesn't support it, but it will be not used + { ov::element::u8, ov::element::i8 }, + { ov::element::i8 }, // Convolution on CPU doesn't support it, but it will not be used // INT8 is not available for limited operation (Convolution) false, { @@ -47,7 +46,7 @@ const std::vector testVa }, { // original precision is used - ngraph::element::u8, + ov::element::u8, // FakeQuantize has to select the first available precision (U8); the limited operation required I8, but this doesn't affect the result { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 255.f } }, // FakeQuantize on weights is not changed @@ -59,7 +58,7 @@ const std::vector testVa INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizePrecisionSelectionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(testValues)),
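// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of this patch. Every file touched
// here instantiates the same GoogleTest pattern: INSTANTIATE_TEST_SUITE_P
// plus ::testing::Combine runs each test over the cartesian product of
// precisions x shapes x transformation params x test values. A self-contained
// analogue (test and fixture names below are invented for illustration):
#include <gtest/gtest.h>
#include <tuple>
#include <vector>

class FqLevelsTest : public ::testing::TestWithParam<std::tuple<size_t, float>> {};

TEST_P(FqLevelsTest, QuantizationStepIsPositive) {
    const auto& param = GetParam();
    const size_t levels = std::get<0>(param);
    const float high = std::get<1>(param);
    // e.g. 256 levels over [0, 2.55] -> step 0.01, the u8 pattern used above
    EXPECT_GT(high / static_cast<float>(levels - 1), 0.f);
}

INSTANTIATE_TEST_SUITE_P(smoke_Example, FqLevelsTest,
    ::testing::Combine(
        ::testing::ValuesIn(std::vector<size_t>{16u, 256u}),  // quantization levels
        ::testing::Values(2.55f, 25.5f)));                    // interval high bound
// ---------------------------------------------------------------------------

diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp 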
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp index f27e3174a5a1c8..5b2244139f2107 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -89,7 +88,7 @@ const std::vector fakeQuantizeOnDataValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index 5c8732a5989227..7fabd71421e5a4 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -24,77 +24,77 @@ const std::vector trasformationParamValues = { const std::vector fakeQuantizeOnDataValues = { { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.3f}, ov::element::f32, {}, false } }, {}, "f32" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, 
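// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of this patch. The triples in the
// surrounding "DQ" descriptors ({convert}, {subtract}, {multiply}) stand for
// the Convert -> Subtract(zero point) -> Multiply(scale) chain that the low
// precision transformations match as dequantization. A minimal sketch of that
// chain with the public ov API (the helper name is ours):
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"

ov::Output<ov::Node> dequantize(const ov::Output<ov::Node>& quantized,
                                float zero_point, float scale) {
    // {ov::element::f32}: bring the integer tensor into f32 for arithmetic
    auto convert = std::make_shared<ov::op::v0::Convert>(quantized, ov::element::f32);
    // { {-128.f}, ov::element::f32, {}, ... }: subtract the zero point
    auto zp = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {zero_point});
    auto centered = std::make_shared<ov::op::v1::Subtract>(convert, zp);
    // { {0.1f}, ov::element::f32, {}, ... }: apply the scale
    auto scale_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {scale});
    return std::make_shared<ov::op::v1::Multiply>(centered, scale_const);
}
// ---------------------------------------------------------------------------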
{}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "i8" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.3f}, ov::element::f32, {}, false } }, {}, "f32" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "u8" @@ -104,7 +104,7 @@ const std::vector fakeQuanti INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeWithNotOptimalTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp index 6f19bde8288ddf..43a868ffcaaf4e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp @@ -8,30 +8,28 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector shapes = { { - ngraph::PartialShape{ 1, 16 }, - ngraph::PartialShape{ 16, 8 }, + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 16, 8 }, false, false }, { - ngraph::PartialShape{ 1, 16 }, - ngraph::PartialShape{ 8, 16 }, + ov::PartialShape{ 1, 16 }, + ov::PartialShape{ 8, 16 }, false, true }, { - ngraph::PartialShape{ 16, 1 }, - 
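// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of this patch. Each row in this
// shapes list is {input shape A, input shape B, transposeA, transposeB} for
// the MatMul behind the fully-connected test; e.g. {16, 1} x {16, 8} only
// composes when A is transposed. Sketch with the public ov API:
#include <memory>
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"

auto make_fc_example() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                     ov::PartialShape{16, 1});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                     ov::PartialShape{16, 8});
    // transpose_a=true turns {16, 1} into {1, 16}, so the result is {1, 8}
    return std::make_shared<ov::op::v0::MatMul>(a, b, true, false);
}
// ---------------------------------------------------------------------------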
ngraph::PartialShape{ 16, 8 }, + ov::PartialShape{ 16, 1 }, + ov::PartialShape{ 16, 8 }, true, false }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp index 8ee91f7f161d6d..04e5ec8139b2d9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp @@ -5,25 +5,24 @@ #include "low_precision_transformations/fuse_convert_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - element::f32 +const std::vector precisions = { + ov::element::f32 }; -const std::vector< ngraph::PartialShape > inputAndQuantizationShapes = { +const std::vector< ov::PartialShape > inputAndQuantizationShapes = { { 1, 4, 16, 16 }, }; const std::vector deqOperations = { { - { ngraph::element::f32 }, + { ov::element::f32 }, {1.f}, {0.45f} }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, {0.45f} } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp index 9fc00f6521dd42..3c2af72db8f566 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp @@ -7,7 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { @@ -18,11 +17,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -30,11 +29,11 @@ const std::vector testValu {128, 3}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, - { {}, {}, { {0.01f, 0.1f, 1.f}, ngraph::element::f32, {1, 3} } }, - ngraph::element::f32, + ov::element::f32, + { {}, {}, { {0.01f, 0.1f, 1.f}, ov::element::f32, {1, 3} } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -43,11 +42,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { {0.01f, 0.f, 0.01f} } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -56,11 +55,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, 
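// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of this patch. The fuse cases here
// pair a dequantization {subtract, multiply} with a following FakeQuantize;
// LPT folds the former into the latter's input interval. If
// x' = (x - shift) * scale feeds FQ(in_lo, in_hi), the fused op is
// FQ(in_lo / scale + shift, in_hi / scale + shift) applied to x directly.
// Hedged arithmetic sketch (struct and function names are ours):
struct Interval { float lo; float hi; };

Interval fold_dequantize_into_fq(Interval fq_in, float scale, float shift) {
    // invert y = (x - shift) * scale at both interval ends
    return { fq_in.lo / scale + shift, fq_in.hi / scale + shift };
}
// e.g. scale = 0.01f, shift = 0.f: FQ over [0, 2.55] becomes FQ over [0, 255]
// on the raw input, matching the { {}, {}, { 0.01f } } cases in this list.
// ---------------------------------------------------------------------------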
{ 2.55f }, { 0.f }, { 2.55f } } } }, @@ -69,11 +68,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -82,11 +81,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { {128}, ngraph::element::f32 }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, + { {128}, ov::element::f32 }, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -97,9 +96,9 @@ const std::vector testValu { { }, { }, - ngraph::element::i32, - { {ngraph::element::f32}, {}, {} }, - ngraph::element::f32, + ov::element::i32, + { {ov::element::f32}, {}, {} }, + ov::element::f32, { 256ul, {}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } } } }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp index 8c23420484e2ad..a3d26436350cc2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp @@ -12,8 +12,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -36,7 +36,7 @@ const std::vector fakeQuantizeOnD INSTANTIATE_TEST_SUITE_P(smoke_LPT, FuseFakeQuantizeAndScaleShiftTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 9, 9 })), + ::testing::Values(ov::PartialShape({ 1, 3, 9, 9 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp index d422d30037470a..61e49894cd7f30 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp @@ -7,8 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; -using namespace ngraph; namespace { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp index 298cfa4d215978..0c2633c9825110 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp @@ -7,8 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; -using namespace ngraph; namespace { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp index 646b842e5b5013..1cbd7152fbccfb 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp @@ -10,8 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector precisions = { - ngraph::element::f32, +const std::vector precisions = { + ov::element::f32 }; const std::vector opset_version = { @@ -27,7 +27,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, // U8: per-channel quantization @@ -38,7 +38,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {1, 3, 1}, @@ -56,7 +56,7 @@ const std::vector testValues = { {1}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {3, 1}, @@ -74,7 +74,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp index 7b463b61b313bb..3e0b7e149482ac 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp @@ -11,12 +11,11 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; -const std::vector dimensions = { +const std::vector dimensions = { { 1, 3, 16, 16 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp index fe5ef5c998addd..3ccc4c23d752cc 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp @@ -10,8 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -20,7 +20,7 @@ const std::vector trasform const std::vector addPrecisionPreserved = { true, false }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }}, {{ 1, 6, 24 }, { 1, 24, 18 }} }; @@ -30,8 +30,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -40,8 +40,8 @@ const std::vector pa { 3ul, 0, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -50,8 +50,8 @@ const std::vector pa { 3ul, 1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -68,7 +68,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "u8" @@ -77,8 +77,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -96,7 +96,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, GroupConvolutionTransformation::getTestCaseName); namespace test_values_4d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }}, }; @@ -105,8 +105,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -115,8 +115,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, 
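// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of this patch. The weight
// FakeQuantize constants shaped { 3, 8, 1, 1, 1 } in the per-channel cases
// here follow the GroupConvolution weight layout
// [GROUPS, C_OUT / GROUPS, C_IN / GROUPS, kH, kW]: 24 output channels in
// 3 groups give 3 x 8 per-channel limits. A sketch matching the
// {1, 6, 24, 24} -> {1, 24, 18, 18} shape pair above (helper name is ours):
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/group_conv.hpp"
#include "openvino/op/parameter.hpp"

auto make_group_conv_example() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                        ov::PartialShape{1, 6, 24, 24});
    // 3 groups x 8 outputs x 2 inputs per group, 7x7 kernels: 24 - 7 + 1 = 18
    auto weights = ov::op::v0::Constant::create(
        ov::element::f32, ov::Shape{3, 8, 2, 7, 7},
        std::vector<float>(3 * 8 * 2 * 7 * 7, 1.f));
    return std::make_shared<ov::op::v1::GroupConvolution>(
        data, weights, ov::Strides{1, 1}, ov::CoordinateDiff{0, 0},
        ov::CoordinateDiff{0, 0}, ov::Strides{1, 1});
}
// ---------------------------------------------------------------------------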
ngraph::Shape { 3, 8, 1, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -148,7 +148,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_4d namespace test_values_3d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24 }, { 1, 24, 18 }}, }; @@ -157,8 +157,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "u8" @@ -167,8 +167,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -200,7 +200,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_3d namespace depthwise { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 6, 18, 18 }}, {{ 1, 6, 24 }, { 1, 6, 18 }}, }; @@ -210,8 +210,8 @@ const std::vector pa { 6ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -228,7 +228,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -245,7 +245,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "", "" @@ -264,7 +264,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace depthwise namespace i8_3d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{1, 6, 1, 24, 24}, {1, 24, 1, 18, 18}}, {{1, 24, 8, 12, 12}, {1, 24, 1, 1, 1}} }; @@ -274,8 +274,8 @@ const std::vector pa { 3ul, -1, - {256ul, ngraph::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}}, - {255ul, ngraph::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, + {256ul, ov::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}}, + {255ul, ov::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, true, "Convolution", "i8" diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp index 319d5c3cfa626f..f59641eb25a54d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -13,9 +13,8 @@ using namespace LayerTestsDefinitions; namespace { // clang-format off -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -63,20 +62,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -127,20 +126,20 @@ const std::vector // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} 
}, "output_original", @@ -179,20 +178,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -234,20 +233,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5}}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5}}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -291,20 +290,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -351,20 +350,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false 
}, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -411,20 +410,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ngraph::element::i8, true }, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { ov::element::f32, false }, + { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ov::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ov::element::i8, true }, + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -468,20 +467,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -525,20 +524,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, 
ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -585,20 +584,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -607,7 +606,7 @@ const std::vector }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 6, 24, 24 } }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp index 3a0e72401cb58b..83110bab6dc9a1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp @@ -5,35 +5,34 @@ #include "low_precision_transformations/interpolate_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32 +const std::vector precisions = { + ov::element::f32 }; -const std::vector> shapes = { +const std::vector> shapes = { {{1, 4, 16, 16}, {32, 32}}, {{1, 2, 48, 80}, {50, 60}}, }; const std::vector interpAttrs = { interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, true, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "linear", false, false, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp index 4dcb9ef2475381..a19455e85c1957 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp @@ -7,52 +7,50 @@ #include "low_precision_transformations/mat_mul_transformation.hpp" using namespace 
LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; std::vector testValues = { { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, + { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "u8" }, { { 8, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, + { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, { 8, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "u8" }, { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "i8" }, { { 1, 1, 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 1, 1, 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original", "i8" }, { { 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, "matMul_original/MM", "i8" } @@ -61,7 +59,7 @@ std::vector testValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, MatMulTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 384, 1024 })), + ::testing::Values(ov::PartialShape({ 1, 384, 1024 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues)), MatMulTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 2aa4269a850d08..51578d3c1a993e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -7,10 +7,11 @@ #include "low_precision_transformations/mat_mul_with_constant_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { ngraph::element::f32 }; +const std::vector precisions = { + ov::element::f32 +}; //transpose_a = false, transpose_b = true std::vector testValues = { @@ -18,7 +19,7 @@ std::vector testValues = { { { 2, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), 
ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -28,9 +29,9 @@ std::vector testValues = { { { 2, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } }, {}, - { ngraph::element::f32, {}, {0.1f} }, + { ov::element::f32, {}, {0.1f} }, "FullyConnected", "u8" }, @@ -38,7 +39,7 @@ std::vector testValues = { { { 1, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}, {-10.5f}, {4.5f}, {-10.5f}, {4.5f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -48,7 +49,7 @@ std::vector testValues = { { { 1, 1, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -58,9 +59,9 @@ std::vector testValues = { { { 1, 1, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f}, {255.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 255.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } }, {}, - { ngraph::element::f32, {}, {{0.1f, 0.01f}, ngraph::element::f32, ngraph::Shape{ 2, 1 }} }, + { ov::element::f32, {}, {{0.1f, 0.01f}, ov::element::f32, ov::Shape{ 2, 1 }} }, "FullyConnected", "u8" }, @@ -68,7 +69,7 @@ std::vector testValues = { { { 1, 3, 4 }, { 256ul, {{1}, {1}, {1}, {1}}, {0.f}, {255.f}, {0.f}, {25.5f} }, - { std::vector(4 * 4, 2.f), ngraph::element::f32, ngraph::Shape{ 4, 4 } }, + { std::vector(4 * 4, 2.f), ov::element::f32, ov::Shape{ 4, 4 } }, { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-128.f}, {127.f} }, { {}, {}, {} }, "FullyConnected", @@ -78,7 +79,7 @@ std::vector testValues = { { { 2, 3 }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} }, - { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::f32, ngraph::Shape{ 2, 3 } }, + { std::vector{1, 2, 3, 4, 5, 6}, ov::element::f32, ov::Shape{ 2, 3 } }, { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-12.8f}, {12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -88,9 +89,9 @@ std::vector testValues = { { { 2, 3 }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-10.f}, {5.f}, {-10.f, -5.f}, {5.f, 5.f} }, - { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::i8, ngraph::Shape{ 2, 3 } }, + { std::vector{1, 2, 3, 4, 5, 6}, ov::element::i8, ov::Shape{ 2, 3 } }, {}, - { ngraph::element::f32, {}, {0.1f} }, + { ov::element::f32, {}, {0.1f} }, "FullyConnected", "u8" } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index b688a629918151..a1d920576f74e1 100644 --- 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -10,18 +10,18 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector params = { { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } } + { 256ul, ov::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } } }, }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 16 }, { 10, 16 }}, {{ 1, 16 }, { 16, 10 }} }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp index f32a45256b187d..a933b2170511ff 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp @@ -9,9 +9,8 @@ using namespace LayerTestsDefinitions; -const std::vector netPrecisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -48,9 +47,9 @@ const std::vector pa 3, "", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, { 0.01f } }, @@ -63,9 +62,9 @@ const std::vector pa 3, "relu", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, { 0.01f } }, @@ -103,11 +102,11 @@ const std::vector pa {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, - { {0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f}, ngraph::element::f32, {1, 6, 1, 1} }, + { {0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f}, ov::element::f32, {1, 6, 1, 1} }, }, "Concatenation", "u8", @@ -125,10 +124,10 @@ const std::vector pa {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, - { {-127.f, -127.f / 2.f, -127.f / 3.f, -127.f / 4.f, -127.f / 5.f, -127.f / 6.f}, ngraph::element::f32, {1, 6, 1, 1} }, + { ov::element::f32 }, + { {-127.f, -127.f / 2.f, -127.f / 3.f, -127.f / 4.f, -127.f / 5.f, -127.f / 6.f}, ov::element::f32, {1, 6, 1, 1} }, { 0.01f }, }, "Concatenation", @@ -137,7 +136,7 @@ const std::vector pa }, }; -const std::vector> shapes = { +const std::vector> shapes = { {{ 1, 1, 16, 16 }, { 1, 2, 16, 16 }, { 1, 3, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 2, 16, 16 }, { 4, 3, 16, 16 }} }; @@ -168,7 +167,7 @@ namespace testValues2 { -1 }, }; - const std::vector> shapes = { + const std::vector> shapes = { {{ 1, 1, 16, 16 }, { 1, 1, 16, 16 }, { 1, 1, 
16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 1, 16, 16 }, { 4, 1, 16, 16 }} }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp index 3dacda4a854636..510c6bac6dfc46 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution.cpp @@ -5,22 +5,21 @@ #include "low_precision_transformations/multiply_to_group_convolution_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - element::f32 +const std::vector precisions = { + ov::element::f32 }; namespace shape4d { -const std::vector inputShapes = { +const std::vector inputShapes = { { 1ul, 3ul, 16ul, 16ul }, { 4ul, 3ul, 16ul, 16ul } }; const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -28,7 +27,7 @@ const std::vector params = { }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{1, 1, 1, 1}}, "output/GroupConvolution", "", @@ -36,7 +35,7 @@ const std::vector params = { }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{}}, "output/GroupConvolution", "", @@ -44,7 +43,7 @@ const std::vector params = { }, // Zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -52,7 +51,7 @@ const std::vector params = { }, // Zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -73,42 +72,42 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyToGroupConvolutionTransformation, } // namespace shape4d namespace shape5d { -const std::vector inputShapes = { +const std::vector inputShapes = { { 1ul, 3ul, 16ul, 16ul, 16ul }, { 4ul, 3ul, 16ul, 16ul, 16ul } }; const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}}, "output/GroupConvolution", "U8" }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{1, 
1, 1, 1, 1}}, "output/GroupConvolution", "" }, // Multiply with scalar is not transformed to GroupConvolution { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{4.f}, element::f32, Shape{}}, "output/GroupConvolution", "" }, // Zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}}, "output/GroupConvolution", "U8" }, // Zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1, 1}}, "output/GroupConvolution", "U8" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp index 6f43ef6d917b2d..4e58bdaa91466f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; // If snippets fuse all operations into one subgraph node, @@ -22,84 +21,84 @@ const auto precision_for_fused_cases = ov::element::undefined; const std::vector params = { { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, precision_for_fused_cases, true }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, precision_for_fused_cases, false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, precision_for_fused_cases, false }, { true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 
1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, precision_for_fused_cases, false }, { true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, precision_for_fused_cases, false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, precision_for_fused_cases, false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, precision_for_fused_cases, false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, precision_for_fused_cases, false }, - { false, {}, false, {}, {}, ngraph::element::f32, false }, - { true, {}, true, {}, {}, ngraph::element::f32, false }, + { false, {}, false, {}, {}, ov::element::f32, false }, + { true, {}, true, {}, {}, ov::element::f32, false }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(params)), MultiplyTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp 
index 36536fab7343ce..7bc084b95a3d1c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent.cpp @@ -10,21 +10,20 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector values = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } } + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyWithOneParentTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(values)), MultiplyWithOneParentTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp index 4e1d16c17f5713..3eebef6330e9a1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp @@ -5,14 +5,13 @@ #include "low_precision_transformations/mvn_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - element::f32 +const std::vector precisions = { + ov::element::f32 }; -const std::vector inputAndQuantizationShapes = { +const std::vector inputAndQuantizationShapes = { { 1ul, 4ul, 16ul, 16ul }, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp index fb2c1d4128b07f..378ad22a804f58 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp @@ -8,15 +8,13 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; -const std::vector > inputAndQuantizationShapes = { +const std::vector > inputAndQuantizationShapes = { { { 1ul, 4ul, 16ul, 16ul }, { 1ul } }, { { 1ul, 4ul, 16ul, 16ul }, { 1ul, 4ul, 1ul, 1ul } }, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp index 0573b240e1da7c..230fad2785a858 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector<LayerTransformation::Params> trasformationParamValues = { @@ -26,7 +26,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, OutputLayers, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues)), OutputLayers::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp index 847ac5c32735a3..7355f55cad7d5a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector<LayerTransformation::Params> trasformationParamValues = { @@ -25,7 +25,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_LPT, OutputLayersConcat, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues)), OutputLayersConcat::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp index 3e2b1c01a99a65..2a4f1440b232d1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector<ov::element::Type> netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector<LayerTransformation::Params> trasformationParamValues = { @@ -26,7 +26,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_LPT, OutputLayersConcatMultiChannel, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues)), 
OutputLayersConcatMultiChannel::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp index 4d27832332ba60..3a91b39d1c89da 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp @@ -12,12 +12,11 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 3, 16, 16}, { 4, 3, 16, 16} }; @@ -27,17 +26,17 @@ const std::vector trasform }; namespace commonTestCases { -const std::vector padModes = { - ngraph::op::PadMode::CONSTANT, - ngraph::op::PadMode::EDGE, - ngraph::op::PadMode::REFLECT, - ngraph::op::PadMode::SYMMETRIC +const std::vector padModes = { + ov::op::PadMode::CONSTANT, + ov::op::PadMode::EDGE, + ov::op::PadMode::REFLECT, + ov::op::PadMode::SYMMETRIC }; const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, 0.f, @@ -47,7 +46,7 @@ const std::vector params = { // per-channel quantization with the same values { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -63,7 +62,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -92,7 +91,7 @@ namespace testCasesForConstantMode { const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, 0.f, @@ -100,7 +99,7 @@ const std::vector params = { "f32" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, -1, 1 }, { 0, 0, 1, -1 }, 0.f, @@ -108,7 +107,7 @@ const std::vector params = { "f32" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, 0, 0 }, { 0, 0, -1, -1 }, 0.f, @@ -117,7 +116,7 @@ const std::vector params = { }, // tensor quantization with subtract, non zero padValue and pad by unique dimension { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 2, 0 }, { 0, 0, 1, 0 }, 2.f, @@ -126,7 +125,7 @@ const std::vector params = { }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 2, 0 }, { 0, 0, -1, 0 }, 2.f, @@ -134,7 +133,7 @@ const std::vector params = { "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 
256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, -1, 0 }, { 0, 0, -1, 0 }, 2.f, @@ -145,7 +144,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -160,7 +159,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -175,7 +174,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -191,7 +190,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f } }, @@ -204,7 +203,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f } }, @@ -217,7 +216,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f } }, @@ -233,7 +232,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(inputShapes), - ::testing::Values(ngraph::op::PadMode::CONSTANT), + ::testing::Values(ov::op::PadMode::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), @@ -241,16 +240,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation, } // namespace testCasesForConstantMode namespace testCasesForOtherModes { -const std::vector modesWithoutConstant = { - ngraph::op::PadMode::EDGE, - ngraph::op::PadMode::REFLECT, - ngraph::op::PadMode::SYMMETRIC +const std::vector modesWithoutConstant = { + ov::op::PadMode::EDGE, + ov::op::PadMode::REFLECT, + ov::op::PadMode::SYMMETRIC }; const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, 0.f, @@ -261,7 +260,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f }, { -2.f, -4.f, -6.f }, { 10.5f, 8.5f, 6.5f } }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp index 8efea7150bd341..7782168311ba7a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp @@ -8,25 +8,24 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32 +const std::vector precisions = { + ov::element::f32 }; std::vector testValues = { { {}, false}, - { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false 
}, - { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, - { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, - { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } + { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, + { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, + { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, + { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, PReluTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues)), PReluTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp index 03a1cc11af5aa5..303ae4f05ca975 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,52 +21,52 @@ const std::vector trasform const std::vector params = { { - ngraph::element::f32, + ov::element::f32, { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {}, - { std::vector{ 2.f }, ngraph::element::i8, {9, 16}}, + { std::vector{ 2.f }, ov::element::i8, {9, 16}}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.03f}, ngraph::element::f32, {/* from parameter */}, false } + { {0.03f}, ov::element::f32, {/* from parameter */}, false } }, { {3, 3, 16, 1} }, - { {2}, ngraph::element::f32, {1, 1, 16, 1}, false }, + { {2}, ov::element::f32, {1, 1, 16, 1}, false }, { {2, 3, 0, 1} }, { {16, 1, 1, 3, 3} }, - ngraph::element::f32, + ov::element::f32, {}, "output_original", "u8" }, { - ngraph::element::f32, + ov::element::f32, { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {}, - { std::vector{ 2.f }, ngraph::element::i8, {9, 16}}, + { std::vector{ 2.f }, ov::element::i8, {9, 16}}, { - { ngraph::element::f32, false }, - { {127.0f}, ngraph::element::f32, {/* from parameter */}, false}, - { {0.03f}, ngraph::element::f32, {/* from parameter */}, false } + { ov::element::f32, false }, + { {127.0f}, ov::element::f32, {/* from parameter */}, false}, + { {0.03f}, ov::element::f32, {/* from parameter */}, false } }, { {3, 3, 16, 1} }, - { {2}, ngraph::element::f32, {1, 1, 16, 1}, false }, + { {2}, ov::element::f32, {1, 1, 16, 1}, false }, { {2, 3, 0, 1} }, { {16, 1, 1, 3, 3} }, - ngraph::element::f32, + ov::element::f32, {}, "output_original", "f32" } }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 16, 9, 9 }, { 4, 16, 9, 9 } }; -const std::vector 
dequantizationOnWeightElementwiseConstantShapes = { - { ngraph::Shape({1, 16}) } +const std::vector dequantizationOnWeightElementwiseConstantShapes = { + { ov::Shape({1, 16}) } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, PullReshapeThroughDequantizationTransformation, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp index 42d513efad46fd..5b50dcce917a5a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp @@ -9,9 +9,8 @@ using namespace LayerTestsDefinitions; -const std::vector netPrecisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -25,17 +24,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -55,17 +54,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -83,8 +82,8 @@ const std::vector param } }; -const std::vector> activations_shapes = {{{1, 2, 16}, {1, 1, 128}, {1, 1, 128}}}; -const std::vector> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}}; +const std::vector> activations_shapes = {{{1, 2, 16}, {1, 1, 128}, {1, 1, 128}}}; +const std::vector> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}}; INSTANTIATE_TEST_SUITE_P(smoke_LPT, RecurrentCellTransformation, ::testing::Combine( @@ -104,17 +103,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -134,17 +133,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -162,8 +161,8 @@ const std::vector param } }; -const std::vector> activations_shapes = {{{1, 1, 3}, {1, 1, 3}, {}}}; -const std::vector> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}}; +const std::vector> activations_shapes = {{{1, 1, 3}, {1, 1, 3}, {}}}; +const std::vector> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}}; INSTANTIATE_TEST_SUITE_P(smoke_LPT, RecurrentCellTransformation, ::testing::Combine( diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp index f12d44e5195563..525e0201626283 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -23,28 +22,28 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, false, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, true, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, false, "Output_original", @@ -52,7 +51,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -65,7 +64,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -78,7 +77,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -91,7 +90,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -107,7 +106,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMaxTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp index 3e4506bac0fa9a..73db6da96a879a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp @@ -12,8 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { 
- ngraph::element::f32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,7 +22,7 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 1.27f }, { 0.f }, { 1.27f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 1.27f }, { 0.f }, { 1.27f } }, {}, {}, {{ 2, 3 }, true}, @@ -31,7 +31,7 @@ const std::vector params = "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 }, { ov::element::u8 }, { { ov::element::f32 }, @@ -44,7 +44,7 @@ const std::vector params = "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -128.f }, { 1.27f }, { 0.f }, { 255.f }, ov::element::f32 }, { ov::element::u8 }, { { ov::element::f32 }, @@ -57,7 +57,7 @@ const std::vector params = "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{ 2, 3 }, false}, @@ -66,7 +66,7 @@ const std::vector params = "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{ 1 }, true}, @@ -75,7 +75,7 @@ const std::vector params = "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{ 1 }, false}, @@ -85,7 +85,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -100,7 +100,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -115,7 +115,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -130,7 +130,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -148,7 +148,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMeanTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp index 07ef9b458ab632..f44e1926120360 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const 
std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -23,28 +22,28 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, false, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, true, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, false, "Output_original", @@ -52,7 +51,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -65,7 +64,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -78,7 +77,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -91,7 +90,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -107,7 +106,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMinTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp index 1f5aec32221848..6c2f89f1e88fd2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -23,14 +22,14 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, "Output_original", "u8" }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } }, { 2, 3 }, false, 
"Output_original", @@ -38,7 +37,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -51,7 +50,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -64,7 +63,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -77,7 +76,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -93,7 +92,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceSumTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp index 350e1892fc2ec5..600af536d4404c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp @@ -8,26 +8,24 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; std::vector testValues = { { {}, false}, - { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, - { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, - { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, - { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } + { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, + { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, + { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, + { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReluTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(testValues)), ReluTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp index 1cca68667da319..34d7ad6ea2ecf7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -24,7 +23,7 @@ const std::vector params = { { { 1, 3, 32 }, { 1, 3, 4, 8 }, - { 256ul, ngraph::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "u8" }, @@ -32,7 +31,7 @@ const std::vector params = { { { 1, 3, 32 }, { -1 }, - { 256ul, ngraph::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "u8" }, @@ -40,7 +39,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 1, 3, 256 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "u8" }, @@ -48,7 +47,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 0, 3, -1 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", "u8" }, @@ -56,7 +55,7 @@ const std::vector params = { { { 1, 3, 4, 8 }, { 1, -1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "u8" }, @@ -64,7 +63,7 @@ const std::vector params = { { { 1, 3, 4, 8 }, { 1, 3, 4, 8, 1, 1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "u8" }, @@ -74,7 +73,7 @@ const std::vector params = { { 1, -1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, @@ -89,7 +88,7 @@ const std::vector params = { { 1, 3, -1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, @@ -105,7 +104,7 @@ const std::vector params = { { 1, -1, 8 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, @@ -118,7 +117,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 1, 1, 48, 16 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -128,7 +127,7 @@ const std::vector params = { { { 1, 3, 16 }, { 1, 1, 6, 8 }, - { 256ul, ngraph::Shape{ 1, 3, 1 }, + { 256ul, ov::Shape{ 1, 3, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -138,7 +137,7 @@ const std::vector params = { { { 1, 3, 2, 4 }, { 1, 1, 24 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -148,7 +147,7 @@ const std::vector params = { { { 1, 3, 2, 4, 2 }, { 1, 1, 48 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 
255.f, 25.5f, 2.55f } }, "Reshape", @@ -158,7 +157,7 @@ const std::vector params = { { { 1, 3, 2, 4, 2 }, { 1, 1, 3, 16 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp index eefbdde3c26428..8753d9002fdf77 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp @@ -10,12 +10,11 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } }; @@ -26,14 +25,14 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, 0, 1, "output_original", "u8" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, -3, 1, "output_original", @@ -42,7 +41,7 @@ const std::vector par { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { 0.f, 0.f, 0.f }, @@ -56,7 +55,7 @@ const std::vector par { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { -4.f, -3.f, 0.f }, @@ -68,7 +67,7 @@ const std::vector par "u8" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, 2, 4, "output_original", @@ -77,7 +76,7 @@ const std::vector par { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp index f46191ab6a5f77..94908afacac49a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp @@ -10,16 +10,15 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector params = { { { 1, 3, 100, 171 }, { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, "SpaceToBatch", "u8" }, @@ -28,7 +27,7 @@ const std::vector params = { { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 }, { 256ul, - 
ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp index 7ea0b65146b823..635a53c74b37e6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp @@ -13,9 +13,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -28,18 +27,18 @@ const std::vector trasform const std::vector params = { // tensor quantization, split second dimension { - { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, + { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, 2, 2ul }, // tensor quantization, split third dimension { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, -1, 2ul }, // per-channel quantization with the same values, split second dimension { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -50,7 +49,7 @@ const std::vector params = { // per-channel quantization with the same values, per-channel split { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -62,7 +61,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -74,7 +73,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -87,7 +86,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, SplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp index 794ddb29868e49..56ccc48355b896 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp @@ -11,11 +11,10 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { - const std::vector precisions = { - ngraph::element::f32 + const std::vector precisions = { + ov::element::f32 }; - const std::vector trasformationParamValues = { 
LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8().setUpdatePrecisions(false), @@ -24,22 +23,22 @@ namespace { const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3 }, { 1, 3, 5, 1} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2, 3 }, { 1, 1, 1, 1 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3 }, { 1, 64, 32, 1 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2.0, 3.0 }, { 1, 32, 1, 1 } } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp index 1f08981510580d..ebd9cca91a8e80 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp @@ -12,9 +12,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -24,7 +23,7 @@ const std::vector trasform const std::vector params = { // channel slice, tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 0, 0 }, // begin { 1, 2, 1, 1 }, // end { 1, 1, 1, 1 }, // strided @@ -36,7 +35,7 @@ const std::vector params }, // special dimension slice, tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 0, 0 }, { 1, 3, 20, 24 }, { 1, 1, 1, 1 }, @@ -50,7 +49,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -69,7 +68,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -88,7 +87,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -108,7 +107,7 @@ const std::vector params INSTANTIATE_TEST_SUITE_P(smoke_LPT, StridedSliceTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 24, 24 })), + ::testing::Values(ov::PartialShape({ 1, 3, 24, 24 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp index 690c5103c1f4a0..03ec01495394ba 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp @@ -15,16 +15,16 @@ const std::vector testVal // U8: Multiply {} => Multiply (ScaleShift) { {1, 3, 16, 16}, - ngraph::element::f32, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + ov::element::f32, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, }, // U8: Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift) { {1, 3, 16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1, 3, 1, 1}), + ov::Shape({1, 3, 1, 1}), {0.f, 0.f, 0.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f}, {0.f, 0.f, 0.f}, @@ -34,10 +34,10 @@ const std::vector testVal // U8: Subtract + Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift) { {1, 3, 16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1, 3, 1, 1}), + ov::Shape({1, 3, 1, 1}), {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f}, {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f}, @@ -46,10 +46,10 @@ const std::vector testVal }, { {1, 3, 16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1}), + ov::Shape({1}), {2.55f / 2}, {2.55f}, {2.55f / 2}, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp index 8d7fda0b60767e..89fcd6dfc271ca 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -25,7 +25,7 @@ const std::vector trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, SubtractTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues)), SubtractTransformation::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp index 663efad28b2ce6..7d29f015c8c0b3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -11,9 +11,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -29,7 +28,7 @@ const std::vector transposeChannelDimValues = { true, false }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, TransposeAfterMatMulTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(perTensorValues), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp index 2125ceb013cdd7..1d458f02445152 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp @@ -10,9 +10,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector precisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector precisions = { + ov::element::f32 }; const std::vector testValues = { @@ -21,7 +20,7 @@ const std::vector testValues = { { 1, 1000, 1, 1}, { 0, 2, 3, 1}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, // U8: per-channel quantization @@ -29,7 +28,7 @@ const std::vector testValues = { { 1, 3, 1, 1}, { 0, 2, 3, 1}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {1, 3, 1, 1}, @@ -44,7 +43,7 @@ const std::vector testValues = { { 1, 1000, 1, 1, 3, 4}, { 0, 2, 1, 3, 5, 4}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp index 8b03c6c000fdd3..d8f92ba36531bd 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp @@ -11,11 +11,10 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { - const std::vector precisions = { - ngraph::element::f32 + const std::vector precisions = { + ov::element::f32 }; - const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8().setUpdatePrecisions(false), @@ -24,27 +23,27 @@ namespace { const std::vector 
params = { { - { 256ul, ngraph::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 3, 5} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 3, 3, 5 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 4, 5, 6 } }, { - { 256ul, ngraph::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2.0, 3.0 }, { 3, 4 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 4.0 }, { 46, 128, 2, 3 } } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp index afc6ca568aa3dc..4d3827f31cdc23 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp @@ -13,9 +13,8 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -28,13 +27,13 @@ const std::vector trasform const std::vector params{ // tensor quantization, split second dimension { - { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, + { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, 2, std::vector{9, 7} }, // tensor quantization, split third dimension { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, -1, std::vector{15, 1} }, @@ -42,7 +41,7 @@ const std::vector param { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -55,7 +54,7 @@ const std::vector param { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -67,7 +66,7 @@ const std::vector param // per-channel quantization with the same values, per-channel split { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -79,7 +78,7 @@ const std::vector param // per-channel quantization with the same values, split third dimension { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -93,7 +92,7 @@ const std::vector param INSTANTIATE_TEST_SUITE_P(smoke_LPT, VariadicSplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 
16, 16 })),
         ::testing::Values(ov::test::utils::DEVICE_CPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp
index 91b899090b6792..7a118d125d7a52 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp
@@ -79,6 +79,8 @@ std::vector<RawParams> raw_test_cases = {
                { 1, 0 }, { 1, 0 }, { }, { }, { 1, 0 } },
     RawParams{ {{ 20, 10, 5 }}, { 0, 0 }, { 0, -1 }, { 1, 1 },
                { 1, 0 }, { 1, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
+    RawParams{ {{ 1, 8400, 6 }}, { 0, 2 }, { 0, 4 }, { 1, 1 },
+               { 0 }, { 0 }, { 0 }, { 0 }, { 1 } },
     RawParams{ {{ 1, 12, 100, 1, 1 }}, { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 },
                { 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {} },
     RawParams{ {{ 2, 2, 2, 2 }}, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
index 8537e7882161a7..ddff2c7d345de3 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/core/visibility.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
 
 #include "openvino/runtime/system_conf.hpp"
@@ -68,15 +69,10 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*ReduceOpsLayerTest.*type=Mean_.*netPRC=(I64|I32).*)",
         R"(.*ReduceOpsLayerTest.*type=Mean_.*netPRC=U64.*)",
         // Not implemented yet:
-        R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)",
         R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)",
-        R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)",
         R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)",
-        R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)",
         R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)",
-        R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*ExecGraphInfo.*)",
         R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*ExecGraphInfo.*)",
-        R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*CanCreateTwoExeNetworksAndCheckFunction.*)",
         R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*canCreateTwoCompiledModelAndCheckTheir.*)",
         // CPU does not support dynamic rank
         // Issue: 66778
@@ -137,11 +133,6 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)",
         // Issue: 105838
         R"(smoke_NmsLayerTest.*)",
-        // Issue: 95590
-        R"(.*CachingSupportCase.*CompileModelCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(u|i).*)",
-        // Issue: 95607
-        R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(i|u).*)",
-        R"(.*CachingSupportCase.*ReadConcatSplitAssign.*)",
         // 94982. FP32->I32 conversion issue in the reference implementation. There can be some garbage in the rest of
         // float values like 0.333333745.
         // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects
@@ -214,9 +205,24 @@ std::vector<std::string> disabledTestPatterns() {
         R"(^smoke_Multinomial(?:Static|Dynamic)+(?:Log)*.*seed_g=0_seed_o=0.*device=CPU.*)",
         // Issue: 129025
         R"(.*smoke_CpuExecNetworkCheck.*StreamsHasHigherPriorityThanLatencyHint.*)",
-#ifdef OPENVINO_ARCH_32_BIT
+        // Issue: 119648
+        R"(.*smoke_LPT/InterpolateTransformation.*)",
+        // Issue: 129931
+        R"(smoke_FQLayerDQBias_4D_dynamic/FQLayerDQBias.*)",
+        R"(smoke_FQLayerDQBias_4D_static/FQLayerDQBias.*)",
+        R"(smoke_LPT/ConvolutionTransformation.*)",
+        R"(smoke_LPT/ConvolutionWIthIncorrectWeightsTransformation.*)",
+        R"(smoke_LPT/EliminateFakeQuantizeTransformation.*)",
+        R"(smoke_LPT/FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation.*)",
+        R"(smoke_LPT/FakeQuantizePrecisionSelectionTransformation.*)",
+        R"(smoke_LPT/GroupConvolutionTransformation.*)",
+        R"(smoke_LPT/MatMulTransformation.*)",
+        R"(smoke_LPT/MatMulWithOptimizedConstantFq.*)",
+        R"(smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.*)",
+        R"(smoke_QuantizedConvolutionBatchNormTransposeOnWeights/QuantizedConvolutionBatchNorm.*)",
+#if defined(OPENVINO_ARCH_ARM)
         // Issue: 126177
-        R"(.*smoke_CompareWithRefs_4D_Bitwise.*/EltwiseLayerCPUTest.CompareWithRefs/.*_eltwiseOpType=Bitwise.*_NetType=i32_.*)"
+        R"(.*smoke_CompareWithRefs_4D_Bitwise.*/EltwiseLayerCPUTest.*_eltwise_op_type=Bitwise.*_model_type=i32_.*)"
 #endif
     };
@@ -274,14 +280,14 @@ std::vector<std::string> disabledTestPatterns() {
         retVector.emplace_back(R"(smoke_dynamicShapes4D.*INFERENCE_PRECISION_HINT=f16.*)");
         // Issue: 124309
         retVector.emplace_back(R"(.*InferRequestPreprocessConversionTest.*oLT=NHWC.*)");
-        retVector.emplace_back(R"(.*smoke_NoReshape/ExecGraphUniqueNodeNames.CheckUniqueNodeNames.*)");
+        retVector.emplace_back(R"(.*smoke_NoReshape/OVCompiledModelGraphUniqueNodeNamesTest.CheckUniqueNodeNames.*)");
         retVector.emplace_back(R"(.*smoke_BehaviorTests/InferRequestPerfCountersTest.CheckOperationInPerfMap.*)");
-        retVector.emplace_back(R"(smoke_BehaviorTests/ExecutableNetworkBaseTest.CheckExecGraphInfo.*)");
         retVector.emplace_back(R"(smoke_BehaviorTests/OVCompiledModelBaseTestOptional.CheckExecGraphInfo.*)");
         retVector.emplace_back(
             R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=FakeQuantizeBinaryConvolution.*)");
         // Issue: 124395
         retVector.emplace_back(R"(smoke_VariableStateBasic/InferRequestVariableStateTest.*)");
+        retVector.emplace_back(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)");
 # endif
 #endif
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
index 77c78e31ca6b00..dc25378528199c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp
@@ -3,8 +3,9 @@
 //
 
 #include "snippets/matmul.hpp"
+
 #include "common_test_utils/test_constants.hpp"
-#include "ie_system_conf.h"
+#include "openvino/runtime/system_conf.hpp"
 
 namespace ov {
 namespace test {
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp
index 437c8c5b97ec0a..ec97a61647b5b1 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp
+++
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/transpose_matmul.cpp @@ -3,8 +3,9 @@ // #include "snippets/transpose_matmul.hpp" + #include "common_test_utils/test_constants.hpp" -#include "ie_system_conf.h" +#include "openvino/runtime/system_conf.hpp" namespace ov { namespace test { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp index a70b3c7bbc3659..357f27eb04a4e3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; using namespace ov::test; namespace { @@ -22,6 +21,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTest, ::testing::Combine(::testing::ValuesIn(inputShapes), ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); + ParameterResultSubgraphTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp index 5eafb558b057e4..132450682059e5 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp @@ -83,6 +83,10 @@ void ActivationLayerCPUTest::generate_inputs(const std::vector& targe in_data.range = range; in_data.resolution = resolution; tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); + // cover Sign NAN test case + if ((activationType == utils::ActivationTypes::Sign) && funcInput.get_element_type() == ov::element::f32) { + static_cast(tensor.data())[0] = std::numeric_limits::quiet_NaN(); + } } else { tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); } diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp index 60205e1a0591a7..3cb45936f4709f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp @@ -83,7 +83,7 @@ void ConvolutionLayerCPUTest::checkBiasFusing(ov::CompiledModel& execNet) const return it->second.as(); }; - if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") { + if (getExecValue(ov::exec_model_info::LAYER_TYPE) == "Convolution") { foundConv = true; ASSERT_EQ(3, node->inputs().size()); break; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp index 6d9ac97310f82a..3362bda02fdb78 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp @@ -274,6 +274,20 @@ INSTANTIATE_TEST_SUITE_P(smoke_static_1D, ::testing::Values(additionalConfig[0])), GatherLayerTestCPU::getTestCaseName); +const std::vector> staticInputShapes1DI32 = {{{{}, {{1}}}, {{}, {{1}}}}, + {{{}, {{15}}}, {{}, {{15}}}}, + {{{}, 
{{64}}}, {{}, {{64}}}}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_static_1D_I32,
+                         GatherLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(staticInputShapes1DI32),
+                                            ::testing::Values(std::tuple<int, int>{0, 0}),
+                                            ::testing::Values(ElementType::i32),
+                                            ::testing::Values(true),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}),
+                                            ::testing::Values(additionalConfig[0])),
+                         GatherLayerTestCPU::getTestCaseName);
+
 const std::vector<std::vector<InputShape>> dynamicInputShapes1D = {
     {{{ov::Dimension{1, 70}},  // Dynamic shape 0
       {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13},
@@ -293,6 +307,23 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D,
                                             ::testing::Values(additionalConfig[0])),
                          GatherLayerTestCPU::getTestCaseName);
 
+const std::vector<std::vector<InputShape>> dynamicInputShapes1DI32 = {
+    {{{ov::Dimension{1, 70}},  // Dynamic shape 0
+      {{1}, {15}, {64}}},      // Target shapes
+     {{-1},                    // Dynamic shape 1
+      {{1}, {15}, {64}}}}      // Target shapes
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D_I32,
+                         GatherLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(dynamicInputShapes1DI32),
+                                            ::testing::Values(std::tuple<int, int>{0, 0}),
+                                            ::testing::Values(ElementType::i32),
+                                            ::testing::Values(true, false),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}),
+                                            ::testing::Values(additionalConfig[0])),
+                         GatherLayerTestCPU::getTestCaseName);
+
 ///// 4D JIT /////
 std::vector<std::vector<InputShape>> get4DShapesJitStat(int maxBatchDims) {
     std::vector<std::vector<InputShape>> result = {};
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp
index fba83b23446dbc..18ae356d62ae05 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp
@@ -101,7 +101,7 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface
             return it->second.as<std::string>();
         };
-        if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") {
+        if (getExecValue(ov::exec_model_info::LAYER_TYPE) == "Convolution") {
             foundConv = true;
             ASSERT_EQ(3, node->inputs().size());
             break;
@@ -225,8 +225,8 @@ TEST_P(ExpectFallbackGroupConvolutionLayerCPUTest, CompareWithRefs) {
             OPENVINO_ASSERT(rtInfo.end() != it);
             return it->second.as<std::string>();
         };
-        if ("Convolution" == getExecValue(ExecGraphInfoSerialization::LAYER_TYPE)) {
-            auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE);
+        if ("Convolution" == getExecValue(ov::exec_model_info::LAYER_TYPE)) {
+            auto primType = getExecValue(ov::exec_model_info::IMPL_TYPE);
             ASSERT_TRUE(selectedType != primType) << "primType is unexpected: " << primType;
         }
     }
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
index 6850cd585ae1ab..878176c900e101 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp
@@ -795,6 +795,46 @@ const auto testParams3D_nightly = ::testing::Combine(fullyConnectedParams3D_nigh
                                                      ::testing::ValuesIn(filterCPUInfo(filterSpecificParams())));
 
 INSTANTIATE_TEST_SUITE_P(nightly_FC_3D, MatMulLayerCPUTest, testParams3D_nightly, MatMulLayerCPUTest::getTestCaseName);
+
+class MatMulLayerCPUTestUndefShapes : public MatMulLayerCPUTest {
+};
+
+TEST_P(MatMulLayerCPUTestUndefShapes, CompareWithRefs) {
+    auto second_shape = inputDynamicShapes.at(1);
+    PartialShape new_second_shape(std::vector<ov::Dimension>(second_shape.rank().get_length(), -1));
+    std::map<size_t, ov::PartialShape> new_inputs;
+    new_inputs[0] = inputDynamicShapes.at(0);
+    new_inputs[1] = new_second_shape;
+    function->reshape(new_inputs);
+    run();
+    CheckPluginRelatedResults(compiledModel, cpuNodeType);
+}
+
+const fusingSpecificParams matmulFullDynInputsFusingParams[] = {
+    fusingMultiplyPerChannel,
+    fusingMultiplyAddPerChannel,
+    fusingAddPerChannel
+};
+
+const auto matMulParamsDynamicFusingFullUndefShapes = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing),
+                                                                         ::testing::Values(ElementType::f32),
+                                                                         ::testing::Values(ElementType::undefined),
+                                                                         ::testing::Values(ElementType::undefined),
+                                                                         ::testing::Values(utils::InputLayerType::PARAMETER),
+                                                                         ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                                                         ::testing::Values(emptyAdditionalConfig()));
+
+const auto testParamsDynamicFusingFullUndefShapes = ::testing::Combine(matMulParamsDynamicFusingFullUndefShapes,
+                                                                       ::testing::Values(MatMulNodeType::MatMul),
+                                                                       ::testing::ValuesIn(matmulFullDynInputsFusingParams),
+                                                                       ::testing::ValuesIn(filterCPUInfo(filterSpecificParams())));
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_MM_Dynamic_Fusing_Full_Undef_Shapes,
+    MatMulLayerCPUTestUndefShapes,
+    testParamsDynamicFusingFullUndefShapes,
+    MatMulLayerCPUTest::getTestCaseName);
+
 }  // namespace
 }  // namespace MatMul
 }  // namespace test
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
index 3c43939e654bf2..fdf408ded1e676 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
@@ -112,6 +112,12 @@ const std::vector<ScatterUpdateLayerParams> scatterParams = {
         {{4, 2, {3, 9}, {4, 11}, {2, 3}, {2, 4}},
          {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}},
     IndicesDescription{{4, 2}, {0, 2, 4, 6, 1, 3, 5, 7}},
     Axis{0}},
+    ScatterUpdateLayerParams{ScatterUpdateShapes{
+        {{-1}, {{9}, {32}, {63}, {64}}},
+        {{-1}, {{2}, {2}, {2}, {2}}},
+    },
+    IndicesDescription{{2}, {1, 8}},
+    Axis{0}},
 };
 
 const std::vector inputPrecisions = {
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
index f76100141cb7e4..6b04f843ca7860 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
@@ -224,7 +224,8 @@ const std::vector<StridedSliceParams> testCasesCommon4D = {
     StridedSliceParams{{0, 1, 0, 10}, {1, 5, 32, 30}, {1, 1, 1, 1}, {0, 1, 0, 0}, {0, 0, 0, 0}, {}, {}, {}},
     StridedSliceParams{{0, 0, 2, 10}, {1, 8, 32, 18}, {1, 2, 1, 2}, {0, 0, 1, 0}, {0, 0, 0, 1}, {}, {}, {}},
     StridedSliceParams{{0, 0, 10}, {0, 32, 18}, {1, 1, 1}, {1, 1, 0}, {1, 1, 0}, {}, {}, {1, 0, 0}},
-    StridedSliceParams{{0, 4, 10}, {1, 8, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}}};
+    StridedSliceParams{{0, 4, 10}, {1, 8, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}},
+    StridedSliceParams{{0, 4}, {0, 5}, {1, 1}, {0}, {0}, {0}, {0}, {1}}};
 
 const std::vector inputShapesStatic4D = {{1, 5, 32, 32}, {2, 5, 32, 48}};
 
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp
index ba742ee86c819c..72576aa3ab0332 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp
+++
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp @@ -76,7 +76,7 @@ class TensorIteratorCPUTest : public testing::WithParamInterfaceset_sliced_input(body_params[1], params[1], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(add, -1, -1, 1, 0, sequence_axis); } else { - NGRAPH_CHECK(false, "Bidirectional case is not supported."); + OPENVINO_ASSERT(false, "Bidirectional case is not supported."); } function = std::make_shared(ov::OutputVector{tensor_iterator->output(0)}, params); @@ -136,4 +136,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_TensorIteratorSimple, ::testing::ValuesIn(inputPrecisions)), TensorIteratorCPUTest::getTestCaseName); -} // namespace \ No newline at end of file +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp index 1c8ad07f8fd549..f108d3c76e3d6b 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_multiple_query_sdp.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include "openvino/opsets/opset13.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using InputShapeAndTransposeOrder = std::pair, std::vector>; using ConcatMultiQuerySDPParams = std::tuple(inputParams[0], preOrder); auto concat_axis = transposeOrder[2]; auto beam_idx = std::make_shared(ElementType::i32, ov::PartialShape{-1}); beam_idx->set_friendly_name("beam_idx"); inputParams.push_back(beam_idx); - auto gatherK = std::make_shared(pastk, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {transposeOrder[0]})); - auto gatherV = std::make_shared(pastv, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {transposeOrder[0]})); + auto gatherK = std::make_shared( + pastk, + beam_idx, + ov::op::v0::Constant::create(ElementType::i32, {1}, {transposeOrder[0]})); + auto gatherV = std::make_shared( + pastv, + beam_idx, + ov::op::v0::Constant::create(ElementType::i32, {1}, {transposeOrder[0]})); auto concatK = std::make_shared(OutputVector{gatherK, inputParams[1]}, concat_axis); auto concatV = std::make_shared(OutputVector{gatherV, inputParams[2]}, concat_axis); - auto unsquezeAxis = op::v0::Constant::create(ov::element::i32, {}, {-2}); + auto unsquezeAxis = ov::op::v0::Constant::create(ov::element::i32, {}, {-2}); auto unsqueezeK = std::make_shared(concatK, unsquezeAxis); auto unsqueezeV = std::make_shared(concatV, unsquezeAxis); - auto targetShape = op::v0::Constant::create(qkvType, {1, 1, 1, 4, 1}, {1}); + auto targetShape = ov::op::v0::Constant::create(qkvType, {1, 1, 1, 4, 1}, {1}); auto broadcastK = std::make_shared(unsqueezeK, targetShape); auto broadcastV = std::make_shared(unsqueezeV, targetShape); - auto target4D = op::v0::Constant::create(ov::element::i32, {4}, {0, 0, 8, 64}); + auto target4D = 
ov::op::v0::Constant::create(ov::element::i32, {4}, {0, 0, 8, 64}); auto reshapeK = std::make_shared(broadcastK, target4D, true); auto reshapeV = std::make_shared(broadcastV, target4D, true); @@ -175,7 +176,7 @@ class ConcatMultiQuerySDPTest : public testing::WithParamInterface(transposeSDP, constReshape, true); // BLHS -> B,L,HxS - auto add = std::make_shared(reshapeSDP, op::v0::Constant::create(qkvType, {1}, {1.0f})); + auto add = std::make_shared(reshapeSDP, ov::op::v0::Constant::create(qkvType, {1}, {1.0f})); auto pastk_assign = std::make_shared(concatK, var_k); auto pastv_assign = std::make_shared(concatV, var_v); pastk_assign->set_friendly_name("pastk_w"); @@ -187,7 +188,7 @@ class ConcatMultiQuerySDPTest : public testing::WithParamInterface(results, sinks, inputParams, "ConcatTranposeSDP"); + function = std::make_shared(results, sinks, inputParams, "ConcatTranposeSDP"); targetDevice = ov::test::utils::DEVICE_CPU; functionRefs = function->clone(); @@ -330,4 +331,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConcatMultiQuerySDPTest, ::testing::Values(true, false)), ConcatMultiQuerySDPTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp index 2eddaa63050855..61ddf873aec6f1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_transpose_sdp_transpose.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include "openvino/opsets/opset13.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using InputShapeAndTransposeOrder = std::pair, std::vector>; using ConcatSDPTransposeTestParams = std::tuple(inputParams[3], var_v); pastv->set_friendly_name("pastv_r"); - std::shared_ptr pastk_shapeof, pastv_shapeof; + std::shared_ptr pastk_shapeof, pastv_shapeof; if (hasShapeOf) { pastk_shapeof = std::make_shared(pastk); pastv_shapeof = std::make_shared(pastv); @@ -132,10 +127,10 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(ElementType::i32, ov::PartialShape{-1}); beam_idx->set_friendly_name("beam_idx"); inputParams.push_back(beam_idx); - auto gatherK = std::make_shared(pastk, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {0})); - auto gatherV = std::make_shared(pastv, beam_idx, op::v0::Constant::create(ElementType::i32, {1}, {0})); - auto concatK = std::make_shared(OutputVector{gatherK, inputParams[1]}, concat_axis); - auto concatV = std::make_shared(OutputVector{gatherV, inputParams[2]}, concat_axis); + auto gatherK = std::make_shared(pastk, beam_idx, ov::op::v0::Constant::create(ElementType::i32, {1}, {0})); + auto gatherV = std::make_shared(pastv, beam_idx, ov::op::v0::Constant::create(ElementType::i32, {1}, {0})); + auto 
concatK = std::make_shared(ov::OutputVector{gatherK, inputParams[1]}, concat_axis); + auto concatV = std::make_shared(ov::OutputVector{gatherV, inputParams[2]}, concat_axis); auto transposeK = std::make_shared(concatK, preOrder); auto transposeV = std::make_shared(concatV, preOrder); @@ -159,7 +154,7 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(transposeSDP, constReshape, true); // BLHS -> B,L,HxS - auto add = std::make_shared(reshapeSDP, op::v0::Constant::create(inType, {1}, {1.0f})); + auto add = std::make_shared(reshapeSDP, ov::op::v0::Constant::create(inType, {1}, {1.0f})); auto pastk_assign = std::make_shared(concatK, var_k); auto pastv_assign = std::make_shared(concatV, var_v); pastk_assign->set_friendly_name("pastk_w"); @@ -170,12 +165,12 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface(results, sinks, inputParams, "ConcatTranposeSDP"); + ov::SinkVector sinks{pastk_assign, pastv_assign}; + function = std::make_shared(results, sinks, inputParams, "ConcatTranposeSDP"); targetDevice = ov::test::utils::DEVICE_CPU; functionRefs = function->clone(); - pass::Manager manager; + ov::pass::Manager manager; // decompose ScaledDotProductAttention manager.register_pass(); manager.run_passes(functionRefs); @@ -197,8 +192,8 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterface& targetInputStaticShapes) { inputs.clear(); - auto create_input = [this] (std::shared_ptr param, ov::Shape shape, float val) { - if (param->get_element_type() == element::i32) { + auto create_input = [this] (std::shared_ptr param, ov::Shape shape, float val) { + if (param->get_element_type() == ov::element::i32) { ov::Tensor t{ov::element::i32, shape}; auto size = shape[0]; auto* p = static_cast(t.data()); @@ -207,12 +202,12 @@ class ConcatSDPTransposeTestBase : public testing::WithParamInterfaceget_element_type() == element::f32) { + } else if (param->get_element_type() == ov::element::f32) { ov::Tensor t{ov::element::f32, shape}; strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); inputs.insert({param, t}); } else { - ASSERT_TRUE(param->get_element_type() == element::bf16); + ASSERT_TRUE(param->get_element_type() == ov::element::bf16); ov::Tensor t{ov::element::bf16, shape}; strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); inputs.insert({param, t}); @@ -336,12 +331,12 @@ class ConcatSDPTransposeTestSetState : public ConcatSDPTransposeTestBase { void new_state(ov::element::Type& type, const ov::Shape& pastKVInitShape) { auto fill = [] (ov::Tensor& t, float val) { auto shape = t.get_shape(); - if (t.get_element_type() == element::f32) { + if (t.get_element_type() == ov::element::f32) { strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); - } else if (t.get_element_type() == element::f16) { + } else if (t.get_element_type() == ov::element::f16) { strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); } else { - ASSERT_TRUE(t.get_element_type() == element::bf16); + ASSERT_TRUE(t.get_element_type() == ov::element::bf16); strided_iota(static_cast(t.data()), t.get_size(), val, 0.1f); } }; @@ -437,4 +432,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConcatSDPTransposeTestSetState, ConcatSDPTransposeTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp index 2c68b14cb41508..8af12820d7b19d 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp @@ -6,36 +6,14 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" +#include "openvino/runtime/aligned_buffer.hpp" namespace ov { namespace test { -template -class AlignedBufferWrapper { -public: - AlignedBufferWrapper(size_t size, size_t alignment) { - _buffer.reset(new ngraph::runtime::AlignedBuffer(size * sizeof(T), alignment)); - } - AlignedBufferWrapper(const AlignedBufferWrapper&) = delete; - AlignedBufferWrapper& operator=(const AlignedBufferWrapper&) = delete; - AlignedBufferWrapper(AlignedBufferWrapper&&) = default; - AlignedBufferWrapper& operator=(AlignedBufferWrapper&&) = default; - - T* get_ptr() { - return _buffer->get_ptr(); - } - - size_t size() const { - return _buffer->size() / sizeof(T); - } -private: - std::unique_ptr _buffer = nullptr; -}; - class DenormalNullifyCheck : public SubgraphBaseTest { protected: -std::unique_ptr> pConstStorage; +std::unique_ptr pConstStorage; void validate() override { const auto& actualOutputs = get_plugin_outputs(); @@ -63,9 +41,9 @@ void SetUp() override { const auto elemsCount = shape_size(inpShape); const auto rtPrc = ov::element::f32; ov::ParameterVector params {std::make_shared(rtPrc, ov::Shape(inpShape))}; - pConstStorage.reset(new AlignedBufferWrapper(elemsCount, alignment)); + pConstStorage.reset(new ov::AlignedBuffer(elemsCount, alignment)); - auto constTensor = std::make_shared(rtPrc, inpShape, pConstStorage->get_ptr()); + auto constTensor = ov::Tensor(rtPrc, inpShape, pConstStorage->get_ptr()); auto constNode = std::make_shared(constTensor); ov::NodeVector input = {params[0], constNode}; auto concat = std::make_shared(input, 1); @@ -78,7 +56,7 @@ void SetUp() override { TEST_F(DenormalNullifyCheck, smoke_CPU_Denormal_Check) { using indexInterval = std::pair; - size_t elemsCount = pConstStorage->size(); + size_t elemsCount = pConstStorage->size() / sizeof(float); const indexInterval intervals[] = { {0, elemsCount/2}, {elemsCount/2, elemsCount}, @@ -99,9 +77,9 @@ TEST_F(DenormalNullifyCheck, smoke_CPU_Denormal_Check) { auto denormal = random.Generate(denormalsRange) + 1; float tmp; memcpy(&tmp, &denormal, sizeof(float)); - pConstStorage->get_ptr()[i] = tmp; + pConstStorage->get_ptr()[i] = tmp; } else { - pConstStorage->get_ptr()[i] = randomRange[i]; + pConstStorage->get_ptr()[i] = randomRange[i]; } } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp index 61b362a2dee39e..4c5119c25a5cc7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp @@ -4,7 +4,6 @@ #include "common_test_utils/ov_tensor_utils.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp index 520a9613651288..22e89df313694d 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp @@ -147,7 +147,7 @@ class MatMulDecompressConvertTest : public testing::WithParamInterfaceget_ops()) { - if (getExecValue(fcNode->get_rt_info(), ExecGraphInfoSerialization::LAYER_TYPE) == "FullyConnected") { + if (getExecValue(fcNode->get_rt_info(), ov::exec_model_info::LAYER_TYPE) == "FullyConnected") { const auto& constNode = fcNode->get_input_node_shared_ptr(1); ov::element::Type expectedType( getExecValue(constNode->get_rt_info(), ov::exec_model_info::OUTPUT_PRECISIONS)); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp index c99e21c34cc9d9..590502e858e112 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp @@ -118,7 +118,7 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterfacesecond.as(); }; if (node->get_friendly_name() == nodeName) { - auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE); + auto primType = getExecValue(ov::exec_model_info::IMPL_TYPE); ASSERT_TRUE(primTypeCheck(primType)) << "primType is unexpected: " << primType << " Expected: " << selectedType; ASSERT_EQ(node->get_output_element_type(0), outType); ASSERT_EQ(node->get_input_element_type(0), inType); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp deleted file mode 100644 index 8b086d0833ecf5..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "common_test_utils/test_constants.hpp" -#include "shared_test_classes/subgraph/parameter_result.hpp" - -using namespace SubgraphTestsDefinitions; - -namespace ov { -namespace test { - -class ParameterResultCustomBlobTest : public ParameterResultSubgraphTestLegacyApi { -protected: - void Infer() override { - constexpr size_t inferIterations = 10lu; - - inferRequest = executableNetwork.CreateInferRequest(); - - auto inputBlob = inputs.front(); - const size_t elementsCount = inputBlob->size(); - for (size_t i = 0; i < inferIterations; ++i) { - ov::test::utils::fill_data_random(inputBlob, 10, 0, 1, i); - auto inputsInfo = cnnNetwork.getInputsInfo().begin()->second; - std::string inputName = cnnNetwork.getInputsInfo().begin()->first; - - std::vector customInpData(elementsCount); - auto inpBlobData = inputBlob->buffer().as(); - std::copy(inpBlobData, inpBlobData + elementsCount, customInpData.begin()); - - auto& tensorDesc = inputsInfo->getTensorDesc(); - auto customBlob = InferenceEngine::make_shared_blob(tensorDesc, customInpData.data(), elementsCount); - inferRequest.SetBlob(inputName, customBlob); - - inferRequest.Infer(); - - ParameterResultSubgraphTestLegacyApi::Validate(); - } - } - void Validate() override { - // Do nothing. We call Validate() in the Infer() method - } -}; - -TEST_P(ParameterResultCustomBlobTest, CompareWithRefs) { - // Just to show that it is not possible to set different precisions for inputs and outputs with the same name. 
- // If it was possible, the input would have I8 precision and couldn't store data from the custom blob. - inPrc = InferenceEngine::Precision::I8; - outPrc = InferenceEngine::Precision::FP32; - - Run(); -} -namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check_Custom_Blob, - ParameterResultCustomBlobTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace - -class ParameterResultSameBlobTest : public ParameterResultSubgraphTestLegacyApi { -protected: - void Infer() override { - constexpr size_t inferIterations = 10lu; - - for (size_t i = 0; i < inferIterations; ++i) { - ParameterResultSubgraphTestLegacyApi::Infer(); - ParameterResultSubgraphTestLegacyApi::Validate(); - } - } - void Validate() override { - // Do nothing. We call Validate() in the Infer() method - } -}; - -TEST_P(ParameterResultSameBlobTest, CompareWithRefs) { - Run(); -} -namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check_Same_Blob, - ParameterResultSameBlobTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace -} // namespace test -} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp index 1fe39571e2843e..7a6ed5be8aa8b5 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp @@ -9,8 +9,11 @@ namespace ov { namespace test { -// Subgraph: +// These tests are designed for correctness of reshape's in-place implementation. /* + * Case 1: + * Subgraph + * * params[0] params[1] * | | * constant shapeOf / @@ -22,7 +25,7 @@ namespace test { * | * result * - * This test is designed for correctness of reshape's in-place implementation. + * * Due to non-const target shape parameter (params[1]), reshape node * is non-constant node even though the input tensor is constant node. @@ -81,5 +84,59 @@ class InPlaceReshapeFromConstantCheck : public SubgraphBaseTest { TEST_F(InPlaceReshapeFromConstantCheck, smoke_CPU_InPlaceReshapeFromConstantCheck) { run(); } + +/* Case 2: + * Subgraph + * + * params[0] params[1] + * \ / + * \ / + * add---reshape2---result2 + * | + * reshape1 + * | + * MVN + * | + * result1 + * + * The same memory is shared between the `result2` input and `MVN` output. The CPU graph inplace memory conflict + * resolution logic must prevent `result2` data being rewritten by the MVN node. 
+ */ + +class InPlaceReshapeShareInputCheck : public SubgraphBaseTest { +protected: + void SetUp() override { + const auto rtPrc = ov::element::f32; + const ov::Shape inpShape = {1, 16, 16}; + targetStaticShapes = {{inpShape, inpShape}}; + targetDevice = ov::test::utils::DEVICE_CPU; + ov::ParameterVector params{std::make_shared(rtPrc, inpShape), + std::make_shared(rtPrc, inpShape)}; + + auto add = std::make_shared(params[0], params[1]); + std::vector newShape1 = {1, 1, 16, 16}; + auto targetShape1 = std::make_shared(ov::element::i64, ov::Shape{4}, newShape1); + auto reshape1 = std::make_shared(add, targetShape1, false); + auto mvn = std::make_shared(reshape1, + ov::op::v0::Constant::create(ov::element::i32, ov::Shape{2}, {2, 3}), + true, + 0.1, + ov::op::MVNEpsMode::INSIDE_SQRT); + auto res1 = std::make_shared(mvn); + + std::vector newShape2 = {1, 4, 8, 8}; + auto targetShape2 = std::make_shared(ov::element::i64, ov::Shape{4}, newShape2); + auto reshape2 = std::make_shared(add, targetShape2, false); + + auto res2 = std::make_shared(reshape2); + + function = std::make_shared(ov::ResultVector{res1, res2}, params, "reshape_share_input_check"); + } +}; + +TEST_F(InPlaceReshapeShareInputCheck, smoke_CPU_InPlaceReshapeShareInputCheck) { + run(); +} + } // namespace test } // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp index ae0f14f88dc54c..ffdeb1ee5caf98 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include #include #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp new file mode 100644 index 00000000000000..71dd0a11e99e93 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shape_infer_subgraph.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +class ShapeInferSubgraphTest : virtual public SubgraphBaseTest { +public: + void run() override { + ov::element::Type netPrecision = inType = outType = ov::element::f32; + targetDevice = ov::test::utils::DEVICE_CPU; + + ov::ParameterVector params{ + std::make_shared(netPrecision, ov::PartialShape({-1, -1, -1}))}; + + auto const_op = [](const std::vector& values) { + return op::v0::Constant::create(ElementType::i64, {values.size()}, values); + }; + + auto shapeOf = std::make_shared(params[0]); + auto gather1 = std::make_shared(shapeOf, const_op({0}), const_op({0})); + auto gather2 = std::make_shared(shapeOf, const_op({1, 2}), const_op({0})); + auto concat = + std::make_shared(ov::NodeVector{gather1, const_op({32}), gather2, const_op({128})}, 0); + + auto gather3 = std::make_shared(shapeOf, const_op({1}), const_op({0})); + auto add = std::make_shared(gather1, gather3); + auto scatter_update = + std::make_shared(const_op({0, 0}), const_op({1}), add, const_op({0})); + + ov::ResultVector results{std::make_shared(concat), + std::make_shared(scatter_update)}; + function = std::make_shared(results, params, 
"shape_infer"); + + std::vector input_shapes = {{4, 2, 3}}; + init_input_shapes(ov::test::static_shapes_to_test_representation(input_shapes)); + ov::test::SubgraphBaseTest::run(); + } +}; + +namespace { +TEST_F(ShapeInferSubgraphTest, smoke_ShapeInferSubgraphTest_CPU) { + run(); +} +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp index 0fb8364ba3e4fd..6b35b81fd69456 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp @@ -10,9 +10,6 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" -// To be removed -#include "exec_graph_info.hpp" - namespace CPUTestUtils { typedef enum { undef, diff --git a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp index a70d37a80370f9..510b6745bb09eb 100644 --- a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp @@ -5,10 +5,35 @@ #include #include #include "mlas/sgemm.hpp" +#include "onednn/dnnl.h" +#include "cpu_memory.h" +#include "openvino/core/parallel.hpp" +#include "openvino/runtime/aligned_buffer.hpp" // This test is used to test whether mlas gemm lib compiles successfully -TEST(GemmTests, getPackedSize) { +TEST(MLASGemmTests, getPackedSize) { int N = 51864; int K = 384; ASSERT_NO_THROW(ov::intel_cpu::mlas_sgemm_pack_get_size(N, K)); +} +// Test mlas thread partition with even/odd threads +TEST(MLASGemmTests, simpleGemm) { + const auto L2cacheSize = dnnl::utils::get_cache_size(2, true); + size_t M = 128; + size_t K = 512; + size_t N = L2cacheSize / sizeof(float) / (M); + std::vector aData(M * K, (1.0f/33)); + size_t bSize = ov::intel_cpu::mlas_sgemm_pack_get_size(N, K); + size_t nthr = parallel_get_max_threads(); + auto alignedB = ov::AlignedBuffer(bSize, 64); + float* bData = reinterpret_cast(alignedB.get_ptr()); + std::vector cData(M * N, 0.0f); + + ASSERT_NO_THROW( + ov::intel_cpu:: + mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr)); + + ASSERT_NO_THROW( + ov::intel_cpu:: + mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr - 1)); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp index 11119a20b764fe..67b88c92c8a675 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp @@ -31,8 +31,8 @@ class EltwiseWithMulAddFunction : public SnippetsFunctionBase { : SnippetsFunctionBase(inputShapes), add_input_idx(add_input_idx), scalar_input(scalar_input) { - NGRAPH_CHECK(input_shapes.size() == 3, "Got invalid number of input shapes"); - NGRAPH_CHECK(add_input_idx < 2, "Got invalid input idx for add operation"); + OPENVINO_ASSERT(input_shapes.size() == 3, "Got invalid number of input shapes"); + OPENVINO_ASSERT(add_input_idx < 2, "Got invalid input idx for add operation"); } protected: @@ -81,13 +81,13 @@ class EltwiseWithMulAddFunction : public SnippetsFunctionBase { } void validate_function(const std::shared_ptr &m) const override { - 
NGRAPH_CHECK(m != nullptr, "The test requires Model to be defined"); + OPENVINO_ASSERT(m != nullptr, "The test requires Model to be defined"); const auto ¶ms = m->get_parameters(); - NGRAPH_CHECK(params.size() == (scalar_input ? input_shapes.size() - 1 : input_shapes.size()), - "Passed input shapes and produced function are inconsistent."); - for (size_t i = 0; i < params.size(); i++) - NGRAPH_CHECK(std::equal(input_shapes[i].begin(), input_shapes[i].end(), params[i]->get_shape().begin()), + OPENVINO_ASSERT(params.size() == (scalar_input ? input_shapes.size() - 1 : input_shapes.size()), "Passed input shapes and produced function are inconsistent."); + for (size_t i = 0; i < params.size(); i++) + OPENVINO_ASSERT(std::equal(input_shapes[i].begin(), input_shapes[i].end(), params[i]->get_shape().begin()), + "Passed input shapes and produced function are inconsistent."); } private: diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp index f9326d15432ada..a4cb47458116f7 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/enable_ht_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp index a13271c249fbc7..fcddb92bb91cb2 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/scheduling_core_type_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp index 246f2f54387c06..8a773d5712bd29 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp index 3b68ef44d693fc..486a284370daf4 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp @@ -3,12 +3,11 @@ // #include -#include - -#include +#include "common_test_utils/test_common.hpp" #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" +#include "openvino/runtime/system_conf.hpp" using namespace testing; using namespace ov; diff --git a/src/plugins/intel_cpu/thirdparty/mlas b/src/plugins/intel_cpu/thirdparty/mlas index 7a35e48a723944..d1bc25ec4660cd 160000 --- a/src/plugins/intel_cpu/thirdparty/mlas +++ 
b/src/plugins/intel_cpu/thirdparty/mlas @@ -1 +1 @@ -Subproject commit 7a35e48a723944972088627be1a8b60841e8f6a5 +Subproject commit d1bc25ec4660cddd87804fcf03b2411b5dfb2e94 diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp index 75398c280aebc2..f69cf71482bb78 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp @@ -306,7 +306,7 @@ struct program { // if subgraph can be optimized if it consists of only inputs and corresponding outputs bool _can_be_optimized; std::unique_ptr _impls_cache; - const size_t _impls_cache_capacity = 10000; + const size_t _impls_cache_capacity = 300; std::shared_ptr _compilation_context; bool _loaded_from_cache = false; diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp index 94f56e8b926d39..e3ec998df78890 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp @@ -8,8 +8,10 @@ #include #include "intel_gpu/runtime/layout.hpp" #include "intel_gpu/runtime/memory.hpp" +#include "intel_gpu/runtime/optionals.hpp" #include "intel_gpu/runtime/shape_predictor.hpp" #include "openvino/core/layout.hpp" +#include "openvino/core/node.hpp" #include "openvino/core/type/element_type.hpp" namespace ov { @@ -71,6 +73,19 @@ inline ov::element::Type convert_to_supported_device_type(ov::element::Type et) } } +using PrecisionMap = std::map; + +std::vector get_output_data_types(const ov::Node* op, PrecisionMap precision_map = {}); +std::vector get_output_paddings(const ov::Node* op); + +inline std::vector get_output_data_types(const std::shared_ptr& op, PrecisionMap precision_map = {}) { + return get_output_data_types(op.get(), precision_map); +} + +inline std::vector get_output_paddings(const std::shared_ptr& op) { + return get_output_paddings(op.get()); +} + inline ov::Shape get_tensor_shape(const ov::PartialShape& pshape) { ov::Shape res(pshape.size()); for (size_t i = 0; i < pshape.size(); i++) { diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp index 226c0cc44fd64d..b87b1b18e607df 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/compiled_model.hpp @@ -31,7 +31,8 @@ class CompiledModel : public ov::ICompiledModel { CompiledModel(cldnn::BinaryInputBuffer& ib, const std::shared_ptr& plugin, RemoteContextImpl::Ptr context, - const ExecutionConfig& config); + const ExecutionConfig& config, + const bool loaded_from_cache); std::shared_ptr create_infer_request() const override; std::shared_ptr create_sync_infer_request() const override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp index 57e460296b2296..0bacf2ec9a00a8 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp @@ -40,6 +40,10 @@ class VariableState : public ov::IVariableState { bool is_set() const; void set(); void set_layout(const cldnn::layout& new_layout); + void set_memory(const cldnn::memory::ptr& new_mem, const cldnn::layout& actual_layout); + size_t get_actual_mem_size() const { + return actual_size; + } private: 
cldnn::layout m_layout; diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp index ae979d17fb3d37..6451e0daf0b4d4 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm.hpp @@ -25,293 +25,6 @@ enum class lstm_weights_order { fizo }; -/// @brief LSTM Output selection -/// @details The current implementation allows the use to select the output -/// of an LSTM node by specifing any of the following options -enum class lstm_output_selection { - /// output the entire hidden sequence - sequence = 0, - /// output just the last hidden value - hidden, - /// output the last hidden and last cell values - hidden_cell, - /// output the hidden sequence concatenated with the last cell - sequence_cell -}; - -/// @brief Performs forward Long Short-Term Memory (LSTM) layer. -/// @details The current implementation of LSTM is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm) - - lstm() : primitive_base("", {}) {} - - /// @brief Constructs lstm layer. - /// @param id This primitive id. - /// @param input Vector of primitive id. - /// @param weights Primitive id containing weights data. - /// @param bias Primitive id containing bias data. Provide empty string if using lstm without bias. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm without initial_cell values. - /// @param peepholes Primitive id containing peepholes data. Provide empty string if using lstm without peepholes. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. - /// @param activations Vector of activations. Specify [f, g, h]. Default are [sigmoid, tanh, tanh] - /// @param activation_params Vector of ativation params. Specify params for each [f, g, h] activation. - /// @brief Output selection. Default the entire hidden sequence is returned. - /// @param offset_order Order of the concatenated weights, recurrent, and bias. ONNX default is iofz [input, output, forget, block]. 
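For orientation, the cldnn::lstm primitive deleted here maps onto the core-opset LSTM ops that the plugins consume today. A minimal sketch of one cell step matching the equations above; the helper name, shapes, and element type are illustrative assumptions, not part of this patch:

#include "openvino/op/lstm_cell.hpp"
#include "openvino/op/parameter.hpp"

// One LSTM step: it/ft/ct/ot with f = sigmoid, g = h = tanh (the opset defaults).
std::shared_ptr<ov::op::v4::LSTMCell> lstm_cell_sketch(size_t batch, size_t input_sz, size_t hidden_sz) {
    auto par = [](const ov::Shape& s) {
        return std::make_shared<ov::op::v0::Parameter>(ov::element::f32, s);
    };
    auto X = par({batch, input_sz});          // Xt
    auto H = par({batch, hidden_sz});         // Ht-1
    auto C = par({batch, hidden_sz});         // Ct-1
    auto W = par({4 * hidden_sz, input_sz});  // stacked gate weights
    auto R = par({4 * hidden_sz, hidden_sz}); // stacked recurrent weights
    auto B = par({4 * hidden_sz});            // stacked biases
    return std::make_shared<ov::op::v4::LSTMCell>(X, H, C, W, R, B, hidden_sz);
}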
- lstm(const primitive_id& id, - const std::vector& input, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& bias = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const primitive_id& peepholes = "", - const float clip = 0, - const bool input_forget = 0, - const std::vector& activations = {}, - const std::vector activation_params = {}, - const lstm_output_selection output_selection = lstm_output_selection::sequence, - const lstm_weights_order offset_order = lstm_weights_order::iofz, - const padding& output_padding = padding()) - : primitive_base(id, input, {output_padding}), - weights(weights), - recurrent(recurrent), - bias(bias), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - peepholes(peepholes), - clip(clip), - input_forget(input_forget), - activations(activations), - activation_params(activation_params), - output_selection(output_selection), - offset_order(offset_order) {} - - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Primitive id containing the initial value of the cell state data. - primitive_id initial_cell; - /// @brief Primitive id containing peepholes data. - primitive_id peepholes; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. - bool input_forget = 0; - /// @brief A list of 3 activation functions for the input, output, forget, cell, and hidden. - std::vector activations; - /// @brief Optional scaling values used by some activation functions. The values are consumed in the order of activation functions. - std::vector activation_params; - /// @brief Output selection. Default the entire hidden sequence is returned. - lstm_output_selection output_selection = lstm_output_selection::sequence; - /// @brief Weights, recurrent weights, and biases order. [iofz] : ONNX, [ifoz] : Caffe - lstm_weights_order offset_order = lstm_weights_order::izof; - - // NOT SUPPORTED YET - // /// @brief Optional tensor specifying lengths of the sequences in a batch. - // /// If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`. - // tensor sequence_lens; - // /// @brief The sequence output for the hidden. 
- // uint32_t output_sequence; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, peepholes.empty()); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_range(seed, activations.begin(), activations.end()); - for (auto& act_param : activation_params) { - seed = hash_combine(seed, act_param.a); - seed = hash_combine(seed, act_param.b); - } - seed = hash_combine(seed, output_selection); - seed = hash_combine(seed, offset_order); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - bool act_params_eq = activation_params.size() == rhs_casted.activation_params.size(); - for (size_t i = 0; i < activation_params.size(); ++i) { - act_params_eq &= activation_params[i].a == rhs_casted.activation_params[i].a && - activation_params[i].b == rhs_casted.activation_params[i].b; - } - - #define cmp_fields(name) name == rhs_casted.name - return act_params_eq && - cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(activations) && - cmp_fields(output_selection) && - cmp_fields(offset_order) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()) && - cmp_fields(peepholes.empty()) && - cmp_fields(bias.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << weights; - ob << recurrent; - ob << bias; - ob << initial_hidden; - ob << initial_cell; - ob << peepholes; - ob << clip; - ob << input_forget; - ob << activations; - ob << activation_params; - ob << make_data(&output_selection, sizeof(lstm_output_selection)); - ob << make_data(&offset_order, sizeof(lstm_weights_order)); - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> weights; - ib >> recurrent; - ib >> bias; - ib >> initial_hidden; - ib >> initial_cell; - ib >> peepholes; - ib >> clip; - ib >> input_forget; - ib >> activations; - ib >> activation_params; - ib >> make_data(&output_selection, sizeof(lstm_output_selection)); - ib >> make_data(&offset_order, sizeof(lstm_weights_order)); - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(weights); - ret.push_back(recurrent); - if (!bias.empty()) { - ret.push_back(bias); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; - -struct lstm_gemm : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_gemm) - - lstm_gemm() : primitive_base("", {}), - direction(0) {} - - /// @brief Constructs lstm layer. - /// @param id This primitive id. - /// @param input input primitive id. - /// @param input weights Primitive id containing weights data. - /// @param input recurrent Primitive id containing recurrent data. It is required even for no hidden values. - /// @param input bias Primitive id containing bias data. Provide empty string if using lstm without bias. - /// @param input hidden Primitive id containing hidden data. Provide empty string if using lstm without hidden values. - /// @param direction default = 0, bidirectional = 1. 
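The removed lstm_gemm covered only the gate pre-activations, i.e. Xt*(W^T) + Ht-1*(R^T) + B from the equations above. A hedged core-opset sketch of the same math (the function name is illustrative, not part of this patch):

#include "openvino/op/add.hpp"
#include "openvino/op/matmul.hpp"

// X: [batch, input_size], Ht1: [batch, hidden_size],
// W: [4*hidden_size, input_size], R: [4*hidden_size, hidden_size], B: [4*hidden_size].
ov::Output<ov::Node> gate_preactivations(const ov::Output<ov::Node>& X,
                                         const ov::Output<ov::Node>& Ht1,
                                         const ov::Output<ov::Node>& W,
                                         const ov::Output<ov::Node>& R,
                                         const ov::Output<ov::Node>& B) {
    auto xw = std::make_shared<ov::op::v0::MatMul>(X, W, false, true);   // Xt * W^T
    auto hr = std::make_shared<ov::op::v0::MatMul>(Ht1, R, false, true); // Ht-1 * R^T
    auto sum = std::make_shared<ov::op::v1::Add>(xw, hr);
    return std::make_shared<ov::op::v1::Add>(sum, B);                    // bias broadcasts over batch
}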
- lstm_gemm(const primitive_id& id, - const input_info& input, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& bias = "", - const primitive_id& hidden = "", - const uint32_t direction = 0, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - weights(weights), - recurrent(recurrent), - bias(bias), - hidden(hidden), - direction(direction) {} - - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id hidden; - /// @brief direction default = 0, bidirectional = 1. - uint32_t direction; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, direction); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, hidden.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return direction == rhs_casted.direction && - bias.empty() == rhs_casted.bias.empty() && - hidden.empty() == rhs_casted.hidden.empty(); - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << weights; - ob << recurrent; - ob << bias; - ob << hidden; - ob << direction; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> weights; - ib >> recurrent; - ib >> bias; - ib >> hidden; - ib >> direction; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(weights); - ret.push_back(recurrent); - if (!bias.empty()) - ret.push_back(bias); - if (!hidden.empty()) - ret.push_back(hidden); - return ret; - } -}; - struct lstm_elt : public primitive_base { CLDNN_DECLARE_PRIMITIVE(lstm_elt) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp deleted file mode 100644 index d459754ebca509..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic.hpp +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward Long Short-Term Memory (LSTM_DYNAMIC) layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic) - - lstm_dynamic() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. - /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of layer containg dynamic length values (shape: 1D). - /// @param weights Primitive id containing weights data. - /// @param recurrent Primitive id containing recurrent data. - /// @param last_hidden_output Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. 
- /// @param last_cell_output Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - /// @param bias Primitive id containing bias data. Provide empty string if using lstm_dynamic without bias. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm_dynamic without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm_dynamic without initial_cell values. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. - lstm_dynamic(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& weights, - const primitive_id& recurrent, - const primitive_id& last_hidden_state = "", - const primitive_id& last_cell_state = "", - const primitive_id& bias = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const float clip = 0.0f, - const bool input_forget = false, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - dyn_length(dyn_length), - weights(weights), - recurrent(recurrent), - last_hidden_state(last_hidden_state), - last_cell_state(last_cell_state), - bias(bias), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - clip(clip), - input_forget(input_forget) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. - primitive_id last_hidden_state; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - primitive_id last_cell_state; - /// @brief Primitive id containing bias data. - primitive_id bias; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Primitive id containing the initial value of the cell state data. - primitive_id initial_cell; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. 
- bool input_forget = false; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_combine(seed, last_hidden_state.empty()); - seed = hash_combine(seed, last_cell_state.empty()); - seed = hash_combine(seed, bias.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - #define cmp_fields(name) name == rhs_casted.name - return cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(last_hidden_state.empty()) && - cmp_fields(last_cell_state.empty()) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()) && - cmp_fields(bias.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << weights; - ob << recurrent; - ob << last_hidden_state; - ob << last_cell_state; - ob << bias; - ob << initial_hidden; - ob << initial_cell; - ob << clip; - ob << input_forget; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> weights; - ib >> recurrent; - ib >> last_hidden_state; - ib >> last_cell_state; - ib >> bias; - ib >> initial_hidden; - ib >> initial_cell; - ib >> clip; - ib >> input_forget; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(weights); - ret.push_back(recurrent); - - if (!last_hidden_state.empty()) { - ret.push_back(last_hidden_state); - } - if (!last_cell_state.empty()) { - ret.push_back(last_cell_state); - } - if (!bias.empty()) { - ret.push_back(bias); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp deleted file mode 100644 index 6a8e90d9a494ba..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_input.hpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward calcaulations of input gates for dynamic lstm layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic_input : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic_input) - - lstm_dynamic_input() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. - /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of ilayer containg dynamic length values (shape: 1D). - /// @param weights Primitive id containing weights data. - /// @param recurrent Primitive id containing recurrent data. 
- /// @param bias Primitive id containing bias data. Provide empty string if using lstm_dynamic without bias. - lstm_dynamic_input(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& weights, - const primitive_id& bias = "", - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), dyn_length(dyn_length), weights(weights), bias(bias) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing weights data. - primitive_id weights; - /// @brief Primitive id containing bias data. - primitive_id bias; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, bias.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return bias.empty() == rhs_casted.bias.empty(); - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << weights; - ob << bias; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> weights; - ib >> bias; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(weights); - - if (!bias.empty()) { - ret.push_back(bias); - } - return ret; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp deleted file mode 100644 index ef184707f94db7..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/lstm_dynamic_timeloop.hpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/primitive.hpp" -#include - -namespace cldnn { - -/// @brief Performs forward calcaulations of input gates for dynamic lstm layer. -/// @details The current implementation of LSTM_DYNAMIC is described the following equations. -/// it = f(Xt*(Wi^T) + Ht-1*Ri + Wbi) -/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Wbf) -/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Wbo) -/// Ht = ot (.) h(Ct) -/// Where f = Sigmoid, g = Tanh, and h = Tanh. -struct lstm_dynamic_timeloop - : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(lstm_dynamic_timeloop) - - lstm_dynamic_timeloop() : primitive_base("", {}) {} - - /// @brief Constructs lstm_dynamic layer. - /// @param id This primitive id. - /// @param input Primitive id of input layer. - /// @param dyn_length Primitive id of ilayer containg dynamic length values (shape: 1D). - /// @param recurrent Primitive id containing recurrent data. - /// @param initial_hidden Primitive id containing initial_hidden data. Provide empty string if using lstm_dynamic without initial_hidden values. - /// @param initial_cell Primitive id containing initial_cell data. Provide empty string if using lstm_dynamic without initial_cell values. - /// @param clip Clip threshold. Provide 0 if using lstm without activations clip threshold. - /// @param input_forget Provide 0 if using lstm without coupled input-forget gates. 
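The per-timestep loop that lstm_dynamic_timeloop hard-coded is expressed today with ov::op::v0::TensorIterator, the same op exercised by the tensor_iterator.cpp test updated earlier in this patch. A rough sketch, assuming a body Model with parameters {x_t, h_in} and a single result {h_out} (all names illustrative):

#include "openvino/core/model.hpp"
#include "openvino/op/tensor_iterator.hpp"

std::shared_ptr<ov::op::v0::TensorIterator> timeloop_sketch(const std::shared_ptr<ov::Model>& body,
                                                            const ov::Output<ov::Node>& seq,    // [batch, seq_len, input]
                                                            const ov::Output<ov::Node>& h_init) {
    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_body(body);
    const int64_t seq_axis = 1;
    // Feed one time slice per iteration and carry the hidden state across iterations.
    ti->set_sliced_input(body->get_parameters()[0], seq, 0, 1, 1, -1, seq_axis);
    ti->set_merged_input(body->get_parameters()[1], h_init, body->get_results()[0]);
    // Stitch the per-step hidden outputs back into a sequence.
    ti->get_concatenated_slices(body->get_results()[0], 0, 1, 1, -1, seq_axis);
    return ti;
}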
- lstm_dynamic_timeloop(const primitive_id& id, - const input_info& input, - const primitive_id& dyn_length, - const primitive_id& recurrent, - const primitive_id& last_hidden_state = "", - const primitive_id& last_cell_state = "", - const primitive_id& initial_hidden = "", - const primitive_id& initial_cell = "", - const float clip = 0.0f, - const bool input_forget = 0, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - dyn_length(dyn_length), - recurrent(recurrent), - last_hidden_state(last_hidden_state), - last_cell_state(last_cell_state), - initial_hidden(initial_hidden), - initial_cell(initial_cell), - clip(clip), - input_forget(input_forget) {} - - /// @brief Primitive id containing the dynamic sequence lengths. - primitive_id dyn_length; - /// @brief Primitive id containing recurrent data. - primitive_id recurrent; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last hidden state. - primitive_id last_hidden_state; - /// @brief Primitive Id of mutable data primitive pointing to buffer, which will be filled with last cell state. - primitive_id last_cell_state; - /// @brief Primitive id containing the initial value of the hidden data. - primitive_id initial_hidden; - /// @brief Array of primitive ids containing the initial value of the hidden state data (Ht-1). - primitive_id initial_cell; - /// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified. - float clip = 0.0f; - /// @brief Couple the input and forget gates if input_forget is 1. Default is 0. - bool input_forget = 0; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, clip); - seed = hash_combine(seed, input_forget); - seed = hash_combine(seed, last_hidden_state.empty()); - seed = hash_combine(seed, last_cell_state.empty()); - seed = hash_combine(seed, initial_hidden.empty()); - seed = hash_combine(seed, initial_cell.empty()); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - #define cmp_fields(name) name == rhs_casted.name - return cmp_fields(clip) && - cmp_fields(input_forget) && - cmp_fields(last_hidden_state.empty()) && - cmp_fields(last_cell_state.empty()) && - cmp_fields(initial_hidden.empty()) && - cmp_fields(initial_cell.empty()); - #undef cmp_fields - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << dyn_length; - ob << recurrent; - ob << last_hidden_state; - ob << last_cell_state; - ob << initial_hidden; - ob << initial_cell; - ob << clip; - ob << input_forget; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> dyn_length; - ib >> recurrent; - ib >> last_hidden_state; - ib >> last_cell_state; - ib >> initial_hidden; - ib >> initial_cell; - ib >> clip; - ib >> input_forget; - } - -protected: - std::vector> get_dependencies() const override { - std::vector> ret; - ret.push_back(dyn_length); - ret.push_back(recurrent); - - if (!last_hidden_state.empty()) { - ret.push_back(last_hidden_state); - } - if (!last_cell_state.empty()) { - ret.push_back(last_cell_state); - } - if (!initial_hidden.empty()) { - ret.push_back(initial_hidden); - } - if (!initial_cell.empty()) { - ret.push_back(initial_cell); - } - return ret; - } -}; -} // namespace cldnn diff --git 
a/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp deleted file mode 100644 index 123faf83f29332..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/pyramid_roi_align.hpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "primitive.hpp" -#include -#include -#include - -namespace cldnn { - -/// @brief Performs RoI Align using image pyramid. -/// @details Applies RoI Align to layer from the image pyramid. -/// @par Level of the pyramid is selected by equation: -/// floor(START_LEVEL + log2(sqrt(w * h) / IMAGE_SIZE) -/// @par Where: -/// @li w, h - width and heigt of the region -/// @li START_LEVEL - scale of first level of the pyramid -/// @li IMAGE_SIZE - original image size -/// @par RoI Align algorithm performs max-pooling on region of interest -/// using billinear interpolation of surrounding values to avoid quantization. -struct pyramid_roi_align : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(pyramid_roi_align) - - pyramid_roi_align() : primitive_base("", {}) {} - - /// @param id This primitive id. - /// @param rois Input RoI boxes as tuple [x1, y1, x2, y2] describing two opposite corners of the region. - /// @param P2 First level of the image pyramid. - /// @param P3 Second level of the image pyramid. - /// @param P4 Third level of the image pyramid. - /// @param P5 Fourth level of the image pyramid. - /// @param output_size Output pooling size from the region pooling. - /// @param sampling_ratio Number of sampling points per output value. - /// @param pyramid_scales Scales of each level of pyramid in relation to original image. - /// @param pyramid_starting_level Starting level of the pyramid that should be used for region of whole image. 
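The level-selection rule quoted above is simple enough to state as a worked helper; a sketch under the same definitions (the function name is illustrative, not part of this patch):

#include <cmath>

// floor(START_LEVEL + log2(sqrt(w * h) / IMAGE_SIZE)); e.g. with starting_level = 2 and
// image_size = 224, a 112x112 region gives floor(2 + log2(0.5)) = 1.
int select_pyramid_level(float w, float h, int starting_level, float image_size) {
    return static_cast<int>(std::floor(starting_level + std::log2(std::sqrt(w * h) / image_size)));
}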
- pyramid_roi_align(const primitive_id& id, - const input_info& rois, - const input_info& P2, - const input_info& P3, - const input_info& P4, - const input_info& P5, - int output_size, - int sampling_ratio, - std::vector pyramid_scales, - int pyramid_starting_level, - const padding &output_padding = padding()) - : primitive_base(id, - { rois, P2, P3, P4, P5 }, - {output_padding}) - , output_size(output_size) - , sampling_ratio(sampling_ratio) - , pyramid_scales(std::move(pyramid_scales)) - , pyramid_starting_level(pyramid_starting_level) - {} - - int output_size = 0; - int sampling_ratio = 0; - std::vector pyramid_scales; - int pyramid_starting_level = 0; - - size_t hash() const override { - size_t seed = primitive::hash(); - seed = hash_combine(seed, sampling_ratio); - seed = hash_range(seed, pyramid_scales.begin(), pyramid_scales.end()); - seed = hash_combine(seed, pyramid_starting_level); - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return output_size == rhs_casted.output_size && - sampling_ratio == rhs_casted.sampling_ratio && - pyramid_scales == rhs_casted.pyramid_scales && - pyramid_starting_level == rhs_casted.pyramid_starting_level; - } - - void save(BinaryOutputBuffer& ob) const override { - primitive_base::save(ob); - ob << output_size; - ob << sampling_ratio; - ob << pyramid_scales; - ob << pyramid_starting_level; - } - - void load(BinaryInputBuffer& ib) override { - primitive_base::load(ib); - ib >> output_size; - ib >> sampling_ratio; - ib >> pyramid_scales; - ib >> pyramid_starting_level; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp deleted file mode 100644 index a31e0c765c2d32..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/split.hpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "primitive.hpp" -#include -#include - -namespace cldnn { - -/// @brief Performs split operation on input. -/// @details splits the input data into n parts, for each user provides name and offsets. -/// @n User cannot use split primitive directly. -/// @n It is needed to refer to the output ids with the name ":". -/// @n -/// @n\b Assumptions -/// @n - offsets1 < offsets2 < offsets3 < ... -/// @n - size[n] = offsets[n+1] - offsets[n]; -/// @n - last element: size[n] = split_input.size - offsets[n]; -/// @n - no buffer overlapping, as the output size is calculated using offset and input size -/// @n - split primitive id cannot be used by any other primitive (user needs to use output_ids only) -/// @n Breaking any of these conditions will cause an exception to be thrown. -/// @n -/// @n\b Example: -/// @n Splitting output to 2 parts by the features: -/// @n input_size = { 2, 4, 3, 5 }; -/// @n split_id = "split"; -/// @n output_ids_offsets[0] = { "out0", { 0,0,0,0 } }; -/// @n output_ids_offsets[1] = { "out1", { 0,2,0,0 } }; -/// @n After split there would be 2 primitives: "split:out0" and "split:out1" which contain 2 feature maps (lower and upper) -struct split : public primitive_base { - CLDNN_DECLARE_PRIMITIVE(split) - - /// @brief Constructs split primitive. - /// @param id This primitive id. - /// @param input Input primitive id.
- /// @param output_ids_offsets Pairs of output_ids and offsets - split(const primitive_id& id, - const input_info& input, - const std::vector >& output_ids_offsets, - const padding& output_padding = padding()) - : primitive_base(id, {input}, {output_padding}), - output_offsets(extract_tensor_vector(output_ids_offsets)), - output_ids(extract_primitive_vector(output_ids_offsets)) {} - - /// @brief Array of tensors with offsets. - std::vector output_offsets; - /// @brief List of output_ids. - const primitive_id_arr output_ids; - - size_t hash() const override { - size_t seed = primitive::hash(); - for (auto& offset : output_offsets) { - seed = hash_combine(seed, offset.hash()); - } - return seed; - } - - bool operator==(const primitive& rhs) const override { - if (!compare_common_params(rhs)) - return false; - - auto rhs_casted = downcast(rhs); - - return output_offsets == rhs_casted.output_offsets; - } - -protected: - static std::vector extract_primitive_vector( - const std::vector >& stor) { - std::vector res; - for (auto& stor_pair : stor) res.push_back(stor_pair.first); - - return res; - } - - static std::vector extract_tensor_vector(const std::vector >& stor) { - std::vector res; - for (auto& stor_pair : stor) res.push_back(stor_pair.second); - - return res; - } -}; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp index ec7276ebcc1f34..77b49d794bab75 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp @@ -218,8 +218,6 @@ struct format { os_is_yx_osv32_isv4_swizzled_by_2, ///< format for weights for IMAD convolutions os_is_yx_osv32_isv4, ///< format for weights for IMAD convolutions os_is_zyx_osv32_isv4, ///< format for weights for IMAD convolutions - lstm_weights_dio, ///< dynamic_lstm, direction, - ///< than IO (I - input size, O - 4 * hidden_size) os_is_osv32_isv32_swizzled_by_4, ///< format for weights for 1x1 IMAD convolution os_iyx_osv8, os_iyx_osv32__ai32, diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp index d68cfcd7edbae0..8a4fddecdd4fb6 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/kernel_args.hpp @@ -66,10 +66,7 @@ struct argument_desc { SLOPE, INTERNAL_BUFFER, SCALAR, - RECURRENT, // RNN/LSTM/GRU recurrent weights - HIDDEN, // RNN/LSTM/GRU hidden input CELL, // LSTM cell input - LSTM_PACK, // LSTM packed output WEIGHTS_ZERO_POINTS, ACTIVATIONS_ZERO_POINTS, COMPENSATION, diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp index 51f09989502a13..aea07971ca6020 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp @@ -51,9 +51,15 @@ struct ShapePredictor { std::pair predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, size_t dt_bitwidth, - bool can_reuse_buffer); + bool can_reuse_buffer, + int32_t next_iters_prealloc_count = -1); + bool can_preallocate(size_t desired_buffer_size); + void reset() { + _shapes_info.clear(); + } + private: void add_shape(const std::string& id, const ov::Shape& shape); diff --git a/src/plugins/intel_gpu/src/graph/eltwise.cpp 
b/src/plugins/intel_gpu/src/graph/eltwise.cpp index b537f2481b7e40..3a8d75e81197f4 100644 --- a/src/plugins/intel_gpu/src/graph/eltwise.cpp +++ b/src/plugins/intel_gpu/src/graph/eltwise.cpp @@ -376,10 +376,16 @@ eltwise_inst::typed_primitive_inst(network& network, eltwise_node const& node) : } } - for (size_t d = 0; d < input0_pshape.size(); ++d) { - bool sizes_equal = input0_pshape[d] == input_pshape[d]; + auto base_pshape = input0_pshape; + if (prim->broadcast_spec == ov::op::AutoBroadcastType::NUMPY && + base_pshape.size() < input_pshape.size()) { + base_pshape.insert(base_pshape.begin(), input_pshape.size() - base_pshape.size(), 1); + } + + for (size_t d = 0; d < base_pshape.size(); ++d) { + bool sizes_equal = base_pshape[d] == input_pshape[d]; bool broadcast = - (input0_pshape[d] == 1 || input_pshape[d] == 1) && (input0_pshape[d] != 1 || input_pshape[d] != 1); + (base_pshape[d] == 1 || input_pshape[d] == 1) && (base_pshape[d] != 1 || input_pshape[d] != 1); CLDNN_ERROR_BOOL(node.id(), "Sizes equal or broadcast is possible", !(sizes_equal || broadcast), diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp index 7dd86bd52b3e6a..d566339103bf48 100644 --- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp @@ -97,8 +97,8 @@ layout fully_connected_inst::calc_output_layout(fully_connected_node const& node auto input_pshape = input_layout.get_partial_shape(); auto weights_layout = *impl_param.weights_layout; auto weights_pshape = weights_layout.get_partial_shape(); - auto output_type = input_layout.data_type; - if ((output_type == data_types::u8 || output_type == data_types::i8) && desc->output_data_types[0]) + auto output_type = desc->output_data_types[0].value_or(input_layout.data_type); + if (data_type_traits::is_i8_u8(input_layout.data_type) && desc->output_data_types[0]) output_type = *desc->output_data_types[0]; if (impl_param.has_fused_primitives()) { @@ -139,8 +139,8 @@ std::vector fully_connected_inst::calc_output_layouts(fully_connected_no auto input_layout = impl_param.get_input_layout(); auto weights_layout = *impl_param.weights_layout; - auto output_type = input_layout.data_type; - if (data_type_traits::is_i8_u8(output_type) && desc->output_data_types[0]) + auto output_type = desc->output_data_types[0].value_or(input_layout.data_type); + if (data_type_traits::is_i8_u8(input_layout.data_type) && desc->output_data_types[0]) output_type = *desc->output_data_types[0]; if (impl_param.has_fused_primitives()) { diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp deleted file mode 100644 index 02f68d76df954e..00000000000000 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/clamp_fp16_output.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pass_manager.h" -#include "program_node.h" - -#include "gemm_inst.h" -#include "reshape_inst.h" -#include "softmax_inst.h" - -using namespace cldnn; - -void clamp_fp16_output::run(program& p) { - for (auto& node : p.get_processing_order()) { - // Add clamp activation to avoid inf result which causes Nan output - if (node->is_type() && !node->is_output() && node->get_output_layout().data_type == data_types::f16) { - auto user = node->get_users().front(); - // Reshape could be added in CreateMatMulOp : check a user node of the Reshape - if 
(user->is_type()) - user = user->get_users().front(); - - if (user->is_type()) { - float out_lo = data_type_traits::min(data_types::f16); - float out_hi = data_type_traits::max(data_types::f16); - auto activ_id = node->id() + "_overflow_clip"; - auto activ = std::make_shared(activ_id, input_info(node->id()), - activation_func::clamp, activation_additional_params{out_lo, out_hi}); - program_node& act_node = p.get_or_create(activ); - - fused_primitive_desc local_desc(activ); - local_desc.input_layout = node->get_output_layout(); - local_desc.f_param = act_node.get_fuse_params(); - local_desc.outer_dep_start_idx = -1; // No external dep - local_desc.total_num_deps = 1; - local_desc.output_layout = node->get_output_layout(); - if (node->get_fused_primitives().size() > 0) { - local_desc.fused_deps.emplace(node->get_fused_primitives().back().desc->id, 0); - } - - node->add_fused_primitive(local_desc); - } - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp index cf9f44a9a59686..0b557579674885 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/graph_initializations.cpp @@ -5,19 +5,6 @@ #include "pass_manager.h" #include "program_node.h" -#include "split_inst.h" -#include "convolution_inst.h" -#include "crop_inst.h" -#include "lstm_inst.h" -#include "reshape_inst.h" -#include "resample_inst.h" -#include "depth_to_space_inst.h" -#include "lstm_dynamic_inst.h" -#include "lstm_dynamic_input_inst.h" -#include "lstm_dynamic_timeloop_inst.h" -#include "mutable_data_inst.h" -#include "arg_max_min_inst.h" - #include #include #include @@ -28,375 +15,6 @@ using namespace cldnn; namespace cldnn { -namespace { -std::string get_id_string(size_t i) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << i; - return ss.str(); -} -} // namespace - -void graph_initializations::handle_split_node(program& p, split_node& node) { - if (!node.get_users().empty()) { - throw std::logic_error("Split layer cannot be used directly! Please use split output \"" + node.id() + - ":\"!"); - } - // get_output size and validate split primitive inputs - layout output_layout = node.get_output_layout(); - tensor output_layout_size = output_layout.get_tensor(); - - auto split_prim = node.typed_desc(); - std::size_t split_num = split_prim->output_offsets.size(); - - std::vector transformed_ids; - - // create crop for each split output provided - for (std::size_t i = 0; i < split_num; i++) { - primitive_id output_id = node.id() + ":" + split_prim->output_ids[i]; - - auto output_node_itr = p.nodes_map.find(output_id); - if (output_node_itr == p.nodes_map.end()) { - continue; - } - - transformed_ids.push_back(std::move(output_id)); - - auto node_ptr = output_node_itr->second; - - // calculate crop reference input size - tensor reference_input_size; - - // For all the split offsets before the last split offset, the size can be calculated - // size_of_offset[n] = offset[n + 1] - offset[n]; - if (i != (split_num - 1)) { - reference_input_size += split_prim->output_offsets[i + 1] - split_prim->output_offsets[i]; - } else { // For the last split i.e. 
size[split_num - 1] = split_input.size - offsets[n]; - reference_input_size += output_layout_size - split_prim->output_offsets[i]; - } - - // For all the other dimensions, copy from the split_input - for (int32_t dimension = 0; dimension < tensor_dim_max; dimension++) { - if (reference_input_size.raw[dimension] == 0) { - reference_input_size.raw[dimension] = output_layout_size.raw[dimension]; - } - } - - // update crop primitive - node_ptr->set_output_padding(output_layout.data_padding); - auto crop_prim = node_ptr->as().typed_desc(); - crop_prim->reference_input = reference_input_size; - } - - // remove input->split connection and remove original split node - p.remove_connection(node.input(), node); - - p.add_optimized_primitive_info(node.id(), transformed_ids); - p.optimized_out.push_back(node.id()); - p.nodes_map.erase(node.id()); -} - -void graph_initializations::handle_lstm_node(program& p, lstm_node& node) { - // lstm_node& lstm_node = node->as(); - bool initial_hidden_term = node.initial_hidden_term(); - bool initial_cell_term = node.initial_cell_term(); - bool bias_term = node.bias_term(); - auto lstm_prim = node.typed_desc(); - primitive_id weights_id = lstm_prim->weights; - primitive_id recurrent_id = lstm_prim->recurrent; - primitive_id bias_id = bias_term ? lstm_prim->bias : ""; - primitive_id initial_hidden_id = initial_hidden_term ? lstm_prim->initial_hidden : ""; - primitive_id initial_cell_id = initial_cell_term ? lstm_prim->initial_cell : ""; - - // removing connection with weights to get proper dependency order for next operations - p.remove_connection(p.get_node(weights_id), node); - p.remove_connection(p.get_node(recurrent_id), node); - if (bias_term) - p.remove_connection(p.get_node(bias_id), node); - if (initial_hidden_term) - p.remove_connection(p.get_node(initial_hidden_id), node); - if (initial_cell_term) - p.remove_connection(p.get_node(initial_cell_id), node); - - // calculating sizes - program_node& input = node.input(); - layout input_layout = input.get_output_layout(); - tensor recurrent_size = p.get_node(recurrent_id).get_output_layout().get_tensor(); - - // hidden tensor size = [batch, seq, hidden_size, direction] - // the output of the element wise operation is cropped and used in the next time step - // sequence_len = 1 and direction = 1. The backward pass is separated from the forward pass - auto hidden_size = tensor(input_layout.batch(), 1, recurrent_size.spatial[0], 1); - - size_t directions = recurrent_size.feature[0]; - size_t num_input_dependencies = node.get_dependencies().size(); - size_t sequence_len = node.sequence_len(); - - // Calculate the input sequence length for the lstm node - // Case 1: If the input comes in as a concatenated input i.e. the - // input is not divided into sequence elements - if (sequence_len == 1 && num_input_dependencies == 1) { - // Get the sequence length from the input to LSTM - sequence_len = input_layout.feature(); - - // If the input's feature/sequence length field is > 1, i.e. 
If - // the sequence elements are concatenated into one single input - // then it has to be split into individual sequence elements - if (sequence_len > 1) { - for (size_t sequence_element = 0; sequence_element < sequence_len; sequence_element++) { - primitive_id crop_id = input.id() + ":crop:" + get_id_string(sequence_element); - tensor crop_tensor{input_layout.batch(), 1, input_layout.spatial(0), input_layout.spatial(1)}; - tensor offset_tensor{0, static_cast(sequence_element), 0, 0}; - auto input_crop = std::make_shared(crop_id, input.id(), crop_tensor, offset_tensor); - auto& input_crop_node = p.get_or_create(input_crop); - - // Add the crop nodes as user for input - p.add_connection(input, input_crop_node); - - // Connect crop with lstm - p.add_connection(input_crop_node, node); - } - - // We have the sequence elements (cropped inputs) as input to LSTM. - // The original input is no longer a dependency to LSTM. - // Remove the input node as a dependency to LSTM - p.remove_connection(input, node); - - // Update the total no. of input dependecies - num_input_dependencies = node.get_dependencies().size(); - } - // if the sequence has a single element but it has multiple inputs then - // the parent of this lstm is an lstm node. If this is a bidirectional lstm - // then the sequence length is the number of dependencies divided by 2. - } else if (sequence_len == 1 && num_input_dependencies > 1) { - sequence_len = (directions == 1) ? num_input_dependencies : num_input_dependencies / 2; - } - - // check if this lstm node has an lstm child - bool has_lstm_children = false; - for (auto& user : node.get_users()) { - if (user->is_type()) { - has_lstm_children = true; - } - } - - bool emit_last_cell = lstm_prim->output_selection == lstm_output_selection::hidden_cell || - lstm_prim->output_selection == lstm_output_selection::sequence_cell; - bool emit_sequence = lstm_prim->output_selection == lstm_output_selection::sequence_cell || - lstm_prim->output_selection == lstm_output_selection::sequence; - - std::vector cell_list(directions * sequence_len); - std::vector hidden_list(directions * sequence_len); - std::map> output_map; - size_t input_directions = input_layout.spatial(1); - - // lstm expanding - for (size_t dir = 0; dir < directions; ++dir) { - auto hidden_id = initial_hidden_id; - auto cell_id = initial_cell_id; - for (size_t i = 0; i < sequence_len; ++i) { - size_t idx = i + dir * sequence_len; - primitive_id lstm_gemm_id = node.id() + ":lstm_gemm" + get_id_string(idx); - primitive_id lstm_elt_id = node.id() + ":lstm_elt" + get_id_string(idx); - primitive_id crop_id = node.id() + ":crop" + get_id_string(idx); - - size_t input_idx = i; - // for bidirectional lstms, if first LSTM layer then reverse input - // for subsequent stacked layers the input is strided on the dir dimension - if (num_input_dependencies > sequence_len) { // stacked layer - input_idx = dir * sequence_len + i; - } else if ((input_directions < 2) && dir > 0) { // first layer - input_idx = sequence_len - i - 1; - } - - // primitive_id lstm_gemm_input_id = node->get_dependency(input_idx).get_primitive()->id; - // the line below requires an attention: get_org_primitive_id() might not be an actual id of a node - // (see rename method) ToDO: ensure that get_org_primitive_id() is suitable here - primitive_id lstm_gemm_input_id = node.get_dependency(input_idx).get_org_primitive_id(); - - auto lstm_gemm_node = std::make_shared(lstm_gemm_id, - lstm_gemm_input_id, - weights_id, - recurrent_id, - bias_id, - hidden_id, - 
(uint32_t)dir); - auto& n1 = p.get_or_create(lstm_gemm_node); - - auto lstm_elt_node = std::make_shared(lstm_elt_id, - lstm_gemm_id, - cell_id, - lstm_prim->clip, - lstm_prim->input_forget, - lstm_prim->activations, - lstm_prim->activation_params, - lstm_prim->offset_order, - (uint32_t)dir); - auto& n2 = p.get_or_create(lstm_elt_node); - // adding lstm_elt as user - p.add_connection(n1, n2); - // adding dependecy to lstm_gemm node - // input - p.add_connection(node.get_dependency(input_idx), n1); - // adding weights and initial values to lstm_gemm - p.add_connection(p.get_node(weights_id), n1); - p.add_connection(p.get_node(recurrent_id), n1); - if (bias_term) - p.add_connection(p.get_node(bias_id), n1); - - // adding cell and hiddens as dependencies - if (i > 0) { - p.add_connection(*cell_list[(i - 1) * directions + dir], n2); - p.add_connection(*hidden_list[(i - 1) * directions + dir], n1); - } else { // if initial values are present - if (initial_hidden_term) - p.add_connection(p.get_node(hidden_id), n1); - if (initial_cell_term) - p.add_connection(p.get_node(cell_id), n2); - } - - // lstm_hidden - { - hidden_id = crop_id + ":hidden"; - auto crop_hidden = - std::make_shared(hidden_id, lstm_elt_id, hidden_size, tensor{0, 0, 0, 0}); - auto& n3 = p.get_or_create(crop_hidden); - // adding eltwise as dependency to hidden - p.add_connection(n2, n3); - - // if parent is lstm adding hiddens as dependency - if (has_lstm_children) { - for (auto& user : node.get_users()) { - p.add_connection(n3, *user); - } - } - hidden_list[i * directions + dir] = &n3; - if (i == sequence_len - 1 || emit_sequence) { - output_map[i * directions + dir] = {hidden_id, &n3}; - } - } - - // lstm_cell - if (i < sequence_len - 1 || emit_last_cell) { - cell_id = crop_id + ":cell"; - auto crop_cell = std::make_shared(cell_id, lstm_elt_id, hidden_size, tensor{0, 1, 0, 0}); - auto& n4 = p.get_or_create(crop_cell); - p.add_connection(n2, n4); - cell_list[i * directions + dir] = &n4; - if (i == sequence_len - 1) { - output_map[sequence_len * directions + dir] = {cell_id, &n4}; - } - } - } - } - // if there is no next lstm, concatenation is created - if (!has_lstm_children) { - std::vector output_ids_offsets; - for (auto& e : output_map) { - output_ids_offsets.push_back(input_info(e.second.first)); - } - primitive_id concatenation_id = node.id() + ":concat"; - auto concatenation_primitive = std::make_shared(concatenation_id, output_ids_offsets, 1); - auto& concatenation_node = p.get_or_create(concatenation_primitive); - for (auto& e : output_map) { - p.add_connection(*e.second.second, concatenation_node); - } - if (directions == 2) { - // bidirectional support requires concatenations along the direction and sequence axis - // instead we can concatenate along the sequence axis and reshape the tensor to the account - // for the direction - size_t concatenate_len = emit_sequence ? 
sequence_len : 1; - if (emit_last_cell) - concatenate_len++; - - tensor output_size{input_layout.batch(), - static_cast(concatenate_len), - hidden_size.spatial[0], - (int32_t)directions}; - auto reshape_primitive = std::make_shared(node.id() + ":reshape", concatenation_id, output_size); - auto& reshape_node = p.get_or_create(reshape_primitive); - p.add_connection(concatenation_node, reshape_node); - p.replace_all_usages(node, reshape_node); - } else { - p.replace_all_usages(node, concatenation_node); - } - } - // removing expanded node - p.remove_all_connections(node); - p.nodes_map.erase(node.id()); -} - -void graph_initializations::handle_dynamic_lstm_node(program& p, lstm_dynamic_node& node) { - // [0] Prepare helper temp variables. - // auto& lstm_dynamic_node = node->as(); - auto& node_id = node.id(); - auto input_id = node.get_primitive()->input.at(0); - auto dyn_length_id = node.dyn_length_id(); - auto weights_id = node.weights_id(); - auto bias_id = node.bias_id(); - std::string suffix = "__cldnn_"; - - // [1] Add lstm_dynamic_input - auto lstm_dynamic_input_primitive = - std::make_shared(node_id + suffix + "input", - input_id, - dyn_length_id, - weights_id, - bias_id, - node.get_primitive()->output_paddings[0]); - auto& lstm_dynamic_input_node = p.get_or_create(lstm_dynamic_input_primitive); - p.add_connection(node.input(), lstm_dynamic_input_node); // connect real input to dlstm_input - // connect other deps - p.add_connection(p.get_node(dyn_length_id), lstm_dynamic_input_node); - p.add_connection(p.get_node(weights_id), lstm_dynamic_input_node); - if (!bias_id.empty()) - p.add_connection(p.get_node(bias_id), lstm_dynamic_input_node); - lstm_dynamic_input_node.get_output_layout(); // calc out layout - - auto recurrent_id = node.recurrent_id(); - auto init_hidden_id = node.initial_hidden_id(); - auto init_cell_id = node.initial_cell_id(); - auto last_hidden_id = node.last_hidden_state_id(); - auto last_cell_id = node.last_cell_state_id(); - auto lstm_dynamic_timeloop_primitive = - std::make_shared(node_id + suffix + "timeloop", - lstm_dynamic_input_node.id(), - dyn_length_id, - recurrent_id, - last_hidden_id, - last_cell_id, - init_hidden_id, - init_cell_id, - node.clip(), - node.input_forget(), - lstm_dynamic_input_primitive->output_paddings[0]); - auto& lstm_dynamic_timeloop_node = p.get_or_create(lstm_dynamic_timeloop_primitive); - p.add_connection(lstm_dynamic_input_node, lstm_dynamic_timeloop_node); // connect dlstm_input to dlstm_timeloop - // connect other deps - p.add_connection(p.get_node(dyn_length_id), lstm_dynamic_timeloop_node); - p.add_connection(p.get_node(recurrent_id), lstm_dynamic_timeloop_node); - - // [hack] reversed dependencies so the processing/execution order will be valid (from the user perspective) - // It means that these optional outputs will surely be "executed" layers. - // This connection will be reversed (to normal state) later in program.cpp (right after calculating the processing order)! - if (!last_hidden_id.empty()) - p.add_connection(lstm_dynamic_timeloop_node, p.get_node(last_hidden_id)); - if (!last_cell_id.empty()) - p.add_connection(lstm_dynamic_timeloop_node, p.get_node(last_cell_id)); - // [hack end] - if (!init_hidden_id.empty()) - p.add_connection(p.get_node(init_hidden_id), lstm_dynamic_timeloop_node); - if (!init_cell_id.empty()) - p.add_connection(p.get_node(init_cell_id), lstm_dynamic_timeloop_node); - lstm_dynamic_timeloop_node.get_output_layout(); // calc out layout - - // [2] Finally replace original node with the new ones.
- p.replace_all_usages(node, lstm_dynamic_timeloop_node); - p.remove_all_connections(node); - p.remove_if_dangling(node); - p.rename(lstm_dynamic_timeloop_node, node_id); // get original id - - // we don't have to set output since it will be done in next graph_opts step -} void graph_initializations::set_outputs(program& p) { auto custom_outputs = p.get_config().get_property(ov::intel_gpu::custom_outputs); @@ -417,18 +35,6 @@ void graph_initializations::set_outputs(program& p) { } void graph_initializations::run(program& p) { - auto itr = p.nodes_map.begin(); - while (itr != p.nodes_map.end()) { - auto node_itr = itr++; - auto& node = node_itr->second; - if (node->is_type()) { - handle_split_node(p, node->as()); - } else if (node->is_type()) { - handle_lstm_node(p, node->as()); - } else if (node->is_type()) { - handle_dynamic_lstm_node(p, node->as()); - } - } set_outputs(p); p.get_processing_order().calc_processing_order(p); } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp index 3f568b96ebacda..f8d3ed08139817 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/post_optimize_weights.cpp @@ -10,7 +10,6 @@ #include "deconvolution_inst.h" #include "deformable_convolution_inst.h" #include "fully_connected_inst.h" -#include "lstm_dynamic_input_inst.h" namespace cldnn { @@ -21,11 +20,6 @@ template post_optimize_weights::weights_bias_offset post_optimize_we return weights_bias_offset(node.get_primitive()->input.size(), program_helpers::wrap_if_single(node.get_primitive()->weights).size()); } -template <> -post_optimize_weights::weights_bias_offset post_optimize_weights::get_weights_bias_offset(const lstm_dynamic_input_node& node) { - return weights_bias_offset(node.get_primitive()->input.size() + 1, program_helpers::wrap_if_single(node.get_primitive()->weights).size()); -} - // function which prepares given primitive for weights optimization template void post_optimize_weights::optimize_weights(T& node, program& p) { @@ -129,8 +123,6 @@ void post_optimize_weights::run(program& p) { optimize_weights(node->as(), p); } else if (node->is_type()) { optimize_weights(node->as(), p); - } else if (node->is_type()) { - optimize_weights(node->as(), p); - } } } } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 6a51ed0504b0ff..db9216202cfb55 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -15,6 +15,7 @@ #include "depth_to_space_inst.h" #include "resample_inst.h" #include "loop_inst.h" +#include "lstm_elt_inst.h" #include "strided_slice_inst.h" #include "shape_of_inst.h" #include "non_max_suppression_inst.h" @@ -80,6 +81,21 @@ bool concat_in_place_optimization::match(const program_node& concat_node, GPU_DEBUG_IF(debug_config->disable_runtime_buffer_fusing) { do_runtime_buffer_fusing = false; } + + auto concat_axis = concat_params.typed_desc()->axis; + size_t concat_axis_index = concat_axis < 0 ? concat_axis + concat_params.get_output_layout().get_rank() : concat_axis; + auto def_fmt = format::get_default_format(concat_params.get_output_layout().get_rank()); + // If static padding exists on a non-dyn_pad axis, return false to avoid this concat being optimized out.
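// A condensed standalone restatement of the rule in the comment above; the function and
// parameter names here are assumptions for illustration, not part of this diff.
#include <cstddef>
#include <vector>
static bool non_concat_axes_unpadded(const std::vector<int>& lower_pad,
                                     const std::vector<int>& upper_pad,
                                     size_t concat_axis_index) {
    for (size_t j = 0; j < lower_pad.size(); ++j) {
        if (j == concat_axis_index)
            continue;  // dynamic padding along the concat axis itself is allowed
        if (lower_pad[j] != 0 || upper_pad[j] != 0)
            return false;  // static padding on any other axis rules out in-place concat
    }
    return true;
}
// In-place concat makes each input alias a slice of the fused output buffer, so static
// padding on a non-concat axis would shift every element's linear offset and the inputs
// could no longer share that memory directly.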
+ if (concat_node.is_dynamic()) { + for (size_t j = 0; j < concat_params.get_output_layout().get_rank(); j++) { + if (j != concat_axis_index) { + if ((concat_params.get_output_layout().data_padding.lower_size().sizes(def_fmt)[j] != 0) + || (concat_params.get_output_layout().data_padding.upper_size().sizes(def_fmt)[j] != 0)) + return false; + } + } + } + auto pred_nodes = concat_node.get_dependencies(); for (auto p : pred_nodes) { // TODO : In dynamic shape only one user is allowed for optimized concat @@ -105,9 +121,7 @@ bool concat_in_place_optimization::match(const program_node& concat_node, // Otherwise, use explicit concat instead. auto output_format = concat_params.get_output_layout().format; auto output_datatype = concat_params.get_output_layout().data_type; - auto concat_axis = concat_params.typed_desc()->axis; - auto def_fmt = format::get_default_format(concat_params.get_output_layout().get_rank()); auto lower_padd_in_axis = concat_params.get_output_layout().data_padding.lower_size().sizes(def_fmt)[concat_axis]; lower_padd_in_axis = std::max(lower_padd_in_axis, pred_params[0].get_output_layout().data_padding.lower_size().sizes(def_fmt)[concat_axis]); diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index c6faa671dc8cd5..57947bc6da9c83 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -56,7 +56,6 @@ using namespace cldnn; void prepare_primitive_fusing::run(program& p) { fuse_reorders(p); remove_redundant_reshape(p); - fuse_sigmoid_mul_to_swish(p); fuse_bias(p); fuse_simple_primitives(p); fuse_constant_transposes(p); @@ -124,71 +123,6 @@ void prepare_primitive_fusing::remove_redundant_reshape(program &p) { } } -void prepare_primitive_fusing::fuse_sigmoid_mul_to_swish(program &p) { - auto itr = p.get_processing_order().begin(); - while (itr != p.get_processing_order().end()) { - auto node_itr = itr++; - auto& node = (*node_itr); - - if (node->is_output()) - continue; - - program_helpers::do_for_types(*node, [&p](eltwise_node& node) { - if (node.get_dependencies().size() != 2) - return; - - if (node.get_primitive()->mode != eltwise_mode::prod) - return; - - auto& mul = node; - program_node* activation_input = nullptr; - size_t values_id = 1; - if (node.get_dependency(0).is_type()) { - activation_input = &node.get_dependency(0); - } else if (node.get_dependency(1).is_type()) { - activation_input = &node.get_dependency(1); - values_id = 0; - } - - if (!activation_input) - return; - - if (activation_input->as().get_primitive()->activation_function != activation_func::logistic) - return; - - auto& sigmoid = activation_input->as(); - - if (sigmoid.is_output() || sigmoid.get_users().size() != 1) - return; - - auto& input = node.get_dependency(values_id); - - if (&input != &sigmoid.input()) - return; - - activation_additional_params swish_params = {1.0f, 0.0f}; - auto swish_prim = std::make_shared(mul.id() + "_swish", input.id(), activation_func::swish, swish_params); - auto& swish = p.get_or_create(swish_prim); - - p.add_optimized_primitive_info(node.id(), {swish.id()}); - p.add_optimized_primitive_info(sigmoid.id(), {swish.id()}); - - p.add_connection(input, swish); - p.replace_all_usages(mul, swish); - - p.remove_all_connections(mul); - p.remove_all_connections(sigmoid); - - p.remove_if_dangling(mul); - p.remove_if_dangling(sigmoid); - -
p.get_processing_order().insert_next(&input, &swish); - - swish.recalc_output_layout(); - }); - } -} - void prepare_primitive_fusing::fuse_reorders(program &p) { // This loop tries fusing several reorders one by one (if present) into one reorder auto itr = p.get_processing_order().begin(); diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp index 893d35395077bb..a3f43332cf7a95 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp @@ -316,8 +316,7 @@ void prepare_quantization::prepare_dequantize_merge(program& p, eltwise_node& el } auto get_scale_shift_mem = [](const cldnn::eltwise_node& eltw, size_t dep_id) -> memory::ptr { - if (dep_id >= eltw.get_dependencies().size()) - CLDNN_ERROR_MESSAGE(eltw.id(), "Invalid dependency id in dequantize optimization"); + OPENVINO_ASSERT(dep_id < eltw.get_dependencies().size(), "[GPU] ", eltw.id(), ": Invalid dependency id in dequantize optimization"); return eltw.get_dependency(dep_id).as().get_attached_memory_ptr(); }; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp deleted file mode 100644 index 41df4c0ce5920b..00000000000000 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reverse_optional_nodes_outputs.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pass_manager.h" -#include "program_helpers.h" -#include "lstm_dynamic_timeloop_inst.h" - -#include - -using namespace cldnn; - -/* - Pass made for nodes, which have optional outputs (and had to reverse connections so - the processing order was valid.
-*/ -void reverse_optional_nodes_outputs::run(program& p) { - for (auto& node : p.get_processing_order()) { - if (node->is_type()) { - auto& typed_node = node->as(); - typed_node.reverse_optional_outputs_connections(); - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp deleted file mode 100644 index c35d5e37233daf..00000000000000 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/strided_slice_optimize.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "intel_gpu/runtime/error_handler.hpp" -#include "pass_manager.h" -#include "program_helpers.h" -#include "strided_slice_inst.h" -#include "reshape_inst.h" -#include "data_inst.h" -#include -#include - -using namespace cldnn; - -void strided_slice_optimize::run(program& p) { - auto node_itr = p.get_processing_order().begin(); - while (node_itr != p.get_processing_order().end()) { - auto& node = (*node_itr++); - if (node->is_type() && node->get_output_layout().is_static()) { - auto& strided_slice_node = node->as(); - auto& new_axis_mask = strided_slice_node.get_primitive()->new_axis_mask; - - if (std::find(new_axis_mask.begin(), new_axis_mask.end(), 1) == new_axis_mask.end()) - continue; - - auto node_layout = strided_slice_node.get_output_layout(); - // only 4D or less dimension output runs optimization - if (node_layout.get_rank() > 4) - continue; - - auto& deps = node->get_dependencies(); - auto is_other_deps_constant = [deps]() { - for (size_t i = 1; i < deps.size(); i++) { - if (!deps[i].first->is_type()) return false; - } - return true; - }; - if (!is_other_deps_constant()) - continue; - - for (size_t i = deps.size(); i--;) - if (deps[i].first->is_type()) - node->remove_dependency(i); - - auto node_size = node_layout.get_tensor().sizes(format::bfyx); - - auto is_shift_possible = [&](const std::vector& dims) -> bool { - if (dims.empty()) - CLDNN_ERROR_MESSAGE(node->id(), "Error while adding new axis: node has incorrect dimensions"); - - if (dims[dims.size() - 1] == 1) - return true; - else - CLDNN_ERROR_MESSAGE(node->id(), "Not supported yet: too many axes for adding"); - return false; - }; - - std::vector output_dims_sizes = node_size; - if (std::find(new_axis_mask.begin(), new_axis_mask.end(), 1) != new_axis_mask.end()) { - for (size_t i = 0; i < new_axis_mask.size(); ++i) { - if (new_axis_mask[new_axis_mask.size() - i - 1] == 1) { - if (is_shift_possible(output_dims_sizes)) { - for (size_t j = output_dims_sizes.size() - 1; j > i; --j) - output_dims_sizes[j] = output_dims_sizes[j - 1]; - output_dims_sizes[i] = 1; - } - } - } - } - - auto reshape_prim = std::make_shared( - "reshape_" + node->id(), - node->get_dependency(0).get_primitive()->id, - tensor(output_dims_sizes[0], output_dims_sizes[1], output_dims_sizes[3], output_dims_sizes[2])); - - auto& reshape_prim_node = p.get_or_create(reshape_prim); - - layout output_layout = { node_layout.data_type, node_layout.format, reshape_prim->output_shape }; - reshape_prim_node.set_output_layout(output_layout); - - p.add_intermediate(reshape_prim_node, *node, 0, true); - p.extract_and_remove(*node); - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index bb891c0ab8f99f..d83f40748123d8 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ 
b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -722,8 +722,6 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::g_os_y_is_x_osv8_isv4; case format::g_os_is_yx_isv16_osv16: return kernel_selector::weights_layout::g_os_is_yx_isv16_osv16; - case format::lstm_weights_dio: - return kernel_selector::weights_layout::dlstm_dir_io; case format::os_i_yxs_osv4_yxsv4: return kernel_selector::weights_layout::os_i_yxs_osv4_yxsv4; default: @@ -1002,8 +1000,6 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::os_is_zyx_osv64_isv16; case kernel_selector::weights_layout::os_is_yx_isv8_osv16_isv2: return cldnn::format::os_is_yx_isv8_osv16_isv2; - case kernel_selector::weights_layout::dlstm_dir_io: - return cldnn::format::lstm_weights_dio; case kernel_selector::weights_layout::os_iyx_osv16_rotate_180: return cldnn::format::os_iyx_osv16; case kernel_selector::weights_layout::os_i_yxs_osv4_yxsv4: diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp index f92f121e3e23b8..9bbb6753feb8a9 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp @@ -79,10 +79,12 @@ struct kv_cache_impl : typed_primitive_impl_ocl { variable.set(); if (can_be_optimized) { + GPU_DEBUG_TRACE_DETAIL << desc->id << " : Output is same as variable memory! Skip copying " << std::endl; // When primitive is optimized, concat kernel writes directly to variable memory return res_event; } else { // Otherwise, we need to copy result from out buffer to state memory + GPU_DEBUG_TRACE_DETAIL << desc->id << " : Copying output to variable memory" << std::endl; auto& stream = instance.get_network().get_stream(); stream.enqueue_barrier(); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp deleted file mode 100644 index 8bf8ba0a8fcedc..00000000000000 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_input.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "lstm_dynamic_input_inst.h" - #include "lstm_dynamic/lstm_dynamic_input_kernel_selector.h" -#include "lstm_dynamic/lstm_dynamic_input_kernel_base.h" - -namespace cldnn { -namespace ocl { - -struct lstm_dynamic_input_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_dynamic_input_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_dynamic_input_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args; - args.inputs = { instance.input_memory_ptr(), instance.dyn_length_memory()}; - args.outputs = { instance.output_memory_ptr() }; - args.weights = instance.weights_memory(); - args.bias = instance.bias_term() ?
instance.bias_memory() : nullptr; - return args; - } - -public: - static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param) { - const auto& primitive = impl_param.typed_desc(); - auto params = get_default_params(impl_param); - - const auto dyn_len_idx = 1; - const auto weights_idx = 2; - const auto bias_idx = 3; - - const auto& weights_layout = impl_param.get_input_layout(weights_idx); - params.weights = convert_weights_tensor(weights_layout); - - auto has_bias = !primitive->bias.empty(); - if (has_bias) { - const auto& bias_layout = impl_param.get_input_layout(bias_idx); - params.bias.push_back(convert_data_tensor(bias_layout)); - } - - const auto& dyn_length_tensor = impl_param.input_layouts[dyn_len_idx]; - params.inputs.push_back(convert_data_tensor(dyn_length_tensor)); - - params.direction = weights_layout.feature(); - - auto optional_params = get_default_weights_bias_optional_params(impl_param.get_program()); - return {params, optional_params}; - } -}; - -namespace detail { - -attach_lstm_dynamic_input_impl::attach_lstm_dynamic_input_impl() { - implementation_map::add(impl_types::ocl, typed_primitive_impl_ocl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f16, format::bfyx), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_dynamic_input_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic_input) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp deleted file mode 100644 index 18e39d0e7615ec..00000000000000 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_dynamic_timeloop.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "lstm_dynamic_timeloop_inst.h" -#include "lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h" -#include "lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h" - -namespace cldnn { -namespace ocl { - -struct lstm_dynamic_timeloop_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_dynamic_timeloop_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_dynamic_timeloop_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args; - args.inputs = {instance.input_memory_ptr(), instance.dyn_length_memory()}; - if (instance.last_hidden_output_term()) - args.inputs.push_back(instance.last_hidden_output_memory()); - if (instance.last_cell_output_term()) - args.inputs.push_back(instance.last_cell_output_memory()); - args.outputs = { instance.output_memory_ptr() }; - args.recurrent = instance.recurrent_memory(); - args.hidden = instance.initial_hidden_term() ? instance.initial_hidden_memory() : nullptr; - args.cell = instance.initial_cell_term() ? 
instance.initial_cell_memory() : nullptr; - return args; - } - -public: - static std::unique_ptr create(const lstm_dynamic_timeloop_node& arg, const kernel_impl_params& impl_param) { - auto dlstm_timeloop_params = get_default_params(impl_param); - - // dyn length - const auto& dyn_length_tensor = impl_param.input_layouts[arg.get_dependency_idx("dyn_length")]; - dlstm_timeloop_params.inputs.push_back(convert_data_tensor(dyn_length_tensor)); - - // recurrent - const auto& recurrent_layout = impl_param.input_layouts[arg.get_dependency_idx("recurrent")]; - dlstm_timeloop_params.recurrent = convert_data_tensor(recurrent_layout); - - dlstm_timeloop_params.direction = arg.direction(); - - if (arg.initial_cell_term()) { - const auto& cell_layout = impl_param.input_layouts[arg.get_dependency_idx("initial_cell")]; - dlstm_timeloop_params.set_cell(convert_data_tensor(cell_layout)); - } - - if (arg.last_hidden_output_term()) { - const auto& last_hidden_output_layout = impl_param.input_layouts[arg.get_dependency_idx("last_hidden_output")]; - dlstm_timeloop_params.set_last_hidden_output(convert_data_tensor(last_hidden_output_layout)); - } - - if (arg.initial_hidden_term()) { - const auto& hidden_layout = impl_param.input_layouts[arg.get_dependency_idx("initial_hidden")]; - dlstm_timeloop_params.set_hidden(convert_data_tensor(hidden_layout)); - } - - if (arg.last_cell_output_term()) { - const auto& last_cell_state_layout = impl_param.input_layouts[arg.get_dependency_idx("last_cell_output")]; - dlstm_timeloop_params.set_last_cell_output(convert_data_tensor(last_cell_state_layout)); - } - dlstm_timeloop_params.set_dynamic_shape_offsets(); - // finially get best kernel - auto dlstm_timeloop_optional_params = - get_default_optional_params(impl_param.get_program()); - - auto& kernel_selector = kernel_selector::lstm_dynamic_timeloop_kernel_selector::Instance(); - auto best_kernel = kernel_selector.get_best_kernel(dlstm_timeloop_params, dlstm_timeloop_optional_params); - - return make_unique(best_kernel); - } -}; - -namespace detail { - -attach_lstm_dynamic_timeloop_impl::attach_lstm_dynamic_timeloop_impl() { - implementation_map::add(impl_types::ocl, lstm_dynamic_timeloop_impl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f16, format::bfyx), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_dynamic_timeloop_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_dynamic_timeloop) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp index 0bccdd999b2889..9ebf715bbc3112 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_elt.cpp @@ -136,4 +136,3 @@ attach_lstm_elt_impl::attach_lstm_elt_impl() { BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_elt_impl) BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_elt) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp deleted file mode 100644 index 47809d63dc1bf4..00000000000000 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/lstm_gemm.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "lstm_gemm_inst.h" -#include "lstm/lstm_gemm_kernel_selector.h" -#include "lstm/lstm_gemm_kernel_base.h" - -namespace 
cldnn { -namespace ocl { - -struct lstm_gemm_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::lstm_gemm_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::lstm_gemm_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - -protected: - kernel_arguments_data get_arguments(const typed_primitive_inst& instance) const override { - kernel_arguments_data args = parent::get_arguments(instance); - - args.outputs = { instance.output_memory_ptr() }; - args.weights = instance.weights_memory(); - args.recurrent = instance.recurrent_memory(); - args.bias = instance.bias_term() ? instance.bias_memory() : nullptr; - args.hidden = instance.hidden_term() ? instance.hidden_memory() : nullptr; - - return args; - } - -public: - static std::unique_ptr create(const lstm_gemm_node& arg, const kernel_impl_params& impl_param) { - const auto input_idx = 0; - const auto weight_idx = 1; - const auto recurrent_idx = 2; - const auto bias_idx = 3; - const auto hidden_idx = arg.bias_term() ? 4 : 3; - - const auto& weights_layout = impl_param.input_layouts[weight_idx]; - auto lstm_gemm_params = get_default_params(impl_param); - lstm_gemm_params.weights = convert_data_tensor(weights_layout); - - if (arg.bias_term()) { - const auto& bias_layout = impl_param.input_layouts[bias_idx]; - lstm_gemm_params.SetBias(convert_data_tensor(bias_layout)); - } - if (arg.hidden_term()) { - const auto& recurrent_layout = impl_param.input_layouts[recurrent_idx]; - lstm_gemm_params.recurrent = convert_data_tensor(recurrent_layout); - - const auto& hidden_layout = impl_param.input_layouts[hidden_idx]; - lstm_gemm_params.SetHidden(convert_data_tensor(hidden_layout)); - // TODO: make a generic function to get the direction - if (hidden_layout.spatial(1) > 1) { - lstm_gemm_params.hidden_direction = arg.direction(); - } - } - lstm_gemm_params.direction = arg.direction(); - - // Update the direction of the input for the gemm kernel - const auto& input_layout = impl_param.input_layouts[input_idx]; - size_t input_directions = input_layout.spatial(1); - - if (input_directions > 1) { // For bidirection input, input direction can be 1 or 0 - lstm_gemm_params.input_direction = arg.direction(); - } else { // For unidirectional input - lstm_gemm_params.input_direction = 0; - } - lstm_gemm_params.set_dynamic_shape_offsets(); - auto lstm_gemm_optional_params = - get_default_optional_params(impl_param.get_program()); - - auto& kernel_selector = kernel_selector::lstm_gemm_kernel_selector::Instance(); - auto best_kernel = kernel_selector.get_best_kernel(lstm_gemm_params, lstm_gemm_optional_params); - - return make_unique(best_kernel); - } -}; - -namespace detail { - -attach_lstm_gemm_impl::attach_lstm_gemm_impl() { - implementation_map::add(impl_types::ocl, lstm_gemm_impl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f16, format::bfyx), - std::make_tuple(data_types::f32, format::fyxb), - std::make_tuple(data_types::f16, format::fyxb), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::lstm_gemm_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::lstm_gemm) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp deleted file mode 100644 index cf0b2f7e4794f9..00000000000000 --- 
a/src/plugins/intel_gpu/src/graph/impls/ocl/pyramid_roi_align.cpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "primitive_base.hpp" - -#include "pyramid_roi_align_inst.h" -#include "pyramid_roi_align/pyramid_roi_align_kernel_selector.h" -#include "pyramid_roi_align/pyramid_roi_align_kernel_base.h" - -#include - -namespace cldnn { -namespace ocl { - -struct pyramid_roi_align_impl : typed_primitive_impl_ocl { - using parent = typed_primitive_impl_ocl; - using parent::parent; - using kernel_selector_t = kernel_selector::PyramidROIAlign_kernel_selector; - using kernel_params_t = std::pair; - - DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::pyramid_roi_align_impl) - - std::unique_ptr clone() const override { - return make_unique(*this); - } - - static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param) { - const auto& primitive = impl_param.typed_desc(); - auto params = get_default_params(impl_param); - auto optional_params = get_default_optional_params(impl_param.get_program()); - - const auto P2_idx = 1; - const auto P3_idx = 2; - const auto P4_idx = 3; - const auto P5_idx = 4; - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P2_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P3_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P4_idx))); - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(P5_idx))); - - params.sampling_ratio_x = primitive->sampling_ratio; - params.sampling_ratio_y = primitive->sampling_ratio; - - auto first_layer_scale = primitive->pyramid_scales[0]; - auto image_size_x = impl_param.get_input_layout(P2_idx).spatial(0) * first_layer_scale; - auto image_size_y = impl_param.get_input_layout(P2_idx).spatial(1) * first_layer_scale; - params.image_size_x = image_size_x; - params.image_size_y = image_size_y; - - params.pyramid_starting_level = primitive->pyramid_starting_level; - - return {params, optional_params}; - } -}; - -namespace detail { - -attach_pyramid_roi_align_impl::attach_pyramid_roi_align_impl() { - implementation_map::add(impl_types::ocl, typed_primitive_impl_ocl::create, { - std::make_tuple(data_types::f32, format::bfyx), - std::make_tuple(data_types::f32, format::yxfb), - std::make_tuple(data_types::f32, format::byxf), - std::make_tuple(data_types::f16, format::bfyx), - std::make_tuple(data_types::f16, format::yxfb), - std::make_tuple(data_types::f16, format::byxf), - }); -} - -} // namespace detail -} // namespace ocl -} // namespace cldnn - -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::pyramid_roi_align_impl) -BIND_BINARY_BUFFER_WITH_TYPE(cldnn::pyramid_roi_align) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp index a1f81551305f97..bb2dba327f15b7 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp @@ -42,7 +42,6 @@ void register_implementations() { REGISTER_OCL(group_normalization); REGISTER_OCL(kv_cache); REGISTER_OCL(lrn); - REGISTER_OCL(lstm_gemm); REGISTER_OCL(lstm_elt); REGISTER_OCL(multiclass_nms); REGISTER_OCL(multinomial); @@ -55,7 +54,6 @@ void register_implementations() { REGISTER_OCL(permute); REGISTER_OCL(pooling); REGISTER_OCL(prior_box); - REGISTER_OCL(pyramid_roi_align); REGISTER_OCL(quantize); REGISTER_OCL(random_uniform); REGISTER_OCL(range); @@ -82,8 +80,6 @@ void 
register_implementations() { REGISTER_OCL(slice); REGISTER_OCL(strided_slice); REGISTER_OCL(tile); - REGISTER_OCL(lstm_dynamic_input); - REGISTER_OCL(lstm_dynamic_timeloop); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); REGISTER_OCL(grn); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp index 6c16814916ac67..6c27c72dc4caae 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp @@ -36,10 +36,6 @@ #include "intel_gpu/primitives/grn.hpp" #include "intel_gpu/primitives/group_normalization.hpp" #include "intel_gpu/primitives/lrn.hpp" -#include "intel_gpu/primitives/lstm.hpp" -#include "intel_gpu/primitives/lstm_dynamic.hpp" -#include "intel_gpu/primitives/lstm_dynamic_input.hpp" -#include "intel_gpu/primitives/lstm_dynamic_timeloop.hpp" #include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/multinomial.hpp" #include "intel_gpu/primitives/mvn.hpp" @@ -48,7 +44,6 @@ #include "intel_gpu/primitives/one_hot.hpp" #include "intel_gpu/primitives/permute.hpp" #include "intel_gpu/primitives/pooling.hpp" -#include "intel_gpu/primitives/pyramid_roi_align.hpp" #include "intel_gpu/primitives/quantize.hpp" #include "intel_gpu/primitives/random_uniform.hpp" #include "intel_gpu/primitives/range.hpp" @@ -125,7 +120,6 @@ REGISTER_OCL(grid_sample); REGISTER_OCL(group_normalization); REGISTER_OCL(kv_cache); REGISTER_OCL(lrn); -REGISTER_OCL(lstm_gemm); REGISTER_OCL(lstm_elt); REGISTER_OCL(multiclass_nms); REGISTER_OCL(multinomial); @@ -138,7 +132,6 @@ REGISTER_OCL(one_hot); REGISTER_OCL(permute); REGISTER_OCL(pooling); REGISTER_OCL(prior_box); -REGISTER_OCL(pyramid_roi_align); REGISTER_OCL(quantize); REGISTER_OCL(random_uniform); REGISTER_OCL(range); @@ -165,8 +158,6 @@ REGISTER_OCL(space_to_batch); REGISTER_OCL(space_to_depth); REGISTER_OCL(strided_slice); REGISTER_OCL(tile); -REGISTER_OCL(lstm_dynamic_input); -REGISTER_OCL(lstm_dynamic_timeloop); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); REGISTER_OCL(grn); diff --git a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h index bd6d6e3bc0a9f8..34c4ccf555008b 100644 --- a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h @@ -36,6 +36,50 @@ class typed_primitive_inst : public typed_primitive_inst_base= 2) { + auto spatial_axis = sequence_axis_legacy - 2; + // Default and minimum number of dimensions is 4 + auto spatial_size = std::max(past_layout_rank, 4) - 2; + sequence_axis_legacy = spatial_size - spatial_axis - 1 + 2; + } + return sequence_axis_legacy; + } + + static int64_t get_max_pad(const layout& target_layout, size_t buffer_size, int64_t legacy_sequence_axis, std::string target_name = "") { + if (buffer_size == 0) + return 0; + const size_t total_elements = target_layout.count(); + const int64_t concat_axis_size = target_layout.get_tensor().sizes()[legacy_sequence_axis]; + const int64_t sequence_element_size = total_elements / concat_axis_size; + const int64_t max_sequence_elements = buffer_size / sequence_element_size; + auto max_pad = std::max(max_sequence_elements - concat_axis_size, 0); + auto target_layout_name = (target_name != "") ? 
target_name : "target_layout"; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] " << target_layout_name << " : " << target_layout.to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] buffer size " << buffer_size << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] total_elements " << total_elements << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] concat_axis_size = " << concat_axis_size << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] sequence_element_size = " << sequence_element_size << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] max_sequence_elements = " << max_sequence_elements << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[get_max_pad] max_pad (max_sequence_elements - concat_axis_size) = " << max_pad << std::endl; + return max_pad; + } + typed_primitive_inst(network& network, const kv_cache_node& desc); typed_primitive_inst(network& network) : parent(network), memory_state::variable("") {} }; diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h deleted file mode 100644 index 0dece56e7d2c96..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_input_inst.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm_dynamic_input.hpp" -#include "primitive_inst.h" -#include "intel_gpu/runtime/error_handler.hpp" - -#include -#include - -namespace cldnn { - -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - program_node& dyn_length() const { return get_dependency(1); } - program_node& weights() const { return get_dependency(2); } - - program_node& bias() const { - CLDNN_ERROR_BOOL(id(), "Bias term", !bias_term(), "Trying to get non existing bias."); - return get_dependency(3); - } - - int32_t direction() const { return weights().get_output_layout().feature(); } - bool dyn_length_term() const { return !get_primitive()->dyn_length.empty(); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool weights_term() const { return !get_primitive()->weights.empty(); } -}; - -using lstm_dynamic_input_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_input_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_input_node const& node); - -public: - typed_primitive_inst(network& network, lstm_dynamic_input_node const& node); - - memory::ptr dyn_length_memory() const { return dep_memory_ptr(1); } - memory::ptr weights_memory() const { return dep_memory_ptr(2); } - memory::ptr bias_memory() const { - CLDNN_ERROR_BOOL(id(), "Bias term", !bias_term(), "Trying to get non existing bias memory."); - return dep_memory_ptr(3); - } - int32_t direction() const { return node->direction(); } - bool bias_term() const { return node->bias_term(); } -}; - -using lstm_dynamic_input_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h deleted file mode 100644 index
011c226cda9de5..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_inst.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm_dynamic.hpp" -#include "primitive_inst.h" -#include "intel_gpu/runtime/error_handler.hpp" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - float clip() const { return get_primitive()->clip; } - bool input_forget() const { return get_primitive()->input_forget; } - primitive_id bias_id() const { return get_primitive()->bias; } - primitive_id weights_id() const { return get_primitive()->weights; } - primitive_id recurrent_id() const { return get_primitive()->recurrent; } - primitive_id initial_hidden_id() const { return get_primitive()->initial_hidden; } - primitive_id initial_cell_id() const { return get_primitive()->initial_cell; } - primitive_id dyn_length_id() const { return get_primitive()->dyn_length; } - primitive_id last_hidden_state_id() const { return get_primitive()->last_hidden_state; } - primitive_id last_cell_state_id() const { return get_primitive()->last_cell_state; } -}; - -using lstm_dynamic_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_node const& node); - - typed_primitive_inst(network& network, lstm_dynamic_node const& node); - - static void check_direction(program_node& node, int32_t direction, std::string name) { - if (node.get_output_layout().spatial(1) != direction) - CLDNN_ERROR_MESSAGE(node.id(), name + " directions size need to equal 1 or 2 (bidrectional) !"); - } - - static void check_common_lstm_dynamic_sizes(program_node& node, - int32_t batch_size, - int32_t hidden_size, - int32_t direction, - std::string name) { - auto node_layout = node.get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - name + " format", - node.get_output_layout().format.value, - "expected bfyx format", - format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " batch size", - node_layout.batch(), - "input batch size", - batch_size, - "Sizes mismatch, " + name + ": " + node.id()); - check_direction(node, direction, name); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " x size", - node_layout.spatial(0), - "input_size", - hidden_size, - "Sizes mismatch, " + name + ": " + node.id()); - CLDNN_ERROR_NOT_EQUAL(node.id(), - name + " f size", - node_layout.feature(), - "1", - 1, - "Sizes mismatch, " + name + ": " + node.id()); - } -}; - -using lstm_dynamic_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h deleted file mode 100644 index 0d1aea8f9b2e64..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_dynamic_timeloop_inst.h +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include 
"intel_gpu/primitives/lstm_dynamic_timeloop.hpp" -#include "primitive_inst.h" - -#include -#include -#include - -namespace cldnn { - -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -private: - std::vector _param_list; - program_node& get_dependency_by_name(std::string val) const; - void init_params_list(); - inline size_t get_param_list_index(const std::string& dependency_tag) const { - return static_cast(std::distance(_param_list.begin(), std::find_if( - _param_list.begin(), _param_list.end(), [&](const std::string& tag) { return tag == dependency_tag; }))); - } - -public: - typed_program_node(std::shared_ptr prim, program& prog) - : parent(std::move(prim), prog) { - init_params_list(); - can_share_buffer(false); - } - - void reverse_optional_outputs_connections(); - size_t get_dependency_idx(std::string val) const; - - program_node& input() const { return get_dependency_by_name("input"); } - program_node& dyn_length() const { return get_dependency_by_name("dyn_length"); } - program_node& recurrent() const { return get_dependency_by_name("recurrent"); } - program_node& last_hidden_state() const { return get_dependency_by_name("last_hidden_output"); } - program_node& last_cell_state() const { return get_dependency_by_name("last_cell_output"); } - program_node& initial_hidden() const { return get_dependency_by_name("initial_hidden"); } - program_node& initial_cell() const { return get_dependency_by_name("initial_cell"); } - - float clip() const { return get_primitive()->clip; } - int32_t direction() const { return recurrent().get_output_layout().feature(); } - bool input_forget() const { return get_primitive()->input_forget; } - bool dyn_length_term() const { return !get_primitive()->dyn_length.empty(); } - bool recurrent_term() const { return !get_primitive()->recurrent.empty(); } - bool initial_hidden_term() const { return !get_primitive()->initial_hidden.empty(); } - bool initial_cell_term() const { return !get_primitive()->initial_cell.empty(); } - bool last_hidden_output_term() const { return !get_primitive()->last_hidden_state.empty(); } - bool last_cell_output_term() const { return !get_primitive()->last_cell_state.empty(); } -}; - -using lstm_dynamic_timeloop_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_dynamic_timeloop_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_dynamic_timeloop_node const& node); - -public: - typed_primitive_inst(network& network, lstm_dynamic_timeloop_node const& node); - - memory::ptr dyn_length_memory() const { return get_dependency_memory("dyn_length"); } - memory::ptr recurrent_memory() const { return get_dependency_memory("recurrent"); } - memory::ptr last_hidden_output_memory() const { return get_dependency_memory("last_hidden_output"); } - memory::ptr last_cell_output_memory() const { return get_dependency_memory("last_cell_output"); } - memory::ptr initial_hidden_memory() const { return get_dependency_memory("initial_hidden"); } - memory::ptr initial_cell_memory() const { return get_dependency_memory("initial_cell"); } - - bool dyn_length_term() const { return node->dyn_length_term(); } - bool initial_hidden_term() const { return node->initial_hidden_term(); } - bool initial_cell_term() const { return node->initial_cell_term(); } - bool 
last_hidden_output_term() const { return node->last_hidden_output_term(); } - bool last_cell_output_term() const { return node->last_cell_output_term(); } - -private: - memory::ptr get_dependency_memory(std::string val) const { return dep_memory_ptr(node->get_dependency_idx(val)); } -}; - -using lstm_dynamic_timeloop_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h deleted file mode 100644 index 5ddeb6051bc8c6..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_gemm_inst.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm.hpp" -#include "primitive_inst.h" - -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - using parent::parent; - - program_node& input() const { return get_dependency(0); } - program_node& weights() const { return get_dependency(1); } - program_node& recurrent() const { return get_dependency(2); } - program_node& bias() const { return get_dependency(3); } - program_node& hidden() const { return bias_term() ? get_dependency(4) : get_dependency(3); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool hidden_term() const { return !get_primitive()->hidden.empty(); } - uint32_t direction() const { return get_primitive()->direction; } -}; - -using lstm_gemm_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_gemm_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_gemm_node const& node); - -public: - typed_primitive_inst(network& network, lstm_gemm_node const& node); - - memory::ptr weights_memory() const { return dep_memory_ptr(1); } - memory::ptr recurrent_memory() const { return dep_memory_ptr(2); } - memory::ptr bias_memory() const { return dep_memory_ptr(3); } - memory::ptr hidden_memory() const { return bias_term() ? 
dep_memory_ptr(4) : dep_memory_ptr(3); } - bool bias_term() const { return !get_typed_desc()->bias.empty(); } - bool hidden_term() const { return !get_typed_desc()->hidden.empty(); } - uint32_t direction() const { return get_typed_desc()->direction; } -}; - -using lstm_gemm_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/lstm_inst.h b/src/plugins/intel_gpu/src/graph/include/lstm_inst.h deleted file mode 100644 index 2cad108fdd8128..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/lstm_inst.h +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/lstm.hpp" -#include "primitive_inst.h" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - using parent::parent; - - program_node& input() const { return get_dependency(0); } - program_node& weights() const { return get_dependency(1); } - program_node& recurrent() const { return get_dependency(2); } - program_node& bias() const { return get_dependency(3); } - program_node& inital_hidden() const { return get_dependency(bias_term() ? 4 : 3); } - program_node& inital_cell() const { - // This doesn't scale. We should use a map to get the dependencies index at primitive level - return get_dependency(bias_term() ? (initial_hidden_term() ? 5 : 4) : (initial_hidden_term() ? 4 : 2)); - } - program_node& peepholes() const { return get_dependency(6); } - bool bias_term() const { return !get_primitive()->bias.empty(); } - bool peepholes_term() const { return !get_primitive()->peepholes.empty(); } - bool initial_hidden_term() const { return !get_primitive()->initial_hidden.empty(); } - bool initial_cell_term() const { return !get_primitive()->initial_cell.empty(); } - std::vector activations() const { return get_primitive()->activations; } - std::vector activation_params() const { - return get_primitive()->activation_params; - } - size_t sequence_len() const { return get_primitive()->input.size(); } -}; - -using lstm_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(lstm_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(lstm_node const& node); - -public: - typed_primitive_inst(network& network, lstm_node const& node); - - memory& weights_memory() const { return dep_memory(1); } - memory& recurrent_memory() const { return dep_memory(2); } - memory& bias_memory() const { return dep_memory(3); } - memory& initial_hidden_memory() const { return dep_memory(bias_term() ? 4 : 3); } - memory& initial_cell_memory() const { - return dep_memory(bias_term() ? (initial_hidden_term() ? 5 : 4) : (initial_hidden_term() ? 
4 : 2)); - } - memory& peepholes_memory() const { return dep_memory(6); } - bool bias_term() const { return !argument->bias.empty(); } - bool peepholes_term() const { return !argument->peepholes.empty(); } - bool initial_hidden_term() const { return !argument->initial_hidden.empty(); } - bool initial_cell_term() const { return !argument->initial_cell.empty(); } - std::vector activations() const { return argument->activations; } - std::vector activation_params() const { return argument->activation_params; } -}; - -using lstm_inst = typed_primitive_inst; - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/pass_manager.h b/src/plugins/intel_gpu/src/graph/include/pass_manager.h index 0020eee07c6233..8c92ec4f5c6886 100644 --- a/src/plugins/intel_gpu/src/graph/include/pass_manager.h +++ b/src/plugins/intel_gpu/src/graph/include/pass_manager.h @@ -6,9 +6,6 @@ #include "intel_gpu/graph/program.hpp" #include "layout_optimizer.h" -#include "split_inst.h" -#include "lstm_inst.h" -#include "lstm_dynamic_inst.h" #include "quantize_inst.h" #include "eltwise_inst.h" #include "convolution_inst.h" @@ -82,9 +79,6 @@ class graph_initializations : public base_pass { private: void run(program& p) override; - void handle_split_node(program& p, split_node& node); - void handle_lstm_node(program& p, lstm_node& node); - void handle_dynamic_lstm_node(program& p, lstm_dynamic_node& node); void set_outputs(program& p); }; @@ -104,14 +98,6 @@ class mark_nodes : public base_pass { void run(program& p) override; }; -class clamp_fp16_output : public base_pass { -public: - clamp_fp16_output() : base_pass("clamp_fp16_output") {} - -private: - void run(program& p) override; -}; - class mark_shape_of_subgraphs : public base_pass { // This optimization pass aggregates nodes into shape_of subgraphs for further optimizations. // There are few key requirements to decide if node belongs to shape_of subgraph or not: @@ -191,7 +177,6 @@ class prepare_primitive_fusing : public base_pass { private: void run(program& p) override; - void fuse_sigmoid_mul_to_swish(program &p); void fuse_bias(program &p); void fuse_reorders(program& p); void fuse_simple_primitives(program &p); @@ -316,18 +301,6 @@ class trim_to_outputs : public base_pass { void run(program& p) override; }; -class strided_slice_optimize : public base_pass { -public: - strided_slice_optimize() : base_pass("strided_slice_optimize") {} - void run(program& p) override; -}; - -class reverse_optional_nodes_outputs : public base_pass { -public: - reverse_optional_nodes_outputs() : base_pass("reverse_optional_nodes_outputs") {} - void run(program& p) override; -}; - class concat_input_order : public base_pass { // This optimization changes order of inputs for concatenation to provide // better alignment for execution and allow for optimizing out in some cases. 
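For reference, the two kv_cache_inst helpers added near the top of this patch encode a small amount of index arithmetic: get_sequence_axis_legacy() translates a (possibly negative) sequence axis into the legacy cldnn indexing, where the dimensions beyond the first two are "spatial" dims stored in reverse order, and get_max_pad() computes how many additional sequence elements an already allocated buffer can absorb along the concat axis. A minimal standalone sketch of the same arithmetic, using plain integers instead of cldnn::layout (function names and sample sizes here are illustrative, not part of the patch):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Mirror of kv_cache_inst::get_sequence_axis_legacy(): dims beyond the first two
    // are spatial dims and are indexed in reverse order in legacy layouts.
    int64_t sequence_axis_to_legacy(int64_t sequence_axis, int64_t rank) {
        int64_t legacy = sequence_axis >= 0 ? sequence_axis : sequence_axis + rank;
        if (legacy >= 2) {
            int64_t spatial_axis = legacy - 2;
            int64_t spatial_size = std::max<int64_t>(rank, 4) - 2;  // legacy format has at least 4 dims
            legacy = spatial_size - spatial_axis - 1 + 2;
        }
        return legacy;
    }

    // Mirror of kv_cache_inst::get_max_pad(): how many extra sequence steps fit
    // into a preallocated buffer of buffer_elements, given the current tensor.
    int64_t max_pad(int64_t total_elements, int64_t buffer_elements, int64_t concat_axis_size) {
        if (buffer_elements == 0)
            return 0;
        int64_t sequence_element_size = total_elements / concat_axis_size;
        int64_t max_sequence_elements = buffer_elements / sequence_element_size;
        return std::max<int64_t>(max_sequence_elements - concat_axis_size, 0);
    }

    int main() {
        assert(sequence_axis_to_legacy(2, 4) == 3);   // axis 2 of a rank-4 shape -> legacy index 3
        assert(sequence_axis_to_legacy(-1, 4) == 2);  // axis -1 normalizes to 3 -> legacy index 2
        // 1x1x16x64 tensor concatenated along the 16-sized axis, buffer of 1536 elements:
        assert(max_pad(1 * 1 * 16 * 64, 1536, 16) == 8);  // room for 8 more sequence steps
        return 0;
    }

Padding the sequence axis by this max_pad is what later lets the kv_cache primitive concatenate new tokens in place instead of reallocating and copying the state buffer on every iteration.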
diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index 68ec642008c7a7..e2973d892e9fdd 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ -229,6 +229,8 @@ class primitive_inst { void set_shape_change() { _shape_changed = true; } void build_deps(); + + void update_paddings(); void do_runtime_skip_reorder(); void do_runtime_skip_gather(); void do_runtime_skip_permute(); @@ -368,7 +370,7 @@ class primitive_inst { bool _is_constant = false; bool _needs_completion_event = false; - size_t max_output_layout_size = 0; + size_t _max_output_layout_count = 0; std::vector max_intermediates_memory_sizes; std::vector allocate_outputs(kernel_impl_params* updated_params = nullptr, bool reset_mem = true, bool runtime_alloc = false); @@ -382,6 +384,9 @@ class primitive_inst { virtual void update_shape(); virtual event::ptr update_weights(); + virtual void update_shape_info_tensor(const kernel_impl_params& params); + + void fill_shape_info_data(const layout& runtime_layout, const layout& node_layout, int32_t* shape_info_ptr, size_t& offset); bool use_async_compilation(); // if primitive_inst doesn't replace impl to new impl(static impl with opt kernel or dynamic impl), return false bool update_impl(); diff --git a/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h b/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h deleted file mode 100644 index 71126be00b3933..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/pyramid_roi_align_inst.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "intel_gpu/primitives/pyramid_roi_align.hpp" -#include "primitive_inst.h" - -#include -#include - -namespace cldnn { -template <> -struct typed_program_node : public typed_program_node_base { - using parent = typed_program_node_base; - -public: - typed_program_node(std::shared_ptr prim, program& prog) : parent(prim, prog) {} - - program_node& input() const { return get_dependency(0); } - // program_node& boxes() const { return get_dependency(0); } - program_node& P2() const { return get_dependency(1); } - program_node& P3() const { return get_dependency(2); } - program_node& P4() const { return get_dependency(3); } - program_node& P5() const { return get_dependency(4); } -}; - -using pyramid_roi_align_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(pyramid_roi_align_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(pyramid_roi_align_node const& node); - typed_primitive_inst(network& network, pyramid_roi_align_node const& node); - - memory& input() const { return dep_memory(0); } - memory& P2() const { return dep_memory(1); } - memory& P3() const { return dep_memory(2); } - memory& P4() const { return dep_memory(3); } - memory& P5() const { return dep_memory(4); } -}; - -using pyramid_roi_align_inst = typed_primitive_inst; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/split_inst.h b/src/plugins/intel_gpu/src/graph/include/split_inst.h deleted file mode 100644 index 0c3fb839f0e086..00000000000000 --- a/src/plugins/intel_gpu/src/graph/include/split_inst.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright
(C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "intel_gpu/primitives/split.hpp" -#include "primitive_inst.h" - -#include - -namespace cldnn { - -using split_node = typed_program_node; - -template <> -class typed_primitive_inst : public typed_primitive_inst_base { - using parent = typed_primitive_inst_base; - using parent::parent; - -public: - static layout calc_output_layout(split_node const& node, kernel_impl_params const& impl_param); - static std::string to_string(split_node const& node); - typed_primitive_inst(network& network, split_node const& node); -}; - -using split_inst = typed_primitive_inst; -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/loop.cpp b/src/plugins/intel_gpu/src/graph/loop.cpp index c2ec118c378ff7..36ca523093f595 100644 --- a/src/plugins/intel_gpu/src/graph/loop.cpp +++ b/src/plugins/intel_gpu/src/graph/loop.cpp @@ -1040,24 +1040,58 @@ std::vector loop_inst::handle_buffers_for_next_iteration(const loop_ } } else if (mapping.type == loop_inst::backedge_memory_mapping::SINGLE) { memory::ptr to_mem = mapping.to_primitive->output_memory_ptr(); - if (iter == 0) { - auto ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem)); - if (ev) event_vec = {ev}; - GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from inintal_mem(" << mapping.initial_mem << ")" << std::endl; - } else { - if (is_dynamic()) { - // In dynamic model, do not swap memory buffer between input and output in inner body network. - // Just copy data from input buffer memory to output buffer memory. + + if (is_dynamic()) { + // In dynamic model, do not swap memory buffer between input and output in inner body network. + // Check size of input buffer memory and output buffer memory + // If size is different, allocate new input memory for the required size, + // Else just copy data from input buffer memory to output buffer memory.
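// Condensed sketch of the policy above (illustrative only; `src` stands for
// mapping.initial_mem on iteration 0 and for the backedge's from_mem afterwards):
//     if (!src->get_layout().identical(to_mem->get_layout())) {
//         to_mem = body_network->get_engine().allocate_memory(src->get_layout(), false);
//         body_network->set_input_data(mapping.to_primitive->id(), to_mem);  // rebind the resized buffer
//     }
//     ev = to_mem->copy_from(body_network->get_stream(), *src);  // plain copy when layouts already match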
+ cldnn::event::ptr ev; + if (iter == 0) { + auto to_id = mapping.to_primitive->id(); + // Check whether backedge_to shape needs to be updated by initial_mem + if (!mapping.initial_mem->get_layout().identical(to_mem->get_layout())) { + to_mem = body_network->get_engine().allocate_memory(mapping.initial_mem->get_layout(), false); + body_network->set_input_data(to_id, to_mem); + ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem)); + GPU_DEBUG_LOG << iter << ") [SINGLE] Backedge_to node(" << to_id << ") is set to new memory(" + << to_mem << ", " << to_mem->get_layout().to_short_string() + << ") because of shape update from initial memory(" + << mapping.initial_mem << "," << mapping.initial_mem->get_layout().to_short_string() << ")" << std::endl; + } else { + ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem)); + GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from initial_mem(" << mapping.initial_mem << ")" << std::endl; + } + } else { auto from_id = mapping.from_primitive->id(); + auto to_id = mapping.to_primitive->id(); if (body_network->has_event(from_id)) { auto ev = body_network->get_primitive_event(from_id); if (ev) ev->wait(); } memory::ptr from_mem = mapping.from_primitive->output_memory_ptr(); - auto ev = to_mem->copy_from(body_network->get_stream(), *(from_mem)); - if (ev) event_vec = {ev}; + + // Check whether backedge_to shape needs to be updated by backedge_from + if (!from_mem->get_layout().identical(to_mem->get_layout())) { + to_mem = body_network->get_engine().allocate_memory(from_mem->get_layout(), false); + GPU_DEBUG_LOG << iter << ") [SINGLE] Backedge_to node(" << to_id << ") is set to new memory(" + << to_mem << ", " << to_mem->get_layout().to_short_string() + << ") because of shape update from backedge_from(" << from_id + << ")'s memory(" << from_mem << "," << from_mem->get_layout().to_short_string() << ")" << std::endl; + body_network->set_input_data(to_id, to_mem); + ev = to_mem->copy_from(body_network->get_stream(), *(from_mem)); + } else { + ev = to_mem->copy_from(body_network->get_stream(), *(from_mem)); + } GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from [" << mapping.from_primitive->id() << "(" << from_mem << ")] to [" << mapping.to_primitive->id() << "(" << to_mem << ")]" << std::endl; + } + if (ev) event_vec = {ev}; + } else { + if (iter == 0) { + auto ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem)); + if (ev) event_vec = {ev}; + GPU_DEBUG_LOG << iter << ") [SINGLE] Copy data from initial_mem(" << mapping.initial_mem << ")" << std::endl; } else { // In static model, swap memory buffer between output and input in inner body network memory::ptr from_mem = mapping.from_primitive->output_memory_ptr(); diff --git a/src/plugins/intel_gpu/src/graph/lstm.cpp b/src/plugins/intel_gpu/src/graph/lstm.cpp deleted file mode 100644 index fa5e64e1fa33cf..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm.cpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm) - -layout lstm_inst::calc_output_layout(lstm_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_node!"); - auto input_layout = impl_param.get_input_layout(); -
auto hidden_layout = node.inital_hidden().get_output_layout(); - - // input = [ batch, sequence, direction, input_size ] - // weights = [ 1, direction, 4 * hidden_size, input_size ] - // recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] - // biases = [ 1, 1, direction, 4 * hidden_size ] - // hidden = [ batch, 1, direction, hidden_size ] - // cell = [ batch, 1, direction, hidden_size ] - // output = [ batch, sequence, direction, hidden_size ] - auto result = layout(input_layout.data_type, - format::bfyx, - tensor(hidden_layout.feature(), - input_layout.feature(), - hidden_layout.spatial(0), - hidden_layout.spatial(1))); - return result; -} - -std::string lstm_inst::to_string(lstm_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto peepholes_id = desc->peepholes != "" ? desc->peepholes : "no peepholes"; - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no inital hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no initial cell"; - - std::stringstream primitive_description; - - json_composite lstm_info; - lstm_info.add("weights id", weights_id); - lstm_info.add("recurrent id", recurrent_id); - lstm_info.add("bias id", std::move(bias_id)); - lstm_info.add("peepholes id", std::move(peepholes_id)); - lstm_info.add("initial_hidden id", std::move(initial_hidden_id)); - lstm_info.add("initial_cell id", std::move(initial_cell_id)); - node_info->add("lstm info", lstm_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_inst::typed_primitive_inst(network& network, lstm_node const& node) : parent(network, node) { - auto input_layout = node.input().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp deleted file mode 100644 index 44ee3720a1a142..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic.cpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic) - -// input_tensor: [b: batch, f: max_sequence_length, x: input_size, y: direction] -// weights_tensor: [b: 1, f: direction, x: input_size, y: 4 * hidden_size] -// recurr_tensor: [b: 1, f: direction, x: hidden_size, y: 4 * hidden_size] -// init_hidden: [b: batch, f: 1, x: hidden_size, y: direction] -// init_cell: [b: batch, f: 1, x: hidden_size, y: direction] -// output_tensor: [b: batch, f: max_sequence_length, x: hidden_size, y: direction] -layout lstm_dynamic_inst::calc_output_layout(lstm_dynamic_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - /* - This program node is just placeholder for input + timeloop combinations, thus this is returning dummy layout. 
- */ - return impl_param.get_input_layout(); -} - -std::string lstm_dynamic_inst::to_string(lstm_dynamic_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no inital hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no initial cell"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_info; - lstm_dynamic_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_info.add("weights id", std::move(weights_id)); - lstm_dynamic_info.add("recurrent id", recurrent_id); - lstm_dynamic_info.add("bias id", bias_id); - lstm_dynamic_info.add("initial_hidden id", std::move(initial_hidden_id)); - lstm_dynamic_info.add("initial_cell id", initial_cell_id); - node_info->add("lstm_dynamic info", lstm_dynamic_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_inst::typed_primitive_inst(network& network, lstm_dynamic_node const& node) : parent(network, node) { - CLDNN_ERROR_MESSAGE(node.id(), - std::string("This primitive_inst should never be created. It should be repalced by ") - .append("lstm_dynamic_input + lstm_dyamic_timeloop combinations.")); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp deleted file mode 100644 index 0633c949f13ba5..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic_input.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_input_inst.h" -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic_input) -// input_tensor: [b: batch, f: max_sequence_length, x: input_size, y: direction] -// weights_tensor: [b: 1, f: direction, x: input_size, y: 4 * hidden_size] -// output_tensor: [b: batch, f: max_sequence_length, x: 4 * hidden_size, y: direction] -layout lstm_dynamic_input_inst::calc_output_layout(lstm_dynamic_input_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - auto input_layout = impl_param.get_input_layout(0); - auto weight_layout = impl_param.get_input_layout(2); - auto batch = input_layout.batch(); - auto direction = weight_layout.feature(); - auto output_sequence = input_layout.feature(); - return layout(input_layout.data_type, - input_layout.format, - tensor(batch, output_sequence, weight_layout.spatial(1), direction)); -} - -std::string lstm_dynamic_input_inst::to_string(lstm_dynamic_input_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto bias_id = desc->bias != "" ? 
desc->bias : "no bias"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_input_info; - lstm_dynamic_input_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_input_info.add("weights id", desc->weights); - lstm_dynamic_input_info.add("bias id", bias_id); - lstm_dynamic_input_info.add("max seq len", node.input().get_output_layout().feature()); - lstm_dynamic_input_info.add("hidden size", node.weights().get_output_layout().spatial(1) / 4); - lstm_dynamic_input_info.add("direction", node.weights().get_output_layout().feature()); - node_info->add("lstm_dynamic_input info", lstm_dynamic_input_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_input_inst::typed_primitive_inst(network& network, lstm_dynamic_input_node const& node) - : parent(network, node) { - // Check input - auto input_layout = node.input().get_output_layout(); - auto direction = node.direction(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); - lstm_dynamic_inst::check_direction(node.input(), direction, "input"); - - // check dynamic length - CLDNN_ERROR_BOOL(node.id(), - "Dynamic length memory", - !node.dyn_length_term(), - "Id of dynamic length memory is not set."); - auto dyn_length_size = node.dyn_length().get_output_layout().count(); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Batch", - node.get_output_layout().batch(), - "Dynamic tensor elements count.", - dyn_length_size, - "Should be equal."); - - // check weights - CLDNN_ERROR_BOOL(node.id(), "Weights memory", !node.weights_term(), "Id of weights memory is not set."); - auto weights_id = node.weights().id(); - auto weights_layout = node.weights().get_output_layout(); - auto hidden_size = weights_layout.spatial(1) / 4; - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "weights format", - node.weights().get_output_layout().format.value, - "expected bfyx format", - format::oiyx, format::lstm_weights_dio, format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Weights batch size", - weights_layout.batch(), - "1", - 1, - "Sizes mismatch, weights_id: " + weights_id); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Weights x size", - weights_layout.spatial(0), - "input_size", - input_layout.spatial(0), - "Sizes mismatch, weights_id: " + weights_id); - - // check bias - if (node.bias_term()) { - auto bias_id = node.id(); - auto bias_tensor = node.bias().get_output_layout().get_tensor(); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Bias count", - bias_tensor.count(), - "direction * 4 * hidden_size", - direction * 4 * hidden_size, - "Bias count mismtach, bias_id: " + bias_id); - lstm_dynamic_inst::check_direction(node.bias(), direction, "bias"); - } -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp b/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp deleted file mode 100644 index 6ec45a35e72e3b..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_dynamic_timeloop.cpp +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "lstm_dynamic_timeloop_inst.h" -#include "lstm_dynamic_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_dynamic_timeloop) - -program_node& lstm_dynamic_timeloop_node::get_dependency_by_name(std::string val) const { - return 
get_dependency(get_dependency_idx(val)); -} - -void lstm_dynamic_timeloop_node::init_params_list() { - _param_list.push_back("input"); - _param_list.push_back("dyn_length"); - _param_list.push_back("recurrent"); - if (last_hidden_output_term()) - _param_list.push_back("last_hidden_output"); - if (last_cell_output_term()) - _param_list.push_back("last_cell_output"); - if (initial_hidden_term()) - _param_list.push_back("initial_hidden"); - if (initial_cell_term()) - _param_list.push_back("initial_cell"); -} - -void lstm_dynamic_timeloop_node::reverse_optional_outputs_connections() { - auto reverse_connections = [&](program_node& mutable_data_node, const std::string& dependency_tag) { - auto index_to_insert = get_param_list_index(dependency_tag); - mutable_data_node.dependencies.erase(std::remove_if(mutable_data_node.dependencies.begin(), mutable_data_node.dependencies.end(), - [&](const std::pair& dep) { - return this == dep.first; - })); - mutable_data_node.users.push_back(this); - users.remove(&mutable_data_node); - auto port_idx = get_port_from_deps(mutable_data_node.id()); - dependencies.insert(dependencies.begin() + index_to_insert, {&mutable_data_node, port_idx}); - // fix inputs/outputs - if (mutable_data_node.get_dependencies().empty()) { - myprog.get_inputs().push_back(&mutable_data_node); - } - if (mutable_data_node.is_output()) { - mutable_data_node.set_output(false); - auto& program_output = myprog.get_outputs(); - program_output.erase(std::remove(program_output.begin(), program_output.end(), &mutable_data_node)); - } - }; - - if (last_hidden_output_term()) { - reverse_connections(myprog.get_node(get_primitive()->last_hidden_state), "last_hidden_output"); - } - if (last_cell_output_term()) { - reverse_connections(myprog.get_node(get_primitive()->last_cell_state), "last_cell_output"); - } - - // moved mutable data do deps, try to set this node at output if no users - auto& outputs = myprog.get_outputs(); - if (users.empty() && std::find(outputs.begin(), outputs.end(), this) == outputs.end()) { - output = true; - myprog.get_outputs().push_back(this); - } -} - -size_t lstm_dynamic_timeloop_node::get_dependency_idx(std::string val) const { - auto ret = get_param_list_index(val); - CLDNN_ERROR_EQUAL(id(), - "Dependency index", - ret, - "out of range number", - _param_list.size(), - "Trying to get non-exsisting param!"); - return ret; -} - -// input_tensor: [b: batch, f: max_sequence_length, x: 4 * hiden_size, y: direction] -// recurr_tensor: [b: 1, f: direction, x: hidden_size, y: 4 * hidden_size] -// init_cell: [b: batch, f: 1, x: hidden_size, y: direction] -// output_tensor: [b: batch, f: max_sequence_length, x: hidden_size, y: direction] -layout lstm_dynamic_timeloop_inst::calc_output_layout(lstm_dynamic_timeloop_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_dynamic_node!"); - auto input_layout = impl_param.get_input_layout(); - auto batch = input_layout.batch(); - auto output_sequence = input_layout.feature(); - auto reccurent_layout = node.recurrent().get_output_layout(); - auto hidden_size = reccurent_layout.spatial(0); - auto direction = reccurent_layout.feature(); - return layout(input_layout.data_type, input_layout.format, tensor(batch, output_sequence, hidden_size, direction)); -} - -std::string lstm_dynamic_timeloop_inst::to_string(lstm_dynamic_timeloop_node const& node) { - auto desc = node.get_primitive(); - auto node_info = 
node.desc_to_json(); - auto initial_hidden_id = desc->initial_hidden != "" ? desc->initial_hidden : "no initial hidden"; - auto initial_cell_id = desc->initial_cell != "" ? desc->initial_cell : "no inital cell"; - auto last_cell_id = desc->last_cell_state != "" ? desc->last_cell_state : "no inital cell"; - auto last_hidden_id = desc->last_hidden_state != "" ? desc->last_hidden_state : "no inital hidden"; - - std::stringstream primitive_description; - json_composite lstm_dynamic_input_info; - lstm_dynamic_input_info.add("dyn_length id", desc->dyn_length); - lstm_dynamic_input_info.add("recurrent id", desc->recurrent); - lstm_dynamic_input_info.add("initial cell id", std::move(initial_cell_id)); - lstm_dynamic_input_info.add("initial hidden id", initial_hidden_id); - lstm_dynamic_input_info.add("last cell id", last_cell_id); - lstm_dynamic_input_info.add("last hidden id", std::move(last_hidden_id)); - lstm_dynamic_input_info.add("max seq len", node.input().get_output_layout().feature()); - lstm_dynamic_input_info.add("hidden size", node.recurrent().get_output_layout().spatial(0)); - lstm_dynamic_input_info.add("direction", node.recurrent().get_output_layout().feature()); - node_info->add("lstm_dynamic_timeloop info", lstm_dynamic_input_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_dynamic_timeloop_inst::typed_primitive_inst(network& network, lstm_dynamic_timeloop_node const& node) - : parent(network, node) { - auto batch_size = node.get_output_layout().batch(); - auto direction = node.direction(); - - // TODO: check input sizes - auto input_id = node.input().id(); - auto input_layout = node.input().get_output_layout(); - auto hidden_size = input_layout.spatial(0) / 4; - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx); - lstm_dynamic_inst::check_direction(node.input(), direction, "input"); - - // check recurrent - CLDNN_ERROR_BOOL(node.id(), "Recurrent memory", !node.recurrent_term(), "Id of weights memory is not set."); - auto reccurent_id = node.recurrent().id(); - auto recurrent_layout = node.recurrent().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "recurrent format", - node.recurrent().get_output_layout().format.value, - "expected bfyx format", - format::bfyx); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent batch size", - recurrent_layout.batch(), - "1", - 1, - "Sizes mismatch, reccuren_id: " + reccurent_id); - if (recurrent_layout.feature() != direction) - CLDNN_ERROR_MESSAGE(node.id(), "Reccurent directions size needs to be equal to 1 or 2 (bidrectional) !"); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent x size", - recurrent_layout.spatial(0), - "hidden_size", - hidden_size, - "Sizes mismatch, reccuren_id: " + reccurent_id); - CLDNN_ERROR_NOT_EQUAL(node.id(), - "Recurrent y size", - recurrent_layout.spatial(1), - "4 * hidden_size", - 4 * hidden_size, - "Sizes mismatch, reccuren_id: " + reccurent_id); - - if (initial_cell_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.initial_cell(), - batch_size, - hidden_size, - direction, - "initial_cell"); - } - - if (initial_hidden_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.initial_hidden(), - batch_size, - hidden_size, - direction, - "initial_hidden"); - } - - if (node.last_hidden_output_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.last_hidden_state(), - batch_size, - hidden_size, - direction, - "optional_hidden_output"); - } - 
- if (node.last_cell_output_term()) { - lstm_dynamic_inst::check_common_lstm_dynamic_sizes(node.last_cell_state(), - batch_size, - hidden_size, - direction, - "optional_cell_output"); - } -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp b/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp deleted file mode 100644 index b4d98cd28898ff..00000000000000 --- a/src/plugins/intel_gpu/src/graph/lstm_gemm.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "intel_gpu/runtime/error_handler.hpp" -#include "lstm_gemm_inst.h" -#include "primitive_type_base.h" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(lstm_gemm) - -layout lstm_gemm_inst::calc_output_layout(lstm_gemm_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for lstm_gemm_node!"); - auto input_layout = impl_param.get_input_layout(0); - auto weights_layout = impl_param.get_input_layout(1); - - // input{bfyx} = [b: batch, f: sequence, x: input_size, y: 1] - // weights{bfyx} = [b: 1, f: direction, x: 4 * hidden_size, y: input_size ] - // recurrent{bfyx} = [b: 1, f: direction, x: 4 * hidden_size, y: hidden_size ] - // biases{bfyx} = [b: 1, f:1 , x: direction, y: 4 * hidden_size ] - // hidden{bfyx} = [b: batch, f: direction, x: 1 , y: hidden_size ] optional - // tempGEMM{bfyx} = [b: batch, f: direction, x: 4*hidden_size, y: 1] output - auto result = - layout(input_layout.data_type, - input_layout.format, - tensor(input_layout.batch(), weights_layout.feature(), weights_layout.spatial(1), 1)); - return result; -} - -std::string lstm_gemm_inst::to_string(lstm_gemm_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto weights_id = desc->weights; - auto recurrent_id = desc->recurrent; - auto bias_id = desc->bias != "" ? desc->bias : "no bias"; - auto hidden_id = desc->hidden != "" ? 
desc->hidden : "no inital hidden"; - - std::stringstream primitive_description; - - json_composite lstm_gemm_info; - lstm_gemm_info.add("weights id", weights_id); - lstm_gemm_info.add("recurrent id", recurrent_id); - lstm_gemm_info.add("bias id", std::move(bias_id)); - lstm_gemm_info.add("hidden id", hidden_id); - node_info->add("lstm gemm info", lstm_gemm_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - -lstm_gemm_inst::typed_primitive_inst(network& network, lstm_gemm_node const& node) : parent(network, node) { - auto input_layout = node.input().get_output_layout(); - CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), - "input format", - input_layout.format.value, - "expected format", - format::bfyx, - format::fyxb); -} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 73248603fe9058..e52c1505bb2cae 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -463,29 +463,44 @@ event::ptr primitive_inst::realloc_if_needed() { if (_node->is_type()) return ev; + auto& sp = *get_network().get_shape_predictor(); + auto dt_size = ov::element::Type(actual_layout.data_type).bitwidth(); // read_value/assign nodes are supposed to always use variable memory if (auto stateful_prim = dynamic_cast(this)) { std::string variable_id = stateful_prim->variable_id(); auto& variable = get_network().get_variable(variable_id); - GPU_DEBUG_TRACE_DETAIL << "realloc_if_needed: variable " << id() << " set layout" << _impl_params->get_output_layout().to_string() << std::endl; if (_node->is_type()) { // Reuse state memory as output for kv cache if possible // otherwise clear _outputs for the cases when mem was reused previously if (_impl_params->can_be_optimized()) { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: Set kvcache output memory as variable memory " << variable.get_memory()->buffer_ptr() + << " (ptr: " << variable.get_memory()->buffer_ptr() + << ", actual_size: " << variable.get_actual_mem_size()/8 << " bytes" + << ", variable layout " << variable.get_layout().to_short_string() << ")" << std::endl; + _outputs[0] = variable.get_memory(); + // Record the current shape in the shape predictor + auto prealloc_info = sp.predict_preallocation_shape(id(), _impl_params->output_layouts[0].get_shape(), dt_size, true); return ev; } else if (_outputs[0] && variable.get_memory() && get_network().get_engine().is_the_same_buffer(*_outputs[0], *variable.get_memory())) { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: Reset output mem" << std::endl; _outputs[0] = nullptr; - max_output_layout_size = 0; + _max_output_layout_count = 0; + } else { + GPU_DEBUG_TRACE_DETAIL << id() << " : realloc_if_needed: can_be_optimized = false and memories are not being shared" << std::endl; } + } else { + variable.set_layout(_impl_params->output_layouts[0]); + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable (ptr: " << variable.get_memory()->buffer_ptr() + << ", actual_size:" << variable.get_actual_mem_size() << " bytes" + << ", variable layout:" << variable.get_layout().to_short_string() << ")" << std::endl; } - variable.set_layout(actual_layout); - GPU_DEBUG_TRACE_DETAIL << id() << ": use variable memory " << variable.get_memory()->buffer_ptr() - << " (size=" << variable.get_memory()->size() << ")" << std::endl; // For nodes that can be optimized, variable memory is used as output memory // so there is no need for output memory reallocation -
if (can_be_optimized()) { + _max_output_layout_count = variable.get_actual_mem_size() / (dt_size / 8); return ev; + } } // Update output layout with respect to FC's fake alignment @@ -507,27 +522,36 @@ event::ptr primitive_inst::realloc_if_needed() { } } - if (_node->is_type() || _node->is_type()) { - // For the nodes which can be optimized at runtime, input memory is used as output memory - // So there is no need to reallocate output memory - if (can_be_optimized()) + // Clear out memory if it was previously reused, but now primitive can't be optimized + if (_node->is_type() || _node->is_type() || _node->is_type() || _node->is_type()) { + if (can_be_optimized()) { + _max_output_layout_count = _deps[0].first->_max_output_layout_count; return ev; - // Clear out memory if if was previously reused, but now primitive can't be optimized - if (!can_be_optimized() && _outputs[0] && dep_memory_ptr(0) - && _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) { + } else if (_outputs[0] && dep_memory_ptr(0) && + _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) { + // Clear out memory if it was previously reused, but now primitive can't be optimized _outputs[0] = nullptr; - max_output_layout_size = 0; + _max_output_layout_count = 0; } } // update layout to ensure that it respects paddings for correct allocation size if (_node_output_layout.data_padding.get_dynamic_pad_dims() != tensor(0)) { - const auto current_buf_size = updated_layout.get_buffer_size().sizes(); - updated_layout = layout(ov::Shape(current_buf_size.begin(), current_buf_size.end()), updated_layout.data_type, updated_layout.format); + size_t rank = updated_layout.get_shape().size(); + auto current_buf_shape = updated_layout.get_buffer_size().get_partial_shape(rank, std::min(static_cast(4), rank)); + updated_layout = layout(current_buf_shape, updated_layout.data_type, updated_layout.format); } - bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= max_output_layout_size; + // If the allocated memory is too large, reclaim it. + if (updated_layout.count() * 10 < _max_output_layout_count) { + GPU_DEBUG_TRACE_DETAIL << id() << ": Updated output size " << updated_layout.count() + << " is much smaller than current memory size! " << _max_output_layout_count + << ". Reset memory" << std::endl; + _max_output_layout_count = 0; + } + + bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= _max_output_layout_count; // Handle runtime dynamic concat optimization if (_node->is_type() && can_be_optimized() && allocation_done_by_other) { allocation_done_by_other = false; @@ -535,21 +559,26 @@ } auto current_shape = updated_layout.get_shape(); - auto& sp = *get_network().get_shape_predictor(); - auto dt_size = ov::element::Type(updated_layout.data_type).bitwidth(); - auto prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer); + std::pair prealloc_info; + int32_t tmp_prealloc_count = _node->is_type() ?
kv_cache_inst::get_prealloc_iter_num() : -1; + GPU_DEBUG_IF(debug_config->mem_preallocation_params.is_initialized) { + // If debug config is set, it takes precedence + tmp_prealloc_count = -1; + } + prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer, tmp_prealloc_count); + if (prealloc_info.first && sp.can_preallocate(ov::shape_size(prealloc_info.second) * dt_size)) { auto new_layout = updated_layout; new_layout.set_partial_shape(prealloc_info.second); updated_params.output_layouts[0] = new_layout; } - if (updated_params.output_layouts[0].count() < updated_layout.count()) + if (updated_params.output_layouts[0].get_buffer_size().count() < updated_layout.get_buffer_size().count()) updated_params.output_layouts[0] = updated_layout; if (can_reuse_buffer) { GPU_DEBUG_TRACE_DETAIL << id() << ": reuse previously allocated output buffer - " - << actual_layout.count() << "/" << max_output_layout_size + << actual_layout.count() << "/" << _max_output_layout_count << std::endl; if (_outputs[0]->get_layout() != actual_layout) { _outputs[0] = _network.get_engine().reinterpret_buffer(*_outputs[0], actual_layout); @@ -559,12 +588,65 @@ } } else { GPU_DEBUG_TRACE_DETAIL << id() << ": realloc output memory. " - << " Current buffer_size=" << max_output_layout_size + << " Current buffer_size=" << _max_output_layout_count + << " Requested buffer_size=" << updated_layout.count() << std::endl; _outputs = allocate_outputs(&updated_params, need_reset_output_memory(), true); // TODO : need to handle multiple outputs - max_output_layout_size = updated_params.output_layouts[0].count(); + _max_output_layout_count = updated_params.output_layouts[0].get_buffer_size().count(); } + // Set variable memory same as output memory + if (_node->is_type()) { + auto desc = _node->as().get_primitive(); + auto& variable = get_network().get_variable(desc->variable_info.variable_id); + auto present_layout = _impl_params->output_layouts[0]; + const auto& sequence_axis = desc->concat_axis; + auto sequence_axis_legacy = + kv_cache_inst::get_sequence_axis_legacy(sequence_axis, present_layout.get_partial_shape().size()); + GPU_DEBUG_TRACE_DETAIL << id() << " is kv_cache => set the variable with newly allocated output memory" + << std::endl; + bool axis_is_outer_most = true; + for (int64_t dim = 0; dim < sequence_axis; ++dim) { + if (present_layout.get_shape()[dim] > 1) { + axis_is_outer_most = false; + break; + } + } + if (present_layout.data_padding.get_dynamic_pad_dims().sizes()[sequence_axis_legacy] == 1) { + // Apply padding to the variable so it can be optimized in the next iteration + auto max_pad = kv_cache_inst::get_max_pad(present_layout, + updated_params.output_layouts[0].get_buffer_size().count(), + sequence_axis_legacy, + "present_layout"); + if (max_pad > 0) { + kv_cache_inst::update_pad(present_layout, max_pad, sequence_axis_legacy); + if (!axis_is_outer_most) { + GPU_DEBUG_TRACE_DETAIL << id() << ": Update impl with new output padding" << std::endl; + set_shape_change(); + _impl_params->output_layouts[0] = present_layout; + update_impl(); + } + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable " << variable.get_name() + << "'s memory with allocated kv cache output: " + << present_layout.to_short_string() << " is_set = " << variable.is_set() + << std::endl; + variable.set_memory(_outputs[0], present_layout); + _impl_params->_can_be_optimized = true; + // No copy is needed; it can still be optimized + GPU_DEBUG_TRACE_DETAIL << id() <<
": Set can_be_optimized = true " << std::endl; + } else { + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable " << variable.get_name() + << "'s layout with allocated kv cache output: " << present_layout.to_short_string() + << " (is_set = " << variable.is_set() << ") " << std::endl; + variable.set_layout(present_layout); + } + } else { + GPU_DEBUG_TRACE_DETAIL << id() << ": Update variable " << variable.get_name() + << "'s layout with allocated kv cache output: " << present_layout.to_short_string() + << " (is_set = " << variable.is_set() << ") " << std::endl; + variable.set_layout(present_layout); + } + } + _mem_allocated = true; // intermediate memory allocation is required for primitives consisting of multiple kernels in dynamic case { @@ -601,10 +683,57 @@ bool primitive_inst::use_async_compilation() { GPU_DEBUG_IF(debug_config->disable_async_compilation) { return false; } - return (_node->is_type() || - _node->is_type() || - _node->is_type() || - _node->is_type()); + + return (_node->is_type() || _node->is_type() || _node->is_type() || + (_node->is_type() && _node->get_selected_impl() && + _node->get_selected_impl()->get_kernel_name().find("softmax_gpu_ref") != std::string::npos)); +} + +void primitive_inst::fill_shape_info_data(const layout& runtime_layout, const layout& node_layout, int32_t* shape_info_ptr, size_t& offset) { + if (node_layout.is_static()) { + GPU_DEBUG_TRACE_DETAIL << "tensor is static. Skipping" << std::endl; + return; + } + auto pshape = runtime_layout.get_partial_shape(); + auto shape_with_max_rank = layout::transform(pshape, + format::get_default_format(pshape.size()), + format::get_default_format(layout::max_rank())).to_shape(); + for (size_t j = 0; j < shape_with_max_rank.size(); ++j) { + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << shape_with_max_rank[j] << std::endl; + shape_info_ptr[offset++] = static_cast(shape_with_max_rank[j]); + } + auto dynamic_pad = node_layout.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); + auto data_padding = runtime_layout.data_padding; + for (size_t j = 0; j < shape_with_max_rank.size(); ++j) { + if (dynamic_pad[j] == 1) { + auto lower_pads = data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] + << "(pad_before for " << j << "-th dim)" << std::endl; + shape_info_ptr[offset++] = lower_pads[j]; // pad_before + auto upper_pads = data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); + GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] + << "(pad_after for " << j << "-th dim)" << std::endl; + shape_info_ptr[offset++] = upper_pads[j]; // pad_after + } + } +} + +void primitive_inst::update_shape_info_tensor(const kernel_impl_params& params) { + mem_lock lock(_shape_info_memory, _network.get_stream()); + auto shape_info_ptr = lock.data(); + size_t offset = 0; + for (size_t i = 0; i < _node->get_dependencies().size(); i++) { + GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for input[" << i << "]" << std::endl; + const auto& node_in_lay = _node->get_dependency(i).get_output_layout(); + const auto& runtime_in_lay = params.input_layouts[i]; + fill_shape_info_data(runtime_in_lay, node_in_lay, shape_info_ptr, offset); + } + for (size_t i = 0; i < _node->get_output_layouts().size(); i++) { + GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for output[" << i << "]" << std::endl; + const auto& node_out_lay = 
bool primitive_inst::update_impl() { @@ -612,75 +741,6 @@ bool primitive_inst::update_impl() { GPU_DEBUG_PROFILED_STAGE(instrumentation::pipeline_stage::update_implementation); auto prev_impl_str = _impl != nullptr ? _impl->get_kernel_name() : "nullptr"; - auto update_shape_info = [this, prev_impl_str](const kernel_impl_params& params) { - mem_lock<int32_t> lock(_shape_info_memory, _network.get_stream()); - size_t offset = 0; - for (size_t i = 0; i < _node->get_dependencies().size(); i++) { - auto node_in_lay = _node->get_dependency(i).get_output_layout(); - if (node_in_lay.is_dynamic()) { - auto pshape = params.get_input_layout(i).get_partial_shape(); - GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for input[" << i << "]" << std::endl; - auto input_shape_max_rank = layout::transform(pshape, - format::get_default_format(pshape.size()), - format::get_default_format(layout::max_rank())).to_shape(); - for (size_t j = 0; j < input_shape_max_rank.size(); ++j) { - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << input_shape_max_rank[j] << std::endl; - lock[offset++] = static_cast<int32_t>(input_shape_max_rank[j]); - } - auto is_dynamic_pad = node_in_lay.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); - auto data_padding = params.input_layouts[i].data_padding; - for (size_t j = 0; j < input_shape_max_rank.size(); ++j) { - if (is_dynamic_pad[j] == 1) { - auto lower_pads = - data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] - << "(pad_before for input[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = lower_pads[j]; // pad_before - auto upper_pads = - data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] - << "(pad_after for input[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = upper_pads[j]; // pad_after - } - } - } - } - for (size_t i = 0; i < _node->get_output_layouts().size(); i++) { - auto node_out_lay = _node->get_output_layout(i); - if (node_out_lay.is_dynamic()) { - GPU_DEBUG_TRACE_DETAIL << id() << " : update shape_info for output[" << i << "]" << std::endl; - auto pshape = params.get_output_layout(i).get_partial_shape(); - auto output_shape_max_rank = layout::transform(pshape, - format::get_default_format(pshape.size()), - format::get_default_format(layout::max_rank())) - .to_shape(); - for (size_t j = 0; j < output_shape_max_rank.size(); j++) { - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << output_shape_max_rank[j] << std::endl; - lock[offset++] = static_cast<int32_t>(output_shape_max_rank[j]); - } - auto is_dynamic_pad = node_out_lay.data_padding.get_dynamic_pad_dims().sizes(format::get_default_format(layout::max_rank())); - auto data_padding = params.output_layouts[i].data_padding; - for (size_t j = 0; j < output_shape_max_rank.size(); j++) { - if (is_dynamic_pad[j] == 1) { - auto lower_pads = data_padding.lower_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << lower_pads[j] - << "(pad_before for output[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = lower_pads[j]; - auto upper_pads = 
data_padding.upper_size().sizes(format::get_default_format(layout::max_rank())); - GPU_DEBUG_TRACE_DETAIL << " shape_info[" << offset << "] = " << upper_pads[j] - << "(pad_after for output[" << i << "] " << j << "-th dim)" << std::endl; - lock[offset++] = upper_pads[j]; // pad_after - } - } - } - } - std::stringstream s; - s << "shapes: "; - for (size_t i = 0; i < offset; i++) - s << lock[i] << " "; - GPU_DEBUG_TRACE_DETAIL << id() << ": update dynamic impl " << prev_impl_str << " to new shape: " << s.str() << std::endl; - }; - if (_impl != nullptr && (_impl->is_cpu() || can_be_optimized())) { // Return false if shape not changed, otherwise return true to trigger realloc_if_needed, but do not change impl itself return shape_changed(); @@ -754,7 +814,7 @@ bool primitive_inst::update_impl() { _impl = std::move(_dynamic_impl); auto new_impl_params = _impl->canonicalize_shapes(*_impl_params); _impl->update_dispatch_data(new_impl_params); - update_shape_info(new_impl_params); + update_shape_info_tensor(new_impl_params); } } else { _impl = _node->type()->choose_impl(*_node, updated_params_no_dyn_pad); @@ -776,6 +836,37 @@ return true; } +void primitive_inst::update_paddings() { + auto reset_pad = [](kernel_impl_params& params, const program_node* node) { + params.output_layouts[0].data_padding = node->get_output_layout(0).data_padding; + }; + if (_node->is_type<read_value>()) { + auto& variable = get_network().get_variable(_node->as<read_value>().get_primitive()->variable_id); + // Reset paddings for read_value and users with dynamic pad when variable is reset + // to avoid wrong pad used for some nodes due to pad propagation logic (which uses previous iter pad values) + if (!variable.is_set()) { + primitive_inst* inst = this; + while (inst) { + reset_pad(*inst->_impl_params, inst->_node); + auto& users = inst->_node->get_users(); + if (users.size() == 1 && users.front()->get_output_layout(0).data_padding.get_dynamic_pad_dims() != tensor(0)) { + inst = inst->get_user_insts().front(); + } else { + inst = nullptr; + } + } + } + return; + } + if (_node->is_type<gather>() && _impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) { + if (can_be_optimized()) + _impl_params->output_layouts[0] = _impl_params->input_layouts[0]; + else + reset_pad(*_impl_params, _node); + return; + } +} + void primitive_inst::do_runtime_skip_reorder() { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_skip_reorder: " + id())); GPU_DEBUG_GET_INSTANCE(debug_config); @@ -836,91 +927,41 @@ void primitive_inst::do_runtime_in_place_kv_cache() { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_in_place_kv_cache: " + id())); - auto reset_pad = [](kernel_impl_params& params, const program_node* node) { - params.output_layouts[0].data_padding = node->get_output_layout(0).data_padding; - }; - if (_node->is_type<read_value>()) { - auto& variable = get_network().get_variable(_node->as<read_value>().get_primitive()->variable_id); - // Reset paddings for read_value and users with dynamic pad when variable is reset - to avoid wrong pad used for some nodes due to pad propagation logic (which uses previous iter pad values) - if (!variable.is_set()) { - primitive_inst* inst = this; - while (inst) { - reset_pad(*inst->_impl_params, inst->_node); - auto& users = inst->_node->get_users(); - if (users.size() == 1 && 
users.front()->get_output_layout(0).data_padding.get_dynamic_pad_dims() != tensor(0)) { - inst = inst->get_user_insts().front(); - } else { - inst = nullptr; - } - } - } - return; - } - - if (_node->is_type<gather>() && _impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) { - if (can_be_optimized()) - _impl_params->output_layouts[0] = _impl_params->input_layouts[0]; - else - reset_pad(*_impl_params, _node); - return; - } - if (!_node->is_type<kv_cache>()) return; _impl_params->_can_be_optimized = false; + if (_impl_params->get_input_layout(0).count() == 0) { return; } - auto desc = _node->as<kv_cache>().get_primitive(); auto& past_layout = _impl_params->input_layouts[0]; auto& present_layout = _impl_params->output_layouts[0]; const auto& sequence_axis = desc->concat_axis; - auto sequence_axis_legacy = sequence_axis; - if (sequence_axis_legacy >= 2) { - auto spatial_axis = sequence_axis_legacy - 2; - // Default and minimum number of dimensions is 4 - auto spatial_size = std::max<int64_t>(past_layout.get_partial_shape().size(), 4) - 2; - sequence_axis_legacy = spatial_size - spatial_axis - 1 + 2; - } - + auto sequence_axis_legacy = kv_cache_inst::get_sequence_axis_legacy(sequence_axis, past_layout.get_partial_shape().size()); if (present_layout.data_padding.get_dynamic_pad_dims().sizes()[sequence_axis_legacy] != 1) return; - const size_t total_elements = past_layout.count(); - const int64_t concat_axis_size = past_layout.get_partial_shape()[sequence_axis].get_length(); - const int64_t sequence_element_size = total_elements / concat_axis_size; - - const int64_t max_sequence_elements = _deps[0].first->max_output_layout_size / sequence_element_size; - const int64_t max_pad = std::max<int64_t>(max_sequence_elements - concat_axis_size, 0); + GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " initial present_layout : " << present_layout.to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " initial past_layout : " << past_layout.to_string() << std::endl; + auto max_pad = kv_cache_inst::get_max_pad(past_layout, _deps[0].first->_max_output_layout_count, sequence_axis_legacy, "past_layout"); if (max_pad > 0) { - auto update_pad = [&](layout& l, int64_t pad) { - const auto& dyn_pad_dims = l.data_padding.get_dynamic_pad_dims(); - const auto& lower_padd = l.data_padding.lower_size().sizes(); - auto upper_padd = l.data_padding.upper_size().sizes(); - upper_padd[sequence_axis_legacy] = pad; - l.data_padding = padding(lower_padd, upper_padd, 0.f, dyn_pad_dims); - }; - - update_pad(present_layout, max_pad - 1); + kv_cache_inst::update_pad(present_layout, max_pad - 1, sequence_axis_legacy); + GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated present_layout's pad : " << present_layout.to_string() << std::endl; auto& variable = get_network().get_variable(desc->variable_info.variable_id); - GPU_DEBUG_TRACE_DETAIL << "do_runtime_in_place_kv_cache set_layout: " << present_layout.to_string() << " is_set = " << variable.is_set() << std::endl; variable.set_layout(present_layout); + GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated variable with present_layout " << variable.get_layout().to_string() << " is_set = " << variable.is_set() << std::endl; if (past_layout.data_padding.upper_size().sizes()[sequence_axis_legacy] > 0 && variable.is_set()) { - update_pad(past_layout, max_pad); + kv_cache_inst::update_pad(past_layout, max_pad, sequence_axis_legacy); _impl_params->_can_be_optimized = true; + GPU_DEBUG_TRACE_DETAIL << "[do_runtime_in_place_kv_cache] " << id() << " Updated past layout's pad : " << past_layout.to_string() << std::endl; } - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] concat_axis_size = " << concat_axis_size << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] sequence_element_size = " << sequence_element_size << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] max_sequence_elements = " << max_sequence_elements << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] max_pad = " << max_pad << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] can be optimized: " << _impl_params->_can_be_optimized << std::endl; - GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << present_layout.to_string() << std::endl; } + GPU_DEBUG_TRACE_DETAIL << "[do runtime kv_cache opt] " << id() << " can be optimized: " << _impl_params->_can_be_optimized << std::endl; }
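The kv_cache_inst::get_sequence_axis_legacy call above replaces the inline axis remapping this hunk deletes. As a standalone sketch (the arithmetic is taken directly from the removed code; the signature is assumed):

// Reconstructed sketch: cldnn's legacy tensor format keeps spatial dims
// innermost in reverse order and pads ranks to at least 4, so an axis >= 2
// must be flipped within the spatial block.
static int64_t get_sequence_axis_legacy(int64_t sequence_axis, size_t rank) {
    if (sequence_axis < 2)
        return sequence_axis;
    const int64_t spatial_axis = sequence_axis - 2;
    // Default and minimum number of dimensions is 4
    const int64_t spatial_size = std::max<int64_t>(static_cast<int64_t>(rank), 4) - 2;
    return spatial_size - spatial_axis - 1 + 2;
}

For a 4-D [batch, heads, seq_len, head_size] cache with sequence_axis = 2 this yields legacy axis 3; with sequence_axis = 3 it yields 2.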
void primitive_inst::do_runtime_skip_gather() { @@ -1113,7 +1154,6 @@ event::ptr primitive_inst::execute(const std::vector<event::ptr>& events) { OPENVINO_ASSERT(_node != nullptr, "[GPU] Invalid primitive_inst object for dynamic shapes case: program_node can't be null"); update_shape(); - do_runtime_in_place_kv_cache(); bool can_skip_execution = false; if (_impl_params->output_layouts[0].count() == 0) { @@ -1146,6 +1186,8 @@ // if the user is can_be_optimized and output node then current nodes' output should be allocated to host. do_runtime_skip_reorder(); do_runtime_skip_gather(); + update_paddings(); + do_runtime_in_place_kv_cache(); do_runtime_skip_permute(); if (!is_valid_fusion()) { @@ -1412,7 +1454,7 @@ primitive_inst::primitive_inst(network& network, program_node const& node, bool } _impl_params->strm = _network.get_stream_ptr(); if (_outputs[0]) - max_output_layout_size = _outputs[0]->get_layout().get_tensor().count(); + _max_output_layout_count = _outputs[0]->get_layout().get_tensor().count(); } memory::ptr primitive_inst::allocate_internal_buffer(size_t idx, bool reset) { diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 43d0efc3dce4cd..28edf4774e1833 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -45,9 +45,6 @@ #include "shuffle_channels_inst.h" #include "arg_max_min_inst.h" #include "dft_inst.h" -#include "lstm_inst.h" -#include "lstm_elt_inst.h" -#include "lstm_gemm_inst.h" #include "multiclass_nms_inst.h" #include "mutable_data_inst.h" #include "pooling_inst.h" @@ -56,7 +53,6 @@ #include "prior_box_inst.h" #include "proposal_inst.h" #include "reorder_inst.h" -#include "split_inst.h" #include "mvn_inst.h" #include "gemm_inst.h" #include "adaptive_pooling_inst.h" @@ -423,7 +419,6 @@ void program::prepare_nodes(topology const& topology) { for (const auto& prim : topo_map) { get_or_create(prim.second); } - add_split_outputs(); for (const auto& node : nodes_map) { auto node_ptr = node.second.get(); if (node_ptr == nullptr) @@ -534,8 +529,6 @@ void program::pre_optimize_graph(bool is_internal) { processing_order.calculate_BFS_processing_order(); // this method makes sense only for OOOQ (out of order execution queue) - apply_opt_pass(); - bool output_size_handling_enabled = analyze_output_size_handling_need(); for (auto& node : processing_order) { if (!node->is_type<data>()) @@ -582,8 +575,6 @@ apply_opt_pass(); } - 
apply_opt_pass(); - apply_opt_pass(); apply_opt_pass(output_size_handling_enabled); @@ -598,10 +589,6 @@ void program::pre_optimize_graph(bool is_internal) { // check if there exists some layout incompatibilities and add an reorder node if required apply_opt_pass(); - // Modify fused post operation to resolve overflow of fp16 output by adding clamp activation - // Currently, 'gemm-softmax' case is applied for clamping - apply_opt_pass(); - // add optimization attributes for onednn primitives apply_opt_pass(); @@ -722,30 +709,6 @@ void program::transfer_memory_to_device() { } } -void program::add_split_outputs() { - auto itr = nodes_map.begin(); - while (itr != nodes_map.end()) { - auto node_itr = itr++; - auto& node = (*node_itr).second; - - if (node->is_type<split>()) { - auto split_prim = node->as<split>().typed_desc(); - input_info input(split_prim->input[0]); - auto split_num = split_prim->output_offsets.size(); - - // create crop for each split output provided - for (decltype(split_num) i = 0; i < split_num; i++) { - primitive_id output_id = node->id() + ":" + split_prim->output_ids[i]; - - // create dummy crop primitive and add it to nodes map - auto crop_prim = - std::make_shared<crop>(output_id, input, tensor{1, 1, 1, 1}, split_prim->output_offsets[i]); - get_or_create(crop_prim); - } - } - } -} - program::nodes_ordering& program::get_processing_order() { return processing_order; } const program::nodes_ordering& program::get_processing_order() const { return processing_order; } diff --git a/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp b/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp deleted file mode 100644 index 9b38b477f3d947..00000000000000 --- a/src/plugins/intel_gpu/src/graph/pyramid_roi_align.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "pyramid_roi_align_inst.h" -#include "primitive_type_base.h" -#include "json_object.h" -#include <string> - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(pyramid_roi_align) - -layout pyramid_roi_align_inst::calc_output_layout(pyramid_roi_align_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast<bool>(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for " - "pyramid_roi_align node!"); - - auto desc = impl_param.typed_desc<pyramid_roi_align>(); - - auto boxes_layout = impl_param.get_input_layout(0); - auto P2_layout = impl_param.get_input_layout(1); - - int32_t output_b = boxes_layout.batch(); - int32_t output_f = P2_layout.feature(); - - int32_t output_x = desc->output_size; - int32_t output_y = desc->output_size; - - return layout{P2_layout.data_type, P2_layout.format, {output_b, output_f, output_x, output_y}}; -} - -std::string pyramid_roi_align_inst::to_string(pyramid_roi_align_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - std::stringstream primitive_description; - json_composite pyramid_roi_align_info; - node_info->add("pyramid_roi_align_info", std::move(pyramid_roi_align_info)); - node_info->dump(primitive_description); - return primitive_description.str(); -} - -pyramid_roi_align_inst::typed_primitive_inst(network& network, pyramid_roi_align_node const& node) - : parent(network, node) {} -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/read_value.cpp b/src/plugins/intel_gpu/src/graph/read_value.cpp index ec80ea5ef707cc..bf6e730e8a808b 100644 --- a/src/plugins/intel_gpu/src/graph/read_value.cpp +++ 
b/src/plugins/intel_gpu/src/graph/read_value.cpp @@ -40,6 +40,10 @@ void read_value_inst::update_output_memory() { return; const auto& variable = get_network().get_variable(variable_id()); + GPU_DEBUG_TRACE_DETAIL << id() << " Update output memory with variable " << variable_id() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - ptr : " << variable.get_memory()->buffer_ptr() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - layout " << variable.get_layout().to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " - actual_size " << variable.get_actual_mem_size() << " bytes" << std::endl; set_output_memory(variable.get_memory(), false, 0); } } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/split.cpp b/src/plugins/intel_gpu/src/graph/split.cpp deleted file mode 100644 index 5edf7eb0135b04..00000000000000 --- a/src/plugins/intel_gpu/src/graph/split.cpp +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "split_inst.h" -#include "primitive_type_base.h" -#include "intel_gpu/runtime/memory.hpp" -#include "intel_gpu/runtime/error_handler.hpp" -#include "json_object.h" -#include - -namespace cldnn { -GPU_DEFINE_PRIMITIVE_TYPE_ID(split) - -layout split_inst::calc_output_layout(split_node const& node, kernel_impl_params const& impl_param) { - assert(static_cast(impl_param.desc->output_data_types[0]) == false && - "Output data type forcing is not supported for split_node!"); - auto desc = impl_param.typed_desc(); - auto output_ids = desc->output_ids; - auto output_offsets = desc->output_offsets; - auto param_num = output_ids.size(); - auto input_sizes = impl_param.get_non_padded_input_layout().get_tensor(); - tensor null_tensor { 0, 0, 0, 0 }; - - // check if output_ids count equals output_offsets count - CLDNN_ERROR_NOT_EQUAL(desc->id, - "Output_ids count", - param_num, - "output_offsets count", - output_offsets.size(), - "Output_ids count/ output_offsets count mismatch"); - - for (decltype(param_num) i = 0; i < param_num; i++) { - if (i != param_num - 1) - // check if output offset sizes is less than next output offset sizes - CLDNN_ERROR_TENSOR_SIZES_GREATER_THAN(desc->id, - "output_offsets", - output_offsets[i], - "next output_offsets", - output_offsets[i + 1], - "Output_offsets tensor/ next input output_offsets tensor mismatch"); - else - // check if output offset sizes matches output offsets sizes - CLDNN_ERROR_TENSOR_SIZES_GREATER_THAN(desc->id, - "Output_offsets", - output_offsets[i], - "input sizes", - input_sizes, - "Output_offsets tensor/ input tensor mismatch"); - - // check if offsets do not extend input sizes and if match the output sizes - CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(desc->id, - "Output_offsets", - output_offsets[i], - "0 value", - null_tensor, - "Invalid output_offsets: dims cannot be less than 0"); - } - - return impl_param.get_non_padded_input_layout(); -} - -std::string split_inst::to_string(split_node const& node) { - auto desc = node.get_primitive(); - auto node_info = node.desc_to_json(); - auto output_ids = desc->output_ids; - auto output_offsets = desc->output_offsets; - auto& input = node.input(); - - std::stringstream primitive_description; - - json_composite split_info; - split_info.add("input id", input.id()); - split_info.add("output ids count", output_ids.size()); - split_info.add("offset count", output_offsets.size()); - - node_info->add("split info", split_info); - node_info->dump(primitive_description); - - return primitive_description.str(); -} - 
-split_inst::typed_primitive_inst(network& network, split_node const& node) : parent(network, node) { - CLDNN_ERROR_MESSAGE(node.id(), "Split primitive instance should not be created!"); -} - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl deleted file mode 100644 index 00952a89514de9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_bfyx_opt.cl +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_block_read.cl" -#include "include/batch_headers/sub_group_block_write.cl" -#include "include/batch_headers/fetch_data.cl" -#include "include/unit_type.cl" -#include "include/sub_group.cl" - -#if FP16_UNIT_USED - #define MAD_1X8(_result_block, _input_value, _weights_block) \ - { \ - _result_block.s0 = fma(_input_value, _weights_block.s0, _result_block.s0); \ - _result_block.s1 = fma(_input_value, _weights_block.s1, _result_block.s1); \ - _result_block.s2 = fma(_input_value, _weights_block.s2, _result_block.s2); \ - _result_block.s3 = fma(_input_value, _weights_block.s3, _result_block.s3); \ - _result_block.s4 = fma(_input_value, _weights_block.s4, _result_block.s4); \ - _result_block.s5 = fma(_input_value, _weights_block.s5, _result_block.s5); \ - _result_block.s6 = fma(_input_value, _weights_block.s6, _result_block.s6); \ - _result_block.s7 = fma(_input_value, _weights_block.s7, _result_block.s7); \ - } -#else - #define MAD_1X8(_result_block, _input_value, _weights_block) \ - { \ - _result_block.s0 = mad(_input_value, _weights_block.s0, _result_block.s0); \ - _result_block.s1 = mad(_input_value, _weights_block.s1, _result_block.s1); \ - _result_block.s2 = mad(_input_value, _weights_block.s2, _result_block.s2); \ - _result_block.s3 = mad(_input_value, _weights_block.s3, _result_block.s3); \ - _result_block.s4 = mad(_input_value, _weights_block.s4, _result_block.s4); \ - _result_block.s5 = mad(_input_value, _weights_block.s5, _result_block.s5); \ - _result_block.s6 = mad(_input_value, _weights_block.s6, _result_block.s6); \ - _result_block.s7 = mad(_input_value, _weights_block.s7, _result_block.s7); \ - } -#endif - -#define INC_OFFSET(_offset, _value) _offset += _value -#define SIMD_SIZE 8 - -REQD_SUB_GROUP_SIZE(SIMD_SIZE) -KERNEL(lstm_dynamic_input_bfyx_opt)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint batch = (uint)get_global_id(1) % INPUT0_BATCH_NUM; - const uint dir = (uint)get_global_id(1) / INPUT0_BATCH_NUM; - const uint timestep = get_global_id(2); - if(timestep > (uint)dyn_lengths[batch]) - return; - // which general local work item within work group we have - const uint local_work_item_id = get_local_id(0); - // which id in SUBGROUP we have (0..7) - const uint sub_group_local_id = get_sub_group_local_id(); - // which SUBGROUP we have - const uint sub_group_id = local_work_item_id / SIMD_SIZE;//get_sub_group_id(); - const uint dir_sub_group_id = sub_group_id % SIMD_SIZE; - //which workgroup we have <0,1> - const uint wg_id = get_group_id(0); - const uint wg_offset = wg_id * (uint)get_local_size(0) * SIMD_SIZE; - //Subgroups have region of calcuations (ROC) within each local work item calculate 
simd_size values across y spatial. - //i.e sub_group_id = 1 have ROC, which starts at 64th y'th position - const uint sub_group_offset = SIMD_SIZE * 8; - const uint weights_single_dir_size = WEIGHTS_SIZE_X * WEIGHTS_SIZE_Y; - const uint dir_offset_for_weights = dir * weights_single_dir_size; - uint calcuation_offset = dir_offset_for_weights + wg_offset + dir_sub_group_id * sub_group_offset; - uint input_offset = GET_DATA_INDEX(INPUT0, batch, timestep, dir, sub_group_local_id); - const uint output_offset = GET_DATA_INDEX(OUTPUT, batch, timestep, dir, wg_offset + dir_sub_group_id * sub_group_offset); - -#if BIAS_TERM - //preload output with biases - const uint bias_calcuation_offset = dir * BIAS_SIZE_X + wg_offset + dir_sub_group_id * sub_group_offset; - UNIT_TYPE8 dot_prod = UNIT_BLOCK_READ8(biases, bias_calcuation_offset); -#else - UNIT_TYPE8 dot_prod = UNIT_VAL_ZERO; -#endif - - for(uint x = 0; x < INPUT0_SIZE_X / SIMD_SIZE; ++x) - { - UNIT_TYPE8 BLOCK_W0 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W1 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W2 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W3 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W4 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W5 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W6 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - UNIT_TYPE8 BLOCK_W7 = UNIT_BLOCK_READ8(weights, calcuation_offset); INC_OFFSET(calcuation_offset, WEIGHTS_SIZE_Y); - - UNIT_TYPE input_value = input[input_offset]; - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 0), BLOCK_W0); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 1), BLOCK_W1); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 2), BLOCK_W2); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 3), BLOCK_W3); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 4), BLOCK_W4); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 5), BLOCK_W5); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 6), BLOCK_W6); - MAD_1X8(dot_prod, _sub_group_shuffle(input_value, 7), BLOCK_W7); - - input_offset += SIMD_SIZE; - } - - UNIT_BLOCK_WRITE8(output, output_offset, dot_prod); -} - -#undef SIMD_SIZE -#undef INC_OFFSET -#undef MAD_1X8 -#undef OPT diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl deleted file mode 100644 index 7985508b4e3b7c..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_input_ref.cl +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" -#include "include/batch_headers/fetch_weights.cl" -#include "include/acc_type.cl" - -KERNEL(lstm_dynamic_input_ref)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint y = get_global_id(0); - const uint batch = (uint)get_global_id(1) % 
INPUT0_BATCH_NUM; - const uint dir = (uint)get_global_id(1) / INPUT0_BATCH_NUM; - const uint timestep = get_global_id(2); - - if(timestep > (uint)dyn_lengths[batch]) - return; - - ACCUMULATOR_TYPE dot_prod = 0; - for(uint x = 0; x < INPUT0_SIZE_X; ++x ) - { - const uint input_idx = GET_DATA_INDEX(INPUT0, batch, timestep, dir, x); - const uint weights_idx = GET_FILTER_INDEX(WEIGHTS, 0, 0, dir, y, x); - dot_prod += (ACCUMULATOR_TYPE)(input[input_idx] * weights[weights_idx]); - } - -#if BIAS_TERM - dot_prod += (ACCUMULATOR_TYPE)biases[GET_DATA_INDEX(BIAS, 0, 0, dir, y)]; -#endif - - output[GET_DATA_INDEX(OUTPUT, batch, timestep, dir, y)] = (OUTPUT_TYPE)dot_prod; -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl deleted file mode 100644 index 7beec1f77dec99..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_dynamic_timeloop_ref.cl +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#define ACTIVATION_LOGISTIC(input) (UNIT_VAL_ONE/(UNIT_VAL_ONE + exp(-input))) -#define ACTIVATION_HYPERBOLIC_TAN(input) (tanh(input)) - -KERNEL(lstm_dynamic_timeloop_ref)( - const __global INPUT0_TYPE* input, - const __global DYN_LENGTH_TYPE* dyn_lengths, - __global OUTPUT_TYPE* output, - const __global RECURRENT_TYPE* recurrent -#if INIT_HIDDEN_TERM - , const __global INIT_HIDDEN_TYPE* hidden -#endif -#if INIT_CELL_TERM - , const __global INIT_CELL_TYPE* cell -#endif -#if LAST_HIDDEN_TERM - , __global LAST_HIDDEN_TYPE* last_hidden -#endif -#if LAST_CELL_TERM - , __global LAST_CELL_TYPE* last_cell -#endif - ) -{ - const uint y_offset = (uint)get_global_id(0) * ELEMENTS_TO_COUNT; - const uint b = get_global_id(1); - const uint dir = get_global_id(2); - uint unroll_timesteps = dyn_lengths[b]; - - //if hidden_size is bigger then 256, then ELEMENTS_TO_COUNT will be hidden_size/256 - ACCUMULATOR_TYPE it[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE ot[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE zt[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE ft[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE eltiwse_vals[ELEMENTS_TO_COUNT]; - ACCUMULATOR_TYPE cell_vals[ELEMENTS_TO_COUNT]; - OUTPUT_TYPE output_value = UNIT_VAL_ZERO; - #if INIT_HIDDEN_TERM - bool use_hidden = true; - #else - bool use_hidden = false; - #endif //hidden_term - - #if INIT_CELL_TERM - bool use_cell = true; - #else - bool use_cell = false; - #endif //cell_term - - for(int timestep = 0; timestep < MAX_SEQUENCE_LENGTH; timestep++) - { - //not all workitems will do computations - if(timestep < unroll_timesteps) - { - for(uint element_idx = 0; element_idx < ELEMENTS_TO_COUNT; element_idx++) - { - const uint y = y_offset + element_idx; - // [f, i, z, o] - ft[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_F)]; - it[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_I)]; - zt[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_Z)]; - ot[element_idx] = input[GET_DATA_INDEX(INPUT0, b, timestep, dir, y + GEMM_OFFSET_O)]; - if(use_hidden) - { - for(uint x = 0; x < OUTPUT_SIZE_X; ++x) - { - if(timestep == 0) - { - #if INIT_HIDDEN_TERM - uint hidden_idx = GET_DATA_INDEX(INIT_HIDDEN, b, 0, dir, x); - ft[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + 
GEMM_OFFSET_F, x)]); - it[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_I, x)]); - zt[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_Z, x)]); - ot[element_idx] += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_O, x)]); - #endif //INIT_HIDDEN_TERM - } - else - { - uint hidden_idx = GET_DATA_INDEX(OUTPUT, b, timestep - 1, dir, x); - ft[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_F, x)]); - it[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_I, x)]); - zt[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_Z, x)]); - ot[element_idx] += (ACCUMULATOR_TYPE)(output[hidden_idx] * recurrent[GET_DATA_INDEX(RECURRENT, 0, dir, y + GEMM_OFFSET_O, x)]); - } //else timesteo ==0 - }//for(uint x = 0; x < OUTPUT_SIZE_X; ++x) - }//if(use_hidden) - - //eltwise operation - eltiwse_vals[element_idx] = ACTIVATION_LOGISTIC(CLIP(it[element_idx])) * ACTIVATION_HYPERBOLIC_TAN(CLIP(zt[element_idx])); - #if INPUT_FORGET - eltiwse_vals[element_idx] *= ((ACCUMULATOR_TYPE)1 - ft[element_idx]); - #endif //INPUT_FORGET - - if(use_cell) - { - if(timestep == 0) - { - #if INIT_CELL_TERM - eltiwse_vals[element_idx] += cell[GET_DATA_INDEX(INIT_CELL, b, 0, dir, y)] * ACTIVATION_LOGISTIC(CLIP(ft[element_idx])); - #endif //INIT_CELL_TERM - } - else - { - eltiwse_vals[element_idx] += cell_vals[element_idx] * ACTIVATION_LOGISTIC(CLIP(ft[element_idx])); - } - } - //end of eltwise operation - }//for(uint cell_element = 0; cell_element < ELEMENTS_TO_COUNT; cell_element++) - } //first if(timestep < unroll_timesteps) - - //all workitems needs to hit the barrier before writing to global output memory - barrier(CLK_GLOBAL_MEM_FENCE); - - //not all workitems will do computations - if(timestep < unroll_timesteps) - { - for(uint element_idx = 0; element_idx < ELEMENTS_TO_COUNT; element_idx++) - { - const uint y = y_offset + element_idx; - output_value = (OUTPUT_TYPE)(ACTIVATION_HYPERBOLIC_TAN(eltiwse_vals[element_idx]) * ACTIVATION_LOGISTIC(ot[element_idx])); // hidden - output[GET_DATA_INDEX(OUTPUT, b, timestep, dir, y)] = output_value; - #if LAST_HIDDEN_TERM - if(timestep == unroll_timesteps - 1) - { - last_hidden[GET_DATA_INDEX(LAST_HIDDEN, b, 0, dir, y)] = output_value; - } - #endif //LAST_HIDDEN_TERM - cell_vals[element_idx] = (OUTPUT_TYPE)eltiwse_vals[element_idx]; - #if LAST_CELL_TERM - if(timestep == unroll_timesteps - 1) - { - last_cell[GET_DATA_INDEX(LAST_CELL, b, 0, dir, y)] = cell_vals[element_idx]; - } - #endif //LAST_CELL_TERM - //cleanup loop - use_hidden = true; - use_cell = true; - eltiwse_vals[element_idx] = UNIT_VAL_ZERO; - } - } //second if(timestep < unroll_timesteps) - - //all workitems needs to hit the barrier after writing to global output memory - barrier(CLK_GLOBAL_MEM_FENCE); - } -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl deleted file mode 100644 index 8f9157a9521746..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemm_gpu_bfyx_ref.cl +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include 
"include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint y = get_global_id(0); - const uint b = get_global_id(1); - - ACCUMULATOR_TYPE dotProd = 0; - for(uint x = 0; x < INPUT0_SIZE_X; ++x ) { - const uint input_idx = GET_DATA_INDEX(INPUT0, b, 0, INPUT_DIRECTION, x); - const uint weights_idx = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, x); - dotProd += (ACCUMULATOR_TYPE)(input[input_idx] * weights[weights_idx]); - } - -#if HIDDEN_TERM - for(uint x = 0; x < HIDDEN_SIZE_X; ++x ) { - const uint hidden_idx = GET_DATA_INDEX(HIDDEN, b, 0, HIDDEN_DIRECTION, x); - const uint recurrent_idx = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, x); - dotProd += (ACCUMULATOR_TYPE)(hidden[hidden_idx] * recurrent[recurrent_idx]); - } -#endif - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - dotProd += (ACCUMULATOR_TYPE)biases[bias_idx]; -#endif - const uint output_idx = GET_DATA_INDEX(OUTPUT, b, 0, 0, y); - output[output_idx] = (OUTPUT_TYPE)dotProd; -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl deleted file mode 100644 index 9536c81e5ee23b..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16.cl +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_shuffle.cl" -#include "include/batch_headers/fetch_data.cl" -#include "include/acc_type.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -#ifndef SIMD -#define SIMD 16 -#endif - -// Sums value of result across all subgroups. -#define SUM_ACROSS_SUB_GROUP(val) \ - \ -{ \ - val += _sub_group_shuffle(val, x+1); \ - val += _sub_group_shuffle(val, x+2); \ - val += _sub_group_shuffle(val, x+4); \ - val += (SIMD > 8) ? _sub_group_shuffle(val, x+8) : 0; \ - val += (SIMD > 16) ? 
_sub_group_shuffle(val, x+16) : 0; \ -} - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output - -__attribute__((reqd_work_group_size(SIMD, 1, 1))) -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint x = get_local_id(0); - const uint y = get_global_id(1); - const int local_sz = get_local_size(0); - const int weight_num_rows = get_global_size(1); - - uint K; - int start_offset; - int end_offset; - int matrix_offset; - int vector_offset; - float4 sum; - float result; - - K = INPUT0_SIZE_X; // Width of weight matrix - start_offset = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, 0); // set as the starting offset of the weight matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // Weight offset for the work item to work on - vector_offset = GET_DATA_INDEX(INPUT0, 0, 0, INPUT_DIRECTION, (x*4)); // Input offset for the work item to work on - sum = (float4)(0.f); - result = 0; - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - float4 mask = (float4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - float4 m = (float4) (weights[matrix_offset], weights[matrix_offset + 1], weights[matrix_offset + 2], weights[matrix_offset + 3]); - m = m * mask; - - const float4 v = (float4) (input[vector_offset], input[vector_offset + 1], input[vector_offset + 2], input[vector_offset + 3]); - - sum = mad(m, v, sum); - } - - result = sum.x + sum.y + sum.z + sum.w; - -#if HIDDEN_TERM - K = HIDDEN_SIZE_X; // width of recurrent matrix - start_offset = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, 0); // set as the starting offset of the recurrent matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // recurrent offset for the work item to work on - vector_offset = GET_DATA_INDEX(HIDDEN, 0, 0, HIDDEN_DIRECTION, (x*4)); // hidden vector offset for the work item to work on - sum = (float4)(0.f); - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - float4 mask = (float4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - float4 m = (float4) (recurrent[matrix_offset], recurrent[matrix_offset + 1], recurrent[matrix_offset + 2], recurrent[matrix_offset + 3]); - m = m * mask; - - const float4 v = (float4) (hidden[vector_offset], hidden[vector_offset + 1], hidden[vector_offset + 2], hidden[vector_offset + 3]); - - sum = mad(m, v, sum); - } - - result += sum.x + sum.y + sum.z + sum.w; -#endif - - // Add together partial sums contained in each work item's "result" variable - SUM_ACROSS_SUB_GROUP(result); - - if(x == 0) - { - output[y] = (OUTPUT_TYPE)result; - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - float bias = (ACCUMULATOR_TYPE)biases[bias_idx]; - output[y] += (OUTPUT_TYPE)bias; -#endif - } -} - -#undef SUM_ACROSS_SUB_GROUP -#undef SIMD diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl deleted file mode 100644 index 15c68604ce5442..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16.cl +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/sub_group_shuffle.cl" -#include "include/batch_headers/fetch_data.cl" - -#ifndef DIRECTION -#define DIRECTION 0 -#endif - -#ifndef SIMD -#define SIMD 16 -#endif - -// Sums value of result across all subgroups. -#define SUM_ACROSS_SUB_GROUP(val) \ - \ -{ \ - val += _sub_group_shuffle(val, x+1); \ - val += _sub_group_shuffle(val, x+2); \ - val += _sub_group_shuffle(val, x+4); \ - val += _sub_group_shuffle(val, x+8); \ -} - -// input = [ batch, sequence, 1, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, direction, 1, 4 * hidden_size ] output - -__attribute__((reqd_work_group_size(SIMD, 1, 1))) -KERNEL(lstm_gemm)( - const __global INPUT0_TYPE* input, - __global OUTPUT_TYPE* output, - const __global WEIGHTS_TYPE* weights -#if HIDDEN_TERM - , const __global OUTPUT_TYPE* hidden, - const __global RECURRENT_TYPE* recurrent -#endif -#if BIAS_TERM - , const __global BIAS_TYPE* biases -#endif - ) -{ - const uint x = get_local_id(0); - const uint y = get_global_id(1); - const int local_sz = get_local_size(0); - - uint K; - int start_offset; - int end_offset; - int matrix_offset; - int vector_offset; - float4 sum; - float result; - - K = INPUT0_SIZE_X; // Width of weight matrix - start_offset = GET_DATA_INDEX(WEIGHTS, 0, DIRECTION, y, 0); // set as the starting offset of the weight matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // Weight offset for the work item to work on - vector_offset = GET_DATA_INDEX(INPUT0, 0, 0, INPUT_DIRECTION, (x*4)); // Input offset for the work item to work on - sum = (float4)(0.f); - result = 0; - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - half4 mask = (half4) (1 , (matrix_offset + 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - half4 m = (half4) (weights[matrix_offset], weights[matrix_offset + 1], weights[matrix_offset + 2], weights[matrix_offset + 3]); - m = m * mask; - - const half4 v = (half4)(input[vector_offset], input[vector_offset + 1], input[vector_offset + 2], input[vector_offset + 3]); - - sum = mad(convert_float4(m), convert_float4(v), sum); - } - - result = sum.x + sum.y + sum.z + sum.w; - -#if HIDDEN_TERM - K = HIDDEN_SIZE_X; // width of recurrent matrix - start_offset = GET_DATA_INDEX(RECURRENT, 0, DIRECTION, y, 0); // set as the starting offset of the recurrent matrix - end_offset = start_offset + K; - matrix_offset = start_offset + (x * 4); // recurrent offset for the work item to work on - vector_offset = GET_DATA_INDEX(HIDDEN, 0, 0, HIDDEN_DIRECTION, (x*4)); // hidden vector offset for the work item to work on - sum = (float4)(0.f); - for(; matrix_offset < end_offset; matrix_offset += (local_sz * 4), vector_offset += (local_sz * 4)) - { - half4 mask = (half4) (1 , (matrix_offset 
+ 1) < end_offset , (matrix_offset + 2) < end_offset , (matrix_offset + 3) < end_offset); - half4 m = (half4) (recurrent[matrix_offset], recurrent[matrix_offset + 1], recurrent[matrix_offset + 2], recurrent[matrix_offset + 3]); - m = m * mask; - - const half4 v = (half4) (hidden[vector_offset], hidden[vector_offset + 1], hidden[vector_offset + 2], hidden[vector_offset + 3]); - - sum = mad(convert_float4(m), convert_float4(v), sum); - } - - result += sum.x + sum.y + sum.z + sum.w; -#endif - - // Add together partial sums contained in each work item's "result" variable - SUM_ACROSS_SUB_GROUP(result); - - if(x == 0) - { - output[y] = 0;// (half)result; - -#if BIAS_TERM - const uint bias_idx = GET_DATA_INDEX(BIAS, 0, 0, DIRECTION, y); - half bias = biases[bias_idx]; - result += (float)bias; -#endif - - output[y] = (half)result; - //output[y] = convert_half_rte(result); - - - } -} - -#undef SUM_ACROSS_SUB_GROUP -#undef SIMD diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl deleted file mode 100644 index 4294b1244f8104..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pyramid_roi_align_gpu_ref.cl +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/batch_headers/fetch_data.cl" - -#define PYRAMID_LEVELS 4 - -struct Parameters -{ - int size_y, size_x, f_pitch, x_pitch, y_pitch, offset; -}; - -__constant struct Parameters parameters [PYRAMID_LEVELS] = - { - { INPUT1_SIZE_Y, INPUT1_SIZE_X, INPUT1_FEATURE_PITCH, INPUT1_X_PITCH, INPUT1_Y_PITCH, INPUT1_OFFSET }, - { INPUT2_SIZE_Y, INPUT2_SIZE_X, INPUT2_FEATURE_PITCH, INPUT2_X_PITCH, INPUT2_Y_PITCH, INPUT2_OFFSET }, - { INPUT3_SIZE_Y, INPUT3_SIZE_X, INPUT3_FEATURE_PITCH, INPUT3_X_PITCH, INPUT3_Y_PITCH, INPUT3_OFFSET }, - { INPUT4_SIZE_Y, INPUT4_SIZE_X, INPUT4_FEATURE_PITCH, INPUT4_X_PITCH, INPUT4_Y_PITCH, INPUT4_OFFSET } - }; - -inline INPUT1_TYPE FUNC(accumulate)(INPUT1_TYPE acc, INPUT1_TYPE val) { - return max(acc, val); -} - -#define ACCUMULATOR_INIT_VAL INPUT1_VAL_MIN - -KERNEL(pyramidROIAlign_gpu_ref)( - const __global INPUT0_TYPE *boxes, - const __global INPUT1_TYPE *P2, - const __global INPUT2_TYPE *P3, - const __global INPUT3_TYPE *P4, - const __global INPUT4_TYPE *P5, - __global OUTPUT_TYPE *output) -{ - const uint oyx = get_global_id(0); - const uint ox = oyx % OUTPUT_SIZE_X; - const uint oy = oyx / OUTPUT_SIZE_X; - const uint of = get_global_id(1); - const uint kerNum = (uint) get_global_id(2); - - INPUT0_TYPE hU = boxes[GET_DATA_INDEX(INPUT0, kerNum, 3, 0, 0)]; - INPUT0_TYPE hL = boxes[GET_DATA_INDEX(INPUT0, kerNum, 1, 0, 0)]; - INPUT0_TYPE h = hU - hL; - INPUT0_TYPE wU = boxes[GET_DATA_INDEX(INPUT0, kerNum, 2, 0, 0)]; - INPUT0_TYPE wL = boxes[GET_DATA_INDEX(INPUT0, kerNum, 0, 0, 0)]; - INPUT0_TYPE w = wU - wL; - - // TODO This scale could be used when box coordinates are not normalized, but in pixel coordinates. 
-#ifdef PYRAMID_ROI_ALIGN_PIXEL_BOXES - float image_area = IMAGE_SIZE_X * IMAGE_SIZE_Y; - float scale = 1.f / sqrt(image_area); -#else - float scale = 1.f; -#endif - - int roi_level = (int)round(PYRAMID_STARTING_LEVEL + log2(sqrt(h*w) * scale)); - // 0 <= roi_level < PYRAMID_LEVELS - roi_level = min(PYRAMID_LEVELS - 1, max(0, roi_level)); - - const __global INPUT1_TYPE* feature_map_ptrs[PYRAMID_LEVELS]; - - feature_map_ptrs[0] = P2; - feature_map_ptrs[1] = P3; - feature_map_ptrs[2] = P4; - feature_map_ptrs[3] = P5; - - const __global INPUT1_TYPE* feature_map_ptr = feature_map_ptrs[roi_level]; - - const uint sampling_ratio_x = SAMPLING_RATIO_X != 0 ? SAMPLING_RATIO_X : (uint)ceil(1.f * w * IMAGE_SIZE_X / OUTPUT_SIZE_X); - const uint sampling_ratio_y = SAMPLING_RATIO_Y != 0 ? SAMPLING_RATIO_Y : (uint)ceil(1.f * h * IMAGE_SIZE_Y / OUTPUT_SIZE_Y); - - //calculate cooficients for transformation - INPUT0_TYPE y1 = hL * (parameters[roi_level].size_y - 1); - INPUT0_TYPE x1 = wL * (parameters[roi_level].size_x - 1); - INPUT0_TYPE y2 = hU * (parameters[roi_level].size_y - 1); - INPUT0_TYPE x2 = wU * (parameters[roi_level].size_x - 1); - INPUT0_TYPE deltaX = (x2 - x1) / (OUTPUT_SIZE_X); - INPUT0_TYPE deltaY = (x2 - x1) / (OUTPUT_SIZE_Y); - INPUT0_TYPE pool_deltaX = deltaX / sampling_ratio_x; - INPUT0_TYPE pool_deltaY = deltaY / sampling_ratio_y; - - uint data_base_offset = parameters[roi_level].offset + parameters[roi_level].f_pitch * of; - - INPUT0_TYPE y_base = y1 + oy * deltaY + TO_INPUT0_TYPE(0.5f) * pool_deltaY; - INPUT0_TYPE x_base = x1 + ox * deltaX + TO_INPUT0_TYPE(0.5f) * pool_deltaX; - - INPUT1_TYPE accumulator = ACCUMULATOR_INIT_VAL; - - //transformation - for (int yi = 0; yi < sampling_ratio_y; ++yi) { - INPUT0_TYPE y = y_base + yi * pool_deltaY; - int y_low = (int)floor(y); - int y_high = (int)ceil(y); - - y_low = clamp(y_low, 0, parameters[roi_level].size_y - 1); - y_high = clamp(y_high, 0, parameters[roi_level].size_y - 1); - - if (y_low == y_high) { - if (y_high + 1 <= parameters[roi_level].size_y) - y_high += 1; - else - y_low -= 1; - } - - INPUT0_TYPE y_high_coeff = y - y_low; - INPUT0_TYPE y_low_coeff = y_high - y; - - for (int xi = 0; xi < sampling_ratio_x; ++xi) { - INPUT0_TYPE x = x_base + xi * pool_deltaX; - - int x_left = (int)floor(x); - int x_right = (int)ceil(x); - - x_left = clamp(x_left, 0, parameters[roi_level].size_x - 1); - x_right = clamp(x_right, 0, parameters[roi_level].size_x - 1); - - if (x_left == x_right) { - if (x_right + 1 <= parameters[roi_level].size_x) - x_right += 1; - else - x_left -= 1; - } - - INPUT0_TYPE x_right_coeff = x - x_left; - INPUT0_TYPE x_left_coeff = x_right - x; - - uint low_left_idx = data_base_offset + parameters[roi_level].x_pitch * x_left + parameters[roi_level].y_pitch * y_low; - uint high_left_idx = data_base_offset + parameters[roi_level].x_pitch * x_left + parameters[roi_level].y_pitch * y_high; - uint low_right_idx = data_base_offset + parameters[roi_level].x_pitch * x_right + parameters[roi_level].y_pitch * y_low; - uint high_right_idx = data_base_offset + parameters[roi_level].x_pitch * x_right + parameters[roi_level].y_pitch * y_high; - - INPUT1_TYPE low_left_val = feature_map_ptr[low_left_idx]; - INPUT1_TYPE high_left_val = feature_map_ptr[high_left_idx]; - INPUT1_TYPE low_right_val = feature_map_ptr[low_right_idx]; - INPUT1_TYPE high_right_val = feature_map_ptr[high_right_idx]; - - INPUT1_TYPE left_val = y_low_coeff * low_left_val + y_high_coeff * high_left_val; - INPUT1_TYPE right_val = y_low_coeff * low_right_val + 
y_high_coeff * high_right_val; - - INPUT1_TYPE interpolated_val = x_left_coeff * left_val + x_right_coeff * right_val; - - accumulator = FUNC_CALL(accumulate)(accumulator, interpolated_val); - } - } - - uint output_idx = GET_DATA_INDEX(OUTPUT, kerNum, of, oy, ox); - output[output_idx] = TO_OUTPUT_TYPE(accumulator); -} diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index fc946d9e4272bb..2b6f7be857be8c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -39,7 +39,6 @@ enum class KernelType { REGION_YOLO, REORG_YOLO, MVN, - LSTM_GEMM, LSTM_ELT, BORDER, TILE, @@ -48,7 +47,6 @@ enum class KernelType { BUCKETIZE, GEMM, GRID_SAMPLE, - PYRAMID_ROI_ALIGN, CONTRACT, ONE_HOT, GATHER, @@ -65,8 +63,6 @@ enum class KernelType { STRIDED_SLICE, REVERSE_SEQUENCE, QUANTIZE, - LSTM_DYNAMIC_INPUT, - LSTM_DYNAMIC_TIMELOOP, REDUCE, GATHER_TREE, SPACE_TO_DEPTH, diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h deleted file mode 100644 index bfb4575248c6d3..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_runner_interface.h +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include -#include - -namespace kernel_selector { -class KernelRunnerInterface { -public: - // Gets a list of kernels, executes them and returns the run time of each kernel (in nano-seconds). - virtual std::vector run_kernels(const kernel_selector::KernelsData& kernelsData) = 0; - - virtual ~KernelRunnerInterface() = default; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp index fbf33dbedd750c..b2bb2f59f06efa 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp @@ -340,7 +340,6 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::winograd_6x3_s1_fused_weights: return "WINOGRAD_6x3_S1_FUSED_WEIGHTS"; case WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb: return "IMAGE_2D_WEIGHTS_WINOGRAD_6x3_S1_FBXYB"; case WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb: return "IMAGE_2D_WEIGHTS_WINOGRAD_6x3_S1_XFBYB"; - case WeightsLayout::dlstm_dir_io: return "DLSTM_DIR_IO"; case WeightsLayout::os_is_yx_isa8_osv8_isv4: return "OS_IS_YX_ISA8_OSV8_ISV4"; case WeightsLayout::os_is_yx_isa8_osv16_isv4: return "OS_IS_YX_ISA8_OSV16_ISV4"; case WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4: return "OS_IS_YX_ISA8_OSV8_ISV4_SWIZZLED_BY_4"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h index c8ba581f51c696..dec0d3476ce9a5 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h @@ -228,14 +228,6 @@ class ParamsKey { uint32_t stride : 1; uint32_t broadcast : 1; } eltwise; - struct lstm_gemm_t { - uint32_t bias : 1; - uint32_t hidden : 1; - } lstm_gemm; - struct lstm_dynamic_t { - uint32_t last_hidden : 1; - uint32_t last_cell : 1; - } lstm_dynamic; struct lstm_elt_t { uint32_t cell : 1; } lstm_elt; @@ -338,11 +330,7 @@ class ParamsKey 
{ void EnableEltwiseStride(); void EnableEltwiseBroadcast() { key.restrict.val.dedicated.eltwise.broadcast = 1; } - void EnableLSTMGEMMBias() { key.restrict.val.dedicated.lstm_gemm.bias = 1; } - void EnableLSTMGEMMHidden() { key.restrict.val.dedicated.lstm_gemm.hidden = 1; } void EnableLSTMEltCell() { key.restrict.val.dedicated.lstm_elt.cell = 1; } - void EnableLSTMDyanmicOptionalHiddenOutput() { key.restrict.val.dedicated.lstm_dynamic.last_hidden = 1; } - void EnableLSTMDyanmicOptionalCellOutput() { key.restrict.val.dedicated.lstm_dynamic.last_cell = 1; } void EnableConcatKernelPerInput() { key.restrict.val.dedicated.concat.kernelPerInput = 1; } void EnableConcatOneKernel() { key.restrict.val.dedicated.concat.oneKernel = 1; } void EnableArgMaxMinAxis(ArgMaxMinAxis a); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp index de93e27a3810b0..e62eb5c04426c8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp @@ -261,6 +261,9 @@ bool GemmKernelTiledOpt::Validate(const Params& params, const optional_params& o for (size_t input_idx = 0; input_idx < gmm_params.inputs.size(); ++input_idx) { auto& input = gmm_params.inputs[input_idx]; + if (!Tensor::SimpleLayout(input.GetLayout())) { + return false; + } // Supports outer padding as first element offset and dynamic padding for Batch, Feature, X, Y dimensions for first and second inputs // in case of shape agnostic kernel bool proper_pad_f = input.Feature().pad.is_dynamic ? false : input.Feature().pad.Total() == 0; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp deleted file mode 100644 index 3f3e5c11fd2936..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.cpp +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemm_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" - -namespace kernel_selector { -JitConstants LSTMGemmKernelBase::GetJitConstants(const lstm_gemm_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - const auto& weights = params.weights; - const auto& recurrent = params.recurrent; - const auto& hidden = params.hidden; - const auto& bias = params.bias; - if (params.hasBias) { - jit.AddConstants({MakeJitConstant("BIAS", bias), MakeJitConstant("BIAS_TERM", true)}); - } - if (params.hasHidden) { - jit.AddConstants({MakeJitConstant("HIDDEN", hidden), - MakeJitConstant("HIDDEN_TERM", true), - MakeJitConstant("RECURRENT", recurrent), - MakeJitConstant("HIDDEN_DIRECTION", params.hidden_direction)}); - } - jit.AddConstants({MakeJitConstant("WEIGHTS", weights)}); - jit.AddConstants({MakeJitConstant("DIRECTION", params.direction)}); - jit.AddConstants({MakeJitConstant("INPUT_DIRECTION", params.input_direction)}); - - return jit; -} - -KernelsData LSTMGemmKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_gemm_params& orgParams = static_cast(params); - - KernelData kd = KernelData::Default(params, orgParams.inputs.size()); - - const auto& input = orgParams.inputs[0]; - - 
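// The gemm_kernel_tiled_opt.cpp hunk above adds an early-out that rejects blocked
// (non-plain) input layouts before the padding checks run. A minimal standalone sketch
// of that guard pattern follows; DataLayout and simple_layout() here are simplified
// stand-ins for the kernel_selector types, not the real API.
#include <algorithm>
#include <vector>

enum class DataLayout { bfyx, bfzyx, b_fs_yx_fsv16, b_fs_yx_fsv32 };

// Plain (non-blocked) layouts are the only ones the tiled kernel handles.
static bool simple_layout(DataLayout l) {
    return l == DataLayout::bfyx || l == DataLayout::bfzyx;
}

static bool validate_inputs(const std::vector<DataLayout>& inputs) {
    // Reject the whole kernel as soon as one input uses a blocked layout.
    return std::all_of(inputs.begin(), inputs.end(), simple_layout);
}

int main() {
    const std::vector<DataLayout> inputs = {DataLayout::bfyx, DataLayout::b_fs_yx_fsv16};
    return validate_inputs(inputs) ? 1 : 0;  // exits 0: the blocked fsv16 input is rejected
}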
auto newParams = orgParams; - newParams.inputs.resize(1); - newParams.inputs[0] = input; - auto out = newParams.outputs[0]; - // TODO: reorder weights if needed - auto& kernel = kd.kernels[0]; - auto cldnnJit = GetJitConstants(newParams); - auto entryPoint = GetEntryPoint(kernelName, newParams.layerID, params, options); - auto jit = CreateJit(kernelName, cldnnJit, entryPoint); - - kernel.params.workGroups.global = {out.X().v, out.Batch().v, 1}; - kernel.code.kernelString = GetKernelString(kernelName, jit, entryPoint, params.engineInfo); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::OUTPUT, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::WEIGHTS, 0}); - if (orgParams.hasHidden) { - kernel.params.arguments.push_back({ArgumentDescriptor::Types::HIDDEN, 0}); - kernel.params.arguments.push_back({ArgumentDescriptor::Types::RECURRENT, 0}); - } - if (orgParams.hasBias) { - kernel.params.arguments.push_back({ArgumentDescriptor::Types::BIAS, 0}); - } - - return {kd}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h deleted file mode 100644 index 116d9426929b9b..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_base.h +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_gemm_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_gemm_params : public base_params { - lstm_gemm_params() : base_params(KernelType::LSTM_GEMM) {} - - DataTensor weights; - DataTensor recurrent; - DataTensor bias; - DataTensor hidden; - bool hasBias = false; - bool hasHidden = false; - uint32_t direction = 0; - uint32_t input_direction = 0; // for bidirectional node fusion in stacked LSTMs - uint32_t hidden_direction = 0; - - void SetBias(const DataTensor& v) { - bias = v; - hasBias = true; - } - - void SetHidden(const DataTensor& v) { - hidden = v; - hasHidden = true; - } - - ParamsKey GetParamsKey() const override { - ParamsKey k = base_params::GetParamsKey(); - - if (hasBias) { - k.EnableLSTMGEMMBias(); - } - - if (hasHidden) { - k.EnableLSTMGEMMHidden(); - } - - return k; - } -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_gemm_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_gemm_optional_params : optional_params { - lstm_gemm_optional_params() : optional_params(KernelType::LSTM_GEMM) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTMGemmKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTMGemmKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTMGemmKernelBase() {} - - struct DispatchData : 
public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_gemm_params& params) const; - KernelsData GetCommonKernelsData(const Params& params, const optional_params& optParams) const; - - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_GEMM) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp deleted file mode 100644 index 3d1ee9175dfd7c..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemm_kernel_ref.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTMGemmKernelRef::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableDifferentTypes(); - k.EnableAllInputLayout(); - k.EnableAllOutputLayout(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableLSTMGEMMHidden(); - return k; -} - -KernelsData LSTMGemmKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority LSTMGemmKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h deleted file mode 100644 index b729ab05f0d58e..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_ref.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_gemm_kernel_base.h" - -namespace kernel_selector { -class LSTMGemmKernelRef : public LSTMGemmKernelBase { -public: - LSTMGemmKernelRef() : LSTMGemmKernelBase("lstm_gemm_gpu_bfyx_ref") {} - virtual ~LSTMGemmKernelRef() {} - - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp deleted file mode 100644 index b997572d754c37..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemm_kernel_selector.h" -#include "lstm_gemm_kernel_ref.h" -#include "lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h" -#include "lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h" - -namespace kernel_selector { -lstm_gemm_kernel_selector::lstm_gemm_kernel_selector() { - Attach<LSTMGemmKernelRef>(); - Attach<LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16>(); - Attach<LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16>(); -} - 
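// The lstm_gemm ParamsKey bits removed in this patch follow the selector's general
// capability-key scheme: a kernel advertises what it supports, a params object records
// what it requires, and a kernel is eligible only when the required bits form a subset
// of the supported bits. A simplified sketch, assuming plain uint32_t masks instead of
// the real nested bit-field unions:
#include <cstdint>

struct Key {
    uint32_t bits = 0;
    void enable_bias()   { bits |= 1u << 0; }
    void enable_hidden() { bits |= 1u << 1; }
    // Eligible when every bit required by `needed` is also set here.
    bool supports(const Key& needed) const { return (bits & needed.bits) == needed.bits; }
};

int main() {
    Key kernel, params;
    kernel.enable_bias();
    kernel.enable_hidden();
    params.enable_hidden();
    return kernel.supports(params) ? 0 : 1;  // exits 0: the kernel is eligible
}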
-KernelsData lstm_gemm_kernel_selector::GetBestKernels(const Params& params, const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::LSTM_GEMM); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h deleted file mode 100644 index f878705ceace2c..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemm_kernel_selector.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class lstm_gemm_kernel_selector : public kernel_selector_base { -public: - static lstm_gemm_kernel_selector& Instance() { - static lstm_gemm_kernel_selector instance_; - return instance_; - } - - lstm_gemm_kernel_selector(); - - virtual ~lstm_gemm_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp deleted file mode 100644 index 18d57c4920a248..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F32); - k.EnableDifferentTypes(); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableLSTMGEMMHidden(); - return k; -} - -DeviceFeaturesKey LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::get_required_device_features_key(const Params& params, const optional_params& options) const { - DeviceFeaturesKey k; - k.requires_subgroups(); - k.requires_subgroup_shuffle(); - - return k; -} - -KernelsData LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetKernelsData(const Params& params, - const optional_params& options) const { - KernelsData kernelsData = GetCommonKernelsData(params, options); - auto& kernel = kernelsData[0].kernels[0]; - - // This kernel is good if - // 1) Batch size is 1 - // 2) The input size y-x size is 64x1 - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - const auto& out = orgParams.outputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - kernel.params.workGroups.global = {16, out.X().v, out.Batch().v}; - - return kernelsData; -} - -KernelsPriority LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16::GetKernelsPriority(const Params& params, const optional_params& /*options*/) const { - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - return FORCE_PRIORITY_1; - else - return FORCE_PRIORITY_9; -} -} // namespace 
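// The deleted gemv SIMD16 kernels above rank themselves by shape: top priority only for
// a batch-1, height-1 input at least 64 elements wide, and a weak fallback priority
// otherwise so the reference kernel wins. A sketch of that ranking, assuming, as with
// the selector's FORCE_PRIORITY_* constants, that a lower value means higher priority:
#include <cstddef>

constexpr int FORCE_PRIORITY_1 = 1;  // preferred
constexpr int FORCE_PRIORITY_9 = 9;  // last resort

struct Shape { std::size_t batch, x, y; };

static int kernel_priority(const Shape& in) {
    const bool fits_subgroup_path = in.batch == 1 && in.x >= 64 && in.y == 1;
    return fits_subgroup_path ? FORCE_PRIORITY_1 : FORCE_PRIORITY_9;
}

int main() { return kernel_priority({1, 128, 1}) == FORCE_PRIORITY_1 ? 0 : 1; }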
kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h deleted file mode 100644 index ef9ba3e46fb8e9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_ff_simd16.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_gemm_kernel_base.h" - -namespace kernel_selector { -class LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16 : public LSTMGemmKernelBase { -public: - LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16() : LSTMGemmKernelBase("lstm_gemv_gpu_subgroup1x64_bfyx_ff_SIMD16") {} - virtual ~LSTMGemvKernel_subgroup1x64_bfyx_ff_SIMD16() {} - - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp deleted file mode 100644 index baa58ebde3fd9e..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F16); - k.EnableDifferentTypes(); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableLSTMGEMMHidden(); - return k; -} - -DeviceFeaturesKey LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::get_required_device_features_key(const Params& params, const optional_params& options) const { - DeviceFeaturesKey k; - k.requires_subgroups(); - k.requires_subgroup_shuffle(); - - return k; -} - -KernelsData LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetKernelsData(const Params& params, - const optional_params& options) const { - KernelsData kernelsData = GetCommonKernelsData(params, options); - auto& kernel = kernelsData[0].kernels[0]; - - // This kernel is good if - // 1) Batch size is 1 - // 2) The input size y-x size is 64x1 - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - const auto& out = orgParams.outputs[0]; - - if ((input.Batch().v == 1) && (input.X().v >= 64) && (input.Y().v == 1)) - kernel.params.workGroups.global = {16, out.X().v, out.Batch().v}; - - return kernelsData; -} - -KernelsPriority LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16::GetKernelsPriority(const Params& params, const optional_params& /*options*/) const { - const lstm_gemm_params& orgParams = static_cast(params); - const auto& input = orgParams.inputs[0]; - - if ((input.Batch().v == 1) && 
(input.X().v >= 64) && (input.Y().v == 1)) - return FORCE_PRIORITY_1; - else - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h deleted file mode 100644 index 795d4d1a70725d..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm/lstm_gemv_gpu_subgroup1x64_bfyx_hh_simd16.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_gemm_kernel_base.h" - -namespace kernel_selector { -class LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16 : public LSTMGemmKernelBase { -public: - LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16() : LSTMGemmKernelBase("lstm_gemv_gpu_subgroup1x64_bfyx_hh_SIMD16") {} - virtual ~LSTMGemvKernel_subgroup1x64_bfyx_hh_SIMD16() {} - - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp deleted file mode 100644 index a7fbdf9c603ded..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_input_bfyx_opt.h" -#include "kernel_selector_utils.h" - -#include - -namespace kernel_selector { - -ParamsKey LSTM_DynamicInputKernelBfyxOpt::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputWeightsType(WeightsType::F32); - k.EnableInputWeightsType(WeightsType::F16); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableNonBiasTerm(); - k.EnableBiasPerFeature(); - k.EnableBiasPerOutput(); - return k; -} - -DeviceFeaturesKey LSTM_DynamicInputKernelBfyxOpt::get_required_device_features_key(const Params& params, const optional_params& options) const { - auto k = get_common_subgroups_device_features_key(params, options); - k.requires_subgroup_shuffle(); - - return k; -} - -bool LSTM_DynamicInputKernelBfyxOpt::Validate(const Params & p, const optional_params & o) const { - if (!LSTM_DynamicInputKernelBase::Validate(p, o)) { - return false; - } - - const auto& params = static_cast(p); - - const auto& weights = params.weights; - const auto weights_x = weights.X().v; - const auto weights_y = weights.Y().v; - const auto& input = params.inputs[0]; - const auto& out = params.outputs[0]; - - bool input_X_div_by_8 = input.X().v % 8 == 0; - bool weights_X_div_by_8 = weights_x % 8 == 0; - bool weights_Y_div_by_8_x_simd_size = weights_y % (8 * simd_size) == 0; - bool gws0_size 
= out.X().v / simd_size <= 512; // ToDo remove condition and update .cl code for bigger gws0 - - if (!input_X_div_by_8 || - !weights_X_div_by_8 || - !weights_Y_div_by_8_x_simd_size || - !gws0_size) - return false; - return true; -} - -KernelsData LSTM_DynamicInputKernelBfyxOpt::GetKernelsData(const Params& params, const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - DispatchData dispatchData; - - KernelData kd = KernelData::Default(params); - lstm_dynamic_input_params& dlstm_params = *static_cast(kd.params.get()); - - auto in_layout = dlstm_params.inputs[0].GetLayout(); - auto out_layout = dlstm_params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X }, - { Tensor::DataChannelName::Y, Tensor::DataChannelName::BATCH }, - { Tensor::DataChannelName::FEATURE }}; - - const auto& out = dlstm_params.outputs[0]; - auto hidden_size = out.X().v; - - dispatchData.gws = { hidden_size / simd_size, out.Batch().v * out.Y().v, out.Feature().v }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - bool succeed = UpdateWeightsParams(dlstm_params, - options, - WeightsLayout::dlstm_dir_io, - kd.weightsReorderParams, - GetSupportedKey()); - - if (!succeed) { - return {}; - } - - auto cldnn_jit = GetJitConstants(dlstm_params); - auto entry_point = GetEntryPoint(kernelName, dlstm_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = kd.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.params.workGroups.local = dispatchData.lws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(dlstm_params, kernel); - - return { kd }; -} - -KernelsPriority LSTM_DynamicInputKernelBfyxOpt::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return FORCE_PRIORITY_5; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h deleted file mode 100644 index ed989beaba814f..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_input_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicInputKernelBfyxOpt : public LSTM_DynamicInputKernelBase { -public: - LSTM_DynamicInputKernelBfyxOpt() : LSTM_DynamicInputKernelBase("lstm_dynamic_input_bfyx_opt") {} - - virtual ~LSTM_DynamicInputKernelBfyxOpt() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; - DeviceFeaturesKey get_required_device_features_key(const Params& params, const optional_params& /*options*/) const override; - -protected: - bool Validate(const Params& p, const optional_params& o) const override; - -private: - const uint32_t simd_size = 8; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp 
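// LSTM_DynamicInputKernelBfyxOpt::Validate above gates the optimized path on
// divisibility: input and weights widths must be multiples of 8, the weights height a
// multiple of 8 * simd_size, and the first GWS dimension is capped at 512. A standalone
// sketch of those checks, exercised with hypothetical dimension values:
#include <cstddef>

static bool valid_for_opt_path(std::size_t input_x, std::size_t weights_x,
                               std::size_t weights_y, std::size_t out_x,
                               std::size_t simd_size = 8) {
    return input_x % 8 == 0 &&
           weights_x % 8 == 0 &&
           weights_y % (8 * simd_size) == 0 &&
           out_x / simd_size <= 512;  // the original carries a ToDo to lift this GWS cap
}

int main() { return valid_for_opt_path(64, 64, 256, 128) ? 0 : 1; }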
b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp deleted file mode 100644 index 4c47aed7d50ba9..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_input_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" -#include - -namespace kernel_selector { -JitConstants LSTM_DynamicInputKernelBase::GetJitConstants(const lstm_dynamic_input_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - jit.AddConstants({MakeJitConstant("WEIGHTS", params.weights), - MakeJitConstant("DYN_LENGTH", params.inputs.at(1)), - MakeJitConstant("MAX_SEQUENCE_LENGTH", params.inputs.at(0).Feature().v)}); - - // [2] Optionals - if (!params.bias.empty()) { - jit.AddConstants({MakeJitConstant("BIAS", params.bias[0]), MakeJitConstant("BIAS_TERM", true)}); - } - - return jit; -} - -LSTM_DynamicInputKernelBase::DispatchData LSTM_DynamicInputKernelBase::SetDefault( - const lstm_dynamic_input_params& params) { - DispatchData dispatchData; - auto in_layout = params.inputs[0].GetLayout(); - auto out_layout = params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X }, - { Tensor::DataChannelName::Y, Tensor::DataChannelName::BATCH }, - { Tensor::DataChannelName::FEATURE }}; - - const auto& out = params.outputs[0]; - - // 4 * hidden, batch * dir, seq_len - dispatchData.gws = { out.X().v, out.Batch().v * out.Y().v, out.Feature().v }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - return dispatchData; -} - -void kernel_selector::LSTM_DynamicInputKernelBase::SetKernelArguments(const lstm_dynamic_input_params& params, clKernelData& kernel) const { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, 1 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::OUTPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::WEIGHTS, 0 }); - if (!params.bias.empty()) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::BIAS, 0 }); - } -} - -KernelsData LSTM_DynamicInputKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_dynamic_input_params& orgParams = static_cast(params); - - auto dispatchData = SetDefault(orgParams); - KernelData k_data = KernelData::Default(params, 1); - - auto cldnn_jit = GetJitConstants(orgParams); - auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(orgParams, kernel); - - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h deleted file mode 100644 index 069942d657d479..00000000000000 --- 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "weight_bias_params.h" -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_input_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_input_params : public weight_bias_params { - lstm_dynamic_input_params() : weight_bias_params(KernelType::LSTM_DYNAMIC_INPUT) {} - - int32_t direction = 1; -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_input_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_input_optional_params : weight_bias_optional_params { - lstm_dynamic_input_optional_params() : weight_bias_optional_params(KernelType::LSTM_DYNAMIC_INPUT) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTM_DynamicInputKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTM_DynamicInputKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTM_DynamicInputKernelBase() {} - - struct DispatchData : public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_dynamic_input_params& params) const; - static DispatchData SetDefault(const lstm_dynamic_input_params& params); - KernelsData GetCommonKernelsData(const Params& params, - const optional_params& optParams) const; - void SetKernelArguments(const lstm_dynamic_input_params& params, clKernelData& k_data) const; - - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_DYNAMIC_INPUT) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp deleted file mode 100644 index 8d013efc8f56dc..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_input_kernel_selector.h" -#include "lstm_dynamic_input_ref_kernel.h" -#include "lstm_dynamic_input_bfyx_opt.h" - -namespace kernel_selector { -lstm_dynamic_input_kernel_selector::lstm_dynamic_input_kernel_selector() { - Attach(); - Attach(); -} - -KernelsData lstm_dynamic_input_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::LSTM_DYNAMIC_INPUT); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h 
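// Every *_kernel_selector removed by this patch is a Meyers singleton that attaches its
// kernel implementations once and then returns the best kernel that validates against
// the given params. A compact sketch of that registry shape; KernelBase, Params, and the
// integer priority are simplified stand-ins for the kernel_selector types:
#include <limits>
#include <memory>
#include <vector>

struct Params { /* shapes, layouts, data types, ... */ };

struct KernelBase {
    virtual ~KernelBase() = default;
    virtual bool validate(const Params&) const = 0;
    virtual int priority(const Params&) const = 0;  // lower value wins
};

class Selector {
public:
    static Selector& instance() {  // constructed once, on first use
        static Selector s;
        return s;
    }
    void attach(std::unique_ptr<KernelBase> k) { kernels_.push_back(std::move(k)); }
    const KernelBase* best(const Params& p) const {
        const KernelBase* winner = nullptr;
        int best_prio = std::numeric_limits<int>::max();
        for (const auto& k : kernels_) {
            if (k->validate(p) && k->priority(p) < best_prio) {
                winner = k.get();
                best_prio = k->priority(p);
            }
        }
        return winner;  // nullptr when nothing validates
    }
private:
    Selector() = default;  // the real ctors call Attach<...>() here
    std::vector<std::unique_ptr<KernelBase>> kernels_;
};

int main() { return Selector::instance().best(Params{}) == nullptr ? 0 : 1; }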
b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h deleted file mode 100644 index 57f7571bb395bb..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_kernel_selector.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class lstm_dynamic_input_kernel_selector : public kernel_selector_base { -public: - static lstm_dynamic_input_kernel_selector& Instance() { - static lstm_dynamic_input_kernel_selector instance_; - return instance_; - } - - lstm_dynamic_input_kernel_selector(); - - virtual ~lstm_dynamic_input_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp deleted file mode 100644 index 1b5214da61e31e..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_input_ref_kernel.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTM_DynamicInputKernelRef::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputWeightsType(WeightsType::F16); - k.EnableInputWeightsType(WeightsType::F32); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMGEMMBias(); - k.EnableNonBiasTerm(); - k.EnableBiasPerFeature(); - k.EnableBiasPerOutput(); - return k; -} - -KernelsData LSTM_DynamicInputKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority LSTM_DynamicInputKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return DONT_USE_IF_HAVE_SOMETHING_ELSE; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h deleted file mode 100644 index 5c711c2662b764..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_input_ref_kernel.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_input_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicInputKernelRef : public LSTM_DynamicInputKernelBase { -public: - LSTM_DynamicInputKernelRef() : LSTM_DynamicInputKernelBase("lstm_dynamic_input_ref") {} - - virtual ~LSTM_DynamicInputKernelRef() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& 
options) const override; - -protected: - ParamsKey GetSupportedKey() const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp deleted file mode 100644 index 2b3a4cebe0532f..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_timeloop_kernel_base.h" -#include "kernel_selector_utils.h" -#include "common_tools.h" -#include -#include - -namespace kernel_selector { -JitConstants LSTM_DynamicTimeloopKernelBase::GetJitConstants(const lstm_dynamic_timeloop_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - const auto& out = params.outputs[0]; - size_t hidden_size = out.X().v; - - // [1] Certainties - jit.AddConstants({ - // IE default: fizo - MakeJitConstant("GEMM_OFFSET_I", 1 * hidden_size), - MakeJitConstant("GEMM_OFFSET_O", 3 * hidden_size), - MakeJitConstant("GEMM_OFFSET_F", 0 * hidden_size), - MakeJitConstant("GEMM_OFFSET_Z", 2 * hidden_size), - }); - - jit.AddConstants({MakeJitConstant("RECURRENT", params.recurrent), - MakeJitConstant("DYN_LENGTH", params.inputs.at(1)), - MakeJitConstant("HIDDEN_SIZE", hidden_size), - MakeJitConstant("MAX_SEQUENCE_LENGTH", params.inputs.at(0).Feature().v), - MakeJitConstant("ELEMENTS_TO_COUNT", hidden_size > 256 ? hidden_size / 256 : 1)}); - - if (params.has_hidden) { - const auto& hidden = params.hidden; - jit.AddConstants({ - MakeJitConstant("INIT_HIDDEN_TERM", true), - MakeJitConstant("INIT_HIDDEN", hidden), - }); - } - - if (params.has_cell) { - const auto& cell = params.cell; - jit.AddConstants({ - MakeJitConstant("INIT_CELL_TERM", true), - MakeJitConstant("INIT_CELL", cell), - }); - } - - if (params.clip > 0) { - std::string psclip = toCodeString(params.clip); - std::string nsclip = toCodeString(-params.clip); - jit.AddConstants( - {MakeJitConstant("CLIP(x)", - "((x > " + psclip + ") ? " + psclip + ": (x < " + nsclip + ") ? " + nsclip + " : (x))")}); - } else { - jit.AddConstants({MakeJitConstant("CLIP(x)", "(x)")}); - } - if (params.input_forget) { - jit.AddConstants({MakeJitConstant("INPUT_FORGET", true)}); - } - - if (params.has_last_hidden_output) { - jit.AddConstants( - {MakeJitConstant("LAST_HIDDEN", params.last_hidden_output), MakeJitConstant("LAST_HIDDEN_TERM", true)}); - } - - if (params.has_last_cell_output) { - jit.AddConstants( - {MakeJitConstant("LAST_CELL", params.last_cell_output), MakeJitConstant("LAST_CELL_TERM", true)}); - } - - return jit; -} - -LSTM_DynamicTimeloopKernelBase::DispatchData LSTM_DynamicTimeloopKernelBase::SetDefault( - const lstm_dynamic_timeloop_params& params) { - DispatchData dispatchData; - const auto& out = params.outputs[0]; - - auto out_x_size = out.X().v; - auto gws0 = out_x_size > 256 ? 
256 : out_x_size; - dispatchData.gws = { gws0, out.Batch().v, static_cast(params.direction) }; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - - return dispatchData; -} - -void kernel_selector::LSTM_DynamicTimeloopKernelBase::SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& kernel) const { - uint32_t input_idx = 0; - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::OUTPUT, 0 }); - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::RECURRENT, 0 }); - if (params.has_hidden) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::HIDDEN, 0 }); - } - if (params.has_cell) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::CELL, 0 }); - } - if (params.has_last_hidden_output) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - } - if (params.has_last_cell_output) { - kernel.params.arguments.push_back({ ArgumentDescriptor::Types::INPUT, input_idx++ }); - } -} - - -KernelsData LSTM_DynamicTimeloopKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - if (!Validate(params, options)) { - return {}; - } - - const lstm_dynamic_timeloop_params& org_params = static_cast(params); - - auto dispatchData = SetDefault(org_params); - KernelData k_data = KernelData::Default(params, 1); - - auto cldnn_jit = GetJitConstants(org_params); - auto entry_point = GetEntryPoint(kernelName, org_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - kernel.params.workGroups.global = dispatchData.gws; - kernel.params.workGroups.local = dispatchData.lws; - kernel.code.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); - SetKernelArguments(org_params, kernel); - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h deleted file mode 100644 index 6bd56ed8f91c65..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.h +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_timeloop_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_timeloop_params : public base_params { - lstm_dynamic_timeloop_params() : base_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {} - - DataTensor recurrent; - DataTensor hidden; - DataTensor cell; - DataTensor last_hidden_output; - DataTensor last_cell_output; - - float clip = 0.0f; - bool input_forget = false; - bool has_hidden = false; - bool has_cell = false; - bool has_last_hidden_output = false; - bool has_last_cell_output = false; - int32_t direction = 1; - - void set_hidden(const DataTensor& v) { - hidden = v; - has_hidden = true; 
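// GetJitConstants in the timeloop base above stitches a CLIP(x) macro into the OpenCL
// JIT: a clamp expression when params.clip > 0 and a pass-through otherwise. A sketch of
// that string construction, where to_code_string stands in for the selector's
// toCodeString helper:
#include <iostream>
#include <sstream>
#include <string>

static std::string to_code_string(float v) {
    std::ostringstream ss;
    ss << v << "f";
    return ss.str();
}

static std::string make_clip_macro(float clip) {
    if (clip <= 0.0f)
        return "(x)";  // no clipping requested: identity macro
    const std::string p = to_code_string(clip), n = to_code_string(-clip);
    return "((x > " + p + ") ? " + p + " : (x < " + n + ") ? " + n + " : (x))";
}

int main() {
    std::cout << "#define CLIP(x) " << make_clip_macro(3.5f) << "\n";
}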
- } - - void set_cell(const DataTensor& v) { - cell = v; - has_cell = true; - } - - void set_last_hidden_output(const DataTensor& v) { - last_hidden_output = v; - has_last_hidden_output = true; - } - - void set_last_cell_output(const DataTensor& v) { - last_cell_output = v; - has_last_cell_output = true; - } - - ParamsKey GetParamsKey() const override { - ParamsKey k = base_params::GetParamsKey(); - - if (has_hidden) { - k.EnableLSTMGEMMHidden(); - } - - if (has_cell) { - k.EnableLSTMEltCell(); - } - - if (has_last_hidden_output) { - k.EnableLSTMDyanmicOptionalHiddenOutput(); - } - - if (has_last_cell_output) { - k.EnableLSTMDyanmicOptionalCellOutput(); - } - - return k; - } -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// lstm_dynamic_timeloop_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct lstm_dynamic_optional_params : optional_params { - lstm_dynamic_optional_params() : optional_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// LSTM_DynamicTimeloopKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LSTM_DynamicTimeloopKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~LSTM_DynamicTimeloopKernelBase() {} - - struct DispatchData : public CommonDispatchData {}; - -protected: - virtual JitConstants GetJitConstants(const lstm_dynamic_timeloop_params& params) const; - static DispatchData SetDefault(const lstm_dynamic_timeloop_params& params); - KernelsData GetCommonKernelsData(const Params& params, - const optional_params& optParams) const; - void SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& k_data) const; - bool Validate(const Params& p, const optional_params&) const override { - if (p.GetType() != KernelType::LSTM_DYNAMIC_TIMELOOP) { - return false; - } - - return true; - } -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp deleted file mode 100644 index 0fe024ce448249..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic_timeloop_kernel_selector.h" -#include "lstm_dynamic_timeloop_ref_kernel.h" - -namespace kernel_selector { -lstm_dynamic_timeloop_kernel_selector::lstm_dynamic_timeloop_kernel_selector() { - Attach(); -} - -KernelsData lstm_dynamic_timeloop_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::LSTM_DYNAMIC_TIMELOOP); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h deleted file mode 100644 index 45e02422ec5e89..00000000000000 --- 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_selector.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class lstm_dynamic_timeloop_kernel_selector : public kernel_selector_base { -public: - static lstm_dynamic_timeloop_kernel_selector& Instance() { - static lstm_dynamic_timeloop_kernel_selector instance_; - return instance_; - } - - lstm_dynamic_timeloop_kernel_selector(); - - virtual ~lstm_dynamic_timeloop_kernel_selector() {} - - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp deleted file mode 100644 index ce120ad1091328..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h" -#include "kernel_selector_utils.h" - -namespace kernel_selector { - -ParamsKey LSTM_DynamicTimeloopKernelRef::GetSupportedKey() const { - ParamsKey k; - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - k.EnableOutputDataType(Datatype::F32); - k.EnableInputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableDifferentTypes(); - k.EnableTensorOffset(); - k.EnableTensorPitches(); - k.EnableBatching(); - k.EnableLSTMEltCell(); - k.EnableLSTMGEMMHidden(); - k.EnableLSTMDyanmicOptionalCellOutput(); - k.EnableLSTMDyanmicOptionalHiddenOutput(); - return k; -} - -KernelsData LSTM_DynamicTimeloopKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority LSTM_DynamicTimeloopKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return DONT_USE_IF_HAVE_SOMETHING_ELSE; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h deleted file mode 100644 index 215f985503cdf3..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/lstm_dynamic/lstm_dynamic_timeloop_ref_kernel.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "lstm_dynamic_timeloop_kernel_base.h" - -namespace kernel_selector { -class LSTM_DynamicTimeloopKernelRef : public LSTM_DynamicTimeloopKernelBase { -public: - LSTM_DynamicTimeloopKernelRef() : LSTM_DynamicTimeloopKernelBase("lstm_dynamic_timeloop_ref") {} - - virtual ~LSTM_DynamicTimeloopKernelRef() {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - -protected: - ParamsKey GetSupportedKey() const override; -}; -} // namespace kernel_selector diff --git 
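// The timeloop JIT constants above encode the gate order the GEMM output uses ("fizo"):
// each gate's slice starts at gate_index * hidden_size, with hidden_size taken from the
// output X dimension. A small sketch of that offset computation:
#include <cstddef>

enum Gate : std::size_t { F = 0, I = 1, Z = 2, O = 3 };  // fizo layout

static std::size_t gate_offset(Gate g, std::size_t hidden_size) {
    return static_cast<std::size_t>(g) * hidden_size;
}

int main() {
    const std::size_t hidden = 128;
    // Mirrors GEMM_OFFSET_F/I/Z/O = 0, 128, 256, 384 for hidden_size == 128.
    return gate_offset(O, hidden) == 3 * hidden ? 0 : 1;
}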
a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp deleted file mode 100644 index 28c4e94ba88603..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_base.h" -#include "kernel_selector_utils.h" -#include - -namespace kernel_selector { - -JitConstants PyramidROIAlignKernelBase::GetJitConstants(const PyramidROIAlign_params& params) const { - JitConstants jit = MakeBaseParamsJitConstants(params); - - jit.AddConstant(MakeJitConstant("IMAGE_SIZE_X", params.image_size_x)); - jit.AddConstant(MakeJitConstant("IMAGE_SIZE_Y", params.image_size_y)); - jit.AddConstant(MakeJitConstant("SAMPLING_RATIO_X", params.sampling_ratio_x)); - jit.AddConstant(MakeJitConstant("SAMPLING_RATIO_Y", params.sampling_ratio_y)); - jit.AddConstant(MakeJitConstant("PYRAMID_STARTING_LEVEL", params.pyramid_starting_level)); - - return jit; -} - -PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelBase::SetDefault(const PyramidROIAlign_params& params) const { - DispatchData dispatchData; - dispatchData.gws = {1, 1, 1}; - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return dispatchData; -} - -KernelsData PyramidROIAlignKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options) const { - assert(params.GetType() == KernelType::PYRAMID_ROI_ALIGN); - - const auto& prim_params = - static_cast(params); - auto dispatchData = SetDefault(prim_params); - KernelData k_data = KernelData::Default(params); - auto cldnn_jit = GetJitConstants(prim_params); - auto entry_point = GetEntryPoint(kernelName, prim_params.layerID, params, options); - auto jit = CreateJit(kernelName, cldnn_jit, entry_point); - - auto& kernel = k_data.kernels[0]; - FillCLKernelData(kernel, - dispatchData, - params.engineInfo, - kernelName, - jit, - entry_point, - "", - false, - false, - (uint32_t)prim_params.inputs.size()); - - return {k_data}; -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h deleted file mode 100644 index 52c1d810a56a54..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_base_opencl.h" -#include "kernel_selector_params.h" - -namespace kernel_selector { -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// PyramidROIAlign_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct PyramidROIAlign_params : public base_params { - PyramidROIAlign_params() : base_params(KernelType::PYRAMID_ROI_ALIGN), - image_size_x(1), image_size_y(1), sampling_ratio_x(1), sampling_ratio_y(1), - pyramid_starting_level(0) {} - - int image_size_x; - int image_size_y; - int sampling_ratio_x; - int sampling_ratio_y; - int pyramid_starting_level; -}; 
- -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// index_select_optional_params -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -struct PyramidROIAlign_optional_params : optional_params { - PyramidROIAlign_optional_params() : optional_params(KernelType::PYRAMID_ROI_ALIGN) {} -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// PyramidROIAlignKernelBase -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class PyramidROIAlignKernelBase : public KernelBaseOpenCL { -public: - using KernelBaseOpenCL::KernelBaseOpenCL; - virtual ~PyramidROIAlignKernelBase() {} - - using DispatchData = CommonDispatchData; - -protected: - JitConstants GetJitConstants(const PyramidROIAlign_params& params) const; - virtual DispatchData SetDefault(const PyramidROIAlign_params& params) const; - KernelsData GetCommonKernelsData(const Params& params, const optional_params&) const; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp deleted file mode 100644 index 5492dd48c7e530..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_ref.h" -#include "kernel_selector_utils.h" - -#include - -namespace kernel_selector { -ParamsKey PyramidROIAlignKernelRef::GetSupportedKey() const { - ParamsKey k; - - k.EnableInputDataType(Datatype::F16); - k.EnableInputDataType(Datatype::F32); - - k.EnableOutputDataType(Datatype::F32); - k.EnableOutputDataType(Datatype::F16); - - k.EnableInputLayout(DataLayout::bfyx); - k.EnableInputLayout(DataLayout::yxfb); - k.EnableInputLayout(DataLayout::byxf); - - k.EnableOutputLayout(DataLayout::bfyx); - k.EnableOutputLayout(DataLayout::yxfb); - k.EnableOutputLayout(DataLayout::byxf); - - k.EnableBatching(); - k.EnableDifferentTypes(); - - return k; -} - -PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelRef::SetDefault(const PyramidROIAlign_params& params) const { - auto dispatchData = PyramidROIAlignKernelBase::SetDefault(params); - auto in_layout = params.inputs[0].GetLayout(); - auto out_layout = params.outputs[0].GetLayout(); - std::vector> dims_by_gws = {{ Tensor::DataChannelName::X, Tensor::DataChannelName::Y }, - { Tensor::DataChannelName::FEATURE }, - { Tensor::DataChannelName::BATCH }}; - - dispatchData.gws = { - params.outputs[0].X().v * params.outputs[0].Y().v, - params.outputs[0].Feature().v, - params.outputs[0].Batch().v }; - - dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, in_layout, out_layout, dims_by_gws); - - return dispatchData; -} - -KernelsData PyramidROIAlignKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { - return GetCommonKernelsData(params, options); -} - -KernelsPriority PyramidROIAlignKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { - return FORCE_PRIORITY_9; -} -} // namespace kernel_selector diff --git 
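// PyramidROIAlignKernelRef::SetDefault above derives the global work size straight from
// the output tensor, one work-item per output element, grouped as {X*Y, F, B}. A
// standalone sketch of that dispatch computation with stand-in types:
#include <array>
#include <cstddef>

struct OutShape { std::size_t b, f, y, x; };

static std::array<std::size_t, 3> global_work_size(const OutShape& out) {
    // Spatial plane on axis 0, features on axis 1, batch on axis 2.
    return {out.x * out.y, out.f, out.b};
}

int main() {
    auto gws = global_work_size({2, 256, 7, 7});
    return gws[0] == 49 && gws[1] == 256 && gws[2] == 2 ? 0 : 1;
}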
a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h deleted file mode 100644 index fcbcba2cf05155..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "pyramid_roi_align_kernel_base.h" - -namespace kernel_selector { -class PyramidROIAlignKernelRef : public PyramidROIAlignKernelBase { -public: - PyramidROIAlignKernelRef() : PyramidROIAlignKernelBase("pyramid_roi_align_gpu_ref") {} - KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; - KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; - ParamsKey GetSupportedKey() const override; -protected: - DispatchData SetDefault(const PyramidROIAlign_params& params) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp deleted file mode 100644 index 296b8ef986b071..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyramid_roi_align_kernel_selector.h" -#include "pyramid_roi_align_kernel_ref.h" - -namespace kernel_selector { -PyramidROIAlign_kernel_selector::PyramidROIAlign_kernel_selector() { Attach(); } - -KernelsData PyramidROIAlign_kernel_selector::GetBestKernels(const Params& params, - const optional_params& options) const { - return GetNaiveBestKernel(params, options, KernelType::PYRAMID_ROI_ALIGN); -} -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h deleted file mode 100644 index 42b807d792cd13..00000000000000 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/pyramid_roi_align/pyramid_roi_align_kernel_selector.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "kernel_selector.h" - -namespace kernel_selector { -class PyramidROIAlign_kernel_selector : public kernel_selector_base { -public: - static PyramidROIAlign_kernel_selector& Instance() { - static PyramidROIAlign_kernel_selector instance; - return instance; - } - - PyramidROIAlign_kernel_selector(); - KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; -}; -} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp index 719ff94aa32ca4..e6189addaf42fa 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp @@ -109,7 +109,6 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::winograd_6x3_s1_fused_weights, { 0, 1, -1, 2, 3, -1 } }, { 
WeightsLayout::image_2d_weights_winograd_6x3_s1_fbxyb, { 1, 0, -1, 3, 2, -1 } }, { WeightsLayout::image_2d_weights_winograd_6x3_s1_xfbyb, { 3, 0, -1, 2, 1, -1 } }, - { WeightsLayout::dlstm_dir_io, { 1, 0, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv16_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_isa8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } }, diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index 745c54e5689791..f78a1afe2a83be 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -134,7 +134,6 @@ enum WeightsLayout { // 3x3 with stride 1 image_2d_weights_winograd_6x3_s1_xfbyb, // image 2d winograd convolution weights for fused kernel, F(2, 3) --filter // 3x3 with stride 1 - dlstm_dir_io, // dlstm weights layout direction, input_size, 4* hiden_size os_is_yx_isa8_osv8_isv4, // for MMAD convolution os_is_zyx_isa8_osv8_isv4, // for MMAD convolution os_is_yx_isa8_osv16_isv4, // for fully connected MMAD @@ -316,7 +315,6 @@ inline bool SimpleLayout(WeightsLayout l) { case WeightsLayout::yxio: case WeightsLayout::oizyx: case WeightsLayout::iozyx: - case WeightsLayout::dlstm_dir_io: return true; default: return false; @@ -390,15 +388,6 @@ inline bool IsImageType(WeightsLayout l) { } } -inline bool IsDynamicLSTMType(WeightsLayout l) { - switch (l) { - case WeightsLayout::dlstm_dir_io: - return true; - default: - return false; - } -} - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Tensor Explanation //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/plugin/common_utils.cpp b/src/plugins/intel_gpu/src/plugin/common_utils.cpp index bf6bb5d79cd01a..7dfa876570b013 100644 --- a/src/plugins/intel_gpu/src/plugin/common_utils.cpp +++ b/src/plugins/intel_gpu/src/plugin/common_utils.cpp @@ -169,5 +169,24 @@ void convert_and_copy(const ov::ITensor* src, ov::ITensor const* dst, const cldn return ::convert_and_copy(src_ptr, src_et, dst_ptr, dst_et, size, cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding())); } +std::vector<cldnn::data_types> get_output_data_types(const ov::Node* op, PrecisionMap precision_map) { + std::vector<cldnn::data_types> output_data_types; + for (size_t i = 0; i < op->get_output_size(); i++) { + auto type = op->get_output_element_type(i); + if (precision_map.find(type) != precision_map.end()) + type = precision_map.at(type); + output_data_types.push_back(cldnn::element_type_to_data_type(type)); + } + return output_data_types; +} + +std::vector<cldnn::padding> get_output_paddings(const ov::Node* op) { + std::vector<cldnn::padding> output_paddings; + for (size_t i = 0; i < op->get_output_size(); i++) { + output_paddings.push_back(cldnn::padding()); + } + return output_paddings; +} + } // namespace intel_gpu } // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp index 3df01c1f0727ab..2bed086a94ca8f 100644 --- a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp +++ b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp @@ -80,7 +80,8 @@ CompiledModel::CompiledModel(std::shared_ptr<ov::Model> model, CompiledModel::CompiledModel(cldnn::BinaryInputBuffer& ib, const std::shared_ptr<const ov::IPlugin>& plugin, RemoteContextImpl::Ptr context, - const
ExecutionConfig& config) + const ExecutionConfig& config, + const bool loaded_from_cache) : ov::ICompiledModel(nullptr, plugin, context, @@ -90,7 +91,7 @@ CompiledModel::CompiledModel(cldnn::BinaryInputBuffer& ib, , m_config(config) , m_wait_executor(std::make_shared(ov::threading::IStreamsExecutor::Config{"Intel GPU plugin wait executor"})) , m_model_name("") - , m_loaded_from_cache(true) { + , m_loaded_from_cache(loaded_from_cache) { { size_t num_params; ib >> num_params; diff --git a/src/plugins/intel_gpu/src/plugin/graph.cpp b/src/plugins/intel_gpu/src/plugin/graph.cpp index 1194f659d89ff6..0b1748b36ab76d 100644 --- a/src/plugins/intel_gpu/src/plugin/graph.cpp +++ b/src/plugins/intel_gpu/src/plugin/graph.cpp @@ -185,9 +185,7 @@ std::shared_ptr Graph::get_runtime_model(std::vector Graph::get_runtime_model(std::vectorget_output_size(); - - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; - uint32_t blank_index = UINT32_MAX; if (reordered_inputs.size() == 3) { auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); @@ -84,8 +66,8 @@ static void CreateCommonCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ cldnn::padding({0, 0, 0, 0}, 0), cldnn::element_type_to_data_type(op->get_output_element_type(0)), op->get_output_size()); - primitive.output_paddings = get_output_paddings(); - primitive.output_data_types = get_output_data_types(); + primitive.output_paddings = get_output_paddings(op); + primitive.output_data_types = get_output_data_types(op); p.add_primitive(*op, primitive); } else { uint32_t blank_index = static_cast(op->get_input_shape(0).back() - 1); diff --git a/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp b/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp index 6e91cc7db9fe2f..843e4706764b8c 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp @@ -53,26 +53,6 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh auto boxesShape = op->get_input_partial_shape(0); size_t num_outputs = op->get_output_size(); - - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - // GPU primitive supports only i32 as output data type - if (type == ov::element::i64) { - type = ov::element::i32; - } - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; - if (p.use_new_shape_infer()) { auto nonMaxSuppressionLayerName = layer_type_name_ID(op); auto prim = cldnn::non_max_suppression( @@ -84,8 +64,8 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh op->m_sort_result_descending, "", "", "", "", "", "", num_outputs); - prim.output_paddings = get_output_paddings(); - prim.output_data_types = get_output_data_types(); + prim.output_paddings = 
get_output_paddings(op); + prim.output_data_types = get_output_data_types(op, {{ov::element::i64, ov::element::i32}}); prim.rotation = rotation; switch (reordered_inputs.size()) { @@ -153,7 +133,7 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh op->m_sort_result_descending, "", "", "", "", "", ""); - prim.output_data_types = get_output_data_types(); + prim.output_data_types = get_output_data_types(op, {{ov::element::i64, ov::element::i32}}); prim.rotation = rotation; switch (reordered_inputs.size()) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp index 6247c701079cf7..84ac443fe7f98d 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp @@ -56,20 +56,6 @@ static void CreateProposalOp(ProgramBuilder& p, const std::shared_ptrget_output_size(); - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; auto proposalPrim = cldnn::proposal(layerName, inputs[0], // cls_score @@ -98,8 +84,8 @@ static void CreateProposalOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)), num_outputs); - proposalPrim.output_paddings = get_output_paddings(); - proposalPrim.output_data_types = get_output_data_types(); + proposalPrim.output_paddings = get_output_paddings(op); + proposalPrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, proposalPrim); } else { if (op->get_output_size() == 2) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/rms.cpp b/src/plugins/intel_gpu/src/plugin/ops/rms.cpp index 01289bd5022d6d..bf36aab7f32128 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/rms.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/rms.cpp @@ -23,17 +23,11 @@ static void CreateRMSOp(ProgramBuilder& p, const std::shared_ptr& op) { auto inputs = p.GetInputInfo(op); std::string primitive_name = layer_type_name_ID(op); - auto get_output_data_types = [&]() { - std::vector output_data_types; - auto type = op->get_output_element_type(0); - output_data_types.push_back(cldnn::element_type_to_data_type(type)); - return output_data_types; - }; auto rms = cldnn::rms(primitive_name, inputs[0], inputs[1], op->get_epsilon()); - rms.output_data_types = get_output_data_types(); + rms.output_data_types = get_output_data_types(op); p.add_primitive(*op, rms); } diff --git a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp index 2c38259f540c38..79bc1508a8bcd2 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp @@ -27,20 +27,6 @@ static void TopKImpl(ProgramBuilder& p, if (p.use_new_shape_infer()) { size_t num_outputs = op->get_output_size(); - auto get_output_paddings = [&]() { - std::vector output_paddings; - for (size_t i = 0; i < num_outputs; i++) - output_paddings.push_back(cldnn::padding()); - return output_paddings; - }; - auto get_output_data_types = [&]() { - std::vector output_data_types; - for (size_t i = 0; i < num_outputs; i++) { - auto type = op->get_output_element_type(i); - 
output_data_types.push_back(cldnn::element_type_to_data_type(type)); - } - return output_data_types; - }; auto topk_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(1).get_node_shared_ptr()); auto argmaxPrim = cldnn::arg_max_min(layerName, @@ -55,8 +41,8 @@ static void TopKImpl(ProgramBuilder& p, cldnn::padding({0, 0, 0, 0}, 0), cldnn::element_type_to_data_type(op->get_output_element_type(0)), num_outputs); - argmaxPrim.output_paddings = get_output_paddings(); - argmaxPrim.output_data_types = get_output_data_types(); + argmaxPrim.output_paddings = get_output_paddings(op); + argmaxPrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, argmaxPrim); } else { if (op->get_output_size() == 2) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp b/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp index ae7daf2b3992ce..4ae9362bc5d6c6 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/transpose.cpp @@ -34,7 +34,7 @@ static void CreateTransposeOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)); + permutePrim.output_data_types = get_output_data_types(op); p.add_primitive(*op, permutePrim); } diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index a51706d8b99774..3cdc4d399bb0b1 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -292,15 +292,24 @@ std::shared_ptr<ov::ICompiledModel> Plugin::import_model(std::istream& model, auto context_impl = get_context_impl(context); auto device_id = ov::DeviceIDParser{context_impl->get_device_name()}.get_device_id(); + // Check the ov::loaded_from_cache property and erase it, since it is no longer needed. + auto _orig_config = orig_config; + const auto& it = _orig_config.find(ov::loaded_from_cache.name()); + bool loaded_from_cache = false; + if (it != _orig_config.end()) { + loaded_from_cache = it->second.as<bool>(); + _orig_config.erase(it); + } + ExecutionConfig config = m_configs_map.at(device_id); - config.set_user_property(orig_config); + config.set_user_property(_orig_config); config.apply_user_properties(context_impl->get_engine().get_device_info()); if (config.get_property(ov::cache_mode) == ov::CacheMode::OPTIMIZE_SIZE) return nullptr; cldnn::BinaryInputBuffer ib(model, context_impl->get_engine()); - return std::make_shared<CompiledModel>(ib, shared_from_this(), context_impl, config); + return std::make_shared<CompiledModel>(ib, shared_from_this(), context_impl, config, loaded_from_cache); } ov::Any Plugin::get_property(const std::string& name, const ov::AnyMap& options) const { diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp new file mode 100644 index 00000000000000..941b5c51ec3a67 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "clamp_fp16_output.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/pass/pattern/op/pattern.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/pass/pattern/op/or.hpp" + +#include + +namespace ov { +namespace intel_gpu { + +ClampFP16Output::ClampFP16Output() { + using namespace ov::op; + using
namespace ov::pass::pattern; + using namespace ov::pass::pattern::op; + + auto in0 = any_input(as_value_predicate(class_other_than<v0::Constant>())); + auto in1 = any_input(as_value_predicate(class_other_than<v0::Constant>())); + auto matmul_m = wrap_type<v0::MatMul>({in0, in1}, all_of({type_matches(ov::element::f16), consumers_count(1)})); + auto reshape_m = wrap_type<v1::Reshape>({matmul_m, any_input()}, all_of({type_matches(ov::element::f16), consumers_count(1)})); + auto softmax_input_m = std::make_shared<Or>(ov::OutputVector{reshape_m, matmul_m}); + auto softmax_m = wrap_type<v8::Softmax>({softmax_input_m}, type_matches(ov::element::f16)); + + ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto softmax = std::dynamic_pointer_cast<v8::Softmax>(pattern_map.at(softmax_m).get_node_shared_ptr()); + if (!softmax || transformation_callback(softmax)) { + return false; + } + + auto matmul = pattern_map.at(matmul_m).get_node_shared_ptr(); + auto target_inputs = matmul->get_output_target_inputs(0); + + auto min = static_cast<double>(std::numeric_limits<ov::float16>::lowest()); + auto max = static_cast<double>(std::numeric_limits<ov::float16>::max()); + auto clamp = std::make_shared<v0::Clamp>(matmul, min, max); + clamp->set_friendly_name(matmul->get_friendly_name() + "/ClampFP16Output"); + ov::copy_runtime_info({matmul, softmax}, clamp); + + for (auto& in : target_inputs) { + in.replace_source_output(clamp); + } + + return true; + }; + + auto m = std::make_shared<Matcher>(softmax_m, "ClampFP16Output"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp new file mode 100644 index 00000000000000..ac93d446ee749d --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace intel_gpu { + +/** + * @brief This transformation adds a Clamp primitive between MatMul and Softmax operations. + * It targets some transformer-based models (mainly Stable Diffusion) that may overflow the fp16 range + * on the MatMul output tensor, which could lead to Inf/NaN values in the model output.
+ * We assume that handling the Clamp operation costs almost nothing performance-wise, as it is expected to be fused into MatMul later. + */ +class ClampFP16Output: public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::intel_gpu::ClampFP16Output"); + + ClampFP16Output(); +}; + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp new file mode 100644 index 00000000000000..a5d798e4c2721c --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fc_convert_fusion.hpp" + +#include "intel_gpu/op/fully_connected.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/utils.hpp" + +namespace ov { +namespace intel_gpu { + +FullyConnectedConvertFusion::FullyConnectedConvertFusion() { + using namespace ov::pass::pattern; + + auto data = any_input(); + auto weights = any_input(); + auto fully_connected = wrap_type<op::FullyConnected>({data, weights}, consumers_count(1)); + auto fully_connected_compressed = wrap_type<op::FullyConnectedCompressed>({data, weights, any_input(), any_input()}, consumers_count(1)); + auto fc = std::make_shared<ov::pass::pattern::op::Or>(OutputVector{fully_connected, fully_connected_compressed}); + auto convert = wrap_type<ov::op::v0::Convert>({fc}, type_matches(element::f32)); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + + const auto& m_data = pattern_map.at(data).get_node_shared_ptr(); + const auto& m_weights = pattern_map.at(weights).get_node_shared_ptr(); + const auto& m_convert = pattern_map.at(convert).get_node_shared_ptr(); + auto output_type = m_convert->get_output_element_type(0); + + std::shared_ptr<ov::Node> m_fc = nullptr; + std::shared_ptr<ov::Node> new_fc = nullptr; + auto it = pattern_map.find(fully_connected); + if (it != pattern_map.end()) { + m_fc = it->second.get_node_shared_ptr(); + new_fc = std::make_shared<op::FullyConnected>(m_data, m_weights, output_type); + } else { + m_fc = pattern_map.at(fully_connected_compressed).get_node_shared_ptr(); + new_fc = std::make_shared<op::FullyConnectedCompressed>(m_data, + m_weights, + m_fc->input_value(2), + m_fc->input_value(3), + output_type); + } + new_fc->set_friendly_name(m_convert->get_friendly_name()); + copy_runtime_info(m.get_matched_nodes(), new_fc); + replace_node(m_convert, new_fc); + + return true; + }; + + auto m = std::make_shared<Matcher>(convert, "FullyConnectedConvertFusion"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp new file mode 100644 index 00000000000000..44db1882f8e87f --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" + +namespace ov { +namespace intel_gpu { + +class FullyConnectedConvertFusion: public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("FullyConnectedConvertFusion", "0"); + FullyConnectedConvertFusion(); +}; + +} // namespace intel_gpu +} // namespace ov
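// ---- Editor's illustration (not part of this patch) ----
// A minimal standalone sketch of the failure mode described in the
// ClampFP16Output comment above. The two-element softmax and the sample
// values are assumptions chosen for the demo; only the fp16 finite bound
// of 65504 comes from the IEEE fp16 format. A numerically stable softmax
// subtracts the row maximum first, so if the MatMul output has already
// overflowed to +inf, that subtraction yields inf - inf = NaN; clamping
// to the finite fp16 range beforehand keeps the result well defined.
#include <cmath>
#include <cstdio>
#include <limits>

static void softmax2(float a, float b, float* out) {
    float m = std::fmax(a, b);                         // stable softmax: subtract the max
    float ea = std::exp(a - m), eb = std::exp(b - m);  // inf - inf = NaN if inputs overflowed
    out[0] = ea / (ea + eb);
    out[1] = eb / (ea + eb);
}

int main() {
    const float fp16_max = 65504.0f;  // finite fp16 limit, cf. std::numeric_limits<ov::float16>::max()
    float overflowed = std::numeric_limits<float>::infinity();  // an fp16 MatMul output that overflowed
    float out[2];
    softmax2(overflowed, overflowed, out);            // prints nan nan: the Inf/NaN propagation the pass prevents
    std::printf("without clamp: %f %f\n", out[0], out[1]);
    float clamped = std::fmin(overflowed, fp16_max);  // what the inserted Clamp does at the upper bound
    softmax2(clamped, clamped, out);                  // well defined: 0.5 0.5
    std::printf("with clamp:    %f %f\n", out[0], out[1]);
    return 0;
}
// ---- end of editor's illustration ----
diff --git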
a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index cea3bbfa391cf0..0c57b56671349c 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -122,6 +122,8 @@ #include "plugin/transformations/binary_conv_to_conv.hpp" #include "plugin/transformations/move_convert_after_gather.hpp" #include "plugin/transformations/kv_cache_fusion.hpp" +#include "plugin/transformations/fc_convert_fusion.hpp" +#include "plugin/transformations/clamp_fp16_output.hpp" #include "transformations/low_precision/mark_dequantization_subgraph.hpp" #include "low_precision/pull_reshape_through_dequantization.hpp" @@ -692,12 +694,14 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) { { ov::pass::Manager manager; + manager.register_pass<ClampFP16Output>(); manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); + manager.register_pass<FullyConnectedConvertFusion>(); // This is supposed to be the last pass to ensure that we don't have name collisions until // GPU plugin stops using friendly names for program creation diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp index 1d89991826b94a..e2728eb1b0dea0 100644 --- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp +++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp @@ -47,9 +47,20 @@ void VariableState::set() { m_is_set = true; } +void VariableState::set_memory(const cldnn::memory::ptr& new_mem, const cldnn::layout& actual_layout) { + GPU_DEBUG_TRACE_DETAIL << m_name << " : Update memory (Ptr : " << new_mem->buffer_ptr() + << ", layout : " << actual_layout.to_short_string() << ")" << std::endl; + m_memory = new_mem; + m_layout = actual_layout; + actual_size = m_memory->size(); + update_device_buffer(); +} + void VariableState::set_layout(const cldnn::layout& new_layout) { + if (m_layout == new_layout) + return; m_layout = new_layout; - GPU_DEBUG_TRACE_DETAIL << "Update state layout to " << new_layout.to_short_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << m_name << " : " << "Update state layout to " << new_layout.to_short_string() << std::endl; update_device_buffer(); } @@ -61,8 +72,12 @@ void VariableState::set_state(const ov::SoPtr<ov::ITensor>& state) { } void VariableState::update_device_buffer() { - if (m_layout.is_dynamic() || m_layout.bytes_count() == 0) + if (m_layout.is_dynamic() || m_layout.bytes_count() == 0) { + m_shape_predictor->reset(); + m_memory.reset(); + actual_size = 0; return; + } if (actual_size < m_layout.bytes_count()) { const auto alloc_type = m_context->get_engine().use_unified_shared_memory() ?
cldnn::allocation_type::usm_device : cldnn::allocation_type::cl_mem; diff --git a/src/plugins/intel_gpu/src/runtime/format.cpp b/src/plugins/intel_gpu/src/runtime/format.cpp index d907c4a10f045e..fcfda09b69601b 100644 --- a/src/plugins/intel_gpu/src/runtime/format.cpp +++ b/src/plugins/intel_gpu/src/runtime/format.cpp @@ -99,7 +99,6 @@ static const std::map format_traits_map { FMT_TRAITS(image_2d_weights_winograd_6x3_s1_xfbyb, 1, 1, 2, 0, {3, 1, 0, 2}, "xioy", "oixy", {}), FMT_TRAITS(image_2d_weights_c4_fyx_b, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {}), FMT_TRAITS(image_2d_weights_c1_b_fyx, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {}), - FMT_TRAITS(lstm_weights_dio, 1, 1, 2, 0, {0, 1, 3, 2}, "oixy", "oixy", {}), FMT_TRAITS(os_is_yx_isa8_osv16_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 8}, {0, 16}, {1, 4}}), FMT_TRAITS(os_is_yx_osa4_isa8_osv8_isv2, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 8}, {0, 8}, {1, 2}}), FMT_TRAITS(os_is_yx_osa4_isa8_osv8_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 8}, {0, 8}, {1, 4}}), diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_common.hpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_common.hpp index 4e034eb8e0629a..0373a8ada21090 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_common.hpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_common.hpp @@ -20,5 +20,7 @@ class ocl_error : public ov::Exception { explicit ocl_error(cl::Error const& err); }; +#define OCL_ERR_MSG_FMT(err) ("[GPU] " + std::string(err.what()) + std::string(", error code: ") + std::to_string(err.err())) + } // namespace ocl } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_device.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_device.cpp index 6d01f4f69fb24c..410e184998f632 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_device.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_device.cpp @@ -276,6 +276,7 @@ bool does_device_support(int32_t param, const cl::Device& device) { cl_device_unified_shared_memory_capabilities_intel capabilities; auto err = clGetDeviceInfo(device.get(), param, sizeof(cl_device_unified_shared_memory_capabilities_intel), &capabilities, NULL); if (err) throw std::runtime_error("[CLDNN ERROR]. 
clGetDeviceInfo error " + std::to_string(err)); + return !((capabilities & CL_UNIFIED_SHARED_MEMORY_ACCESS_INTEL) == 0u); } diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp index 807841362e6581..b6b1233ceba397 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp @@ -208,7 +208,7 @@ memory::ptr ocl_engine::reinterpret_buffer(const memory& memory, const layout& n memory.get_mem_tracker()); } } catch (cl::Error const& err) { - throw ocl::ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } } diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_event.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_event.cpp index bd8ea751d74d17..1ed0a7741b04fd 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_event.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_event.cpp @@ -50,7 +50,11 @@ void ocl_event::set_ocl_callback() { void ocl_event::wait_impl() { if (_event.get() != nullptr) { - _event.wait(); + try { + _event.wait(); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } } @@ -59,8 +63,12 @@ void ocl_event::set_impl() { } bool ocl_event::is_set_impl() { - if (_event.get() != nullptr) { - return _event.getInfo() == CL_COMPLETE; + try { + if (_event.get() != nullptr) { + return _event.getInfo() == CL_COMPLETE; + } + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } return true; } @@ -94,8 +102,12 @@ bool ocl_event::get_profiling_info_impl(std::list() == CL_COMPLETE; + try { + if (_last_ocl_event.get() != nullptr) { + return _last_ocl_event.getInfo() == CL_COMPLETE; + } + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } return true; } @@ -134,8 +154,12 @@ bool ocl_events::get_profiling_info_impl(std::list_event.getProfilingInfo(period.start, &ev_start); - be->_event.getProfilingInfo(period.stop, &ev_end); + try { + be->_event.getProfilingInfo(period.start, &ev_start); + be->_event.getProfilingInfo(period.stop, &ev_end); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } auto ev_duration = std::make_pair(static_cast(ev_start), static_cast(ev_end)); diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp index 5859fe955f33ba..d2bdef5bde52d3 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp @@ -51,7 +51,11 @@ void* gpu_buffer::lock(const stream& stream, mem_lock_type type) { auto& cl_stream = downcast(stream); std::lock_guard locker(_mutex); if (0 == _lock_count) { - _mapped_ptr = cl_stream.get_cl_queue().enqueueMapBuffer(_buffer, CL_TRUE, get_cl_map_type(type), 0, size()); + try { + _mapped_ptr = cl_stream.get_cl_queue().enqueueMapBuffer(_buffer, CL_TRUE, get_cl_map_type(type), 0, size()); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } _lock_count++; return _mapped_ptr; @@ -62,7 +66,11 @@ void gpu_buffer::unlock(const stream& stream) { std::lock_guard locker(_mutex); _lock_count--; if (0 == _lock_count) { - cl_stream.get_cl_queue().enqueueUnmapMemObject(_buffer, _mapped_ptr); + try { + cl_stream.get_cl_queue().enqueueUnmapMemObject(_buffer, _mapped_ptr); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } _mapped_ptr = nullptr; } } @@ -83,7 +91,11 @@ event::ptr gpu_buffer::fill(stream& stream, unsigned char pattern) { auto& cl_stream = 
downcast(stream); auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueFillBuffer(_buffer, pattern, 0, size(), nullptr, &ev_ocl); + try { + cl_stream.get_cl_queue().enqueueFillBuffer(_buffer, pattern, 0, size(), nullptr, &ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -120,8 +132,11 @@ event::ptr gpu_buffer::copy_from(stream& stream, const memory& other, bool block auto& mem_inst = downcast(other); auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueCopyBuffer(mem_inst.get_buffer(), get_buffer(), 0, 0, other.size(), nullptr, &ev_ocl); - + try { + cl_stream.get_cl_queue().enqueueCopyBuffer(mem_inst.get_buffer(), get_buffer(), 0, 0, other.size(), nullptr, &ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } if (blocking) ev->wait(); @@ -142,7 +157,11 @@ event::ptr gpu_buffer::copy_from(stream& stream, const void* host_ptr, bool bloc auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); data_size = (data_size == 0) ? size() : data_size; - cl_stream.get_cl_queue().enqueueWriteBuffer(_buffer, blocking, dst_offset, data_size, host_ptr, nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueWriteBuffer(_buffer, blocking, dst_offset, data_size, host_ptr, nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -155,7 +174,11 @@ event::ptr gpu_buffer::copy_to(stream& stream, void* host_ptr, bool blocking) { auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueReadBuffer(_buffer, blocking, 0, size(), host_ptr, nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueReadBuffer(_buffer, blocking, 0, size(), host_ptr, nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -259,8 +282,11 @@ event::ptr gpu_image2d::fill(stream& stream, unsigned char pattern) { auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); cl_uint4 pattern_uint4 = {{pattern, pattern, pattern, pattern}}; - cl_stream.get_cl_queue().enqueueFillImage(_buffer, pattern_uint4, {0, 0, 0}, {_width, _height, 1}, 0, &ev_ocl); - + try { + cl_stream.get_cl_queue().enqueueFillImage(_buffer, pattern_uint4, {0, 0, 0}, {_width, _height, 1}, 0, &ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } // TODO: do we need sync here? 
cl_stream.finish(); @@ -271,14 +297,18 @@ void* gpu_image2d::lock(const stream& stream, mem_lock_type type) { auto& cl_stream = downcast(stream); std::lock_guard locker(_mutex); if (0 == _lock_count) { - _mapped_ptr = cl_stream.get_cl_queue() - .enqueueMapImage(_buffer, - CL_TRUE, - get_cl_map_type(type), - {0, 0, 0}, - {_width, _height, 1}, - &_row_pitch, - &_slice_pitch); + try { + _mapped_ptr = cl_stream.get_cl_queue() + .enqueueMapImage(_buffer, + CL_TRUE, + get_cl_map_type(type), + {0, 0, 0}, + {_width, _height, 1}, + &_row_pitch, + &_slice_pitch); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } _lock_count++; return _mapped_ptr; @@ -289,7 +319,11 @@ void gpu_image2d::unlock(const stream& stream) { std::lock_guard locker(_mutex); _lock_count--; if (0 == _lock_count) { - cl_stream.get_cl_queue().enqueueUnmapMemObject(_buffer, _mapped_ptr); + try { + cl_stream.get_cl_queue().enqueueUnmapMemObject(_buffer, _mapped_ptr); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } _mapped_ptr = nullptr; } } @@ -316,9 +350,13 @@ event::ptr gpu_image2d::copy_from(stream& stream, const memory& other, bool bloc auto& casted = downcast(other); auto ev = stream.create_base_event(); cl::Event* ev_ocl = &downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueCopyImage(casted.get_buffer(), get_buffer(), - {0, 0, 0}, {0, 0, 0}, {_width, _height, 1}, - nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueCopyImage(casted.get_buffer(), get_buffer(), + {0, 0, 0}, {0, 0, 0}, {_width, _height, 1}, + nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } if (blocking) ev->wait(); @@ -336,8 +374,12 @@ event::ptr gpu_image2d::copy_from(stream& stream, const void* host_ptr, bool blo auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueWriteImage(_buffer, blocking, {0, 0, 0}, {_width, _height, 1}, - _row_pitch, _slice_pitch, host_ptr, nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueWriteImage(_buffer, blocking, {0, 0, 0}, {_width, _height, 1}, + _row_pitch, _slice_pitch, host_ptr, nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -351,9 +393,14 @@ event::ptr gpu_image2d::copy_to(stream& stream, memory& other, bool blocking) { auto& casted = downcast(other); auto ev = stream.create_base_event(); cl::Event* ev_ocl = &downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueCopyImage(get_buffer(), casted.get_buffer(), - {0, 0, 0}, {0, 0, 0}, {_width, _height, 1}, - nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueCopyImage(get_buffer(), casted.get_buffer(), + {0, 0, 0}, {0, 0, 0}, {_width, _height, 1}, + nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } + if (blocking) ev->wait(); @@ -369,8 +416,12 @@ event::ptr gpu_image2d::copy_to(stream& stream, void* host_ptr, bool blocking) { auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? 
nullptr : &downcast(ev.get())->get(); - cl_stream.get_cl_queue().enqueueReadImage(_buffer, blocking, {0, 0, 0}, {_width, _height, 1}, - _row_pitch, _slice_pitch, host_ptr, nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueReadImage(_buffer, blocking, {0, 0, 0}, {_width, _height, 1}, + _row_pitch, _slice_pitch, host_ptr, nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -452,7 +503,11 @@ void* gpu_usm::lock(const stream& stream, mem_lock_type type) { } GPU_DEBUG_LOG << "Copy usm_device buffer to host buffer." << std::endl; _host_buffer.allocateHost(_bytes_count); - cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), _host_buffer.get(), _buffer.get(), _bytes_count, CL_TRUE); + try { + cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), _host_buffer.get(), _buffer.get(), _bytes_count, CL_TRUE); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } _mapped_ptr = _host_buffer.get(); } else { _mapped_ptr = _buffer.get(); @@ -487,7 +542,11 @@ event::ptr gpu_usm::fill(stream& stream, unsigned char pattern) { std::vector temp_buffer(_bytes_count, pattern); // TODO: Do we really need blocking call here? Non-blocking one causes accuracy issues right now, but hopefully it can be fixed in more performant way. const bool blocking = true; - cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), _buffer.get(), temp_buffer.data(), _bytes_count, blocking, nullptr, &ev_ocl); + try { + cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), _buffer.get(), temp_buffer.data(), _bytes_count, blocking, nullptr, &ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -517,18 +576,26 @@ event::ptr gpu_usm::copy_from(stream& stream, const memory& other, bool blocking if (other.get_allocation_type() == allocation_type::cl_mem) { // Copy cl_mem to usm_memory by cl::CommandQueue::enqueueReadBuffer() auto& mem_inst = downcast(other); - cl_stream.get_cl_queue().enqueueReadBuffer(mem_inst.get_buffer(), blocking, 0, size(), this->buffer_ptr(), nullptr, ev_ocl); + try { + cl_stream.get_cl_queue().enqueueReadBuffer(mem_inst.get_buffer(), blocking, 0, size(), this->buffer_ptr(), nullptr, ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } else { auto& casted = downcast(other); auto dst_ptr = get_buffer().get(); auto src_ptr = casted.get_buffer().get(); - cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), - dst_ptr, - src_ptr, - _bytes_count, - blocking, - nullptr, - ev_ocl); + try { + cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), + dst_ptr, + src_ptr, + _bytes_count, + blocking, + nullptr, + ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } return ev; } @@ -548,13 +615,17 @@ event::ptr gpu_usm::copy_from(stream& stream, const void* host_ptr, bool blockin data_size = (data_size == 0) ? _bytes_count : data_size; auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? 
nullptr : &downcast(ev.get())->get(); - cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), - dst_ptr, - host_ptr, - data_size, - blocking, - nullptr, - ev_ocl); + try { + cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), + dst_ptr, + host_ptr, + data_size, + blocking, + nullptr, + ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } @@ -567,13 +638,17 @@ event::ptr gpu_usm::copy_to(stream& stream, void* host_ptr, bool blocking) { auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); auto src_ptr = get_buffer().get(); - cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), - host_ptr, - src_ptr, - _bytes_count, - blocking, - nullptr, - ev_ocl); + try { + cl_stream.get_usm_helper().enqueue_memcpy(cl_stream.get_cl_queue(), + host_ptr, + src_ptr, + _bytes_count, + blocking, + nullptr, + ev_ocl); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return ev; } diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp index 71942563ba82bd..e89201de456218 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_stream.cpp @@ -172,12 +172,6 @@ void set_arguments_impl(ocl_kernel_type& kernel, } } break; - case args_t::RECURRENT: - status = set_kernel_arg(kernel, i, data.recurrent); - break; - case args_t::HIDDEN: - status = set_kernel_arg(kernel, i, data.hidden); - break; case args_t::CELL: status = set_kernel_arg(kernel, i, data.cell); break; @@ -274,7 +268,7 @@ void ocl_stream::set_arguments(kernel& kernel, const kernel_arguments_desc& args GPU_DEBUG_TRACE_DETAIL << "Set arguments for primitive: " << args_desc.layerID << " (" << kern.get() << ")\n"; set_arguments_impl(kern, args_desc.arguments, args); } catch (cl::Error const& err) { - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } } @@ -313,21 +307,29 @@ event::ptr ocl_stream::enqueue_kernel(kernel& kernel, if (err.err() == CL_OUT_OF_RESOURCES) { ov::intel_gpu::ForceExit(); } - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } return std::make_shared(ret_ev, ++_queue_counter); } void ocl_stream::enqueue_barrier() { - _command_queue.enqueueBarrierWithWaitList(nullptr, nullptr); + try { + _command_queue.enqueueBarrierWithWaitList(nullptr, nullptr); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } event::ptr ocl_stream::enqueue_marker(std::vector const& deps, bool is_output) { // Wait for all previously enqueued tasks if deps list is empty if (deps.empty()) { cl::Event ret_ev; - _command_queue.enqueueMarkerWithWaitList(nullptr, &ret_ev); + try { + _command_queue.enqueueMarkerWithWaitList(nullptr, &ret_ev); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } return std::make_shared(ret_ev); } @@ -347,7 +349,7 @@ event::ptr ocl_stream::enqueue_marker(std::vector const& deps, bool } _command_queue.enqueueMarkerWithWaitList(&dep_events, &ret_ev); } catch (cl::Error const& err) { - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } return std::make_shared(ret_ev, ++_queue_counter); @@ -372,14 +374,30 @@ event::ptr ocl_stream::create_base_event() { return std::make_shared(ret_ev, ++_queue_counter); } -void ocl_stream::flush() const { get_cl_queue().flush(); } -void ocl_stream::finish() const { get_cl_queue().finish(); 
} +void ocl_stream::flush() const { + try { + get_cl_queue().flush(); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } +} +void ocl_stream::finish() const { + try { + get_cl_queue().finish(); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } +} void ocl_stream::wait() { cl::Event ev; // Enqueue barrier with empty wait list to wait for all previously enqueued tasks - _command_queue.enqueueBarrierWithWaitList(nullptr, &ev); + try { + _command_queue.enqueueBarrierWithWaitList(nullptr, &ev); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } ev.wait(); } @@ -405,14 +423,14 @@ void ocl_stream::wait_for_events(const std::vector& events) { _command_queue.enqueueBarrierWithWaitList(nullptr, &barrier_ev); clevents.push_back(barrier_ev); } catch (cl::Error const& err) { - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } } try { cl::WaitForEvents(clevents); } catch (cl::Error const& err) { - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } } @@ -432,7 +450,7 @@ void ocl_stream::sync_events(std::vector const& deps, bool is_output else _command_queue.enqueueBarrierWithWaitList(nullptr, nullptr); } catch (cl::Error const& err) { - throw ocl_error(err); + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); } _last_barrier = ++_queue_counter; diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_user_event.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_user_event.cpp index 0e2588d462d52a..ba173de3b845f0 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_user_event.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_user_event.cpp @@ -33,13 +33,21 @@ void ocl_user_event::wait_impl() { } if (_event.get() != nullptr) { - _event.wait(); + try { + _event.wait(); + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } } bool ocl_user_event::is_set_impl() { if (_event.get() != nullptr) { - return _event.getInfo() == CL_COMPLETE; + try { + return _event.getInfo() == CL_COMPLETE; + } catch (cl::Error const& err) { + OPENVINO_THROW(OCL_ERR_MSG_FMT(err)); + } } return true; } diff --git a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp index 1ff00c905bd073..599878ca5566b1 100644 --- a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp +++ b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp @@ -59,7 +59,11 @@ bool ShapePredictor::can_preallocate(size_t desired_buffer_size) { std::pair ShapePredictor::predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, size_t dt_bitwidth, - bool can_reuse_buffer) { + bool can_reuse_buffer, + int32_t custom_next_iters_prealloc_count) { + size_t next_iters_prealloc_count = custom_next_iters_prealloc_count > 0 + ? 
static_cast<size_t>(custom_next_iters_prealloc_count) : _next_iters_preallocation_count; add_shape(id, current_shape); // Save shape information and exit without pre-allocation suggestion if current @@ -83,7 +87,6 @@ std::pair<bool, ov::Shape> ShapePredictor::predict_preallocation_shape(const std break; diffs.push_back(result); } - bool can_use_iterations_preallocation = diffs.size() == min_shapes_num - 1; for (size_t i = 1; i < diffs.size(); ++i) { if (diffs[0] != diffs[i]) { @@ -116,7 +119,7 @@ std::pair<bool, ov::Shape> ShapePredictor::predict_preallocation_shape(const std if (can_use_iterations_preallocation) { // Apply preallocation for the next N iterations - ov::Shape mul_shape(diffs[0].size(), _next_iters_preallocation_count); + ov::Shape mul_shape(diffs[0].size(), next_iters_prealloc_count); auto preallocation_shape = diffs[0] * mul_shape; auto new_shape = current_shape + preallocation_shape; return {true, new_shape}; diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index ceff31a121913c..d371a174d2604c 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -252,12 +252,11 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) { ASSERT_NO_THROW(infer_request1.infer()); // Expect that output_tensor2 will not change its data after infer() call - auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32); FuncTestUtils::compareRawBuffers(ref_values.data(), output_tensor2.data(), ref_values.size(), ov::shape_size(output_tensor2.get_shape()), - thr); + 1e-4f); } TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) { diff --git a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp index 7e479025dab3da..ddc47d19f96b1d 100644 --- a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp @@ -119,7 +119,7 @@ auto config = []() { }; auto hetero_config = []() { - return ov::AnyMap{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}}; + return ov::AnyMap{{ov::device::priorities.name(), ov::test::utils::DEVICE_GPU}}; }; const std::vector input_shapes = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index f9cbbc18e0662c..8c39fd63103c24 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -3,6 +3,7 @@ // #include "behavior/compiled_model/compiled_model_base.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; namespace { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index 6a7176302f684f..5868aab27577e1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -4,18 +4,20 @@ #include "behavior/compiled_model/properties.hpp" -#include "behavior/ov_plugin/properties_tests.hpp" -#include "openvino/runtime/core.hpp" - -using namespace ov::test::behavior; +namespace { +using ov::test::behavior::OVClassCompiledModelGetPropertyTest; +using ov::test::behavior::OVClassCompiledModelGetPropertyTest_EXEC_DEVICES; +using ov::test::behavior::OVClassCompiledModelGetIncorrectPropertyTest; +using ov::test::behavior::OVClassCompiledModelGetConfigTest; +using ov::test::behavior::OVClassCompiledModelSetIncorrectConfigTest; +using ov::test::behavior::OVClassCompiledModelPropertiesIncorrectTests; +using ov::test::behavior::OVClassCompileModelWithCorrectPropertiesTest; +using ov::test::behavior::OVCompiledModelIncorrectDevice; -using namespace InferenceEngine::PluginConfigParams; -namespace { // // Executable Network GetMetric // - INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest, ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); @@ -29,10 +31,11 @@ const std::vector>> G INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest_EXEC_DEVICES, ::testing::ValuesIn(GetMetricTest_ExecutionDevice_GPU)); + + // // Executable Network GetConfig / SetConfig // - INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest, ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); @@ -45,8 +48,8 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelSetIncorrectConfigTest, OVClassCompiledModelSetIncorrectConfigTest, ::testing::Values("GPU")); -// IE Class Load network +// IE Class Load network INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, OVCompiledModelIncorrectDevice, ::testing::Values("GPU")); const std::vector incorrect_device_priorities_properties = {{ov::device::priorities("NONE")}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index b0cbb1e06788da..7e99f01c921077 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_infer_request/callback.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index b8a6a9cd0d10b8..29b1e8f52a17bd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -3,6 +3,7 @@ // #include "behavior/ov_infer_request/cancellation.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 
1116822d236da2..295da790320971 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_infer_request/io_tensor.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index fb30761e1e60d2..d400ed1108b23b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -5,6 +5,7 @@ #include #include "behavior/ov_infer_request/multithreading.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index b10d622fb56138..70cd59abbae41a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -3,6 +3,7 @@ // #include "behavior/ov_infer_request/perf_counters.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 8fcccf2944694a..fff7b3331e5c32 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -5,6 +5,7 @@ #include #include "behavior/ov_infer_request/wait.hpp" +#include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index 986c22663e6b3a..7e93e87ae51af5 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -73,4 +73,9 @@ namespace { ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(GPULoadFromFileConfigs)), CompileModelLoadFromMemoryTestBase::getTestCaseName); + INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_GPU, + CompileModelLoadFromCacheTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(GPULoadFromFileConfigs)), + CompileModelLoadFromCacheTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 5a5d028b788c28..8bff5ce426b757 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -10,6 +10,87 @@ using namespace ov::test::behavior; namespace { + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassBasicTestP, OVClassBasicTestP, + ::testing::Values(std::make_pair(std::string("openvino_intel_gpu_plugin"), std::string(ov::test::utils::DEVICE_GPU)))); + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassNetworkTestP, OVClassNetworkTestP, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU))); + +// +// OV Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_SUPPORTED_METRICS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_AVAILABLE_DEVICES, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_FULL_DEVICE_NAME, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_DEVICE_GOPS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_DEVICE_TYPE, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_RANGE_FOR_STREAMS, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetMetricTest, OVClassGetMetricTest_ThrowUnsupported, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetConfigTest, OVClassGetConfigTest_ThrowUnsupported, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU), + std::string(ov::test::utils::DEVICE_HETERO), + std::string(ov::test::utils::DEVICE_BATCH)) +); + +INSTANTIATE_TEST_SUITE_P( + nightly_OVClassGetAvailableDevices, OVClassGetAvailableDevices, + ::testing::Values(std::string(ov::test::utils::DEVICE_GPU)) +); + // IE Class Common tests with // INSTANTIATE_TEST_SUITE_P(nightly_OVClassModelTestP, OVClassModelTestP, ::testing::Values("GPU")); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index 5eeacde0093d87..2ef40e0017c98b 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -14,4 +14,9 @@ namespace { ::testing::Values(//ov::test::utils::DEVICE_BATCH, "HETERO:GPU"), OVHoldersTest::getTestCaseName); + + INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVHoldersTestOnImportedNetwork, + ::testing::Values(ov::test::utils::DEVICE_GPU), + OVHoldersTestOnImportedNetwork::getTestCaseName); + } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp deleted file mode 100644 index 5f3e4120047a82..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/caching_tests.hpp" - -using namespace LayerTestsDefinitions; - -namespace { - static const std::vector precisionsGPU = { - ngraph::element::f32, - ngraph::element::f16, - ngraph::element::i32, - ngraph::element::i64, - ngraph::element::i8, - ngraph::element::u8, - ngraph::element::i16, - ngraph::element::u16, - }; - - static const std::vector floatPrecisionsGPU = { - ngraph::element::f32, - ngraph::element::f16 - }; - - static const std::vector batchSizesGPU = { - 1, 2 - }; - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_GPU, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsGPU), - ::testing::ValuesIn(batchSizesGPU), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_GPU_Float, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsGPU), - ::testing::ValuesIn(batchSizesGPU), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - LoadNetworkCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_KernelCachingSupportCase_GPU, LoadNetworkCompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::make_pair(std::map(), "blob"))), - LoadNetworkCompiledKernelsCacheTest::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp deleted file mode 100644 index 6ab85ed4b453ed..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/life_time.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/life_time.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - const std::vector> orders = { - // 0 - plugin - // 1 - executable_network - // 2 - infer_request - {0, 1, 2}, - {0, 2, 1}, - {1, 0, 2}, - {1, 2, 0}, - {2, 0, 1}, - {2, 1, 0} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, HoldersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(orders)), - HoldersTest::getTestCaseName); - -} // namespace diff 
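The two deleted files covered the legacy InferenceEngine API: `behavior/plugin/caching_tests.cpp` instantiated `LoadNetworkCacheTestBase` over eight precisions and two batch sizes, and `behavior/plugin/life_time.cpp` ran `HoldersTest` over all six destruction orders of plugin, executable network, and infer request. Their OV 2.0 counterparts (`CompileModelLoadFromCacheTest` and the `OVHoldersTest*` suites) are instantiated above, so coverage moves rather than disappears. The lifetime property those orders encoded, sketched under assumptions (placeholder model path, simplified holders):

```cpp
// Sketch of the lifetime property the removed HoldersTest orders vector
// encoded: objects must stay valid no matter which holder dies first.
#include <memory>
#include "openvino/openvino.hpp"

int main() {
    auto core = std::make_shared<ov::Core>();
    auto model = core->read_model("model.xml");  // placeholder path
    auto compiled = std::make_shared<ov::CompiledModel>(
        core->compile_model(model, "GPU"));
    auto request = std::make_shared<ov::InferRequest>(
        compiled->create_infer_request());

    // Destruction order {2, 0, 1}: infer request, core, compiled model.
    request.reset();
    core.reset();      // the compiled model must keep the plugin alive
    compiled.reset();
    return 0;
}
```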
--git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp index 7f13a10b36e2fa..8b7cd59c8395cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/add_transformation.cpp @@ -8,62 +8,61 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - {ngraph::element::i8}, {ngraph::element::f32, ngraph::element::i8} + {ov::element::i8}, {ov::element::f32, ov::element::i8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + {ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::i8}, {ngraph::element::i8, ngraph::element::f32} + {ov::element::i8}, {ov::element::i8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 
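The `add_transformation.cpp` rewrite above is representative of the whole LPT migration in this PR: `ngraph::element`, `ngraph::Shape`, and `ngraph::PartialShape` become their `ov::` equivalents with no behavioral change, and the stale `using namespace InferenceEngine::details;` is dropped. Each brace-initialized entry such as `{ 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }` describes a FakeQuantize: level count, constant shape, then input and output ranges. What that descriptor expands to in graph terms, as a sketch rather than the shared builder itself:

```cpp
// Sketch: expanding a test descriptor like
//   { 256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {255.f}, {0.f}, {25.5f} }
// into an actual FakeQuantize node.
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/fake_quantize.hpp"

std::shared_ptr<ov::Node> make_fq(const ov::Output<ov::Node>& data) {
    auto c = [](float v) {
        return ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {v});
    };
    // 256 levels mapping [0, 255] to [0, 25.5]: effectively u8 data with scale 0.1.
    return std::make_shared<ov::op::v0::FakeQuantize>(
        data, c(0.f), c(255.f), c(0.f), c(25.5f), 256ul);
}
```

The trailing element-type lists in each entry encode the activation precisions expected after the transformation runs.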
25.5f } }, false, - {ngraph::element::u8}, {ngraph::element::f32, ngraph::element::u8} + {ov::element::u8}, {ov::element::f32, ov::element::u8} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } }, true, - {ngraph::element::u8}, {ngraph::element::u8, ngraph::element::f32} + {ov::element::u8}, {ov::element::u8, ov::element::f32} }, { {}, {}, false }, { {}, {}, true }, }; @@ -71,7 +70,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, AddTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(params)), AddTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp index 8d60739c8a7d67..1c7127995a8a18 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/assign_and_read_value_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + // ov::element::f16 }; const std::vector opsetVersions = { @@ -30,22 +30,22 @@ const std::vector trasform const std::vector params{ // u8 { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u16 { - { 65536ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 65536ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, // u32 { - { 4294967296ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 4294967296ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, AssignAndReadValueTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::ValuesIn(opsetVersions), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp 
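The `assign_and_read_value` cases vary only the FakeQuantize level count: 256, 65536, and 4294967296 correspond to 8-, 16-, and 32-bit quantization, and only the 8-bit case is expected to be rewritten to integer execution. The relationship, spelled out:

```cpp
// The three AssignAndReadValue cases probe 8-, 16- and 32-bit level counts;
// levels = 2^bits, and LPT only rewrites graphs for supported widths.
#include <cstdint>

constexpr uint64_t levels_for_bits(unsigned bits) {
    return uint64_t{1} << bits;
}

static_assert(levels_for_bits(8) == 256ul);
static_assert(levels_for_bits(16) == 65536ul);
static_assert(levels_for_bits(32) == 4294967296ul);
```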
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp index 2c8dfda5f94a6f..e593417704066a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/batch_to_space_transformation.cpp @@ -10,16 +10,16 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector params = { { { 4, 3, 50, 86 }, { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, "batch_to_space", "u8" }, @@ -28,7 +28,7 @@ const std::vector params = { { 1, 1, 2, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp index dc0e8b43fe5485..9b4f5419b23bc6 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/clamp_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -27,7 +27,7 @@ const std::vector trasform const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -38,7 +38,7 @@ const std::vector params = { }, // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { -12.8f }, { 12.7f } }, { {}, {{0.f, 0.f, 0.f}}, @@ -50,7 +50,7 @@ const std::vector params = { // per-channel quantization with the same values { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -64,7 +64,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -79,7 +79,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, ClampTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp 
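In the `batch_to_space` and `clamp` cases above, the shape of the FakeQuantize range constants is what separates per-tensor from per-channel quantization: `{1, 1, 1, 1}` carries one interval for the whole tensor, while `{1, 3, 1, 1}` carries one interval per channel, broadcast over the spatial dimensions. A sketch (the channel values are illustrative):

```cpp
// Per-tensor vs per-channel quantization boils down to the shape of the
// FakeQuantize range constants.
#include "openvino/op/constant.hpp"

// One bound for the whole tensor:
auto per_tensor_low = ov::op::v0::Constant::create(
    ov::element::f32, ov::Shape{1, 1, 1, 1}, {0.f});

// One bound per channel (3 channels, broadcast over H and W):
auto per_channel_low = ov::op::v0::Constant::create(
    ov::element::f32, ov::Shape{1, 3, 1, 1}, {-127.f, 0.f, 64.f});
```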
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index ebeeb2a2d6017c..683feecd9683bf 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -8,49 +8,48 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector testValues = { // U8 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {} }, // I8 { {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // mixed { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, {}, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, {} }, // FQ with unexpected quantizationLevels { {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {}, {}, - { 16ul, ngraph::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, + { 16ul, ov::Shape({}), {0.f}, {15.f}, {0.f}, {1.5f} }, {} }, }; @@ -58,7 +57,7 @@ const std::vector testValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues)), ConcatTransformation::getTestCaseName); @@ -67,15 +66,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, namespace concat_transformation_mixed { -const std::vector precisions = { - ngraph::element::f16 +const std::vector precisions = { + ov::element::f16 }; const std::vector testValues = { // mixed dequantization: FP32 & FP16 { {}, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), {}, @@ -90,7 +89,7 @@ const std::vector testValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues)), ConcatTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp index 
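The concat suite above mixes a U8-style interval ([0, 2.55]) with an I8-style one ([-1.28, 1.27]) on the two inputs, plus a 16-level case that LPT is expected to leave alone. Structurally, every case reduces to a channel-axis concat whose inputs were quantized differently; a sketch of that topology:

```cpp
// Sketch of the "mixed" concat cases: two branches quantized with different
// intervals feeding one concat on the channel axis (axis 1 for NCHW).
#include <memory>
#include "openvino/op/concat.hpp"

std::shared_ptr<ov::Node> make_mixed_concat(const ov::Output<ov::Node>& u8_branch,
                                            const ov::Output<ov::Node>& i8_branch) {
    // LPT must reconcile the two intervals before folding them into a
    // single quantized concat.
    return std::make_shared<ov::op::v0::Concat>(
        ov::OutputVector{u8_branch, i8_branch}, 1);
}
```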
6bf9daea59e6b4..be8a2a69c43254 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_child_and_output.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -22,30 +22,30 @@ const std::vector trasform const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithChildAndOutputTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp index 31da220ecab561..51a2eb9bec65e5 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,39 +23,39 @@ const std::vector testValues = { // U8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // U8 and unsupported concat axis { 2, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, 
{2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} } }, // mixed: U8 + I8 { 1, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { 1, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp index 8a440e8c824682..a48d6dbac5f747 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -8,12 +8,11 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -29,7 +28,7 @@ const std::vector multiChannelValues = { /*true,*/ false }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithIntermediateTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(transparentIntermediateValues), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp index 1d39bf69a873fd..7f2e0a22eab328 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp @@ -8,12 +8,11 @@ #include 
"common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -26,7 +25,7 @@ const std::vector trasform INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithNeighborsGraphTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues)), ConcatWithNeighborsGraphTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp index e220a6ec552b4a..29a8a58c00c625 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_with_split_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -25,30 +25,30 @@ const std::vector trasform const std::vector testValues = { // U8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} } }, // I8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: U8 + I8 { - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, // mixed: I8 + U8 { - { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } + { 256ul, ov::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithSplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 6, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues), ::testing::ValuesIn(trasformationParamValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp index c10197eb8009e3..51ef48c52bd6e6 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -22,92 +22,92 @@ const std::vector trasform const std::vector params = { // FQ on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "U8" }, // FQ on weights { - {256ul, ngraph::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "U8" }, // FQ on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, "convolutionBackpropData_original", "I8" }, // FQ on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, "", "" }, // TODO: check fails in CI // // with incorrect zero point on activations // { -// {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, -// {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, +// {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, +// {255ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }}, // "", // "" // }, // // with incorrect zero point on weights // { -// {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, -// {255ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, +// {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, +// {255ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, // "", // "" // }, // QDq on weights // with zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + 
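The `convolution_backprop_data` cases come in two flavors: "FQ on weights", where the weights carry their own FakeQuantize, and "QDq on weights", where the braces spell out an explicit quantize/dequantize chain as a `{Convert, Subtract(zero point), Multiply(scale)}` triple; the out-of-range zero-point entries (for example `{1000.f}`) are the negative cases where int8 conversion must not happen. What the dequantization triple stands for, as a sketch (the 12.f/4.f values mirror one entry above):

```cpp
// Sketch of the {Convert, Subtract(zero-point), Multiply(scale)} triple the
// "QDq on weights" descriptors denote.
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"

std::shared_ptr<ov::Node> dequantize(const ov::Output<ov::Node>& quantized_weights) {
    auto converted = std::make_shared<ov::op::v0::Convert>(quantized_weights,
                                                           ov::element::f32);
    auto zero_point = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {12.f});
    auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {4.f});
    auto shifted = std::make_shared<ov::op::v1::Subtract>(converted, zero_point);
    return std::make_shared<ov::op::v1::Multiply>(shifted, scale);
}
```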
{{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // without zero point { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, - {{ngraph::element::f32}, {}, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }}, + {{ov::element::f32}, {}, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on activations { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, - {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }}, + {{ov::element::f32}, { {12.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" }, // with incorrect zero point on weights { - {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, - {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }}, + {256ul, ov::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }}, + {{ov::element::f32}, { {1000.f}, ov::element::f32, {}, false }, { {4.f}, ov::element::f32, {}, false }}, "", "" } }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 8, 16, 16 }, false}, {{ 1, 32, 16, 16 }, true} }; -const std::vector outputShapes = { +const std::vector outputShapes = { { 16, 16 } }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp index 522ed21b2b550b..2a477b9d216444 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_qdq_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -57,20 +57,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { 
{-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "U8" @@ -111,20 +111,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector{ 15.f }, ngraph::element::f32}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector{ 15.f }, ov::element::f32}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "U8" @@ -162,20 +162,20 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "FP32" @@ -213,27 +213,27 @@ const std::vector para // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{0.5f}, ngraph::element::i8}, + {{0.5f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, "Convolution", "U8" }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 3, 4, 4 }, { 4, 3, 4, 4 } }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp index d5d76692c3531a..9ce9e78c962aff 100644 --- 
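Each `convolution_qdq` case ends with an expected layer name and runtime precision, such as `"Convolution", "U8"` or `"Convolution", "FP32"`; the shared test compares these against the compiled model's execution graph. A hypothetical checker in that spirit (the rt_info keys follow the exec-graph convention, and the real helper lives in the LPT base classes, so treat the details as assumptions):

```cpp
// Hypothetical sketch: does some node of the given layer type run in the
// given runtime precision? The "layerType"/"runtimePrecision" rt_info keys
// are the exec-graph convention; assumption, not the shared helper itself.
#include <string>
#include "openvino/runtime/compiled_model.hpp"

bool runs_as(const ov::CompiledModel& compiled,
             const std::string& layer_type, const std::string& precision) {
    for (const auto& node : compiled.get_runtime_model()->get_ops()) {
        auto rt = node->get_rt_info();
        auto type_it = rt.find("layerType");
        auto prec_it = rt.find("runtimePrecision");
        if (type_it != rt.end() && prec_it != rt.end() &&
            type_it->second.as<std::string>() == layer_type &&
            prec_it->second.as<std::string>() == precision)
            return true;
    }
    return false;
}
```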
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/convolution_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -22,7 +22,7 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, {}, false, @@ -32,40 +32,40 @@ const std::vector params { {}, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "FP32" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "U8" }, { - { 256ul, ngraph::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape {}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 255ul, ngraph::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape {}, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "U8" }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.75f }, { 6.375f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.75f }, { 6.375f } }, true, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } }, false, "Convolution", "U8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -0.2, -0.3, -0.3, -0.2, -0.1 }, { 1.52806e-39f, 0.2, 0.3, 0.3, 0.2, 0.1 } }, false, @@ -73,10 +73,10 @@ const std::vector params "U8" }, { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, + { 256ul, ov::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } }, true, { - 255ul, ngraph::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, + 255ul, ov::Shape { 6, 1, 1, 1 }, { -0.6f }, { 0.6f }, { -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f, -1.52806e-39f }, { 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f, 1.52806e-39f } }, @@ -86,27 +86,27 @@ const std::vector params }, // not supported quantization level on data { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 255ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 255ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "FP32" }, // not supported quantization level on data & weights { - { 65536ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f 
}, { 255.f }, { 0.f }, { 25.5f } }, + { 65536ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "FP32" }, // not supported quantization level on weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, false, - { 65536ul, ngraph::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, + { 65536ul, ov::Shape{1, 1, 1, 1}, {0.f}, {254.f}, {-12.7f}, {12.7f}}, false, "Convolution", "FP32" @@ -116,7 +116,7 @@ const std::vector params INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::Shape({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), @@ -125,14 +125,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionTransformation, const std::vector incorrectWeightsParams = { // incorrect weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, false }, // correct weights { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true } }; @@ -140,7 +140,7 @@ const std::vector i INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConvolutionWIthIncorrectWeightsTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::Shape({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(incorrectWeightsParams)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp index 5449287a2dbb46..5bb0ddabe1a887 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp @@ -8,12 +8,11 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector modes = { @@ -21,7 +20,7 @@ const std::vector modes = { ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; -const std::vector inputShapesBS2 = { +const std::vector inputShapesBS2 = { {1, 4, 3, 3}, {2, 16, 5, 4} }; @@ -35,7 +34,7 @@ const auto DepthToSpaceBS2 = 
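The three "not supported quantization level" cases pin down the fallback rule: LPT folds only 8-bit FakeQuantize (256 levels on data, up to 255 on weights) into int8 convolutions, and 65536 levels would require 16-bit kernels, so those graphs stay in FP32. A reduced sketch of the predicate (hypothetical; the plugin's actual check considers more context):

```cpp
// Hypothetical predicate mirroring the level-count rule the cases above test.
#include <cstddef>

constexpr bool int8_foldable(std::size_t data_levels, std::size_t weight_levels) {
    return data_levels == 256 && weight_levels <= 256;
}

static_assert(int8_foldable(256, 255));     // quantized convolution ("U8")
static_assert(!int8_foldable(65536, 255));  // data too wide -> "FP32"
static_assert(!int8_foldable(256, 65536));  // weights too wide -> "FP32"
```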
::testing::Combine( INSTANTIATE_TEST_SUITE_P(LPT_BS2, DepthToSpaceTransformation, DepthToSpaceBS2, DepthToSpaceTransformation::getTestCaseName); -const std::vector inputShapesBS3 = { +const std::vector inputShapesBS3 = { {1, 9, 3, 3}, {2, 27, 5, 4} }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp index f8f1151fac0cfa..e89e1d4732f846 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -8,11 +8,10 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, +const std::vector netPrecisions = { + ov::element::f32, }; const std::vector elementwiseTypes = { @@ -23,24 +22,24 @@ const std::vector elementwiseTypes = { const std::vector params = { { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, {}, // GPU doesn't returns Reorders in performance counters { {"convolution1", "U8"}, @@ -50,24 +49,24 @@ const std::vector p }, { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + { {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, {} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { {}, - { std::vector(9, 1.f), ngraph::element::i8, {3, 3, 1, 1} }, - { {ngraph::element::f32}, {}, {std::vector(3, 1.f), ngraph::element::f32, {3, 1, 1, 1}} } + { std::vector(9, 1.f), ov::element::i8, {3, 3, 1, 1} }, + 
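The DepthToSpace cases pair both modes with block sizes 2 and 3; the input channel count must be divisible by the square of the block size, which is why the BS2 shapes use 4 and 16 channels and the BS3 shapes use 9 and 27. A sketch of the node under test:

```cpp
// Sketch of the DepthToSpace node the suites above instantiate; for example,
// {1, 4, 3, 3} with block size 2 becomes {1, 1, 6, 6}.
#include <cstddef>
#include <memory>
#include "openvino/op/depth_to_space.hpp"

std::shared_ptr<ov::Node> make_d2s(const ov::Output<ov::Node>& data, std::size_t block) {
    return std::make_shared<ov::op::v0::DepthToSpace>(
        data, ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, block);
}
```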
{ {ov::element::f32}, {}, {std::vector(3, 1.f), ov::element::f32, {3, 1, 1, 1}} } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, {}, // GPU doesn't returns Reorders in performance counters { {"convolution1", "U8"}, @@ -80,7 +79,7 @@ const std::vector p INSTANTIATE_TEST_SUITE_P(smoke_LPT, ElementwiseBranchSelectionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(params), ::testing::ValuesIn(elementwiseTypes)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp index 636782ba93ff1a..ba4b05d8479a0b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/eliminate_fake_quantize_transformation.cpp @@ -7,7 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { const std::vector testValues = { @@ -15,9 +14,9 @@ const std::vector testValues = { {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 }, - { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 } + ov::element::f32, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 }, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 } }, { { "fakeQuantize1" }, @@ -29,9 +28,9 @@ const std::vector testValues = { {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ngraph::element::f32 }, - { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ngraph::element::f32 } + ov::element::f32, + { 256ul, {}, { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }, ov::element::f32 }, + { 256ul, {}, { 0.f }, { 255.f / 2.1f }, { 0.f }, { 255.f / 2.1f }, ov::element::f32 } }, { { "fakeQuantize1", "fakeQuantize2" }, // not fused diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp index 7b29143e4bc6d8..a0732fd163e9cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp @@ -9,12 +9,11 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace 
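The two `eliminate_fake_quantize` cases differ only in the second interval bound (255/2 versus 255/2.1): when two consecutive FakeQuantize ops carry identical intervals the second one is eliminated, and any mismatch keeps both, as the "not fused" comment notes. A reduced sketch of that comparison (hypothetical helper; the real pass compares the full quantization details):

```cpp
// Hypothetical reduction of the fusion check in the cases above.
#include <cmath>

struct Interval { float low, high; };

inline bool can_eliminate_second_fq(const Interval& a, const Interval& b) {
    const float eps = 1e-6f;
    return std::fabs(a.low - b.low) < eps && std::fabs(a.high - b.high) < eps;
}
```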
InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -30,7 +29,7 @@ const std::vector fakeQuantizes = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndAvgPoolTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizes)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp index 2a83be0a515026..223c6404949a5a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp @@ -9,12 +9,11 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -30,7 +29,7 @@ const std::vector fakeQuantizes = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndMaxPoolTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizes)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp index f3d389a94af34e..c267457e71434d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_two_output_branches_with_convolution.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -31,7 +31,7 @@ const std::vector testValues = INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(testValues)), diff --git 
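The `FakeQuantizeAndAvgPoolTransformation` and `FakeQuantizeAndMaxPoolTransformation` suites check that pooling, being precision-preserving, lets the quantized path propagate through it instead of forcing a dequantize before the pool. A sketch of the pooling stage of the tested topology (kernel and strides chosen for illustration):

```cpp
// Sketch: a MaxPool consuming a FakeQuantize output; LPT can move the
// dequantization below the pool because pooling preserves precision.
#include <memory>
#include "openvino/op/max_pool.hpp"

std::shared_ptr<ov::Node> make_pooled(const ov::Output<ov::Node>& fq_output) {
    return std::make_shared<ov::op::v1::MaxPool>(
        fq_output,
        ov::Strides{2, 2},  // strides
        ov::Shape{0, 0},    // pads_begin
        ov::Shape{0, 0},    // pads_end
        ov::Shape{2, 2});   // kernel
}
```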
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp index 853719aabd3854..8539648d1d0cc9 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,22 +23,22 @@ const std::vector trasformationParamValues = { const std::vector testValues = { { - { ngraph::element::u8, ngraph::element::i8 }, - { ngraph::element::u8 }, + { ov::element::u8, ov::element::i8 }, + { ov::element::u8 }, true, { { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, { 255ul, { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } } }, { - ngraph::element::u8, + ov::element::u8, { 256ul, { }, { 0.f }, { 2.55f }, { 0.f }, { 255.f } }, { } }, }, { - { ngraph::element::u8, ngraph::element::i8 }, - { ngraph::element::i8 }, + { ov::element::u8, ov::element::i8 }, + { ov::element::i8 }, // INT8 is not available for limited operation (Convolution) false, { @@ -47,7 +47,7 @@ const std::vector testVa }, { // original precision is used - ngraph::element::u8, + ov::element::u8, // FakeQuantize has to select the first available: U8, not limited operation required I8 but this fact doesn't affect { 256ul, { }, { 0.f }, { 25.5f }, { 0.f }, { 255.f } }, // FakeQuantize on weights is not changed @@ -60,7 +60,7 @@ const std::vector testVa INSTANTIATE_TEST_SUITE_P(DISABLED_LPT, FakeQuantizePrecisionSelectionTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(testValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp index 560cbf0629b30c..4c11b9e73df348 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector isConvertOnConstants = { @@ -68,7 +68,7 @@ const std::vector fakeQuantizeOnDataValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 32, 72, 48 })), + ::testing::Values(ov::PartialShape({ 1, 32, 72, 48 })), 
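One detail worth noting in the hunk above: `FakeQuantizePrecisionSelectionTransformation` is instantiated under the name `DISABLED_LPT`. GoogleTest skips any test whose full name begins with `DISABLED_`, so the suite stays buildable but does not run unless the binary is launched with `--gtest_also_run_disabled_tests`, presumably because the precision-selection behavior it checks is still being reworked.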
::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index a771177d4bd71b..91e57fe3ed6e82 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16, +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16, }; const std::vector trasformationParamValues = { @@ -24,77 +24,77 @@ const std::vector trasformationParamValues = { const std::vector fakeQuantizeOnDataValues = { { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.3f}, ov::element::f32, {}, false } }, {}, "I8" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "I8" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.3f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, 
+ { {0.3f}, ov::element::f32, {}, false } }, {}, "I8" }, { - { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - {{5.f}, ngraph::element::i8}, + {{5.f}, ov::element::i8}, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, { }, - { {0.3f}, ngraph::element::f32, {}, false } + { {0.3f}, ov::element::f32, {}, false } }, {}, "I8" @@ -104,7 +104,7 @@ const std::vector fakeQuanti INSTANTIATE_TEST_SUITE_P(smoke_LPT, FakeQuantizeWithNotOptimalTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp index e8238c9b37006e..7d103e1e489ea0 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fully_connected_transformation.cpp @@ -8,12 +8,11 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector shapes = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp index ddf986e39bec66..2ab1df46b9394b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_convert_transformation.cpp @@ -5,7 +5,6 @@ #include "low_precision_transformations/fuse_convert_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { const std::vector precisions = { @@ -13,18 +12,18 @@ const std::vector precisions = { // element::f16 // TODO: temporarily commented due to failing in GPU Plugin on constant folding stage }; -const std::vectorinputAndQuantizationShapes = { +const std::vectorinputAndQuantizationShapes = { { 1, 4, 16, 16 }, }; const std::vector deqOperations = { { - { ngraph::element::f32 }, + { ov::element::f32 }, {1.f}, {0.45f} }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, {0.45f} } diff --git 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp index 71daf75a837958..69c8ffe19e56ba 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp @@ -7,7 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { @@ -18,11 +17,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -31,11 +30,11 @@ const std::vector testValu {128, 3}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, - { {}, {}, { {0.01f, 0.1f, 1.f}, ngraph::element::f32, {1, 3} } }, - ngraph::element::f32, + ov::element::f32, + { {}, {}, { {0.01f, 0.1f, 1.f}, ov::element::f32, {1, 3} } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -44,11 +43,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, {}, { {0.01f, 0.f, 0.01f} } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -57,11 +56,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::f32, + ov::element::f32, { {}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -70,11 +69,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, + ov::element::f32, { }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -83,11 +82,11 @@ const std::vector testValu {1, 3, 16, 16}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), { - ngraph::element::f32, - { {128}, ngraph::element::f32 }, - ngraph::element::u8, - { {ngraph::element::f32}, { -128 }, { 0.01f } }, - ngraph::element::f32, + ov::element::f32, + { {128}, ov::element::f32 }, + ov::element::u8, + { {ov::element::f32}, { -128 }, { 0.01f } }, + ov::element::f32, { 256ul, {}, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } } } }, @@ -98,9 +97,9 @@ const std::vector testValu { { }, { }, - ngraph::element::i32, - { {ngraph::element::f32}, {}, {} }, - ngraph::element::f32, + ov::element::i32, + { {ov::element::f32}, {}, {} }, + ov::element::f32, { 256ul, {}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } } } }, diff --git 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp index 9b663476240a41..2ef74064a6f987 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -37,7 +37,7 @@ const std::vector fakeQuantizeOnD INSTANTIATE_TEST_SUITE_P(smoke_LPT, FuseFakeQuantizeAndScaleShiftTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 9, 9 })), + ::testing::Values(ov::PartialShape({ 1, 3, 9, 9 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(fakeQuantizeOnDataValues)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp index ada9eac0594b47..d370d3b57a83a4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_multiply_to_fq_transformation.cpp @@ -7,8 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; -using namespace ngraph; namespace { const std::vector testValues = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp index bff2d8d91a67db..e506ef4a6630a6 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_subtract_to_fq_transformation.cpp @@ -7,8 +7,6 @@ #include using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; -using namespace ngraph; namespace { const std::vector testValues = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp index 540c84ece088cf..dbd5e3476a7a58 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector precisions = 
{ - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector opset_version = { @@ -28,7 +28,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, // U8: per-channel quantization @@ -39,7 +39,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {1, 3, 1}, @@ -57,7 +57,7 @@ const std::vector testValues = { {0}, std::int64_t{0}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp index d225f91e9cafbf..ba30b2c85c84c1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp @@ -11,12 +11,12 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector dimensions = { +const std::vector dimensions = { {1, 3, 16, 16} }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp index d5b86e0f2ba257..f2005374e12032 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -21,7 +21,7 @@ const std::vector trasform const std::vector addPrecisionPreserved = { true, false }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }} }; @@ -30,8 +30,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "U8" @@ -40,8 +40,8 @@ const std::vector pa { 3ul, 0, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, 
{ 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "U8" @@ -50,8 +50,8 @@ const std::vector pa { 3ul, 1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, "Convolution", "U8" @@ -68,15 +68,15 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, }, // group convolution, per-channel weights quantization { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, false, "", "" @@ -94,7 +94,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, GroupConvolutionTransformation::getTestCaseName); namespace test_values_4d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 24, 18, 18 }}, }; @@ -103,8 +103,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "U8" @@ -113,8 +113,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -146,7 +146,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_4d namespace test_values_3d { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24 }, { 1, 24, 18 }}, }; @@ -155,8 +155,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, { -127.f }, { 127.f }, { -127.f }, { 127.f } }, false, "Convolution", "U8" @@ -165,8 +165,8 @@ const std::vector pa { 3ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 3, 8, 1, 1 }, + { 256ul, ov::Shape { 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 3, 8, 1, 1 }, {-127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f, -127.f, 
-12.7f, -1.27f, -127.f, -12.7f, -1.27f, -127.f, -12.7f}, @@ -198,7 +198,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation, } // namespace test_values_3d namespace depthwise { -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 6, 24, 24 }, { 1, 6, 18, 18 }} }; @@ -207,8 +207,8 @@ const std::vector pa { 6ul, -1, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, }, // depthwise convolution, per-channel quantization @@ -223,7 +223,7 @@ const std::vector pa { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }, { 25.5f, 25.5f, 25.5f / 2.f, 25.5f / 2.f, 25.5f / 4.f, 25.5f / 4.f } }, - { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, + { 255ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f } }, true, } }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp index ce9ed2b6e73da8..514149f327168d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -13,9 +13,9 @@ using namespace LayerTestsDefinitions; namespace { // clang-format off -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + // ov::element::f16 }; const std::vector trasformationParamValues = { @@ -63,20 +63,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -127,20 +127,20 @@ const std::vector // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { 
-12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + {ov::element::f32}, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, - { {-128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {-128.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -179,20 +179,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5} }, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5} }, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -234,20 +234,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - {ngraph::element::f32}, + {ov::element::f32}, {}, - { {0.1f}, ngraph::element::f32, {}, false } + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::f32, {6, 2, 5, 5}}, - { 255ul, ngraph::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ngraph::element::f32 }, - { ngraph::element::i8, false }, + { std::vector(4, 15.f), ov::element::f32, {6, 2, 5, 5}}, + { 255ul, ov::Shape({ 1, 1, 1, 1 }), { 0.f }, { 25.5f }, { -128.f }, { 127.f }, ov::element::f32 }, + { ov::element::i8, false }, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -291,20 +291,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, 
false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -351,20 +351,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {127.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::i8, true }, - { {0.2f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {127.f}, ov::element::f32, {}, false, 1ul, ov::element::i8, true }, + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -411,20 +411,20 @@ const std::vector // \ / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, - { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ngraph::element::i8, true }, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { ov::element::f32, false }, + { {126.f, 127.f, 126.f, 127.f, 126.f, 127.f}, ov::element::f32, {3, 2, 1, 1, 1}, false, 1ul, ov::element::i8, true }, + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -468,20 +468,20 @@ const std::vector // '\' / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, 
ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -525,20 +525,20 @@ const std::vector // '\' / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {3, 2, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {3, 2, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ngraph::element::f32, {3, 2, 1, 1, 1}, false } + { {0.1f, 0.2f, 0.1f, 0.2f, 0.1f, 0.2f}, ov::element::f32, {3, 2, 1, 1, 1}, false } }, {}, "output_original", @@ -585,20 +585,20 @@ const std::vector // '\' / // Multiply { - { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ngraph::element::f32 }, - { ngraph::element::u8, false }, + { 256ul, {{ 1, 1, 1, 1 }}, { -12.8f }, { 12.7f }, { 0.f }, { 255.f }, ov::element::f32 }, + { ov::element::u8, false }, { - { ngraph::element::f32, false }, - { {128.f}, ngraph::element::f32, {}, false, 1ul, ngraph::element::u8, true }, - { {0.1f}, ngraph::element::f32, {}, false } + { ov::element::f32, false }, + { {128.f}, ov::element::f32, {}, false, 1ul, ov::element::u8, true }, + { {0.1f}, ov::element::f32, {}, false } }, - { std::vector(4, 15.f), ngraph::element::i8, {6, 2, 5, 5} }, + { std::vector(4, 15.f), ov::element::i8, {6, 2, 5, 5} }, {}, {}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.2f}, ngraph::element::f32, {}, false } + { {0.2f}, ov::element::f32, {}, false } }, { {3, 2, 2, 5, 5} }, "output_original", @@ -607,7 +607,7 @@ const std::vector }, }; -const std::vector shapes = { +const std::vector shapes = { { 1, 6, 24, 24 } }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp index cd15b4c6996da9..f7db97cf6c0551 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/interpolate_transformation.cpp @@ -5,36 +5,35 @@ #include "low_precision_transformations/interpolate_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const 
std::vector precisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector> shapes = { +const std::vector> shapes = { {{1, 4, 16, 16}, {32, 32}}, {{1, 2, 48, 80}, {50, 60}}, }; const std::vector interpAttrs = { interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, true, {0}, {0}), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "linear", false, false, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp index cb35532ca9a38a..db4497767ca2d5 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_transformation.cpp @@ -7,45 +7,44 @@ #include "low_precision_transformations/mat_mul_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; std::vector testValues = { { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, + { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} } + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} } }, { { 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} } + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} } }, { { 1, 1, 1, 4, 12, 2 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 1, 1, 1, 4, 2, 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, }, { { 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, { 12 }, - { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, + { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MatMulTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 384, 1024 })), + ::testing::Values(ov::PartialShape({ 1, 384, 1024 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues)), MatMulTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 4514aa15bdc8db..7c97eb25af837f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -7,12 +7,11 @@ #include "low_precision_transformations/mat_mul_with_constant_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; //transpose_a = false, transpose_b = true @@ -20,7 +19,7 @@ std::vector testValues = { { { 2, 3, 4 }, { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.55f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.55f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -29,16 +28,16 @@ std::vector testValues = { { { 2, 3, 4 }, { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.f}, {0.f, 0.f, 0.f}, {255.f, 25.5f, 2.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } }, {}, - { ngraph::element::f32, {}, {0.1f} }, + { ov::element::f32, {}, {0.1f} }, "FullyConnected", "U8" }, { { 1, 3, 4 }, { 256ul, {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}, {-10.5f}, {4.5f}, {-10.5f}, {4.5f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -47,7 +46,7 @@ std::vector testValues = { { { 1, 1, 3, 4 }, { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::f32, ov::Shape{ 2, 4 } }, { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-128.f, -12.8f}, {127.f, 12.7f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -56,16 +55,16 @@ std::vector testValues = { { { 1, 1, 3, 4 }, { 256ul, {{1, 3, 1}, {1, 3, 1}, {1, 3, 1}, {1, 3, 1}}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f}, {0.f, 0.f, 0.f}, {25.f, 24.f, 25.f} }, - { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, + { std::vector(4 * 2, 2.f), ov::element::i8, ov::Shape{ 2, 4 } }, {}, - { ngraph::element::f32, {}, {{0.1f, 0.01}, ngraph::element::f32, ngraph::Shape{ 2, 1 }} }, + { ov::element::f32, {}, {{0.1f, 0.01}, ov::element::f32, ov::Shape{ 2, 1 }} }, "FullyConnected", "U8" }, { { 1, 3, 4 }, { 256ul, {{1}, {1}, {1}, {1}}, {0.f}, {255.f}, {0.f}, {25.5f} }, - { std::vector(4 * 4, 2.f), ngraph::element::f32, ngraph::Shape{ 4, 4 } }, + { std::vector(4 * 4, 2.f), ov::element::f32, ov::Shape{ 4, 4 } }, { 256ul, {{1}, {1}, {1}, {1}}, {-128.f}, {127.f}, {-128.f}, {127.f} }, { {}, {}, {} }, "FullyConnected", @@ -74,7 +73,7 @@ std::vector testValues = { { { 2, 3 }, { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-10.f, -5.f}, {5.f, 5.f}, {-10.f, -5.f}, {5.f, 5.f} }, - { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::f32, ngraph::Shape{ 2, 3 } }, + { std::vector{1, 2, 3, 4, 5, 6}, ov::element::f32, ov::Shape{ 2, 3 } }, { 256ul, {{1}, {1}, {1}, 
{1}}, {-128.f}, {127.f}, {-12.8f}, {12.7f} }, { {}, {}, {} }, "FullyConnected", @@ -83,9 +82,9 @@ std::vector testValues = { { { 2, 3 }, { 256ul, {{2, 1}, {2, 1}, {2, 1}, {2, 1}}, {-10.f, -5.f}, {5.f, 5.f}, {-10.f, -5.f}, {5.f, 5.f} }, - { std::vector{1, 2, 3, 4, 5, 6}, ngraph::element::i8, ngraph::Shape{ 2, 3 } }, + { std::vector{1, 2, 3, 4, 5, 6}, ov::element::i8, ov::Shape{ 2, 3 } }, {}, - { ngraph::element::f32, {}, {0.1f} }, + { ov::element::f32, {}, {0.1f} }, "FullyConnected", "U8" } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 2556ad7d4fea93..e6ac3542231af5 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -10,19 +10,19 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector params = { { - { 256ul, ngraph::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, - { 255ul, ngraph::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } } + { 256ul, ov::Shape { 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 255ul, ov::Shape { 1 }, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f } } }, }; -const std::vector> inputShapes = { +const std::vector> inputShapes = { {{ 1, 16 }, { 10, 16 }}, {{ 1, 16 }, { 16, 10 }} }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp index 69b83bc34b2c38..16aef709783034 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/move_fake_quantize_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -58,9 +58,9 @@ const std::vector pa 3, "", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, { 0.01f } }, @@ -73,9 +73,9 @@ const std::vector pa 3, "relu", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, { 0.01f } }, @@ -111,11 +111,11 @@ const std::vector pa {0.f, 0.f, 0.f}, {255.f, 255.f, 255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 }, { - { ngraph::element::f32 }, + { ov::element::f32 }, {}, - { {0.01f, 0.01f, 0.01f}, ngraph::element::f32, {1, 3, 1, 1} } + { {0.01f, 0.01f, 0.01f}, ov::element::f32, {1, 3, 1, 1} } }, "Concat", "U8", @@ -133,10 +133,10 @@ const std::vector pa {0.f, 0.f, 0.f}, {255.f, 255.f, 255.f} }, - { ngraph::element::u8 }, + { ov::element::u8 
}, { - { ngraph::element::f32 }, - { {0.01f, 0.01f, 0.01f}, ngraph::element::f32, {1, 3, 1, 1} }, + { ov::element::f32 }, + { {0.01f, 0.01f, 0.01f}, ov::element::f32, {1, 3, 1, 1} }, { 0.01f } }, "Concat", @@ -145,7 +145,7 @@ const std::vector pa }, }; -const std::vector> shapes = { +const std::vector> shapes = { {{ 1, 1, 16, 16 }, { 1, 1, 16, 16 }, { 1, 1, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 1, 16, 16 }, { 4, 1, 16, 16 }} }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution_transformation.cpp index cd936affef58d6..1a98bd811c6f3d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_to_group_convolution_transformation.cpp @@ -5,7 +5,6 @@ #include "low_precision_transformations/multiply_to_group_convolution_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { const std::vector precisions = { @@ -13,14 +12,14 @@ const std::vector precisions = { }; namespace shape4d { -const std::vector inputShapes = { +const std::vector inputShapes = { { 1ul, 3ul, 16ul, 16ul }, { 4ul, 3ul, 16ul, 16ul } }; const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -28,7 +27,7 @@ const std::vector params = { }, // zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "I8", @@ -36,7 +35,7 @@ const std::vector params = { }, // zero point { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f / 2.f }, { -1.28f }, { 1.27f / 2.f} }, {{1.f, 2.f, 3.f}, element::f32, Shape{1, 3, 1, 1}}, "output/GroupConvolution", "U8", @@ -45,14 +44,14 @@ const std::vector params = { // Multiply => GroupConvolution optimizations { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{3.f}, element::f32, Shape{1, 1, 1, 1}}, "output/GroupConvolution", "", false }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {{3.f}, element::f32, Shape{1, 1, 1, 1}}, "output/GroupConvolution", "", diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp index 0141463b3fc63e..5a2f09f48b3699 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp @@ -10,83 +10,83 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - //ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + //ov::element::f16 }; const std::vector params = { { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::undefined, // ngraph::element::i8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + ov::element::undefined, // ov::element::i8 false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::undefined, // ngraph::element::u8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + ov::element::undefined, // ov::element::u8 false }, { true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::undefined, //ngraph::element::u8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + ov::element::undefined, //ov::element::u8 false }, { true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::undefined, // ngraph::element::i8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + ov::element::undefined, // ov::element::i8 false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::undefined, // ngraph::element::i8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, + ov::element::undefined, // ov::element::i8 false }, { false, - { 256ul, ngraph::Shape { 1, 1, 
1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } }, false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::undefined, // ngraph::element::u8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + ov::element::undefined, // ov::element::u8 false }, { false, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, true, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::undefined, // ngraph::element::u8 + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + ov::element::undefined, // ov::element::u8 false }, - { false, {}, false, {}, {}, ngraph::element::undefined /* ngraph::element::f32 */, false }, - { true, {}, true, {}, {}, ngraph::element::undefined /* ngraph::element::f32 */, false }, + { false, {}, false, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, + { true, {}, true, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(params)), MultiplyTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent_transformation.cpp index 9f08b9e643f5ad..c1721e17b1dcc0 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_with_one_parent_transformation.cpp @@ -10,21 +10,21 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector values = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } } + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } } } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyWithOneParentTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(values)), MultiplyWithOneParentTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp index e0fe95dbd1456a..d758eaa531edc2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/mvn_transformation.cpp @@ -5,7 +5,6 @@ #include "low_precision_transformations/mvn_transformation.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { const std::vector precisions = { @@ -13,7 +12,7 @@ namespace { element::f16 }; - const std::vector inputAndQuantizationShapes = { + const std::vector inputAndQuantizationShapes = { { 1ul, 4ul, 16ul, 16ul }, }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp index 5032af2f2ababd..07cba7b268502f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/normalize_transformation.cpp @@ -8,15 +8,14 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector > inputAndQuantizationShapes = { +const std::vector > inputAndQuantizationShapes = { { { 1ul, 4ul, 16ul, 16ul }, { 1ul } }, { { 1ul, 4ul, 16ul, 16ul }, { 1ul, 4ul, 1ul, 1ul } }, }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp index 5273842172f9ac..eb4c668fa3bb18 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers.cpp @@ -11,8 +11,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,7 +22,7 @@ const std::vector trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, OutputLayers, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues)), OutputLayers::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp index 3b2add93d258c4..680937c777416c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat.cpp @@ -11,8 +11,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,7 +22,7 @@ const std::vector trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, OutputLayersConcat, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues)), OutputLayersConcat::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp index 70a024b6012fcc..a2a171d469a4e2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -11,8 +11,8 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector trasformationParamValues = { @@ -22,7 +22,7 @@ const std::vector trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_LPT, OutputLayersConcatMultiChannel, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })), + ::testing::Values(ov::Shape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues)), OutputLayersConcatMultiChannel::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp index 52c847feb354d8..bb6e91c4a170d0 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp @@ -12,12 +12,12 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 3, 16, 16}, }; @@ -37,7 +37,7 @@ const std::vector padModes = { const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, }, @@ -45,7 +45,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -72,17 +72,17 
@@ namespace testCasesForConstantMode { const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, -1, 1 }, { 0, 0, 1, -1 }, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, -1, -1 }, { 0, 0, -1, -1 }, }, @@ -110,7 +110,7 @@ const std::vector modesWithoutConstant = { const std::vector params = { // tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -2.f }, { 10.5f }, { -2.f }, { 10.5f } }, { 0, 0, 1, 1 }, { 0, 0, 1, 1 }, }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp index e3e600a368c704..5986b40640f3ad 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/prelu_transformation.cpp @@ -8,27 +8,26 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; std::vector testValues = { { {}, false}, - { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, - { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, - { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, - { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } + { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, + { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, + { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, + { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } }; // PRelu in low precision is not supported in GPU INSTANTIATE_TEST_SUITE_P(DISABLED_LPT, PReluTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues)), PReluTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp index 394ffbb1ff5cde..371eec0faaeba5 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pull_reshape_through_dequantization.cpp 
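// Editor's note — a hedged sketch, not part of the diff. Throughout these LPT
// test instantiations, the six-field initializers of the form
// { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }
// appear to describe a FakeQuantize op as { levels, constantShape, inputLow,
// inputHigh, outputLow, outputHigh }. Assuming the standard ov::op::v0 API
// (the shared test helpers that consume these descriptors are not shown here),
// such a descriptor corresponds roughly to the following graph fragment:

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::op::v0::FakeQuantize> make_example_fq() {
    // Input with the { 1, 3, 16, 16 } shape used by most of these tests.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                        ov::PartialShape{1, 3, 16, 16});
    // Per-tensor limits, matching the constantShape { 1, 1, 1, 1 } above.
    auto in_lo  = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {0.f});
    auto in_hi  = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {25.5f});
    auto out_lo = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {0.f});
    auto out_hi = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 1}, {25.5f});
    // 256 levels -> quantization to an 8-bit grid, which is what lets the
    // low-precision transformations rewrite the subgraph to u8/i8.
    return std::make_shared<ov::op::v0::FakeQuantize>(data, in_lo, in_hi, out_lo, out_hi, 256);
}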
@@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - // ngraph::element::f16 // TODO: enable f16 test inference (change ngraph function + fp32 to fp16 replacements) +const std::vector netPrecisions = { + ov::element::f32, + // ov::element::f16 // TODO: enable f16 test inference (change ngraph function + fp32 to fp16 replacements) }; const std::vector trasformationParamValues = { @@ -22,52 +22,52 @@ const std::vector trasform const std::vector params = { { - ngraph::element::f32, + ov::element::f32, { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {}, - { std::vector{ 2.f }, ngraph::element::i8, {9, 16}}, + { std::vector{ 2.f }, ov::element::i8, {9, 16}}, { - { ngraph::element::f32, false }, + { ov::element::f32, false }, {}, - { {0.03f}, ngraph::element::f32, {/* from parameter */}, false } + { {0.03f}, ov::element::f32, {/* from parameter */}, false } }, { {3, 3, 16, 1} }, - { {2}, ngraph::element::f32, {1, 1, 16, 1}, false }, + { {2}, ov::element::f32, {1, 1, 16, 1}, false }, { {2, 3, 0, 1} }, { {16, 1, 1, 3, 3} }, - ngraph::element::f32, + ov::element::f32, {}, "output_original", "U8" }, { - ngraph::element::f32, + ov::element::f32, { 256ul, {{ 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }}, { 0.f }, { 25.5f }, { 0.f }, { 25.5f } }, {}, - { std::vector{ 2.f }, ngraph::element::i8, {9, 16}}, + { std::vector{ 2.f }, ov::element::i8, {9, 16}}, { - { ngraph::element::f32, false }, - { {127.0f}, ngraph::element::f32, {/* from parameter */}, false}, - { {0.03f}, ngraph::element::f32, {/* from parameter */}, false } + { ov::element::f32, false }, + { {127.0f}, ov::element::f32, {/* from parameter */}, false}, + { {0.03f}, ov::element::f32, {/* from parameter */}, false } }, { {3, 3, 16, 1} }, - { {2}, ngraph::element::f32, {1, 1, 16, 1}, false }, + { {2}, ov::element::f32, {1, 1, 16, 1}, false }, { {2, 3, 0, 1} }, { {16, 1, 1, 3, 3} }, - ngraph::element::f32, + ov::element::f32, {}, "output_original", "FP32" } }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 16, 9, 9 }, { 4, 16, 9, 9 } }; -const std::vector dequantizationOnWeightElementwiseConstantShapes = { - { ngraph::Shape({1, 16}) } +const std::vector dequantizationOnWeightElementwiseConstantShapes = { + { ov::Shape({1, 16}) } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, PullReshapeThroughDequantizationTransformation, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp index 87a11b0cb764b9..09e1e954e0a786 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/recurrent_cell_transformation.cpp @@ -9,9 +9,9 @@ using namespace LayerTestsDefinitions; -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -25,17 +25,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, 
{}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -55,17 +55,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -83,8 +83,8 @@ const std::vector param } }; -const std::vector> activations_shapes = {{{1, 1, 16}, {1, 1, 128}, {1, 1, 128}}}; -const std::vector> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}}; +const std::vector> activations_shapes = {{{1, 1, 16}, {1, 1, 128}, {1, 1, 128}}}; +const std::vector> weights_shapes = {{{1, 512, 16}, {1, 512, 128}, {1, 512}}}; // Quantized Recurrent models are not supported by GPU yet. Keep tests for future INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_LPT, RecurrentCellTransformation, @@ -105,17 +105,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -135,17 +135,17 @@ const std::vector param { // X {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, // H {256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f}}, - {ngraph::element::u8}, + {ov::element::u8}, { - {ngraph::element::f32}, + {ov::element::f32}, {}, {0.01f}, }, @@ -163,8 +163,8 @@ const std::vector param } }; -const std::vector> activations_shapes = {{{1, 2, 3}, {1, 2, 3}, {}}}; -const std::vector> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}}; +const std::vector> activations_shapes = {{{1, 2, 3}, {1, 2, 3}, {}}}; +const std::vector> weights_shapes = {{{1, 9, 3}, {1, 9, 3}, {1, 9}}}; // Quantized Recurrent models are not supported by GPU yet. 
Keep tests for future INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_LPT, RecurrentCellTransformation, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp index 0413b20fad4135..d467f8151a58cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_max_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,28 +23,28 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, false, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, true, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, false, }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -55,7 +55,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -66,7 +66,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -77,7 +77,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -91,7 +91,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMaxTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp index b6db9007b2c3e3..383cf580b87909 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_mean_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - 
ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,28 +23,28 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{2, 3}, true}, {} }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{2, 3}, false}, {} }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{ 1 }, true}, {} }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, {}, {}, {{ 1 }, false}, @@ -52,7 +52,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -65,7 +65,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -78,7 +78,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -91,7 +91,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -107,7 +107,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMeanTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp index bbe489493f5df5..2a5a83a75bd412 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_min_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,28 +23,28 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, false, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 
127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, true, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 1 }, false, }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -55,7 +55,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -66,7 +66,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -77,7 +77,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -91,7 +91,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceMinTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp index 74b7d26ee7ffd6..3ea1f9568d2e56 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reduce_sum_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,18 +23,18 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 127.f } }, { 2, 3 }, true, }, { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 2.f }, { 10.f }, { 2.f }, { 10.f } }, { 2, 3 }, false, }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -45,7 +45,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -56,7 +56,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -67,7 +67,7 @@ const std::vector params = }, { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -81,7 +81,7 @@ const std::vector params = INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReduceSumTransformation, 
::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })), + ::testing::Values(ov::PartialShape({ 1, 3, 10, 10 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp index 637f45d05ad5ff..f509677c9ce446 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/relu_transformation.cpp @@ -8,26 +8,25 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; std::vector testValues = { { {}, false}, - { { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, - { { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, - { { 256ul, ngraph::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, - { { 256ul, ngraph::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } + { { 256ul, ov::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} }, false }, + { { 256ul, ov::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }, true }, + { { 256ul, ov::Shape({}), {12.75f}, {25.5f}, {12.75f}, {25.5f} }, true }, + { { 256ul, ov::Shape({}), {-12.8f / 2.f}, {12.7f}, {-12.8f / 2.f}, {12.7f} }, true } }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, ReluTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(testValues)), ReluTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp index a73e60027140cf..a50b1739cdf92f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/reshape_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -24,7 +24,7 @@ const std::vector params = { { { 1, 3, 32 }, { 1, 3, 4, 8 }, - { 256ul, ngraph::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "U8" }, @@ -32,7 +32,7 @@ const std::vector params = { { { 1, 3, 32 }, { -1 }, - { 256ul, ngraph::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "U8" }, @@ -40,7 +40,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 1, 
3, 256 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "U8" }, @@ -48,7 +48,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 0, 3, -1 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f }, { 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", "U8" }, @@ -56,7 +56,7 @@ const std::vector params = { { { 1, 3, 4, 8 }, { 1, -1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "U8" }, @@ -64,7 +64,7 @@ const std::vector params = { { { 1, 3, 4, 8 }, { 1, 3, 4, 8, 1, 1 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, "Reshape", "U8" }, @@ -74,7 +74,7 @@ const std::vector params = { { 1, -1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f / 2.f, 255.f / 3.f }, { 0.f, 0.f, 0.f }, @@ -89,7 +89,7 @@ const std::vector params = { { 1, 3, -1 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f / 2.f, 255.f / 3.f }, { 0.f, 0.f, 0.f }, @@ -105,7 +105,7 @@ const std::vector params = { { 1, -1, 8 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f / 2.f, 255.f / 3.f }, { 0.f, 0.f, 0.f }, @@ -118,7 +118,7 @@ const std::vector params = { { { 1, 3, 16, 16 }, { 1, 1, 48, 16 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -128,7 +128,7 @@ const std::vector params = { { { 1, 3, 16 }, { 1, 1, 6, 8 }, - { 256ul, ngraph::Shape{ 1, 3, 1 }, + { 256ul, ov::Shape{ 1, 3, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -138,7 +138,7 @@ const std::vector params = { { { 1, 3, 2, 4 }, { 1, 1, 24 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -148,7 +148,7 @@ const std::vector params = { { { 1, 3, 2, 4, 2 }, { 1, 1, 48 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", @@ -158,7 +158,7 @@ const std::vector params = { { { 1, 3, 2, 4, 2 }, { 1, 1, 3, 16 }, - { 256ul, ngraph::Shape{ 1, 3, 1, 1, 1 }, + { 256ul, ov::Shape{ 1, 3, 1, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f, 255.f }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f } }, "Reshape", diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp index bceaf8fd31f6dc..87b7f57adddc0b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/shuffle_channels_transformation.cpp @@ -10,12 +10,12 @@ using namespace 
LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector inputShapes = { +const std::vector inputShapes = { { 1, 3, 16, 16 } }; @@ -25,19 +25,19 @@ const std::vector trasform const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, 0, 1, }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, -3, 1, }, { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { 0.f, 0.f, 0.f }, @@ -49,7 +49,7 @@ const std::vector par { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { -4.f, -3.f, 0.f }, @@ -59,14 +59,14 @@ const std::vector par 1, }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } }, 2, 4, }, { { 256ul, - ngraph::Shape { 1, 3, 1, 1 }, + ov::Shape { 1, 3, 1, 1 }, { 0.f }, { 25.5f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp index ca9ef9f1dda658..0a5a4f35cb881d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/space_to_batch_transformation.cpp @@ -10,16 +10,16 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector params = { { { 1, 3, 100, 171 }, { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 }, - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, "space_to_batch", "u8" }, @@ -28,7 +28,7 @@ const std::vector params = { { 1, 1, 2, 2 }, { 0, 0, 2, 2 }, { 0, 0, 2, 3 }, { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 255.f/2.f, 255.f/3.f }, { 0.f, 0.f, 0.f }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp index 8f28760f2c70ce..d9f621f442e965 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/split_transformation.cpp @@ -13,9 +13,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -28,18 +28,18 @@ const std::vector trasform const std::vector params = { // tensor quantization, split 
second dimension { - { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, + { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, 2, 2ul }, // tensor quantization, split third dimension { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, -1, 2ul }, // per-channel quantization with the same values, split second dimension { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -50,7 +50,7 @@ const std::vector params = { // per-channel quantization with the same values, per-channel split { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -62,7 +62,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -74,7 +74,7 @@ const std::vector params = { { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -87,7 +87,7 @@ const std::vector params = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, SplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp index adf09280b3b296..936801c2ed45b8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { - const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 + const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; @@ -25,22 +25,22 @@ namespace { const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3 }, { 1, 3, 5, 1} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2, 3 }, { 1, 1, 1, 1 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3 }, { 1, 64, 32, 1 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2.0, 3.0 }, { 1, 32, 1, 1 } } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp index 0c24c71f76ec72..195c95dc8bdf19 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/strided_slice_transformation.cpp @@ -12,9 +12,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -24,7 +24,7 @@ const std::vector trasform const std::vector params = { // channel slice, tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 0, 0 }, // begin { 1, 2, 1, 1 }, // end { 1, 1, 1, 1 }, // strided @@ -36,7 +36,7 @@ const std::vector params }, // special dimension slice, tensor quantization { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { 0.f }, { 25.5f }, { 0.f }, { 12.8f } }, { 0, 0, 0, 0 }, { 1, 3, 20, 24 }, { 1, 1, 1, 1 }, @@ -50,7 +50,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -69,7 +69,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -88,7 +88,7 @@ const std::vector params { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { 0.f, 0.f, 0.f }, { 255.f, 25.5f, 2.55f }, { 0.f, 0.f, 0.f }, @@ -108,7 +108,7 @@ const std::vector params INSTANTIATE_TEST_SUITE_P(smoke_LPT, StridedSliceTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 24, 24 })), + ::testing::Values(ov::PartialShape({ 1, 3, 24, 24 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp index 2e52efdedd04e7..6064605844c4bd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_multiply_to_multiply_add.cpp @@ -14,16 +14,16 @@ const std::vector testVal // U8: Multiply {} => Multiply (ScaleShift) { {1, 3, 16, 16}, - ngraph::element::f32, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + ov::element::f32, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, }, // U8: Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift) { {1, 3, 16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1, 3, 1, 1}), + ov::Shape({1, 3, 1, 1}), {0.f, 0.f, 0.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f}, {0.f, 0.f, 0.f}, @@ -33,10 +33,10 @@ const std::vector testVal // U8: Subtract + Multiply { 1x3x1x1 } => Multiply + Add (ScaleShift) { {1, 3, 
16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1, 3, 1, 1}), + ov::Shape({1, 3, 1, 1}), {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f}, {2.55f / 2, 2.55f / 4.f, 2.55f / 6.f}, @@ -45,10 +45,10 @@ const std::vector testVal }, { {1, 3, 16, 16}, - ngraph::element::f32, + ov::element::f32, { 256ul, - ngraph::Shape({1}), + ov::Shape({1}), {2.55f / 2}, {2.55f}, {2.55f / 2}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp index 985c2f9de0e1d0..0ee661031da3b1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -23,7 +23,7 @@ const std::vector trasformationParamValues = { INSTANTIATE_TEST_SUITE_P(smoke_LPT, SubtractTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues)), SubtractTransformation::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp index f82e9de2ad2b00..6d57c9a2c6d445 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -27,7 +27,7 @@ const std::vector transposeChannelDimValues = { true, false }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, TransposeAfterMatMulTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(perTensorValues), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp index 8d33c5cea8a374..87f77078ef1a30 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/transpose_transformation.cpp @@ -10,9 +10,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector precisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector precisions = { + ov::element::f32, + ov::element::f16 }; const std::vector testValues = { @@ -21,7 +21,7 @@ const std::vector testValues = { { 1, 1000, 1, 1}, { 0, 2, 3, 1}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, // U8: per-channel quantization @@ -29,7 +29,7 @@ const std::vector testValues = { { 1, 3, 1, 1}, { 0, 2, 3, 1}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, { 256, {1, 3, 1, 1}, @@ -44,7 +44,7 @@ const std::vector testValues = { { 1, 1000, 1, 1, 3, 4}, { 0, 2, 1, 3, 5, 4}, LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(), - ngraph::element::f32, + ov::element::f32, {256, {}, {0.f}, {25.5f}, {12.5f}, {25.5f + 12.5f}} }, }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp index d7665bfc1fbd30..72ddfeba696e4d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp @@ -11,9 +11,9 @@ using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; namespace { - const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 + const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; @@ -25,27 +25,27 @@ namespace { const std::vector params = { { - { 256ul, ngraph::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 3, 5} }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 3, 3, 5 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 3.0 }, { 3, 4, 5, 6 } }, { - { 256ul, ngraph::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 2.0, 3.0 }, { 3, 4 } }, { - { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, + { 256ul, ov::Shape { 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { -12.8f }, { 12.7f } }, { 4.0 }, { 46, 128, 2, 3 } } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp index 2861447c881f03..03dfec34fc1ca2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/variadic_split_transformation.cpp @@ -13,9 +13,9 @@ using namespace LayerTestsDefinitions; namespace { -const std::vector netPrecisions = { - ngraph::element::f32, - ngraph::element::f16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; const std::vector trasformationParamValues = { @@ -28,13 +28,13 @@ const std::vector trasform const std::vector params{ // tensor quantization, split second dimension { - { 256ul, ngraph::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, + { 256ul, ov::Shape{ }, { 0.f }, { 25.5f }, { 0.f }, { 25.5f / 2.f } }, 2, std::vector{9, 7} }, // tensor quantization, split third dimension { - { 256ul, ngraph::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, + { 256ul, ov::Shape{ 1, 1, 1, 1 }, { -12.8f }, { 12.7f }, { 0.f }, { 25.5f } }, -1, std::vector{15, 1} }, @@ -42,7 +42,7 @@ const std::vector param { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -55,7 +55,7 @@ const std::vector param { { 256ul, - ngraph::Shape{ 1, 3, 1, 1 }, + ov::Shape{ 1, 3, 1, 1 }, { -127.f, 0.f, 128.f / 2.f }, { 128.f / 4.f, 128.f / 2.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -67,7 +67,7 @@ const std::vector param // per-channel quantization with the same values, per-channel split { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -79,7 +79,7 @@ const std::vector param // per-channel quantization with the same values, split third dimension { { - 256ul, ngraph::Shape{ 1, 3, 1, 1 }, + 256ul, ov::Shape{ 1, 3, 1, 1 }, { -127.f, -127.f, -127.f }, { 128.f, 128.f, 128.f }, { 0.f, 0.f, 0.f }, @@ -93,7 +93,7 @@ const std::vector param INSTANTIATE_TEST_SUITE_P(smoke_LPT, VariadicSplitTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::PartialShape({ 1, 3, 16, 16 })), + ::testing::Values(ov::PartialShape({ 1, 3, 16, 16 })), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index dfa44b5758a4ea..ca982f2d8ceeaa 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -4,8 +4,9 @@ #include -#include "single_op_tests/depth_to_space.hpp" #include "common_test_utils/test_constants.hpp" +#include "openvino/opsets/opset3.hpp" +#include "single_op_tests/depth_to_space.hpp" namespace { using ov::test::DepthToSpaceLayerTest; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp index a27aa05c4f9f69..74203f59f38c3b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp @@ -4,8 +4,9 @@ #include -#include "single_op_tests/gather_elements.hpp" #include 
"common_test_utils/test_constants.hpp" +#include "openvino/opsets/opset6.hpp" +#include "single_op_tests/gather_elements.hpp" namespace { using ov::test::GatherElementsLayerTest; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp index 01d71d360dbca6..f3270b8c83b922 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rdft.cpp @@ -78,7 +78,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d, // RDFT can support last axis INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d_last_axis, RDFTLayerTest, - testing::Combine(testing::Values(InferenceEngine::SizeVector{10, 4, 8, 2, 5}), + testing::Combine(testing::Values(std::vector{10, 4, 8, 2, 5}), testing::ValuesIn(inputPrecisions), testing::ValuesIn(std::vector>{{{0, 1, 2, 3, 4}}}), testing::ValuesIn(std::vector>{{}, {3, 10, 8, 6, 2}}), @@ -89,7 +89,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_RDFT_5d_last_axis, // IRDFT can support 6d INSTANTIATE_TEST_SUITE_P(smoke_RDFT_6d, RDFTLayerTest, - testing::Combine(testing::Values(InferenceEngine::SizeVector{10, 4, 8, 2, 5, 2}), + testing::Combine(testing::Values(std::vector{10, 4, 8, 2, 5, 2}), testing::ValuesIn(inputPrecisions), testing::ValuesIn(std::vector>{{{0, 1, 2, 3, 4}}}), testing::ValuesIn(std::vector>{{}, {3, 10, 8, 6, 2}}), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp index 93ea0f2448d57b..98384ef39cfd7b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp @@ -116,6 +116,10 @@ std::vector ss_only_test_cases_fp32 = { { 5, 5, 5, 5 }})), { -1, 0, -1, 0 }, { -50, 0, -60, 0 }, { -1, 1, -1, 1 }, { 0, 0, 0, 0 }, { 0, 1, 0, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 4, 1 }})), + { 0, 0, 0, 0 }, { 2, 2, 4, 1 }, { 1, 1, 1, 1 }, + { 0 }, { 0 }, { 1 }, { 0 }, {0 } }, StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ { 128, 1, 1024 }})), { -1, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 70d0ee093e2b26..5ba72f7ac0e99c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -58,6 +58,12 @@ std::vector disabledTestPatterns() { R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*CompareWithRefImpl.*)", // Issue: 124060 R"(.*smoke_GridSample/GridSampleLayerTest.Inference/.*model_type=f16.*)", + // Issue: 119648 + R"(.*smoke_LPT/InterpolateTransformation.*)", + // Issue: 128924 + R"(.*OVClassModelTestP/OVClassModelTestP.ImportModelWithNullContextThrows.*)", + // Issue: 129802 + R"(.*smoke_OVClassBasicTestP/OVClassBasicTestP.registerExistingPluginThrows.*)", #if defined(_WIN32) R"(.*KernelCachingSupportCase.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)", #endif @@ -82,5 +88,7 @@ 
std::vector disabledTestPatterns() { R"(smoke_Nms9LayerTest.*)", // Doesn't match reference results as v6 ref impl behavior is misaligned with expected R"(smoke_MemoryTestV3.*)", + // Issue: 129991 + R"(.*StridedSliceLayerTest.*TS=.*2.2.4.1*.*)", }; } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp index 97614faf548515..41a6403fbf0dbb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp @@ -2,45 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "subgraph_tests/constant_result_legacy.hpp" +#include "subgraph_tests/constant_result.hpp" #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; -using namespace InferenceEngine; - namespace { +using ov::test::ConstantSubgraphType; +using ov::test::ConstantResultSubgraphTest; const std::vector types = { ConstantSubgraphType::SINGLE_COMPONENT, ConstantSubgraphType::SEVERAL_COMPONENT }; -const std::vector shapes = { +const std::vector shapes = { {1, 3, 10, 10}, {2, 3, 4, 5} }; -const std::vector precisions = { - Precision::U8, - Precision::I8, - Precision::U16, - Precision::I16, - Precision::I32, - Precision::U64, - Precision::I64, - Precision::FP32, - Precision::BOOL +const std::vector model_types = { + ov::element::u8, + ov::element::i8, + ov::element::u16, + ov::element::i16, + ov::element::i32, + ov::element::u64, + ov::element::i64, + ov::element::f32, + ov::element::boolean }; INSTANTIATE_TEST_SUITE_P(smoke_Check, ConstantResultSubgraphTest, ::testing::Combine( ::testing::ValuesIn(types), ::testing::ValuesIn(shapes), - ::testing::ValuesIn(precisions), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), ConstantResultSubgraphTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp deleted file mode 100644 index c417dc6ce04a2c..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "subgraph_tests/parameter_result.hpp" - -#include - -#include "common_test_utils/test_constants.hpp" - -using namespace SubgraphTestsDefinitions; -using namespace ov::test; - -namespace { - -INSTANTIATE_TEST_SUITE_P(smoke_Check, - ParameterResultSubgraphTestLegacyApi, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Check, - ParameterResultSubgraphTest, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp index 0a3f87714939c0..186dc84587f8dd 
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp index 0a3f87714939c0..186dc84587f8dd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reduce_eltwise.cpp @@ -6,41 +6,36 @@ #include "subgraph_tests/reduce_eltwise.hpp" -using namespace SubgraphTestsDefinitions; - namespace { - -const std::vector<InferenceEngine::Precision> netPrecisions = { - InferenceEngine::Precision::FP32, -}; +using ov::test::ReduceEltwiseTest; INSTANTIATE_TEST_SUITE_P(smoke_ReduceEltwise6D, ReduceEltwiseTest, testing::Combine( - testing::Values(std::vector<size_t>{2, 3, 4, 5, 6, 7}), + testing::Values(ov::Shape{2, 3, 4, 5, 6, 7}), testing::Values(std::vector<int>{2, 3, 4}), testing::Values(ov::test::utils::OpType::VECTOR), testing::Values(false), - testing::ValuesIn(netPrecisions), + testing::Values(ov::element::f32), testing::Values(ov::test::utils::DEVICE_GPU)), ReduceEltwiseTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ReduceEltwise5D, ReduceEltwiseTest, testing::Combine( - testing::Values(std::vector<size_t>{2, 3, 4, 5, 6}), + testing::Values(ov::Shape{2, 3, 4, 5, 6}), testing::Values(std::vector<int>{2, 3}), testing::Values(ov::test::utils::OpType::VECTOR), testing::Values(false), - testing::ValuesIn(netPrecisions), + testing::Values(ov::element::f32), testing::Values(ov::test::utils::DEVICE_GPU)), ReduceEltwiseTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ReduceEltwise4D, ReduceEltwiseTest, testing::Combine( - testing::Values(std::vector<size_t>{2, 3, 4, 5}), + testing::Values(ov::Shape{2, 3, 4, 5}), testing::Values(std::vector<int>{2}), testing::Values(ov::test::utils::OpType::VECTOR), testing::Values(false), - testing::ValuesIn(netPrecisions), + testing::Values(ov::element::f32), testing::Values(ov::test::utils::DEVICE_GPU)), ReduceEltwiseTest::getTestCaseName);
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/scale_shift.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/scale_shift.cpp index 57676b002effea..0549827f0c44a7 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/scale_shift.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/scale_shift.cpp @@ -2,15 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include <vector> #include "subgraph_tests/scaleshift.hpp" #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; - namespace { +using ov::test::ScaleShiftLayerTest; -std::vector<std::vector<std::vector<size_t>>> inShapes = { +std::vector<std::vector<ov::Shape>> inShapes = { {{100}}, {{100}, {100}}, {{1, 8}}, @@ -41,17 +39,17 @@ std::vector<std::vector<float>> Shifts = { {-3.0f} }; -std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, +std::vector<ov::element::Type> types = {ov::element::f32, + ov::element::f16, }; -} // namespace INSTANTIATE_TEST_SUITE_P(smoke_ScaleShift, ScaleShiftLayerTest, ::testing::Combine( ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(types), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(Scales), ::testing::ValuesIn(Shifts)), ScaleShiftLayerTest::getTestCaseName); +} // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp index 473935bd799840..6b255c9981c08a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp
@@ -118,7 +118,7 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface std::shared_ptr<ov::Node> outShapeNode; if (!outShapeData.empty()) { if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { - IE_ASSERT(inputDynamicShapes.size() == 2); + OPENVINO_ASSERT(inputDynamicShapes.size() == 2); auto outShapeParam = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; @@ -133,7 +133,7 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface std::shared_ptr<ov::Node> deconv; if (!outShapeData.empty()) { - IE_ASSERT(outShapeNode != nullptr); + OPENVINO_ASSERT(outShapeNode != nullptr); deconv = ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, model_type, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels); } else {
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp index 9df68f3af86b72..8ddfabd9bcdc50 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp @@ -127,7 +127,6 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterface const auto& funcInputs = function->inputs(); for (auto i = 0ul; i < funcInputs.size(); ++i) { const auto &funcInput = funcInputs[i]; - InferenceEngine::Blob::Ptr blob; int32_t resolution = 1; uint32_t range = 1; if (i == 2) {
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 8a93e4b89b12d2..499a15ec766ab7 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -121,7 +121,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface (const std::pair<std::shared_ptr<ov::Node>, ov::Tensor> &params) { return params.first->get_friendly_name() == "param_1"; }); - IE_ASSERT(pos != inputs.end()); + OPENVINO_ASSERT(pos != inputs.end()); inputs.erase(pos); } auto expectedOutputs = calculate_refs(); @@ -164,7 +164,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface std::shared_ptr<ov::Node> outShapeNode; if (!outShapeData.empty()) { if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { - IE_ASSERT(inputDynamicShapes.size() == 2); + OPENVINO_ASSERT(inputDynamicShapes.size() == 2); auto outShapeParam = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; @@ -179,7 +179,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface std::shared_ptr<ov::Node> deconv; if (!outShapeData.empty()) { - IE_ASSERT(outShapeNode != nullptr); + OPENVINO_ASSERT(outShapeNode != nullptr); deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, groupNum); } else {
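The hunks above and below replace the legacy IE_ASSERT macro with OPENVINO_ASSERT from openvino/core/except.hpp. Unlike IE_ASSERT, OPENVINO_ASSERT also accepts optional message arguments that are appended to the thrown ov::AssertFailure; a minimal sketch (the function and message text are illustrative, not from the PR):

#include <openvino/core/except.hpp>

// Sketch: condition plus streamed context; throws ov::AssertFailure on failure.
void check_input_count(size_t n_dynamic_shapes) {
    OPENVINO_ASSERT(n_dynamic_shapes == 2,
                    "expected data + output-shape inputs, got ", n_dynamic_shapes);
}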
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp index 2b3d2dccf2cc77..ce54580e1f77d5 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp @@ -79,7 +79,7 @@ class MatMulLayerGPUTest : public testing::WithParamInterface template <typename T> void transpose(T& shape) { - IE_ASSERT(shape.size() > 1); + OPENVINO_ASSERT(shape.size() > 1); std::swap(*(shape.end() - 1), *(shape.end() - 2)); }
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp index 2e798d7639542b..ffda0040d446e4 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp @@ -72,7 +72,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface, std::ostringstream result; if (!bounds.empty()) { - IE_ASSERT(bounds.size() == 3); + OPENVINO_ASSERT(bounds.size() == 3); result << "BatchesBounds=" << bounds[BATCHES] << "_BoxesBounds=" << bounds[BOXES] << "_ClassesBounds=" << bounds[CLASSES] << "_"; } for (const auto &ts : targetShapes) {
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp index 8200700e0bd902..39501c67e1bbb7 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp @@ -294,4 +294,219 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_dynamic_exit, DynamicShapeLoopTest, /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); + +using DynamicShapeLoopDynamicInputParams = typename std::tuple< + bool, + std::tuple< + bool, + int64_t, + int64_t, + int64_t + >, + int64_t, + InputShape, + InputShape, + ov::element::Type, + std::string>; + +class DynamicShapeLoopDynamicInputTest : public testing::WithParamInterface<DynamicShapeLoopDynamicInputParams>, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo<DynamicShapeLoopDynamicInputParams> &obj) { + bool static_iter_num; + bool static_continue_cond; + int64_t max_iter_num; + int64_t dynamic_exit; + int64_t axis; + int64_t start_value; + InputShape data_shapes; + InputShape constant_shapes; + ov::element::Type model_type; + std::string targetDevice; + auto args_pack = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); + std::tie( + static_continue_cond, + args_pack, + start_value, + data_shapes, + constant_shapes, + model_type, + targetDevice) = obj.param; + + std::ostringstream result; + result << "static_iter_num=" << std::to_string(static_iter_num) << "_"; + result << "static_continue_cond=" << std::to_string(static_continue_cond) << "_"; + result << "max_iter_num=" << std::to_string(max_iter_num) << "_"; + result << "dynamic_exit=" << std::to_string(dynamic_exit) << "_"; + result << "axis=" << std::to_string(axis) << "_"; + result << "start_value=" << std::to_string(start_value) << "_"; + result << "IS=("; + result << ov::test::utils::partialShape2str({data_shapes.first}) << "_"; + for (size_t i = 0lu; i < data_shapes.second.size(); i++) { + result << "{"; + result << ov::test::utils::vec2str(data_shapes.second[i]) << "_"; + result << "}_"; + } + result << ")_"; + result << "netType=" << model_type << "_"; + result << "targetDevice=" << targetDevice << "_"; + + auto res_str = result.str(); + std::replace(res_str.begin(), res_str.end(), '-', '_'); + return res_str; + } + +private: + bool
static_iter_num; // trip count provided by constant node + bool static_continue_cond; // initial_cond provided by constant node + int64_t max_iter_num; // -1 means infinity loop (expected dynamic exit condition in body) + int64_t dynamic_exit; // -1 means always true + int64_t axis; // -1 means no auto concatenation + int64_t start_value; + InputShape data_shapes; + InputShape constant_shapes; + ov::element::Type model_type; + +protected: + void SetUp() override { + auto args_pack = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); + std::tie( + static_continue_cond, + args_pack, + start_value, + data_shapes, + constant_shapes, + model_type, + targetDevice) = GetParam(); + + const auto inputShape = data_shapes.first; + const auto scalarShape = ov::Shape{}; + init_input_shapes({data_shapes, data_shapes, constant_shapes}); + + ov::ParameterVector params{}; + auto cond_input_create = [&params] (ov::element::Type model_type, + const ov::PartialShape &shape, + int value = 0, + bool is_static = false) -> std::shared_ptr<ov::Node> { + if (is_static) + return std::make_shared<ov::op::v0::Constant>(model_type, shape.to_shape(), value); + + auto input = std::make_shared<ov::op::v0::Parameter>(model_type, shape); + params.push_back(input); + return input; + }; + + // Create a body whose back-edge init input has a smaller shape than the value fed back from the body, + // so the back-edge shape has to be updated during iteration + auto start_add = cond_input_create(model_type, inputShape, start_value); + start_add->set_friendly_name("start_add"); + auto start_mul = cond_input_create(model_type, inputShape, 1); + start_mul->set_friendly_name("start_mul"); + auto count = cond_input_create(ov::element::i64, scalarShape, max_iter_num, static_iter_num); + count->set_friendly_name("count"); + auto skip = cond_input_create(ov::element::boolean, scalarShape, true, static_continue_cond); + skip->set_friendly_name("skip"); + auto init_const = cond_input_create(model_type, constant_shapes.first, 1); + init_const->set_friendly_name("init_const"); + + auto b_indx = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{}); + b_indx->set_friendly_name("body_index"); + auto b_data_add = std::make_shared<ov::op::v0::Parameter>(model_type, inputShape); + b_data_add->set_friendly_name("b_data_add"); + auto b_data_mul = std::make_shared<ov::op::v0::Parameter>(model_type, inputShape); + b_data_mul->set_friendly_name("b_data_mul"); + auto b_data_broadcast = std::make_shared<ov::op::v0::Parameter>(model_type, constant_shapes.first); + b_data_broadcast->set_friendly_name("b_data_broadcast"); + auto b_indx_cast = std::make_shared<ov::op::v0::Convert>(b_indx, model_type); + b_indx_cast->set_friendly_name("body_index_cast"); + auto b_add = std::make_shared<ov::op::v1::Add>(b_data_add, b_indx_cast); + b_add->set_friendly_name("body_add"); + auto b_mul = std::make_shared<ov::op::v1::Multiply>(b_data_mul, b_indx_cast); + b_mul->set_friendly_name("body_mul"); + auto b_shapeof1 = std::make_shared<ov::op::v3::ShapeOf>(b_data_mul); + b_shapeof1->set_friendly_name("b_shapeof1"); + auto b_shapeof2 = std::make_shared<ov::op::v3::ShapeOf>(b_data_broadcast); + b_shapeof2->set_friendly_name("b_shapeof2"); + auto b_max = std::make_shared<ov::op::v1::Maximum>(b_shapeof1, b_shapeof2); + b_max->set_friendly_name("b_max"); + auto b_broadcast = std::make_shared<ov::op::v3::Broadcast>(b_data_broadcast, b_max); + b_broadcast->set_friendly_name("b_broadcast"); + auto b_mul2 = std::make_shared<ov::op::v1::Multiply>(b_broadcast, b_mul); + b_mul2->set_friendly_name("b_mul2"); + + std::shared_ptr<ov::Node> b_cond; + if (dynamic_exit == -1) { + b_cond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{}, true); + b_cond->set_friendly_name("body_condition"); + } else { + auto b_exit_value = std::make_shared<ov::op::v0::Constant>(ov::element::i64, scalarShape, dynamic_exit); + b_exit_value->set_friendly_name("body_exit_value"); + b_cond = std::make_shared<ov::op::v1::Less>(b_indx, b_exit_value); + b_cond->set_friendly_name("body_condition_with_exit_value"); + } + + auto body = std::make_shared<ov::Model>( + ov::OutputVector {b_cond, b_add, b_mul, b_mul2}, // TODO: check with reverse + ov::ParameterVector {b_indx, b_data_add, b_data_mul, b_data_broadcast}); // TODO: check with reverse + body->set_friendly_name("body_network"); + + auto loop = std::make_shared<ov::op::v5::Loop>(count, skip); + loop->set_friendly_name("loop"); + loop->set_function(body); + loop->set_special_body_ports({0, 0}); + loop->set_merged_input(b_data_add, start_add, b_add); + loop->set_merged_input(b_data_mul, start_mul, b_mul); + loop->set_merged_input(b_data_broadcast, init_const, b_mul2); + if (axis == -1) { + loop->get_iter_value(b_add, -1); + loop->get_iter_value(b_mul, -1); + loop->get_iter_value(b_mul2, -1); + } else { + loop->get_concatenated_slices(b_add, 0, 1, 1, -1, axis); + loop->get_concatenated_slices(b_mul, 0, 1, 1, -1, axis); + } + + ov::ResultVector results; + for (size_t i = 0; i < loop->get_output_size(); i++) { + auto res = std::make_shared<ov::op::v0::Result>(loop->output(i)); + res->set_friendly_name("loop_output_" + std::to_string(i)); + results.push_back(res); + } + function = std::make_shared<ov::Model>( + results, + params); + function->set_friendly_name("outer_body_network"); + } +}; + +TEST_P(DynamicShapeLoopDynamicInputTest, Inference) { + run(); +} + +static const std::vector<std::tuple<bool, int64_t, int64_t, int64_t>> dynamic_loop_input { + // GCC4.8 limitation: have to specify type of each element in list + // static_trip_count | max | dynamic_exit | axis + std::tuple<bool, int64_t, int64_t, int64_t>{ true, 5, 3, -1 }, // n_iter 3, dynamic exit on 3 + std::tuple<bool, int64_t, int64_t, int64_t>{ true, -1, 5, -1 }, // n_iter 5, inf loop with dynamic exit on 5 +}; + +std::vector<InputShape> inputs_dynamic_shape = { + InputShape(ov::PartialShape({-1, 1, -1}), {{4, 1, 2}, {10, 1, 2}, {12, 1, 2}}), +}; + +std::vector<InputShape> constant_dynamic_shape = { + InputShape(ov::PartialShape({-1, 1, -1}), {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}), +}; + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_dynamic, DynamicShapeLoopDynamicInputTest, + testing::Combine( + /* static_continue_cond */ testing::Values(true), + /* args_pack */ testing::ValuesIn(dynamic_loop_input), + /* start_value */ testing::Values(0), + /* data_shape */ testing::ValuesIn(inputs_dynamic_shape), + /* constant_shape */ testing::ValuesIn(constant_dynamic_shape), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), + DynamicShapeLoopDynamicInputTest::getTestCaseName); } // namespace \ No newline at end of file
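The Loop body added above carries three merged (back-edge) inputs; a sketch of what one iteration computes, derived from the graph (illustrative pseudocode, not code from the PR): add_state = add_state + i; mul_state = mul_state * i; bcast_state = Broadcast(bcast_state, Maximum(ShapeOf(mul_state), ShapeOf(bcast_state))) * mul_state. Because bcast_state is initialized to {1, 1, 1} while the data is {N, 1, 2}, the third back-edge changes shape after the first iteration, which is exactly the dynamic-input re-inference the test targets. A small runnable sketch of that shape evolution under the instantiated shapes:

#include <algorithm>
#include <array>
#include <cstdio>

// Sketch: how the third back-edge's shape evolves, assuming data shape {4, 1, 2}
// and init_const shape {1, 1, 1} as in inputs_dynamic_shape / constant_dynamic_shape.
int main() {
    std::array<long long, 3> mul_shape{4, 1, 2};
    std::array<long long, 3> bcast_shape{1, 1, 1};
    for (int i = 0; i < 3; ++i) {
        for (size_t d = 0; d < 3; ++d)  // b_max = Maximum(ShapeOf(mul), ShapeOf(bcast))
            bcast_shape[d] = std::max(mul_shape[d], bcast_shape[d]);
        std::printf("iter %d: bcast back-edge shape = {%lld, %lld, %lld}\n",
                    i, bcast_shape[0], bcast_shape[1], bcast_shape[2]);
    }
    return 0;
}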
diff --git a/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp index df20a50e33b9b2..abbfea890f9cd0 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/convolution_fusion_test.cpp @@ -1488,43 +1488,6 @@ TEST_P(conv_fp32_eltwise_b_fs_zyx_fsv16, vector_ops) { execute(p); } -class conv_fp32_swish : public ConvFusingTest {}; -TEST_P(conv_fp32_swish, basic) { - auto p = GetParam(); - create_topologies( - input_layout("input", get_input_layout(p)), - data("weights", get_mem(get_weights_layout(p))), - data("bias", get_mem(get_per_channel_layout(p))), - convolution("conv_prim", input_info("input"), "weights", "bias", p.groups, p.stride, p.dilation, p.pad, p.pad, format::is_grouped(get_weights_layout(p).format)), - activation("sigmoid", input_info("conv_prim"), activation_func::logistic), -
eltwise("mul", { input_info("conv_prim"), input_info("sigmoid") }, eltwise_mode::prod), - reorder("reorder_bfyx", input_info("mul"), p.default_format, data_types::f32) - ); - - if (engine.get_device_info().supports_immad && - p.default_type == data_types::f16) { - GTEST_SKIP(); // Issue: 94154 - } - - tolerance = default_tolerance(p.default_type); - if (p.default_type == data_types::f16) { - tolerance *= 3.f; // Issue: 94154 - } - execute(p); -} - -INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_fp32_swish, ::testing::ValuesIn(std::vector{ - // convolution_test_params{ CASE_CONV_FP32_1, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_2, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_3, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP32_4, 2, 2, 4 }, - - // convolution_test_params{ CASE_CONV_FP32_1, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_2, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_3, 2, 2, 4 }, - convolution_test_params{ CASE_CONV_FP16_4, 2, 2, 4 }, -})); - INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_fp32_eltwise_b_fs_zyx_fsv16, ::testing::ValuesIn(std::vector{ convolution_test_params{ CASE_CONV_FP32_6, 2, 2, 3 }, convolution_test_params{ CASE_CONV_FP32_7, 2, 2, 3 }, @@ -2030,52 +1993,6 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_eltwise, ::testing::ValuesIn(std convolution_test_params{ CASE_CONV3D_S8S8_5, 2, 2, 3 }, })); -class conv_int8_scale_shift_swish : public ConvFusingTest {}; -TEST_P(conv_int8_scale_shift_swish, basic) { - auto p = GetParam(); - create_topologies( - input_layout("input", get_input_layout(p)), - data("weights", get_mem(get_weights_layout(p))), - data("bias", get_mem(get_per_channel_layout(p))), - data("scale_data", get_mem(get_per_channel_layout(p), 1.0f/255.f)), - data("shift_data", get_mem(get_per_channel_layout(p), 1)), - convolution("conv_prim", input_info("input"), "weights", "bias", p.groups, p.stride, p.dilation, p.pad, p.pad, format::is_grouped(get_weights_layout(p).format)), - eltwise("scale0", { input_info("conv_prim"), input_info("scale_data") }, eltwise_mode::prod), - eltwise("scale1", { input_info("conv_prim"), input_info("scale_data") }, eltwise_mode::prod), - eltwise("shift0", { input_info("scale0"), input_info("shift_data") }, eltwise_mode::sum), - eltwise("shift1", { input_info("scale1"), input_info("shift_data") }, eltwise_mode::sum), - activation("sigmoid", input_info("shift0"), activation_func::logistic), - eltwise("mul", { input_info("shift1"), input_info("sigmoid") }, eltwise_mode::prod), - reorder("reorder_bfyx", input_info("mul"), p.default_format, data_types::f32) - ); - - // high tolerance because many eltwise operations - tolerance = default_tolerance(p.default_type) * 10; - execute(p, -20, 20); -} - -INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_scale_shift_swish, ::testing::ValuesIn(std::vector{ - convolution_test_params{ CASE_CONV_U8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_U8S8_4, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV_S8S8_4, 2, 2, 8 }, - - convolution_test_params{ CASE_CONV3D_U8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_U8S8_4, 2, 2, 8 }, - convolution_test_params{ 
CASE_CONV3D_U8S8_5, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_1, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_2, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_3, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_4, 2, 2, 8 }, - convolution_test_params{ CASE_CONV3D_S8S8_5, 2, 2, 8 }, -})); - class conv_int8_prelu_eltwise : public ConvFusingTest {}; TEST_P(conv_int8_prelu_eltwise, basic) { auto p = GetParam();
diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp index 98169c608f2bfb..25b11ee28f5987 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/format_test.cpp @@ -224,7 +224,6 @@ INSTANTIATE_TEST_SUITE_P(smoke, find_format_test, {{2, 3, 1, 0}, {}, true, false, false, false, false, format::yxio}, {{0, 1, 2, 3}, {{0, 16}}, true, false, false, false, false, format::os_iyx_osv16}, {{0, 1, 2, 3}, {}, true, false, false, true, false, format::winograd_2x3_s1_weights}, - {{0, 1, 3, 2}, {}, true, false, false, false, false, format::lstm_weights_dio}, {{0, 1, 2, 3}, {{1, 8}, {0, 8}, {1, 4}}, true, false, false, false, false, format::os_is_yx_isa8_osv8_isv4}, {{0, 1, 2, 3, 4}, {}, true, true, false, false, false, format::goiyx}, {{0, 2, 1, 3, 4}, {{1, 16}, {0, 16}}, true, true, false, false, false, format::g_is_os_yx_isv16_osv16},
diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 46d830d3e2cda2..0be5445aa29584 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -118,52 +118,6 @@ TEST(prepare_buffer_fusing, static_node_after_optimized_out_dyn_reshape) { ASSERT_EQ(out_mem->get_layout().get_partial_shape(), expected_shape); } -TEST(prepare_buffer_fusing, propagate_data_padding) { - auto& engine = get_test_engine(); - - auto in_layout = layout{ ov::PartialShape{1, 4, 3, 3}, data_types::f32, format::bfyx }; - - std::vector<std::pair<primitive_id, tensor>> offsets; - std::vector<input_info> inputs; - for (int i = 0; i < 2; i++) { - auto id = "crop_" + std::to_string(i); - inputs.push_back(input_info("split:" + id)); - offsets.push_back({ id, {0, (i * 2), 0, 0} }); - } - - topology topology; - topology.add(input_layout("input", in_layout)); - topology.add(split("split", input_info("input"), offsets)); - topology.add(reorder("crop_0_reorder", inputs[0], format::bfzyx, data_types::f32)); - topology.add(reorder("crop_1_reorder", inputs[1], format::bfzyx, data_types::f32)); - topology.add(concatenation("concat", {input_info("crop_0_reorder"), input_info("crop_1_reorder")}, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::f32)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - - cldnn::network net(engine, topology, config); - - auto in_mem = engine.allocate_memory(in_layout); - tests::set_random_values<float>(in_mem); - - net.set_input_data("input", in_mem); - std::map<primitive_id, network_output> output; - ASSERT_NO_THROW(output = net.execute()); - - auto out_mem = output.at("output").get_memory(); - - ASSERT_NE(out_mem, nullptr); - cldnn::mem_lock<float> output_ptr(out_mem, get_test_stream()); - cldnn::mem_lock<float> input_ptr(in_mem, get_test_stream()); - - ASSERT_EQ(input_ptr.size(), output_ptr.size()); - for (size_t i = 0; i < input_ptr.size(); ++i) - { -
ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - TEST(prepare_buffer_fusing, in_place_concat_static) { auto& engine = get_test_engine(); auto in_layout1 = layout{ ov::PartialShape{1, 2, 3, 4}, data_types::f32, format::bfyx }; // => {1, 4, 3, 2} @@ -886,6 +840,79 @@ TEST(prepare_buffer_fusing, test_checking_padding_supported) { ASSERT_EQ(concat.can_be_optimized(), false); } +TEST(prepare_buffer_fusing, skip_in_place_concat_padding_in_non_concat_axis_of_dynamic) { + tests::random_generator rg(GET_SUITE_NAME); + auto& engine = get_test_engine(); + auto in_layout = layout{ ov::PartialShape{ov::Dimension::dynamic(), 3, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f16, format::bfyx}; + + auto begin = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + auto end = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + auto strides = engine.allocate_memory({ ov::PartialShape{4}, data_types::i64, format::bfyx }); + set_values<int64_t>(begin, {0, 0, 0, 0}); + set_values<int64_t>(end, {0, 0, 0, 9223372036854775807 }); + set_values<int64_t>(strides, {1, 1, 1, 2}); + + auto concat_padding = padding({0,0,1,1}, {0,0,1,1}); + + + auto in_static_layout = layout{ ov::PartialShape{1, 3, 320, 640}, data_types::f16, format::bfyx}; + auto input1_mem = engine.allocate_memory(in_static_layout); + auto input2_mem = engine.allocate_memory(in_static_layout); + auto input3_mem = engine.allocate_memory(in_static_layout); + auto input4_mem = engine.allocate_memory(in_static_layout); + + auto in1 = rg.generate_random_1d<ov::float16>(input1_mem->count(), 0, 1); + auto in2 = rg.generate_random_1d<ov::float16>(input2_mem->count(), 0, 1); + auto in3 = rg.generate_random_1d<ov::float16>(input3_mem->count(), 0, 1); + auto in4 = rg.generate_random_1d<ov::float16>(input4_mem->count(), 0, 1); + + set_values(input1_mem, in1); + set_values(input2_mem, in2); + set_values(input3_mem, in3); + set_values(input4_mem, in4); + + topology topology( + input_layout("input1", in_layout), + input_layout("input2", in_layout), + input_layout("input3", in_layout), + input_layout("input4", in_layout), + data("begin", begin), + data("end", end), + data("strides", strides), + strided_slice("strided_slice1", input_info("input1"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice2", input_info("input2"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice3", input_info("input3"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + strided_slice("strided_slice4", input_info("input4"), input_info("begin"), + input_info("end"), input_info("strides"), {1, 1, 1, 0}, {1, 1, 1, 0}, {}, {}, {}, {}), + concatenation("concat", {input_info("strided_slice1"), input_info("strided_slice2"), input_info("strided_slice3"), input_info("strided_slice4")}, 1, concat_padding), + reorder("reorder", input_info("concat"), format::fs_b_yx_fsv32, data_types::f16)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::optimize_data(true)); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + + auto program = program::build_program(engine, topology, config, false, true); + program_wrapper::apply_opt_pass<prepare_buffer_fusing>(*program); + ASSERT_NE(program, nullptr); + + auto& concat = program->get_node("concat"); + ASSERT_EQ(concat.can_be_optimized(), false); + + network network(engine, topology, config); + network.set_input_data("input1", input1_mem); + network.set_input_data("input2", input2_mem); + network.set_input_data("input3", input3_mem); + network.set_input_data("input4", input4_mem); + auto outputs = network.execute(); + + const auto& concat_inst = network.get_primitive("concat"); + ASSERT_EQ(concat_inst->can_be_optimized(), false); +} + #ifdef ENABLE_ONEDNN_FOR_GPU TEST(prepare_buffer_fusing, in_place_onednn_concat_static) { auto& engine = get_test_engine();
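The new test above verifies that in-place concat optimization is rejected when the concatenation output carries padding on axes other than the concat axis. A simplified sketch of the kind of predicate the pass applies before turning a concat into a no-copy view (names are illustrative; the real checks in prepare_buffer_fusing.cpp cover more conditions):

#include <cstdint>
#include <vector>

struct pad4 { int64_t b, f, y, x; };

// Sketch: in-place concat needs every non-concat axis to be unpadded,
// otherwise the inputs cannot alias one contiguous output buffer.
bool can_concat_in_place(const std::vector<pad4>& pads, int concat_axis) {
    for (const auto& p : pads) {
        const int64_t v[4] = {p.b, p.f, p.y, p.x};
        for (int axis = 0; axis < 4; ++axis)
            if (axis != concat_axis && v[axis] != 0)
                return false;  // padding on a non-concat axis breaks the contiguous view
    }
    return true;
}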
diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp index 26162165ee0b2a..a641305b4329af 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/matmul_si_test.cpp @@ -110,7 +110,7 @@ INSTANTIATE_TEST_SUITE_P(smoke, fully_connected_test, { layout{ov::PartialShape{10, 1024}, data_types::f32, format::bfyx}, layout{ov::PartialShape{1000, 1024}, data_types::f32, format::bfyx}, - data_types::i32, false, false, + data_types::f32, false, false, layout{ov::PartialShape{10, 1000}, data_types::f32, format::bfyx} }, {
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 414ab37f11295e..2430ad6f995ca3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -8874,8 +8874,6 @@ class convolution_test : public tests::generic_test { } static std::vector> generate_specific_test_params() { - // TODO: check split - // TODO: check convolution without bias const primitive_id& weights = "input1";
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp index 5da00e8fb739f3..8106b1e05d1a86 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp @@ -3176,6 +3176,98 @@ TEST(eltwise_gpu_f32, broadcast_test_in4x4x2x2) { } } +TEST(eltwise_gpu_f32, broadcast_test_dim3_dim4) { + auto& engine = get_test_engine(); + + ov::Shape in2_shape = {1, 1, 4, 1}; + auto input2 = engine.allocate_memory({ ov::PartialShape(in2_shape), data_types::f32, format::bfyx }); + + std::vector<float> const_input = { + 1.f, 0.f, 5.f, 1.5f, + 2.f, 0.f, 6.f, 5.2f, + 3.f, 0.5f, 7.f, 12.f, + 4.f, -0.5f, 8.f, 8.f + }; + + set_values(input2, { + 0.5f, 2.5f, 0.5f, 2.5f + }); + + float answers[16] = { + 1.5, 0.5, 7.5, 4, + 2.5, 0.5, 8.5, 7.7, + 3.5, 1, 9.5, 14.5, + 4.5, 0, 10.5, 10.5 + }; + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + + // in1:dim3, in2:dim4 + { + ov::Shape in1_shape = {2, 4, 2}; + + auto input = engine.allocate_memory({ ov::PartialShape(in1_shape), data_types::f32, format::bfyx }); + set_values(input, const_input); + + topology topology; + topology.add(input_layout("input", input->get_layout())); + topology.add(input_layout("input2", input2->get_layout())); + topology.add(eltwise("eltwise", { input_info("input"), input_info("input2") }, eltwise_mode::sum)); + + network network(engine, topology, config); + + network.set_input_data("input", input); + network.set_input_data("input2", input2); + auto outputs = network.execute(); + + ASSERT_EQ(outputs.size(), size_t(1)); + ASSERT_EQ(outputs.begin()->first, "eltwise"); + + auto output = outputs.at("eltwise").get_memory(); + + cldnn::mem_lock<float> output_ptr(output, get_test_stream()); + + for (int i = 0; i < 16; i++) + { + ASSERT_TRUE(are_equal(answers[i], output_ptr[i])); + } + } + + // in1:extended_dim4_from_dim3, in2:dim4 + // in1_shape = {2, 4, 2} is extended to {1, 2, 4, 2} internally when allow_new_shape_infer is true. + // So an explicit 4d input shape {1, 2, 4, 2} should give the same result as input {2, 4, 2} + { + ov::Shape in1_shape = {1, 2, 4, 2}; + + auto input = engine.allocate_memory({ ov::PartialShape(in1_shape), data_types::f32, format::bfyx }); + set_values(input, const_input); + + topology topology; + topology.add(input_layout("input", input->get_layout())); + topology.add(input_layout("input2", input2->get_layout())); + topology.add(eltwise("eltwise", { input_info("input"), input_info("input2") }, eltwise_mode::sum)); + + network network(engine, topology, config); + + network.set_input_data("input", input); + network.set_input_data("input2", input2); + auto outputs = network.execute(); + + ASSERT_EQ(outputs.size(), size_t(1)); + ASSERT_EQ(outputs.begin()->first, "eltwise"); + + auto output = outputs.at("eltwise").get_memory(); + + cldnn::mem_lock<float> output_ptr(output, get_test_stream()); + + for (int i = 0; i < 16; i++) + { + ASSERT_TRUE(are_equal(answers[i], output_ptr[i])); + } + } +} + TEST(eltwise_gpu_f16, fs_b_yx_fsv32_basic) { // Inputs are 2x2x2x2
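For reference, the expected values in the test above follow numpy-style broadcasting: in2 has shape {1, 1, 4, 1}, so each of its four values is reused across the batch/feature axes and the last axis of the rank-extended {1, 2, 4, 2} input. A runnable sketch checking the first four expected answers by hand (values copied from the test; nothing here is from the PR itself):

#include <cstdio>

int main() {
    const float in1[4]  = {1.f, 0.f, 5.f, 1.5f};    // first 4 elements of const_input
    const float in2[4]  = {0.5f, 0.5f, 2.5f, 2.5f}; // {0.5, 2.5} broadcast over the x axis
    const float want[4] = {1.5f, 0.5f, 7.5f, 4.f};  // first 4 expected answers
    for (int i = 0; i < 4; ++i)
        std::printf("%g + %g = %g (expected %g)\n", in1[i], in2[i], in1[i] + in2[i], want[i]);
    return 0;
}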
outputs.at("eltwise").get_memory(); + + cldnn::mem_lock output_ptr(output, get_test_stream()); + + for (int i = 0; i < 16; i++) + { + ASSERT_TRUE(are_equal(answers[i], output_ptr[i])); + } + } + + // in1:extended_dim4_from_dim3, int2:dim4 + // in1_shape = {2, 4, 2} is extended to {1, 2, 4, 2} internally in case allow_new_shape_infer true. + // So explicit 4d input shpae {1, 2, 4, 2} should have same result from input{2, 4, 2} + { + ov::Shape in1_shape = {1, 2, 4, 2}; + + auto input = engine.allocate_memory({ ov::PartialShape(in1_shape), data_types::f32, format::bfyx }); + set_values(input, const_input); + + topology topology; + topology.add(input_layout("input", input->get_layout())); + topology.add(input_layout("input2", input2->get_layout())); + topology.add(eltwise("eltwise", { input_info("input"), input_info("input2") }, eltwise_mode::sum)); + + network network(engine, topology, config); + + network.set_input_data("input", input); + network.set_input_data("input2", input2); + auto outputs = network.execute(); + + ASSERT_EQ(outputs.size(), size_t(1)); + ASSERT_EQ(outputs.begin()->first, "eltwise"); + + auto output = outputs.at("eltwise").get_memory(); + + cldnn::mem_lock output_ptr(output, get_test_stream()); + + for (int i = 0; i < 16; i++) + { + ASSERT_TRUE(are_equal(answers[i], output_ptr[i])); + } + } +} + TEST(eltwise_gpu_f16, fs_b_yx_fsv32_basic) { // Inputs are 2x2x2x2 diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index 5ac5dc06f1ed6f..945d52a2f57ec0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -1212,7 +1212,7 @@ class fully_connected_gpu_tests: public ::testing::Test { input_layout("input", input_mem->get_layout()), data("weights", weights_mem), data("scale", scale_mem), - fully_connected("fc_prim", input_info("input"), "weights", "", "scale", "", data_types::f32, padding(), 2, 2) + fully_connected("fc_prim", input_info("input"), "weights", "", "scale", "", data_types::f16, padding(), 2, 2) ); auto config = get_test_default_config(engine); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp deleted file mode 100644 index a6a180a1ce7788..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp +++ /dev/null @@ -1,1005 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef _MSC_VER -#pragma warning( disable : 4503 ) -#endif - -#define MEASURE_PERF false -#define MEASURE_LOOP 50 -using namespace cldnn; -using namespace ::tests; - -namespace { - float sigmoid(float x) { - return 1.f / (1.f + (float)std::exp((float)(-x))); - } -} - -struct offset_order_dynamic { - size_t it, ot, ft, zt; - offset_order_dynamic(size_t scale, const lstm_weights_order& t = lstm_weights_order::fizo) { - static const std::map> offset_map{ - { lstm_weights_order::fizo, { 1, 3, 0, 2 } }, - }; - std::vector v = offset_map.at(t); - it = v[0] * scale; - ot = v[1] * scale; - ft = v[2] * scale; - zt = v[3] * scale; - } -}; -lstm_weights_order default_offset_type_dynamic = lstm_weights_order::fizo; - -namespace dynamic_lstm 
-{ - template - T clip(T val, T threshold) { - if (threshold > 0) { - if (val > threshold) return threshold; - if (val < -threshold) return -threshold; - } - return val; - } - -template -VVVVF lstm_dynamic_input_ref(VVVVF& input, VVVVF& weights, VVVVF& bias, - VF dynamic_lengths, size_t seq, bool hasBias, size_t dir) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = weights[0][0].size() / 4; - size_t batch_size = input.size(); - - VVVVFoutput(batch_size, VVVF(seq, VVF(dir, VF(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) - { - for (size_t l = 0; l < seq; ++l) - { - if (l > static_cast(dynamic_lengths[b])) - break; - for (size_t d = 0; d < dir; ++d) - { - for (size_t y = 0; y < 4 * hidden_size; ++y) - { - T res = 0; - for (size_t x = 0; x < input_size; ++x) - { - res += (T)weights[0][d][y][x] * (T)input[b][l][d][x]; - } - if (hasBias) - { - res += (T)bias[0][0][d][y]; - } - output[b][l][d][y] = res; - } - } - } - } - return output; -} - - template - VVVVF lstm_gemm_reference(VVVVF& input, VVVVF& weights, VVVVF& recurrent, VVVVF& bias, VVVVF& hidden, - size_t seq, bool hasBias = true, bool hasHidden = true, size_t dir = 0, size_t input_dir = 0) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = hidden[0][0][0].size(); - size_t batch_size = input.size(); - - // Temporary output from GEMM operations [f, i, o, z] - VVVVF tempGEMM(batch_size, VVVF(1, VVF(1, VF(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) { - for (size_t y = 0; y < 4 * hidden_size; ++y) { - T res = 0; - for (size_t x = 0; x < input_size; ++x) { - res += (T)weights[0][dir][y][x] * (T)input[b][seq][input_dir][x]; - } - if (hasHidden) { - for (size_t x = 0; x < hidden_size; ++x) { - auto rec_v = (T)recurrent[0][dir][y][x]; - auto hid_v = (T)hidden[b][0][dir][x]; - auto temp = rec_v * hid_v; - res += temp; - } - } - if (hasBias) { - res += (T)bias[0][0][dir][y]; - } - tempGEMM[b][0][0][y] = res; - } - } - return tempGEMM; - } - - template - VVVVF lstm_elt_reference(VVVVF& tempGEMM, VVVVF& cell, - bool hasCell = true, float clip_threshold = 0, - bool input_forget = false, size_t dir = 0) - { - size_t hidden_size = tempGEMM[0][0][0].size() / 4; - size_t batch_size = tempGEMM.size(); - VVVVF tempOut(batch_size, VVVF(2, VVF(1, VF(hidden_size)))); - offset_order_dynamic off(hidden_size, default_offset_type_dynamic); - - for (size_t b = 0; b < batch_size; ++b) { - T *it = &tempGEMM[b][0][0][off.it]; - T *ot = &tempGEMM[b][0][0][off.ot]; - T *ft = &tempGEMM[b][0][0][off.ft]; - T *zt = &tempGEMM[b][0][0][off.zt]; - - for (size_t h = 0; h < hidden_size; ++h) { - - // Convert all inputs to float for all the elementwise operations. This is done to immitate - // how lstm kernel is performing the elementwise operations. - float fp32_it = (float)it[h]; - float fp32_ot = (float)ot[h]; - float fp32_ft = (float)ft[h]; - float fp32_zt = (float)zt[h]; - float val = sigmoid(clip(fp32_it, clip_threshold)) * std::tanh(clip(fp32_zt, clip_threshold)); - - if (input_forget) { - val *= (1 - fp32_ft); - } - if (hasCell) { - val += (float)cell[b][0][dir][h] * sigmoid(clip(fp32_ft, clip_threshold)); - } - - // Convert back to output data type before storing it into the output buffer. 
Currently, the output - // data type may be float or ov::float16 (half) - tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); - tempOut[b][1][0][h] = (T)val; - } - } - return tempOut; - } - - template - void lstm_dynamic_reference(VVVVF& input, VVVVF& hidden, VVVVF& cell, - VVVVF& weights, VVVVF& recurrent, VVVVF& bias, - VVVVF& output_hidden, VVVVF& output_cell, - bool hasBias = true, bool hasInitialHidden = true, bool hasInitialCell = true, - float clip_threshold = 0, bool input_forget = false) - { - size_t sequence_len = input[0].size(); - size_t dir_len = weights[0].size(); - size_t batch = input.size(); - for (size_t dir = 0; dir < dir_len; ++dir) { - bool tempHasInitialHidden = hasInitialHidden; - bool tempHasInitialCell = hasInitialCell; - for (size_t seq = 0; seq < sequence_len; ++seq) { - size_t seq_id = seq; - size_t input_direction = dir; - VVVVF tempGEMM = lstm_gemm_reference(input, weights, recurrent, bias, hidden, seq_id, hasBias, tempHasInitialHidden, dir, input_direction); - VVVVF tempOutput = lstm_elt_reference(tempGEMM, cell, tempHasInitialCell, clip_threshold, input_forget, dir); - // tempOutput[batch][0] = hidden and tempOutput[batch][1] = cell - for (size_t i = 0; i < batch; i++) { - output_hidden[i][seq][dir] = tempOutput[i][0][0]; - output_cell[i][seq][dir] = tempOutput[i][1][0]; - hidden[i][0][dir] = tempOutput[i][0][0]; - cell[i][0][dir] = tempOutput[i][1][0]; - } - tempHasInitialHidden = true; - tempHasInitialCell = true; - } - } - } -} -template -struct lstm_dynamic_input_layer_test : public ::testing::Test -{ - tests::random_generator rg; - - void SetUp() override { - rg.set_seed(GET_SUITE_NAME); - } - - void input_single_layer_generic_test(int32_t direction, int32_t batch_size, int32_t max_sequence_len, int32_t input_size, int32_t hidden_size, std::vector dynamic_lengths, - bool has_bias = false) - { - auto min_random = -2, max_random = 2; - VVVVF ref_input = rg.generate_random_4d(batch_size, max_sequence_len, direction, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - - auto& engine = get_test_engine(); - VF ref_dynamic_length; - for (auto& v : dynamic_lengths) - ref_dynamic_length.push_back((T)v); - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, direction } }); - set_values(input_mem, ref_input_vec); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - set_values(weights_mem, ref_weights_vec); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - set_values(dynamic_length_mem, ref_dynamic_length); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - set_values(bias_mem, ref_bias_vec); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - - std::string bias_id = ""; - if (has_bias) { - bias_id = "bias"; - topology.add(data(bias_id, bias_mem)); - } - - topology.add(lstm_dynamic_input("dynamic_lstm_input", - input_info("input"), - "dyn_len", - "weights", - bias_id)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - -#if MEASURE_PERF == true - using clock = std::chrono::high_resolution_clock; - std::vector times(MEASURE_LOOP); - for (uint32_t i = 0; i < MEASURE_LOOP; i++) - { - auto t0 = clock::now(); - network.set_input_data("input", input_mem); - network.set_input_data("dynamic_lstm_input", dynamic_length_mem); - auto real_outs = network.execute(); - real_outs.at("dynamic_lstm_input").get_event().wait(); - auto t1 = clock::now(); - auto exec_time = t1 - t0; - times[i] = exec_time; - } - std::sort(times.begin(), times.end()); - std::nth_element(times.begin(), times.begin() + times.size() / 2, times.end()); - std::cout << "Perf: " << std::chrono::duration_cast(times[times.size() / 2]).count() << " micros. 
" << std::endl; -#else - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - - auto outputs = network.execute(); - auto out = outputs.at("dynamic_lstm_input"); - auto out_layout = out.get_memory()->get_layout(); - cldnn::mem_lock out_ptr(out.get_memory(), get_test_stream()); - - - auto output_ref = dynamic_lstm::lstm_dynamic_input_ref(ref_input, ref_weights, ref_bias, dynamic_lengths, max_sequence_len, has_bias, direction); - - size_t i = 0; - for (auto b = 0; b < out_layout.batch(); b++) - { - for (auto len = 0; len < max_sequence_len; len++) - { - for (auto dir = 0; dir < direction; dir++) - { - for (auto x = 0; x < out_layout.spatial(0); x++) - { - ASSERT_NEAR(output_ref[b][len][dir][x], (float)out_ptr[i++], 1e-3f) - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - } - } - } -#endif - } -}; - -template -struct lstm_dynamic_single_layer_test : public ::testing::Test -{ - tests::random_generator rg; - - void SetUp() override { - rg.set_seed(GET_SUITE_NAME); - } - - void single_layer_generic_test(int32_t direction, int32_t batch_size, int32_t max_sequence_len, int32_t input_size, int32_t hidden_size, std::vector dynamic_lengths, - bool has_bias = false, bool has_initial_hidden = false, bool has_initial_cell = false, bool has_last_hidden_state = false, bool has_last_cell_state = false, float epsilon = 1e-3f) - { - float clip_threshold = 0; - bool input_forget = false; - - auto min_random = 0, max_random = 2; - VVVVF ref_input = rg.generate_random_4d(batch_size, max_sequence_len, direction, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random); - VVVVF ref_output_hidden = VVVVF(batch_size, VVVF(max_sequence_len, VVF(direction, VF(hidden_size)))); - VVVVF ref_output_cell = VVVVF(batch_size, VVVF(max_sequence_len, VVF(direction, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - auto& engine = get_test_engine(); - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - VF ref_dynamic_length; - for (auto& v : dynamic_lengths) - ref_dynamic_length.push_back((T)v); - - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, direction } }); - set_values(input_mem, ref_input_vec); - - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - set_values(weights_mem, ref_weights_vec); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - set_values(recurrent_mem, ref_recurrent_vec); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - set_values(dynamic_length_mem, ref_dynamic_length); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - set_values(bias_mem, ref_bias_vec); - auto initial_hidden_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - set_values(initial_hidden_mem, ref_hidden_vec); - auto initial_cell_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - set_values(initial_cell_mem, ref_cell_vec); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - - std::string bias_id = ""; - if (has_bias) - { - bias_id = "bias"; - topology.add(data(bias_id, bias_mem)); - } - - std::string initial_hidden_id = ""; - if (has_initial_hidden) - { - initial_hidden_id = "initial_hidden"; - topology.add(data(initial_hidden_id, initial_hidden_mem)); - } - - std::string initial_cell_id = ""; - if (has_initial_cell) - { - initial_cell_id = "initial_cell"; - topology.add(data(initial_cell_id, initial_cell_mem)); - } - - std::string last_hidden_state = ""; - auto last_hidden_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - last_hidden_mem->fill(get_test_stream()); - get_test_stream().finish(); - if (has_last_hidden_state) - { - last_hidden_state = "last_hidden_state"; - topology.add(mutable_data(last_hidden_state, last_hidden_mem)); - } - - std::string last_cell_state = ""; - auto last_cell_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, 1, hidden_size, direction } }); - last_cell_mem->fill(get_test_stream()); - get_test_stream().finish(); - if (has_last_cell_state) - { - last_cell_state = "last_cell_state"; - topology.add(mutable_data(last_cell_state, last_cell_mem)); - } - - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent", - last_hidden_state, - last_cell_state, - bias_id, - initial_hidden_id, - initial_cell_id)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - -#if MEASURE_PERF == true - using clock = std::chrono::high_resolution_clock; - std::vector times(MEASURE_LOOP); - for (uint32_t i = 0; i < MEASURE_LOOP; i++) - { - auto t0 = clock::now(); - network.set_input_data("input", input_mem); - network.set_input_data("dyn_len", dynamic_length_mem); - auto real_outs = network.execute(); - real_outs.at("dynamic_lstm").get_event().wait(); - auto t1 = 
clock::now(); - auto exec_time = t1 - t0; - times[i] = exec_time; - } - std::sort(times.begin(), times.end()); - std::nth_element(times.begin(), times.begin() + times.size() / 2, times.end()); - std::cout << "Perf: " << std::chrono::duration_cast(times[times.size() / 2]).count() << " micros. " << std::endl; -#else - dynamic_lstm::lstm_dynamic_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output_hidden, - ref_output_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget); - auto real_outs = network.execute(); - auto out = real_outs.at("dynamic_lstm"); - auto out_layout = out.get_memory()->get_layout(); - - cldnn::mem_lock out_ptr(out.get_memory(), get_test_stream()); - cldnn::mem_lock last_hidden_ptr(last_hidden_mem, get_test_stream()); - cldnn::mem_lock last_cell_ptr(last_cell_mem, get_test_stream()); - size_t i = 0, i_lh = 0, i_lc = 0; - for (auto b = 0; b < out_layout.batch(); b++) - { - for (auto len = 0; len < max_sequence_len; len++) - { - for (auto dir = 0; dir < direction; dir++) - { - for (auto x = 0; x < out_layout.spatial(0); x++) - { - //check hidden - if (len < dynamic_lengths[b]) - { - ASSERT_NEAR((float)ref_output_hidden[b][len][dir][x], (float)out_ptr[i++], epsilon) - << "check hidden, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - else - { - ASSERT_NEAR(0.0f, (float)out_ptr[i++], epsilon) - << "check hidden, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - - //check optional last hidden state output - if(has_last_hidden_state && len == dynamic_lengths[b] - 1) - { - auto ratio = (float)ref_output_hidden[b][len][dir][x] / (float)last_hidden_ptr[i_lh++]; - ASSERT_TRUE(std::abs(1.0f - ratio) < 0.01f) - << "check has_last_hidden_state with ratio: " << ratio << ", " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - - } - else if (has_last_hidden_state && len == 0 && dynamic_lengths[b] == 0) - { - ASSERT_NEAR(0.0f, (float)last_hidden_ptr[i_lh++], epsilon) - << "check has_last_hidden_state, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - - //check optional last cell state output - if(has_last_cell_state && len == dynamic_lengths[b] - 1) - { - auto ratio = (float)ref_output_cell[b][len][dir][x] / (float)last_cell_ptr[i_lc++]; - ASSERT_TRUE(std::abs(1.0f - ratio) < 0.01f) - << "check has_last_cell_state with ratio: " << ratio << ", " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - else if (has_last_cell_state && len == 0 && dynamic_lengths[b] == 0) - { - ASSERT_NEAR(0.0f, (float)last_cell_ptr[i_lc++], epsilon) - << "check has_last_cell_state, " - << "b:" << b << ", " - << "len:" << len << ", " - << "dir:" << dir << ", " - << "x:" << x << ", " - << std::endl; - } - } - } - } - } -#endif - } - -}; -typedef ::testing::Types lstm_dynamic_test_types; -TYPED_TEST_SUITE(lstm_dynamic_single_layer_test, lstm_dynamic_test_types); -TYPED_TEST_SUITE(lstm_dynamic_input_layer_test, lstm_dynamic_test_types); - -/* ----------------------------------------------- - DYNAMIC_LSTM INPUT TEST ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b1_seq3_is3_hs2) -{ - auto dir = 1, batch_size = 1, max_seq_len = 5, 
input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 3 }; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b3_seq5_is3_hs2) -{ - auto dir = 1, batch_size = 3, max_seq_len = 5, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 3, 4, 2 }; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, b10_seq20_is16_hs64) -{ - auto dir = 1, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_b8_seq10_is4_hs16) -{ - auto batch_size = 8, max_seq_len = 10, input_size = 4, hidden_size = 16; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 7, 8}; - auto dir = 1; - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir2_b8_seq10_is4_hs16_options) -{ - auto batch_size = 8, max_seq_len = 10, input_size = 4, hidden_size = 16; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 7, 8 }; - auto dir = 2; - std::vector bias_options = { true, false }; - for (auto bias : bias_options) - { - this->input_single_layer_generic_test(dir, batch_size, max_seq_len, input_size, hidden_size, dynamic_lengths, bias); - } -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_1b1_seq1_is32_hs_128) -{ - auto dir = 1, batch = 1, max_seq_len = 1, input_size = 32, hidden_size = 128; - std::vector dynamic_lengths = - { - 1 - }; - bool bias = true; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir_b8_seq27_is16_hs_56) -{ - auto dir = 1, batch = 8, max_seq_len = 27, input_size = 16, hidden_size = 56; - std::vector dynamic_lengths = - { - 20, 25, 24, 10, 15, 8, 19, 26 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, false); -} - - -/* ----------------------------------------------- - FULL DYNAMIC_LSTM TESTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq1_is3_hs2) -{ - auto dir = 1, batch = 1, max_seq_len = 1, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq3_is3_hs2_options) -{ - auto dir = 1, batch = 1, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq10_is10_hs32) -{ - auto dir = 1, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - 
this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b1_seq10_is10_hs32_options) -{ - auto dir = 1, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b4_seq1_is3_hs2) -{ - auto dir = 1, batch = 2, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1, 2 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b4_seq3_is3_hs2_options) -{ - auto dir = 1, batch = 4, max_seq_len = 3, input_size = 3, hidden_size = 2; - std::vector dynamic_lengths = { 1, 2, 2, 0 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, b10_seq20_is16_hs64) -{ - auto dir = 1, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -// DISABLED beacuse it is veeery long -TYPED_TEST(lstm_dynamic_single_layer_test, DISABLED_b16_seq20_is32_hs32_options) -{ - auto dir = 1, batch = 16, max_seq_len = 20, input_size = 32, hidden_size = 32; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 19, 18 - }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - std::vector last_hidden_state = { true, false }; - std::vector last_cell_state = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - for (auto l_h_s : last_hidden_state) - { - for (auto l_c_s : last_cell_state) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c, l_h_s, l_c_s, 1e-2f); - } - } - } - } - } -} - -/* ----------------------------------------------- - BIDIRECTIONAL TESTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b2_seq7_is3_hs4) -{ - auto dir = 2, batch = 2, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 3, 5 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, dlstm_input_dir_b1_seq1_is32_hs_512) -{ - auto dir = 2, batch = 1, max_seq_len = 1, input_size = 8, hidden_size = 128; - std::vector dynamic_lengths = - { - 1 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_input_layer_test, 
dlstm_input_dir_b8_seq5_is32_hs_512) -{ - auto dir = 2, batch = 8, max_seq_len = 5, input_size = 8, hidden_size = 128; - std::vector dynamic_lengths = - { - 3, 4, 5, 1, 3, 2, 2, 3 - }; - this->input_single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, true); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b10_seq7_is3_hs4) -{ - auto dir = 2, batch = 10, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 1, 2, 3, 4, 5, 6, 5, 4, 3, 2}; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b2_seq7_is3_hs4_options) -{ - auto dir = 2, batch = 2, max_seq_len = 7, input_size = 3, hidden_size = 4; - std::vector dynamic_lengths = { 3, 5 }; - std::vector bias_options = { false, true }; - std::vector init_hidden = { false, true }; - std::vector init_cell = { false, true}; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b1_seq10_is10_hs32) -{ - auto dir = 2, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b1_seq10_is10_hs32_options) -{ - auto dir = 2, batch = 1, max_seq_len = 10, input_size = 10, hidden_size = 32; - std::vector dynamic_lengths = { 8 }; - std::vector bias_options = { true, false }; - std::vector init_hidden = { true, false }; - std::vector init_cell = { true, false }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c, false, false, 1e-2f); - } - } - } -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b10_seq20_is16_hs64) -{ - auto dir = 2, batch = 10, max_seq_len = 20, input_size = 16, hidden_size = 64; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, - }; - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths); -} - -TYPED_TEST(lstm_dynamic_single_layer_test, bidir_b16_seq20_is4_hs8_options) -{ - auto dir = 2, batch = 16, max_seq_len = 20, input_size = 4, hidden_size = 8; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 14, 18 - }; - std::vector bias_options = { false, true }; - std::vector init_hidden = { false, true }; - std::vector init_cell = { false, true }; - for (auto bias : bias_options) - { - for (auto i_h : init_hidden) - { - for (auto i_c : init_cell) - { - this->single_layer_generic_test(dir, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, bias, i_h, i_c); - } - } - } -} - -/* ----------------------------------------------- - OPTIONAL OUTPUTS ----------------------------------------------- -*/ - -TYPED_TEST(lstm_dynamic_single_layer_test, b16_seq20_is4_hs8_dirs_optional_outputs) -{ - auto batch = 16, max_seq_len = 20, input_size = 4, hidden_size = 8; - std::vector dynamic_lengths = - { - 5, 10, 12, 11, 5, 6, 7, 8, 9, 15, 0, 0, 0, 0, 14, 18 - }; - this->single_layer_generic_test(1, batch, max_seq_len, input_size, hidden_size, dynamic_lengths, false, false, 
false, true, true, 1e-3f); -} - -/* ----------------------------------------------- - NEGATIVE TESTS ----------------------------------------------- -*/ - -TEST(lstm_dynamic_negative, wrong_weights_size) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx, { batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, wrong_value } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_recurrent_size_0) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, wrong_value, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_recurrent_size_1) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, wrong_value, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, batch_size, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - 
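[Editor's note] These negative cases all follow one pattern: allocate the lstm_dynamic inputs with the expected bfyx extents, corrupt exactly one dimension, and expect network construction to throw. A sketch of the expected extents, read off the allocations in these tests (the struct and method names here are illustrative, not part of the suite):

```cpp
#include <array>

// Expected bfyx extents for lstm_dynamic inputs, as exercised by the negative
// tests; each case replaces one of these values with wrong_value.
struct lstm_dynamic_shapes {
    std::array<int, 4> input(int batch, int max_seq, int in_sz) const { return {batch, max_seq, in_sz, 1}; }
    std::array<int, 4> weights(int dir, int in_sz, int hid) const { return {1, dir, in_sz, 4 * hid}; }
    std::array<int, 4> recurrent(int dir, int hid) const { return {1, dir, hid, 4 * hid}; }
    std::array<int, 4> dyn_length(int batch) const { return {1, 1, batch, 1}; }
    std::array<int, 4> bias(int hid) const { return {1, 1, 4 * hid, 1}; }
};
```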
topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_dynamic_length_size_0) { - - auto batch_size = 1, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 50; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, wrong_value, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} - -TEST(lstm_dynamic_negative, wrong_dynamic_length_size_1) { - - auto batch_size = 50, max_sequence_len = 10, input_size = 16, hidden_size = 32, direction = 1; - auto wrong_value = 2; - auto& engine = get_test_engine(); - cldnn::data_types dt = cldnn::data_types::f32; - auto input_mem = engine.allocate_memory({ dt, format::bfyx,{ batch_size, max_sequence_len, input_size, 1 } }); - auto weights_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - auto recurrent_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - auto dynamic_length_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, wrong_value, 1 } }); - auto bias_mem = engine.allocate_memory({ dt, format::bfyx,{ 1, 1, 4 * hidden_size, 1 } }); - - topology topology; - topology.add(input_layout("input", input_mem->get_layout())); - topology.add(input_layout("dyn_len", dynamic_length_mem->get_layout())); - topology.add(data("weights", weights_mem)); - topology.add(data("recurrent", recurrent_mem)); - topology.add(lstm_dynamic("dynamic_lstm", - input_info("input"), - "dyn_len", - "weights", - "recurrent")); - ASSERT_ANY_THROW(network network(engine, topology, get_test_default_config(engine))); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp deleted file mode 100644 index 9d6cbc48aeddbc..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp +++ /dev/null @@ -1,2411 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#ifdef _MSC_VER -# pragma warning(disable: 4503) -#endif - -using namespace cldnn; -using namespace ::tests; 
- -#define FERROR 1E-4 - -namespace { -float sigmoid(float x) { - return 1.f / (1.f + (float)std::exp((float)(-x))); -} -struct offset_order { - size_t it, ot, ft, zt; - offset_order(size_t scale, const lstm_weights_order& t = lstm_weights_order::iofz) { - static const std::map<lstm_weights_order, std::vector<size_t>> offset_map{ - { lstm_weights_order::iofz,{ 0, 1, 2, 3 } }, - { lstm_weights_order::ifoz,{ 0, 2, 1, 3 } } - }; - std::vector<size_t> v = offset_map.at(t); - it = v[0] * scale; - ot = v[1] * scale; - ft = v[2] * scale; - zt = v[3] * scale; - } -}; -lstm_weights_order default_offset_type = lstm_weights_order::iofz; -template <typename T> -T clip(T val, T threshold) { - if (threshold > 0) { - if (val > threshold) return threshold; - if (val < -threshold) return -threshold; - } - return val; -} - -template <typename T> -VVVVF<T> lstm_gemm_reference(VVVVF<T>& input, VVVVF<T>& weights, VVVVF<T>& recurrent, VVVVF<T>& bias, VVVVF<T>& hidden, - size_t seq, bool hasBias = true, bool hasHidden = true, size_t dir = 0, size_t input_dir = 0) { - size_t input_size = input[0][0][0].size(); - size_t hidden_size = hidden[0][0][0].size(); - size_t batch_size = input.size(); - - // Temporary output from GEMM operations [i, o, f, z] - VVVVF<T> tempGEMM(batch_size, VVVF<T>(1, VVF<T>(1, VF<T>(4 * hidden_size)))); - for (size_t b = 0; b < batch_size; ++b) { - for (size_t y = 0; y < 4 * hidden_size; ++y) { - T res = 0; - for (size_t x = 0; x < input_size; ++x) { - res += (T)weights[0][dir][y][x] * (T)input[b][seq][input_dir][x]; - } - if (hasHidden) { - for (size_t x = 0; x < hidden_size; ++x) { - res += (T)recurrent[0][dir][y][x] * (T)hidden[b][0][dir][x]; - } - } - if (hasBias) { - res += (T)bias[0][0][dir][y]; - } - tempGEMM[b][0][0][y] = res; - } - } - return tempGEMM; -} - -template <typename T> -VVVVF<T> lstm_elt_reference(VVVVF<T>& tempGEMM, VVVVF<T>& cell, - bool hasCell = true, float clip_threshold = 0, - bool input_forget = false, size_t dir = 0) -{ - size_t hidden_size = tempGEMM[0][0][0].size() / 4; - size_t batch_size = tempGEMM.size(); - VVVVF<T> tempOut(batch_size, VVVF<T>(2, VVF<T>(1, VF<T>(hidden_size)))); - offset_order off(hidden_size, default_offset_type); - - for (size_t b = 0; b < batch_size; ++b) { - T *it = &tempGEMM[b][0][0][off.it]; - T *ot = &tempGEMM[b][0][0][off.ot]; - T *ft = &tempGEMM[b][0][0][off.ft]; - T *zt = &tempGEMM[b][0][0][off.zt]; - - for (size_t h = 0; h < hidden_size; ++h) { - - // Convert all inputs to float for all the elementwise operations. This is done to imitate - // how the lstm kernel performs the elementwise operations. - float fp32_it = (float)it[h]; - float fp32_ot = (float)ot[h]; - float fp32_ft = (float)ft[h]; - float fp32_zt = (float)zt[h]; - float val = sigmoid(clip(fp32_it, clip_threshold)) * std::tanh(clip(fp32_zt, clip_threshold)); - - if (input_forget) { - val *= (1 - fp32_ft); - } - if (hasCell) { - val += (float)cell[b][0][dir][h] * sigmoid(clip(fp32_ft, clip_threshold)); - } - - // Convert back to output data type before storing it into the output buffer. 
Currently, the output - // data type may be float or ov::float16 (half) - tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); - tempOut[b][1][0][h] = (T)val; - } - } - return tempOut; -} - -template -void print(const std::string& s, VVVVF& input) { - printf("%s -------------\n", s.c_str()); - printf("Size = [%d, %d, %d, %d]\n", (int)input.size(), (int)input[0].size(), (int)input[0][0].size(), (int)input[0][0][0].size()); - for (size_t b = 0; b < input.size(); ++b) { - for (size_t f = 0; f < input[0].size(); ++f) { - for (size_t y = 0; y < input[0][0].size(); ++y) { - for (size_t x = 0; x < input[0][0][0].size(); ++x) { - printf("%f ", input[b][f][y][x]); - } - printf("\n"); - } - } - } - printf("---------------------------------------\n"); -} - -// input = [ batch, sequence, direction, input_size ] -// weights = [ 1, direction, 4 * hidden_size, input_size ] -// recurrent = [ 1, direction, 4 * hidden_size, hidden_size ] -// biases = [ 1, 1, direction, 4 * hidden_size ] optional -// cell = [ batch, direction, 1, hidden_size ] optional -// hidden = [ batch, direction, 1, hidden_size ] optional -// tempGEMM = [ batch, 1, 1, 4 * hidden_size ] temporary output -// output = [ batch, sequence, direction, hidden_size ] output -template -void lstm_reference(VVVVF& input, VVVVF& hidden, VVVVF& cell, - VVVVF& weights, VVVVF& recurrent, VVVVF& bias, - VVVVF& output, VVVVF& last_hidden, - VVVVF& last_cell, bool hasBias = true, - bool hasInitialHidden = true, bool hasInitialCell = true, - float clip_threshold = 0, bool input_forget = false, - bool scramble_input = true) -{ - size_t sequence_len = input[0].size(); - size_t dir_len = weights[0].size(); - size_t batch = input.size(); - size_t input_directions = input[0][0].size(); - for (size_t dir = 0; dir < dir_len; ++dir) { - bool tempHasInitialHidden = hasInitialHidden; - bool tempHasInitialCell = hasInitialCell; - for (size_t seq = 0; seq < sequence_len; ++seq) { - size_t seq_id = seq; - size_t input_direction = dir; - if (scramble_input) { - if (dir > 0) { - seq_id = input_directions == 1 ? 
sequence_len - seq - 1 : seq; - input_direction = input_directions - 1; - } - } - VVVVF tempGEMM = lstm_gemm_reference(input, weights, recurrent, bias, hidden, seq_id, hasBias, tempHasInitialHidden, dir, input_direction); - VVVVF tempOutput = lstm_elt_reference(tempGEMM, cell, tempHasInitialCell, clip_threshold, input_forget, dir); - // tempOutput[batch][0] = hidden and tempOutput[batch][1] = cell - for (size_t i = 0; i < batch; i++) { - output[i][seq][dir] = tempOutput[i][0][0]; - hidden[i][0][dir] = tempOutput[i][0][0]; - cell[i][0][dir] = tempOutput[i][1][0]; - } - tempHasInitialHidden = true; - tempHasInitialCell = true; - } - } - last_hidden = hidden; - last_cell = cell; -} - -template -void generic_lstm_gemm_gpu_test(int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasHidden, bool is_caching_test = false) { - int min_random = -2, max_random = 2; - - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - - VVVVF ref_output = lstm_gemm_reference(ref_input, ref_weights, ref_recurrent, ref_bias, ref_hidden, 0, hasBias, hasHidden); - - constexpr auto dt = std::is_same::value ? data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr input = engine.allocate_memory({ dt, format::bfyx, { batch_size, sequence_len, input_size, 1 } }); - memory::ptr weights = engine.allocate_memory({ dt, format::bfyx, { 1, direction, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ dt, format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ dt, format::bfyx, { 1, 1, 4 * hidden_size, direction } }); - memory::ptr hidden = engine.allocate_memory({ dt, format::bfyx, { batch_size, direction, hidden_size, 1 } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - if (hasBias) { - topology.add(data("biases", biases)); - } - if (hasHidden) { - topology.add(input_layout("hidden", hidden->get_layout())); - } - - topology.add(lstm_gemm("lstm_gemm", input_info("input"), "weights", "recurrent", hasBias ? "biases" : "", hasHidden ? 
"hidden" : "")); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - if (hasHidden) { - network->set_input_data("hidden", hidden); - } - - auto outputs = network->execute(); - ASSERT_EQ(outputs.size(), size_t(1)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - int i = 0; - for (int b = 0; b < batch_size; ++b) { - for (int x = 0; x < 4 * hidden_size; ++x) - ASSERT_FLOAT_EQ(ref_output[b][0][0][x], output_ptr[i++]); - } -} - -template -void generic_lstm_elt_gpu_test(int /* sequence_len */, int direction, int batch_size, - int /* input_size */, int hidden_size, bool hasCell, - T clip_threshold, bool input_forget, bool is_caching_test = false) { - // tempGEMM = [ 1, direction, batch, 4 * hidden_size ] input - // cell = [ 1, direction, batch, hidden_size ] optional - // output = [ 2, direction, batch, hidden_size ] output concat[hidden, cell] - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_tempGEMM = rg.generate_random_4d(batch_size, direction, 1, 4 * hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VF ref_tempGEMM_vec = flatten_4d(cldnn::format::bfyx, ref_tempGEMM); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - VVVVF ref_output = lstm_elt_reference(ref_tempGEMM, ref_cell, hasCell, clip_threshold, input_forget); - - // We observe some mismatch in down-converting from fp32 to fp16 - // between the reference implementation and opencl kernel. This can be - // a simple rounding error. Thus, for fp16 we are increasing our tolerance - // to error from 1E-4 to 1E-2 - constexpr float ferror = std::is_same::value ? (float)1E-4 : (float)1E-2; - constexpr auto dt = std::is_same::value ? data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr tempGEMM = engine.allocate_memory({ dt, format::bfyx,{ batch_size, direction, 4 * hidden_size, 1 } }); - memory::ptr cell = engine.allocate_memory({ dt, format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - set_values(tempGEMM, ref_tempGEMM_vec); - set_values(cell, ref_cell_vec); - - topology topology; - topology.add(input_layout("tempGEMM", tempGEMM->get_layout())); - if (hasCell) { - topology.add(input_layout("cell", cell->get_layout())); - } - topology.add(lstm_elt("lstm_elt", input_info("tempGEMM"), hasCell ? 
"cell" : "", clip_threshold, input_forget)); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("tempGEMM", tempGEMM); - if (hasCell) { - network->set_input_data("cell", cell); - } - - auto outputs = network->execute(); - ASSERT_EQ(outputs.size(), size_t(1)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - for (int b = 0; b < batch_size; ++b) { - for (int j = 0; j < 2; ++j) { - for (int x = 0; x < hidden_size; ++x) - { - auto idx = b * 2 * hidden_size + j * hidden_size + x; - ASSERT_NEAR(ref_output[b][j][0][x], output_ptr[idx] , ferror); - } - } - } -} - -std::string get_string_id(size_t i) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << i; - return ss.str(); -} - -// --------------- Manually constructed LSTM ---------------------------------------- -// This function manually generates an lstm node sequence by conbining lstm_gemm and lstm_elt nodes -// it requires that the output of the lstm_elt node is croped to obtain the corresponding hidden and cell outputs -void generate_lstm_topology(topology& t, memory::ptr input, memory::ptr hidden, memory::ptr cell, - memory::ptr weights, memory::ptr recurrent, memory::ptr biases, int sequence_len, - bool hasBias = true, bool hasInitialHidden = true, bool hasInitialCell = true) { - auto hidden_size = hidden->get_layout().get_tensor(); - t.add(input_layout("input", input->get_layout())); - std::vector> input_ids_offsets; - std::vector output_ids_offsets; - for (int i = 0; i < sequence_len; ++i) - input_ids_offsets.push_back({ get_string_id(i),{ 0, i, 0, 0 } }); - t.add(split("inputSplit", input_info("input"), input_ids_offsets)); - t.add(data("weights", weights)); - t.add(data("recurrent", recurrent)); - - std::string biasStr = ""; - std::string hiddenStr = ""; - std::string cellStr = ""; - if (hasBias) - { - t.add(data("biases", biases)); - biasStr = "biases"; - } - if (hasInitialHidden) - { - t.add(input_layout("hidden", hidden->get_layout())); - hiddenStr = "hidden"; - } - if (hasInitialCell) - { - t.add(input_layout("cell", cell->get_layout())); - cellStr = "cell"; - } - for (int i = 0; i < sequence_len; ++i) { - std::string lstm_gemm_id = "lstm_gemm" + get_string_id(i); - std::string lstm_elt_id = "lstm_elt" + get_string_id(i); - std::string crop_id = "crop" + get_string_id(i); - - t.add(lstm_gemm(lstm_gemm_id, input_info("inputSplit:" + get_string_id(i)), "weights", "recurrent", biasStr, hiddenStr)); - t.add(lstm_elt(lstm_elt_id, input_info(lstm_gemm_id), cellStr)); - - hiddenStr = crop_id + ":hidden"; - t.add(crop(hiddenStr, input_info(lstm_elt_id), hidden_size, tensor{ 0,0,0,0 })); - if (i < sequence_len - 1) { - cellStr = crop_id + ":cell"; - t.add(crop(cellStr, input_info(lstm_elt_id), hidden_size, tensor{ 0,1,0,0 })); - } - output_ids_offsets.push_back(input_info(hiddenStr)); - } - t.add(concatenation("concatenation", output_ids_offsets, 1)); -} - -template -void generic_lstm_custom_gpu_test(int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasInitialHidden, bool hasInitialCell, bool is_caching_test = false) { - std::cout << "Input Size = " << input_size << " Hidden Size = " << hidden_size << " Sequence Len = " << sequence_len << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - VVVVF ref_input = 
rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, direction, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, direction, 1, hidden_size, min_random, max_random); - VVVVF ref_output(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size)))); - VVVVF last_hidden(batch_size, VVVF(direction, VVF(1, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(direction, VVF(1, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, last_hidden, last_cell, - hasBias, hasInitialHidden, hasInitialCell); - - auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, sequence_len, input_size, 1 } }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - topology topology; - generate_lstm_topology(topology, input, hidden, cell, weights, recurrent, biases, sequence_len, - hasBias, hasInitialHidden, hasInitialCell); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - if (hasInitialHidden) network->set_input_data("hidden", hidden); - if (hasInitialCell) network->set_input_data("cell", cell); - auto outputs = network->execute(); - - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - int i = 0; - for (int b = 0; b < batch_size; ++b) { - for (int s = 0; s < sequence_len; ++s) { - for (int x = 0; x < hidden_size; ++x) { - for (int d = 0; d < direction; ++d) { - ASSERT_NEAR(ref_output[b][s][d][x], output_ptr[i++], FERROR); - 
} - } - } - } -} - -// ------------------------------------------------------- -template -void generic_lstm_gpu_test(int layers, int sequence_len, int direction, int batch_size, int input_size, int hidden_size, - bool hasBias, bool hasInitialHidden, bool hasInitialCell, - T clip_threshold, bool input_forget, bool is_caching_test = false) { - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Direction = " << direction << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - - std::vector> ref_weights; - std::vector> ref_recurrent; - std::vector> ref_bias; - std::vector> ref_hidden; - std::vector> ref_cell; - std::vector> ref_output; - - for (int i = 0; i < layers; ++i) { - ref_weights.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, i==0 ? input_size : hidden_size, min_random, max_random)); - ref_recurrent.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random)); - ref_bias.push_back(rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random)); - ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_cell.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size))))); - } - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - std::vector> ref_weights_vec; - std::vector> ref_recurrent_vec; - std::vector> ref_bias_vec; - std::vector> ref_hidden_vec; - std::vector> ref_cell_vec; - for (int i = 0; i < layers; ++i) { - ref_weights_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[i])); - ref_recurrent_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[i])); - ref_bias_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[i])); - ref_hidden_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[i])); - ref_cell_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[i])); - } - - VVVVF last_hidden(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden[0], ref_cell[0], ref_weights[0], ref_recurrent[0], ref_bias[0], ref_output[0], - last_hidden, last_cell, hasBias, hasInitialHidden, hasInitialCell, - clip_threshold, input_forget, true); - - for (int i = 1; i < layers; ++i) { - lstm_reference(ref_output[i - 1], ref_hidden[i], ref_cell[i], ref_weights[i], ref_recurrent[i], - ref_bias[i], ref_output[i], - last_hidden, last_cell, hasBias, hasInitialHidden, hasInitialCell, - clip_threshold, input_forget, false); - } - - // We observe some mismatch in down-converting from fp32 to fp16 - // between the reference implementation and opencl kernel. This can be - // a simple rounding error. Thus, for fp16 we are increasing our tolerance - // to error from 1E-4 to 1E-2 - constexpr float ferror = std::is_same::value ? (float)1E-4 : (float)1E-2; - constexpr auto dt = std::is_same::value ? 
data_types::f32 : data_types::f16; - auto& engine = get_test_engine(); - - // If the input is of fp16 type then, the memory::ptr will be allocated as such - if (!engine.get_device_info().supports_fp16) - { - if (dt == data_types::f16) - { - return; - } - } - - memory::ptr input = engine.allocate_memory({ dt, format::bfyx, {batch_size, sequence_len, input_size, 1} }); - set_values(input, ref_input_vec); - - std::vector weights; - std::vector recurrent; - std::vector biases; - std::vector hidden; - std::vector cell; - for(int i = 0; i < layers; ++i) { - weights.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, direction, i==0 ? input_size : hidden_size, 4 * hidden_size } })); - set_values(weights[i], ref_weights_vec[i]); - recurrent.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); - set_values(recurrent[i], ref_recurrent_vec[i]); - if (hasBias) { - biases.push_back(engine.allocate_memory({ dt, format::bfyx, { 1, 1, 4 * hidden_size, direction } })); - set_values(biases[i], ref_bias_vec[i]); - } - if (hasInitialHidden) { - hidden.push_back(engine.allocate_memory({ dt, format::bfyx, { batch_size, 1, hidden_size, direction } })); - set_values(hidden[i], ref_hidden_vec[i]); - } - if (hasInitialCell) { - cell.push_back(engine.allocate_memory({ dt, format::bfyx, { batch_size, 1, hidden_size, direction} })); - set_values(cell[i], ref_cell_vec[i]); - } - } - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - cldnn::primitive_id prev_lstm_id; - for(int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - std::string lstm_id = "lstm" + sid; - std::string weights_id = "weights" + sid; - std::string recurrent_id = "recurrent" + sid; - std::string biases_id = "biases" + sid; - std::string hidden_id = "hidden" + sid; - std::string cell_id = "cell" + sid; - - topology.add(data(weights_id, weights[i])); - topology.add(data(recurrent_id, recurrent[i])); - if (hasBias) topology.add(data(biases_id, biases[i])); - if (hasInitialHidden) topology.add(input_layout(hidden_id, hidden[i]->get_layout())); - if (hasInitialCell) topology.add(input_layout(cell_id, cell[i]->get_layout())); - if (i == 0) { - topology.add(lstm(lstm_id, lstm_inputs, weights_id, recurrent_id, - hasBias ? biases_id : "", hasInitialHidden ? hidden_id : "", hasInitialCell ? cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence, default_offset_type)); - } - else { - topology.add(lstm(lstm_id, { input_info(prev_lstm_id) }, weights_id, recurrent_id, - hasBias ? biases_id : "", hasInitialHidden ? hidden_id : "", hasInitialCell ? 
cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence, default_offset_type)); - } - prev_lstm_id = lstm_id; - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - if (hasInitialHidden) network->set_input_data("hidden" + sid, hidden[i]); - if (hasInitialCell) network->set_input_data("cell" + sid, cell[i]); - } - auto outputs = network->execute(); - { - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - - // Get the output tensor - cldnn::layout output_layout = output->get_layout(); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(batch_size, output_layout.batch()); - ASSERT_EQ(sequence_len, output_layout.feature()); - ASSERT_EQ(direction, output_layout.spatial(1)); - ASSERT_EQ(hidden_size, output_layout.spatial(0)); - - cldnn::mem_lock output_ptr(output, get_test_stream()); - int32_t i = 0; - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < sequence_len; ++s) { - for (int32_t d = 0; d < direction; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - ASSERT_NEAR(ref_output[layers - 1][b][s][d][x], output_ptr[i++], ferror); - } - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_output_test(const lstm_output_selection& output_selection, int directions, bool is_caching_test = false) { - int layers = 1; - int sequence_len = 4; - int batch_size = 3; - int input_size = 3; - int hidden_size = 4; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, directions, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_output = VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(cldnn::format::bfyx, ref_hidden); - VF ref_cell_vec = flatten_4d(cldnn::format::bfyx, ref_cell); - - 
VVVVF last_hidden(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, - last_hidden, last_cell, true, true, true, - (T)0, false, true); - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - bool emit_last_cell = output_selection == lstm_output_selection::hidden_cell || - output_selection == lstm_output_selection::sequence_cell; - bool emit_last_hidden = output_selection == lstm_output_selection::hidden || - output_selection == lstm_output_selection::hidden_cell; - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", hidden->get_layout())); - topology.add(input_layout("cell", cell->get_layout())); - topology.add(lstm("lstm", lstm_inputs, "weights", "recurrent", - "biases", "hidden", "cell", "", 0, false, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection, default_offset_type)); - if (emit_last_cell) - { - int32_t concatenation_len = emit_last_hidden ? 2 : sequence_len + 1; - tensor hidden_tensor {batch_size, concatenation_len - 1, hidden_size, directions}; - tensor cell_tensor {batch_size, 1, hidden_size, directions}; - topology.add(crop(emit_last_hidden ? 
"crop:last_hidden" : "crop:sequence", input_info("lstm"), hidden_tensor, tensor{0, 0, 0, 0})); - topology.add(crop("crop:last_cell", input_info("lstm"), cell_tensor, tensor{0, concatenation_len - 1, 0, 0})); - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - network->set_input_data("hidden", hidden); - network->set_input_data("cell", cell); - - auto outputs = network->execute(); - uint32_t ref_num_output_primitives = 1; // Output will return atleast 1 primitive - - if (emit_last_cell) { - // add another primitve to account for cell state if the output selection includes cell state - ref_num_output_primitives += 1; - } - - // check if the number of returned primitives match the expected number of output primitives - ASSERT_EQ(ref_num_output_primitives, outputs.size()); - - for (auto itr = outputs.begin(); itr != outputs.end(); itr++) - { - auto output_layout = itr->second.get_memory()->get_layout(); - primitive_id primitive_name = itr->first; - - cldnn::memory::ptr output_memory = itr->second.get_memory(); - int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T)); - cldnn::tensor ref_output_tensor; - VVVVF ref_primitive_output; - - int32_t ref_batch_size = batch_size; - int32_t ref_hidden_size = hidden_size; - int32_t ref_directions = directions; - - int32_t ref_seq_len = 1; - // Set the reference output against which the primitive's output will be compared - if (primitive_name.find("crop:last_cell") != std::string::npos) - { - ref_primitive_output = last_cell; - } - else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos) - { - ref_primitive_output = last_hidden; - } - else - { - ref_seq_len = sequence_len; - ref_primitive_output = ref_output; - } - - ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions }; - int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions; - - // The number of elements in reference should match the number of elements in the primitive's output - ASSERT_EQ(ref_output_size , output_size); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(ref_batch_size, output_layout.batch()); - ASSERT_EQ(ref_seq_len, output_layout.feature()); // Sequence length should match - ASSERT_EQ(ref_directions, output_layout.spatial(1)); // directions should match - ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // input size should match - - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - int32_t i = 0; - for (int32_t b = 0; b < ref_batch_size; ++b) { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - for (int32_t x = 0; x < ref_hidden_size; ++x) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_format_test(const cldnn::format& format, int directions, bool is_caching_test = false) { - int layers = 1; - int sequence_len = 6; - int batch_size = 3; - int input_size = 4; - int hidden_size = 5; - - lstm_output_selection output_selection = lstm_output_selection::sequence; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " 
Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF ref_weights = rg.generate_random_4d(1, directions, 4 * hidden_size, input_size, min_random, max_random); - VVVVF ref_recurrent = rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF ref_bias = rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random); - VVVVF ref_hidden = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_cell = rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF ref_output = VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size)))); - - VF ref_input_vec = flatten_4d(format, ref_input); - VF ref_weights_vec = flatten_4d(cldnn::format::bfyx, ref_weights); - VF ref_recurrent_vec = flatten_4d(cldnn::format::bfyx, ref_recurrent); - VF ref_bias_vec = flatten_4d(cldnn::format::bfyx, ref_bias); - VF ref_hidden_vec = flatten_4d(format, ref_hidden); - VF ref_cell_vec = flatten_4d(format, ref_cell); - - VVVVF last_hidden(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden, ref_cell, ref_weights, ref_recurrent, ref_bias, ref_output, - last_hidden, last_cell, true, true, true, - (T)0, false, true); - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(),format, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - bool emit_last_cell = output_selection == lstm_output_selection::hidden_cell || - output_selection == lstm_output_selection::sequence_cell; - bool emit_last_hidden = output_selection == lstm_output_selection::hidden || - output_selection == lstm_output_selection::hidden_cell; - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", 
hidden->get_layout())); - topology.add(input_layout("cell", cell->get_layout())); - topology.add(lstm("lstm"+get_string_id(0), lstm_inputs, "weights", "recurrent", - "biases", "hidden", "cell", "", 0, false, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection, default_offset_type)); - - if (emit_last_cell) - { - int32_t concatenation_len = emit_last_hidden ? 2 : sequence_len + 1; - tensor hidden_tensor {batch_size, concatenation_len - 1, hidden_size, directions}; - tensor cell_tensor {batch_size, 1, hidden_size, directions}; - topology.add(crop(emit_last_hidden ? "crop:last_hidden" : "crop:sequence", input_info("lstm"), hidden_tensor, tensor{0, 0, 0, 0})); - topology.add(crop("crop:last_cell", input_info("lstm"), cell_tensor, tensor{0, concatenation_len - 1, 0, 0})); - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - - std::map outputs; - - network->set_input_data("input", input); - network->set_input_data("hidden", hidden); - network->set_input_data("cell", cell); - outputs = network->execute(); - - uint32_t ref_num_output_primitives = 1; // Output will return atleast 1 primitive - - if (emit_last_cell) { - // add another primitve to account for cell state if the output selection includes cell state - ref_num_output_primitives += 1; - } - - // check if the number of returned primitives match the expected number of output primitives - ASSERT_EQ(ref_num_output_primitives, outputs.size()); - - for (auto itr = outputs.begin(); itr != outputs.end(); itr++) - { - auto output_layout = itr->second.get_memory()->get_layout(); - primitive_id primitive_name = itr->first; - - cldnn::memory::ptr output_memory = itr->second.get_memory(); - int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T)); - cldnn::tensor ref_output_tensor; - VVVVF ref_primitive_output; - - int32_t ref_batch_size = batch_size; - int32_t ref_hidden_size = hidden_size; - int32_t ref_directions = directions; - - int32_t ref_seq_len = 1; - // Set the reference output against which the primitive's output will be compared - if (primitive_name.find("crop:last_cell") != std::string::npos) - { - ref_primitive_output = last_cell; - } - else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos) - { - ref_primitive_output = last_hidden; - } - else - { - ref_seq_len = sequence_len; - ref_primitive_output = ref_output; - } - - ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions }; - int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions; - - // The number of elements in reference should match the number of elements in the primitive's output - ASSERT_EQ(ref_output_size , output_size); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(ref_batch_size, output_layout.batch()); - ASSERT_EQ(ref_seq_len, output_layout.feature()); // Sequence length should match - ASSERT_EQ(ref_directions, output_layout.spatial(1)); // directions should match - ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // input size should match - - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - int32_t i = 0; - if (format == cldnn::format::bfyx) { - for (int32_t b = 0; b < ref_batch_size; ++b) { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - 
for (int32_t x = 0; x < ref_hidden_size; ++x) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } - else if (format == cldnn::format::fyxb) - { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - for (int32_t x = 0; x < ref_hidden_size; ++x) { - for (int32_t b = 0; b < ref_batch_size; ++b) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } - - } -} - -// ------------------------------------------------------- -template <typename T> -void lstm_gpu_users_test(bool is_caching_test = false) { - int sequence_len = 2; - int batch_size = 1; - int input_size = 1; - int hidden_size = 1; - int directions = 1; - int min_random = -2, max_random = 2; - tests::random_generator rg(GET_SUITE_NAME); - - // The following test is designed to test the user dependencies of an LSTM node when it is replaced by its subcomponents - // by the graph compiler. - // The output of an LSTM node is set to last_hidden only. Then we concatenate the last_hidden with the initial_hidden tensor: - // (input, weights, recurrent, bias, initial_hidden, initial_cell) -> LSTM -> last_hidden - // concatenation(last_hidden, initial_hidden) - // If the replacement is done correctly then the initial_hidden tensor should match the output of the concatenation - // by an offset along the sequence. - - VVVVF<T> ref_input = rg.generate_random_4d<T>(batch_size, sequence_len, 1, input_size, min_random, max_random); - VVVVF<T> ref_weights = rg.generate_random_4d<T>(1, directions, 4 * hidden_size, input_size, min_random, max_random); - VVVVF<T> ref_recurrent = rg.generate_random_4d<T>(1, directions, 4 * hidden_size, hidden_size, min_random, max_random); - VVVVF<T> ref_bias = rg.generate_random_4d<T>(1, 1, directions, 4 * hidden_size, min_random, max_random); - VVVVF<T> ref_hidden = rg.generate_random_4d<T>(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF<T> ref_cell = rg.generate_random_4d<T>(batch_size, 1, directions, hidden_size, min_random, max_random); - VVVVF<T> ref_output = VVVVF<T>(batch_size, VVVF<T>(sequence_len, VVF<T>(directions, VF<T>(hidden_size)))); - - VF<T> ref_input_vec = flatten_4d(format::bfyx, ref_input); - VF<T> ref_weights_vec = flatten_4d(format::bfyx, ref_weights); - VF<T> ref_recurrent_vec = flatten_4d(format::bfyx, ref_recurrent); - VF<T> ref_bias_vec = flatten_4d(format::bfyx, ref_bias); - VF<T> ref_hidden_vec = flatten_4d(format::bfyx, ref_hidden); - VF<T> ref_cell_vec = flatten_4d(format::bfyx, ref_cell); - - VVVVF<T> last_hidden(batch_size, VVVF<T>(1, VVF<T>(directions, VF<T>(hidden_size)))); - VVVVF<T> last_cell(batch_size, VVVF<T>(1, VVF<T>(directions, VF<T>(hidden_size)))); - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, directions, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ ov::element::from<T>(), format::bfyx, { batch_size, 1, hidden_size, directions } }); - - set_values(input, ref_input_vec); - set_values(weights, ref_weights_vec); - 
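[Editor's note] The assertion loop at the end of this users test (in the lines that follow) locates initial_hidden inside the concatenated output one step down the sequence axis; the flat index it computes is plain bfyx linearization. A worked sketch of that index, using the same b/s/d/x naming:

```cpp
#include <cassert>

// bfyx linearization used by the concatenation check below: the hidden tensor
// appended after the LSTM output lands at sequence position s + 1.
int concat_output_index(int b, int s, int d, int x,
                        int sequence_len, int directions, int hidden_size) {
    return x + hidden_size * (d + directions * ((s + 1) + sequence_len * b));
}

int main() {
    // With batch 1, seq_len 2, dir 1, hidden 1 (the sizes in this test), the
    // appended hidden value for (b=0, s=0) sits right after the LSTM output.
    assert(concat_output_index(0, 0, 0, 0, 2, 1, 1) == 1);
    return 0;
}
```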
set_values(recurrent, ref_recurrent_vec); - set_values(biases, ref_bias_vec); - set_values(hidden, ref_hidden_vec); - set_values(cell, ref_cell_vec); - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - - topology.add(input_layout("input", input->get_layout())); - for (int i = 0; i < sequence_len; ++i) - { - input_ids_offsets.push_back({get_string_id(i), {0, i, 0, 0}}); - lstm_inputs.push_back(input_info("inputSplit:"+get_string_id(i))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - topology.add(data("weights", weights)); - topology.add(data("recurrent", recurrent)); - topology.add(data("biases", biases)); - topology.add(input_layout("hidden", hidden->get_layout())); - topology.add(input_layout("cell", cell->get_layout())); - topology.add(lstm("lstm", lstm_inputs, "weights", "recurrent", - "biases", "hidden", "cell", "", 0, false, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::hidden, default_offset_type)); - std::vector output_ids_offsets { input_info("lstm"), input_info("hidden") }; - topology.add(concatenation("concatenation", output_ids_offsets, 1)); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - - std::map outputs; - - network->set_input_data("input", input); - network->set_input_data("hidden", hidden); - network->set_input_data("cell", cell); - outputs = network->execute(); - - // check if the number of returned primitives match the expected number of output primitives - ASSERT_EQ(size_t(1), outputs.size()); - cldnn::memory::ptr output_memory = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < 1; ++s) { - for (int32_t d = 0; d < directions; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - int32_t idx = x + hidden_size * (d + directions * ((s+1) + sequence_len * b)); - ASSERT_NEAR(ref_hidden[b][s][d][x], output_ptr[idx], FERROR); - } - } - } - } -} - -// ------------------------------------------------------- -template -void lstm_gpu_concatenated_input_test(int layers, int sequence_len, int direction, - int batch_size, int input_size, int hidden_size, - bool has_bias, bool has_initial_hidden, - bool has_initial_cell, float clip_threshold, - bool input_forget, bool is_caching_test = false) -{ - tests::random_generator rg(GET_SUITE_NAME); - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Direction = " << direction << " Batch Size = " << batch_size << std::endl; - int min_random = -2, max_random = 2; - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - - std::vector> ref_weights; - std::vector> ref_recurrent; - std::vector> ref_bias; - std::vector> ref_hidden; - std::vector> ref_cell; - std::vector> ref_output; - - for (int i = 0; i < layers; ++i) { - ref_weights.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, i == 0 ? 
input_size : hidden_size, min_random, max_random)); - ref_recurrent.push_back(rg.generate_random_4d(1, direction, 4 * hidden_size, hidden_size, min_random, max_random)); - ref_bias.push_back(rg.generate_random_4d(1, 1, direction, 4 * hidden_size, min_random, max_random)); - ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_cell.push_back(rg.generate_random_4d(batch_size, 1, direction, hidden_size, min_random, max_random)); - ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(direction, VF(hidden_size))))); - } - - VF ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - - std::vector> ref_weights_vec; - std::vector> ref_recurrent_vec; - std::vector> ref_bias_vec; - std::vector> ref_hidden_vec; - std::vector> ref_cell_vec; - for (int i = 0; i < layers; ++i) { - ref_weights_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[i])); - ref_recurrent_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[i])); - ref_bias_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[i])); - ref_hidden_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[i])); - ref_cell_vec.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[i])); - } - - VVVVF last_hidden(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - VVVVF last_cell(batch_size, VVVF(1, VVF(direction, VF(hidden_size)))); - - lstm_reference(ref_input, ref_hidden[0], ref_cell[0], ref_weights[0], ref_recurrent[0], ref_bias[0], ref_output[0], - last_hidden, last_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget, true); - - for (int i = 1; i < layers; ++i) { - lstm_reference(ref_output[i - 1], ref_hidden[i], ref_cell[i], ref_weights[i], ref_recurrent[i], - ref_bias[i], ref_output[i], - last_hidden, last_cell, has_bias, has_initial_hidden, has_initial_cell, - clip_threshold, input_forget, false); - } - - auto& engine = get_test_engine(); - - memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); - set_values(input, ref_input_vec); - - std::vector weights; - std::vector recurrent; - std::vector biases; - std::vector hidden; - std::vector cell; - for (int i = 0; i < layers; ++i) { - weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, i == 0 ? 
input_size : hidden_size, 4 * hidden_size } })); - set_values(weights[i], ref_weights_vec[i]); - recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); - set_values(recurrent[i], ref_recurrent_vec[i]); - if (has_bias) { - biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, direction } })); - set_values(biases[i], ref_bias_vec[i]); - } - if (has_initial_hidden) { - hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction } })); - set_values(hidden[i], ref_hidden_vec[i]); - } - if (has_initial_cell) { - cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction} })); - set_values(cell[i], ref_cell_vec[i]); - } - } - - topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - cldnn::primitive_id prev_node_id; - - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - std::string lstm_id = "lstm" + sid; - std::string weights_id = "weights" + sid; - std::string recurrent_id = "recurrent" + sid; - std::string biases_id = "biases" + sid; - std::string hidden_id = "hidden" + sid; - std::string cell_id = "cell" + sid; - std::string output_crop_id = "crop:sequence:" + sid; - - topology.add(data(weights_id, weights[i])); - topology.add(data(recurrent_id, recurrent[i])); - if (has_bias) topology.add(data(biases_id, biases[i])); - if (has_initial_hidden) topology.add(input_layout(hidden_id, hidden[i]->get_layout())); - if (has_initial_cell) topology.add(input_layout(cell_id, cell[i]->get_layout())); - if (i == 0) { - topology.add(lstm(lstm_id, { input_info("input") }, weights_id, recurrent_id, - has_bias ? biases_id : "", has_initial_hidden ? hidden_id : "", has_initial_cell ? cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence_cell, default_offset_type)); - } - else { - topology.add(lstm(lstm_id, { input_info(prev_node_id) }, weights_id, recurrent_id, - has_bias ? biases_id : "", has_initial_hidden ? hidden_id : "", has_initial_cell ? 
cell_id : "", "", - clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - lstm_output_selection::sequence_cell, default_offset_type)); - } - - // Crop out the whole output sequence element - topology.add(crop(output_crop_id, input_info(lstm_id), {batch_size, sequence_len, hidden_size, direction}, {0, 0, 0, 0})); - - // Save the node id to provide it as input to the next lstm layer - prev_node_id = output_crop_id; - } - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (int i = 0; i < layers; ++i) { - std::string sid = get_string_id(i); - if (has_initial_hidden) network->set_input_data("hidden" + sid, hidden[i]); - if (has_initial_cell) network->set_input_data("cell" + sid, cell[i]); - } - auto outputs = network->execute(); - { - ASSERT_EQ(outputs.size(), size_t(1)); - size_t output_size = outputs.begin()->second.get_memory()->size() / sizeof(T); - ASSERT_EQ(output_size, size_t(hidden_size * sequence_len * batch_size * direction)); - - auto output = outputs.begin()->second.get_memory(); - - // Get the output tensor - cldnn::layout output_layout = output->get_layout(); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(batch_size, output_layout.batch()); - ASSERT_EQ(sequence_len, output_layout.feature()); - ASSERT_EQ(direction, output_layout.spatial(1)); - ASSERT_EQ(hidden_size, output_layout.spatial(0)); - - cldnn::mem_lock output_ptr(output, get_test_stream()); - int32_t i = 0; - for (int32_t b = 0; b < batch_size; ++b) { - for (int32_t s = 0; s < sequence_len; ++s) { - for (int32_t d = 0; d < direction; ++d) { - for (int32_t x = 0; x < hidden_size; ++x) { - ASSERT_NEAR(ref_output[layers - 1][b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} - -// This test checks a chained and stacked LSTM topology. The configuration allows creating an -// LSTM topology with multiple layers, which can also be chained together.
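Before the chain test body, a compact sketch of the wiring it constructs may help: within one chain link the layers are stacked, each consuming the previous layer's cropped output sequence, while across links layer i of link k+1 is seeded with the cropped last_hidden/last_cell of layer i from link k. The sketch below only prints that dependency graph; all identifiers are illustrative stand-ins for the primitive ids used by the test.

```cpp
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Illustrative record of the three crops taken from one LSTM node.
struct LstmNodeIds {
    std::string sequence;     // "crop:sequence:<chain>:<layer>"
    std::string last_hidden;  // "crop:last_hidden:<chain>:<layer>"
    std::string last_cell;    // "crop:last_cell:<chain>:<layer>"
};

int main() {
    const std::size_t chains = 2, layers = 2; // small example sizes
    std::vector<LstmNodeIds> prev_link;
    for (std::size_t c = 0; c < chains; ++c) {
        std::vector<LstmNodeIds> link(layers);
        for (std::size_t l = 0; l < layers; ++l) {
            std::string tag = std::to_string(c) + ":" + std::to_string(l);
            link[l] = { "crop:sequence:" + tag,
                        "crop:last_hidden:" + tag,
                        "crop:last_cell:" + tag };
            // Input: the first layer reads the (split) network input, deeper
            // layers read the previous layer's cropped output sequence.
            std::string in = (l == 0) ? "inputSplit" : link[l - 1].sequence;
            // Initial state: the first link uses explicit inputs (if any);
            // later links chain from the same layer of the previous link.
            std::string h0 = (c == 0) ? "hidden:init" : prev_link[l].last_hidden;
            std::string c0 = (c == 0) ? "cell:init"  : prev_link[l].last_cell;
            std::printf("lstm:%s  <- in=%s  h0=%s  c0=%s\n",
                        tag.c_str(), in.c_str(), h0.c_str(), c0.c_str());
        }
        prev_link = link;
    }
    return 0;
}
```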
-template -void lstm_gpu_chain_test(int batch_size, int input_size, int hidden_size, - int directions, size_t layers, size_t chains, int sequence_len, - const lstm_output_selection& output_selection, bool is_caching_test = false) -{ - tests::random_generator rg(GET_SUITE_NAME); - int min_random = -2, max_random = 2; - bool has_bias = false; - bool has_initial_hidden = false; - bool has_initial_cell = false; - float clip_threshold = 0; - bool input_forget = false; - - std::cout << "Layers = " << layers << " Input Size = " << input_size << " Hidden Size = " << hidden_size - << " Sequence Len = " << sequence_len << " Directions = " << directions << " Batch Size = " << batch_size - << " Output selection: " << static_cast(output_selection) << std::endl; - - VVVVF ref_input = rg.generate_random_4d(batch_size, sequence_len, 1, input_size, min_random, max_random); - std::vector>> ref_weights; - std::vector>> ref_recurrent; - std::vector>> ref_bias; - std::vector>> ref_hidden; - std::vector>> ref_cell; - std::vector>> ref_output; - - // Create the 4 dimensional weight, bias, hidden, cell state and output vectors - for (size_t chain = 0; chain < chains; chain++) { - - std::vector> per_chain_ref_weights; - std::vector> per_chain_ref_recurrent; - std::vector> per_chain_ref_bias; - std::vector> per_chain_ref_hidden; - std::vector> per_chain_ref_cell; - std::vector> per_chain_ref_output; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_ref_weights.push_back(rg.generate_random_4d(1, directions, 4 * hidden_size, (layer == 0) ? input_size : hidden_size, min_random, max_random)); - per_chain_ref_recurrent.push_back(rg.generate_random_4d(1, directions, 4 * hidden_size, hidden_size, min_random, max_random)); - per_chain_ref_bias.push_back(rg.generate_random_4d(1, 1, directions, 4 * hidden_size, min_random, max_random)); - per_chain_ref_hidden.push_back(rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random)); - per_chain_ref_cell.push_back(rg.generate_random_4d(batch_size, 1, directions, hidden_size, min_random, max_random)); - per_chain_ref_output.push_back(VVVVF(batch_size, VVVF(sequence_len, VVF(directions, VF(hidden_size))))); - } - - ref_weights.push_back(per_chain_ref_weights); - ref_recurrent.push_back(per_chain_ref_recurrent); - ref_bias.push_back(per_chain_ref_bias); - ref_hidden.push_back(per_chain_ref_hidden); - ref_cell.push_back(per_chain_ref_cell); - ref_output.push_back(per_chain_ref_output); - } - - VF ref_input_vec; - std::vector>> ref_weights_vec; - std::vector>> ref_recurrent_vec; - std::vector>> ref_bias_vec; - std::vector>> ref_hidden_vec; - std::vector>> ref_cell_vec; - std::vector>> ref_output_vec; - - ref_input_vec = flatten_4d(cldnn::format::bfyx, ref_input); - - // flatten all the 4 dimensional vectors across chains and layers - for (size_t chain = 0; chain < chains; chain++) { - - std::vector> per_chain_ref_weights; - std::vector> per_chain_ref_recurrent; - std::vector> per_chain_ref_bias; - std::vector> per_chain_ref_hidden; - std::vector> per_chain_ref_cell; - std::vector> per_chain_ref_output; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_ref_weights.push_back(flatten_4d(cldnn::format::bfyx, ref_weights[chain][layer])); - per_chain_ref_recurrent.push_back(flatten_4d(cldnn::format::bfyx, ref_recurrent[chain][layer])); - per_chain_ref_bias.push_back(flatten_4d(cldnn::format::bfyx, ref_bias[chain][layer])); - per_chain_ref_hidden.push_back(flatten_4d(cldnn::format::bfyx, ref_hidden[chain][layer])); - 
per_chain_ref_cell.push_back(flatten_4d(cldnn::format::bfyx, ref_cell[chain][layer])); - per_chain_ref_output.push_back(flatten_4d(cldnn::format::bfyx, ref_output[chain][layer])); - } - - ref_weights_vec.push_back(per_chain_ref_weights); - ref_recurrent_vec.push_back(per_chain_ref_recurrent); - ref_bias_vec.push_back(per_chain_ref_bias); - ref_hidden_vec.push_back(per_chain_ref_hidden); - ref_cell_vec.push_back(per_chain_ref_cell); - ref_output_vec.push_back(per_chain_ref_output); - } - - std::vector>> last_hidden(chains, std::vector >(layers, VVVVF(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))))); - std::vector>> last_cell(chains, std::vector >(layers, VVVVF(batch_size, VVVF(1, VVF(directions, VF(hidden_size)))))); - - for (size_t chain = 0; chain < chains; chain++) { - lstm_reference(ref_input, ref_hidden[chain][0], ref_cell[chain][0], ref_weights[chain][0], - ref_recurrent[chain][0], ref_bias[chain][0], ref_output[chain][0], - last_hidden[chain][0], last_cell[chain][0], has_bias, - chain == 0 ? has_initial_hidden : true, - chain == 0 ? has_initial_cell : true, - clip_threshold, input_forget, true); - - if (chain < chains - 1) - { - ref_hidden[chain + 1][0] = last_hidden[chain][0]; - ref_cell[chain + 1][0] = last_cell[chain][0]; - } - } - - for (size_t layer = 1; layer < layers; ++layer) { - for (size_t chain = 0; chain < chains; chain++) { - lstm_reference(ref_output[chain][layer - 1], ref_hidden[chain][layer], ref_cell[chain][layer], - ref_weights[chain][layer], ref_recurrent[chain][layer], ref_bias[chain][layer], - ref_output[chain][layer], last_hidden[chain][layer], last_cell[chain][layer], has_bias, - chain == 0 ? has_initial_hidden : true, - chain == 0 ? has_initial_cell : true, - clip_threshold, input_forget, - false); - - if (chain < chains - 1) - { - ref_hidden[chain + 1][layer] = last_hidden[chain][layer]; - ref_cell[chain + 1][layer] = last_cell[chain][layer]; - } - } - } - - auto& engine = get_test_engine(); - tensor input_tensor = { batch_size, sequence_len, input_size, 1 }; - layout layout = { ov::element::from(), cldnn::format::bfyx, input_tensor }; - - memory::ptr input = engine.allocate_memory(layout); - set_values(input, ref_input_vec); - - // 2-dim vectors to support chains and layers - std::vector> weights; - std::vector> recurrent; - std::vector> biases; - std::vector> hidden; - std::vector> cell; - - for (size_t chain = 0; chain < chains; chain++) { - std::vector per_chain_weights; - std::vector per_chain_recurrent; - std::vector per_chain_biases; - std::vector per_chain_hidden; - std::vector per_chain_cell; - - for (size_t layer = 0; layer < layers; layer++) { - per_chain_weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, layer == 0 ?
input_size : hidden_size, 4 * hidden_size} })); - set_values(per_chain_weights[layer], ref_weights_vec[chain][layer]); - - per_chain_recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, hidden_size, 4 * hidden_size} })); - set_values(per_chain_recurrent[layer], ref_recurrent_vec[chain][layer]); - - if (has_bias) - { - per_chain_biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, 4 * hidden_size, directions} })); - set_values(per_chain_biases[layer], ref_bias_vec[chain][layer]); - } - - if (has_initial_hidden) - { - per_chain_hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); - set_values(per_chain_hidden[layer], ref_hidden_vec[chain][layer]); - } - - if (has_initial_cell) - { - per_chain_cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); - set_values(per_chain_cell[layer], ref_cell_vec[chain][layer]); - } - } - - weights.push_back(per_chain_weights); - recurrent.push_back(per_chain_recurrent); - biases.push_back(per_chain_biases); - hidden.push_back(per_chain_hidden); - cell.push_back(per_chain_cell); - } - - // Start creating the topology - cldnn::topology topology; - std::vector> input_ids_offsets; - std::vector lstm_inputs; - std::vector output_ids_offsets; - - topology.add(input_layout("input", input->get_layout())); - - for (int feature = 0; feature < sequence_len; feature++) { - input_ids_offsets.push_back({ get_string_id(feature), {0, feature, 0, 0} }); - lstm_inputs.push_back(input_info("inputSplit:" + get_string_id(feature))); - } - topology.add(split("inputSplit", input_info("input"), input_ids_offsets)); - - bool emit_last_hidden = output_selection == lstm_output_selection::hidden - || output_selection == lstm_output_selection::hidden_cell; - - std::vector output_sequence_ids; - std::vector last_hidden_ids; - std::vector last_cell_ids; - - for (size_t chain = 0; chain < chains; chain++) { - - // Add all the primitives to the network - std::vector prev_output_sequence_ids(output_sequence_ids); - std::vector prev_last_hidden_ids(last_hidden_ids); - std::vector prev_last_cell_ids(last_cell_ids); - - // Erase all the temporary primitive id containers - output_sequence_ids.clear(); - last_cell_ids.clear(); - last_hidden_ids.clear(); - - for (size_t layer = 0; layer < layers; layer++) { - std::string chain_id = get_string_id(chain); - std::string layer_id = get_string_id(layer); - std::string lstm_id = "lstm:" + chain_id + ":" + layer_id; - std::string weights_id = "weights:" + chain_id + ":" + layer_id; - std::string recurrent_id = "recurrent:" + chain_id + ":" + layer_id; - std::string biases_id = "biases:" + chain_id + ":" + layer_id; - std::string hidden_id = "hidden:" + chain_id + ":" + layer_id; - std::string cell_id = "cell:" + chain_id + ":" + layer_id; - std::string crop_seq_id = "crop:sequence:" + chain_id + ":" + layer_id; - std::string crop_last_cell_id = "crop:last_cell:" + chain_id + ":" + layer_id; - std::string crop_last_hidden_id = "crop:last_hidden:" + chain_id + ":" + layer_id; - - primitive_id initial_hidden_id; - primitive_id initial_cell_id; - lstm_output_selection output_selection_per_layer; - - topology.add(data(weights_id, weights[chain][layer])); - topology.add(data(recurrent_id, recurrent[chain][layer])); - if (has_bias) topology.add(data(biases_id, biases[chain][layer])); - - if (chain == 0 && layer == 0) - { - if (has_initial_hidden) 
topology.add(input_layout(hidden_id, hidden[chain][layer]->get_layout())); - if (has_initial_cell) topology.add(input_layout(cell_id, cell[chain][layer]->get_layout())); - } - - // Get the initial hidden and initial cell for each layer for each chain link - if (chain == 0) - { - initial_hidden_id = has_initial_hidden ? hidden_id : ""; - initial_cell_id = has_initial_cell ? cell_id : ""; - } - else - { - initial_hidden_id = prev_last_hidden_ids[layer]; - initial_cell_id = prev_last_cell_ids[layer]; - } - - // Output selection for all the layers except the last layer has to have the sequence, - // last hidden and last cell - if (layer < layers - 1) - { - output_selection_per_layer = lstm_output_selection::sequence_cell; - } - else - { - // For the last layer, use the output selection provided by the user - output_selection_per_layer = output_selection; - } - - if (layer == 0) - { - topology.add(lstm(lstm_id, lstm_inputs, weights_id, recurrent_id, - has_bias ? biases_id : "", - initial_hidden_id, initial_cell_id, - "", clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection_per_layer, default_offset_type)); - } - else - { - topology.add(lstm(lstm_id, { input_info(output_sequence_ids[layer - 1]) }, weights_id, recurrent_id, - has_bias ? biases_id : "", - initial_hidden_id, initial_cell_id, - "", clip_threshold, input_forget, - { activation_func::logistic, activation_func::hyperbolic_tan, activation_func::hyperbolic_tan }, {}, - output_selection_per_layer, default_offset_type)); - } - - tensor sequence_tensor{ batch_size, sequence_len, hidden_size, directions }; - tensor cell_tensor{ batch_size, 1, hidden_size, directions }; - tensor last_hidden_tensor{ batch_size, 1, hidden_size, directions }; - - // For all the layers except the last layer, we need to crop output sequence, - // last hidden and last cell. 
- // The output sequence goes into the next layer of lstm in a chain link - // The last cell state and last hidden go to the lstm node in the same layer - // next in chain - topology.add(crop(crop_seq_id, input_info(lstm_id), sequence_tensor, tensor{ 0, 0, 0, 0 })); // Add crop to get the sequence - topology.add(crop(crop_last_hidden_id, input_info(lstm_id), last_hidden_tensor, tensor{ 0, sequence_len - 1, 0, 0 })); // Add crop to get the last hidden element - topology.add(crop(crop_last_cell_id, input_info(lstm_id), cell_tensor, tensor{ 0, sequence_len, 0, 0 })); // Add crop to get the last cell element - - // Keep a copy of the sequence, last hidden and last cell primitive id for each layer - output_sequence_ids.push_back(crop_seq_id); - last_hidden_ids.push_back(crop_last_hidden_id); - last_cell_ids.push_back(crop_last_cell_id); - } - } - - // Create the network from the topology designed above - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - for (size_t layer = 0; layer < layers; layer++) { - std::string sid = get_string_id(layer); - if (has_initial_hidden) network->set_input_data("hidden:000:" + sid, hidden[0][layer]); // 0 is the chain link index - if (has_initial_cell) network->set_input_data("cell:000:" + sid, cell[0][layer]); // 0 is the chain link index - } - - auto outputs = network->execute(); - for (auto itr = outputs.begin(); itr != outputs.end(); itr++) - { - auto output_layout = itr->second.get_memory()->get_layout(); - primitive_id primitive_name = itr->first; - - // Split the primitive id to get the chain id - // E.g., primitive id: crop:last_cell:XXX:YYY - // XXX is the chain id - // YYY is the layer id - std::string chain_str = primitive_name.substr(primitive_name.find(":", primitive_name.find(":") + 1) + 1, 5); - std::string layer_str = primitive_name.substr(primitive_name.find(":", primitive_name.find(":", primitive_name.find(":") + 1) + 1) + 1, 5); - size_t chain_id = stoi(chain_str); - size_t layer_id = stoi(layer_str); - - cldnn::memory::ptr output_memory = itr->second.get_memory(); - int32_t output_size = (int32_t)(itr->second.get_memory()->size() / sizeof(T)); - cldnn::tensor ref_output_tensor; - VVVVF ref_primitive_output; - - int32_t ref_batch_size = batch_size; - int32_t ref_hidden_size = hidden_size; - int32_t ref_directions = directions; - - int32_t ref_seq_len = 1; - - // Set the reference output against which the primitive's output will be compared - if (primitive_name.find("crop:last_cell") != std::string::npos) - { - ref_primitive_output = last_cell[chain_id][layer_id]; - } - else if (emit_last_hidden || primitive_name.find("crop:last_hidden") != std::string::npos) - { - ref_primitive_output = last_hidden[chain_id][layer_id]; - } - else - { - ref_seq_len = sequence_len; - ref_primitive_output = ref_output[chain_id][layers - 1]; - } - - ref_output_tensor = { ref_batch_size, ref_seq_len, ref_hidden_size, ref_directions }; - int32_t ref_output_size = ref_batch_size * ref_seq_len * ref_hidden_size * ref_directions; - - // The number of elements in the reference should match the number of elements in the primitive's output - ASSERT_EQ(ref_output_size, output_size); - - // Compare the output tensor configuration against the reference value - // Output tensor is configured in bfyx format - ASSERT_EQ(ref_batch_size, output_layout.batch()); - ASSERT_EQ(ref_seq_len, output_layout.feature()); // Sequence length should match -
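The nested find(":") calls above pull the chain and layer ids out of primitive names of the form crop:&lt;kind&gt;:&lt;chain&gt;:&lt;layer&gt;, where both numeric fields are zero-padded. Below is a more readable restatement of the same parsing, assuming well-formed names (a sketch, not the test's actual helper); the layout checks continue right after it.

```cpp
#include <cassert>
#include <cstddef>
#include <string>

// Hypothetical restatement of the id parsing above: peel off the last two
// colon-separated fields, e.g. "crop:last_cell:001:000" -> chain 1, layer 0.
static void parse_chain_layer(const std::string& name,
                              std::size_t& chain_id, std::size_t& layer_id) {
    const std::size_t last = name.rfind(':');           // before <layer>
    const std::size_t prev = name.rfind(':', last - 1); // before <chain>
    chain_id = std::stoul(name.substr(prev + 1, last - prev - 1));
    layer_id = std::stoul(name.substr(last + 1));
}

int main() {
    std::size_t chain = 0, layer = 0;
    parse_chain_layer("crop:last_cell:001:000", chain, layer);
    assert(chain == 1 && layer == 0);
    return 0;
}
```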
ASSERT_EQ(ref_directions, output_layout.spatial(1)); // directions should match - ASSERT_EQ(ref_hidden_size, output_layout.spatial(0)); // hidden size should match - - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - - int32_t i = 0; - for (int32_t b = 0; b < ref_batch_size; ++b) { - for (int32_t s = 0; s < ref_seq_len; ++s) { - for (int32_t d = 0; d < ref_directions; ++d) { - for (int32_t x = 0; x < ref_hidden_size; ++x) { - ASSERT_NEAR(ref_primitive_output[b][s][d][x], output_ptr[i++], FERROR); - } - } - } - } - } -} -} // namespace - -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); -} - -// LSTM GEMM tests to test LSTM GEMMV kernel implementation -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_test_f32) { - generic_lstm_gemm_gpu_test(5, 1, 1, 1024, 1024, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 256, 2, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, true, false); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_bias_f32) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, false, false); -} - -// LSTM ELT Tests -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f32) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_biasf32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, false); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_cell_f32) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, false); -} - -// generic_lstm_gpu_test parameters: -// layers, sequence, dir, batch, input, hidden, bias, initial_h, initial_cell, threshold, coupled_input_forget -TEST(lstm_gpu, generic_lstm_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true,
true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f32) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f32) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f32) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); -} - -// bidirectional support -TEST(lstm_gpu, generic_lstm_bi_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, false, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f32) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); -} - -// multi-layer support -TEST(lstm_gpu, generic_lstm_stacked_no_seq_f32) { - generic_lstm_gpu_test(4, 1, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f32) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f32) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_bi_f32) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -// optional outputs support -TEST(lstm_gpu, output_test_sequence_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence, 1); -} - -TEST(lstm_gpu, output_test_hidden_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden, 1); -} - -TEST(lstm_gpu, output_test_hidden_cell_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 1); -} - -TEST(lstm_gpu, output_test_sequence_cell_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 1); -} - -TEST(lstm_gpu, output_test_sequence_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence, 2); -} - -TEST(lstm_gpu, output_test_hidden_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden, 2); -} - -TEST(lstm_gpu, output_test_hidden_cell_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 2); -} - -TEST(lstm_gpu, 
output_test_sequence_cell_bi_f32) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 2); -} - -// format tests -TEST(lstm_gpu, lstm_gpu_format_bfyx_f32) { - lstm_gpu_format_test(cldnn::format::bfyx, 1); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_bi_f32) { - lstm_gpu_format_test(cldnn::format::bfyx, 2); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_f32) { - lstm_gpu_format_test(cldnn::format::fyxb, 1); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_bi_f32) { - lstm_gpu_format_test(cldnn::format::fyxb, 2); -} - -// test for LSTM users' dependencies -TEST(lstm_gpu, lstm_users_f32) { - lstm_gpu_users_test(); -} - -// Test for LSTM with concatenated input -TEST(lstm_gpu, generic_lstm_concatenated_input) { - lstm_gpu_concatenated_input_test(1, 2, 2, 1, 1, 1, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_multi_layer) { - lstm_gpu_concatenated_input_test(5, 5, 2, 1, 1, 4, true, true, true, 0, false); -} - -// test for LSTM with chain and stack (multilayer) -TEST(lstm_gpu, generic_lstm_chained_unidirectional_f32) { - // batch size = 1 - // input size = 2 - // hidden size = 4 - // directions = 1 - // layers = 1 - // chains = 1 - // sequence length = 1 - // output selection = output sequence and cell - lstm_gpu_chain_test(1, 2, 4, 1, 1, 2, 1, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_bidirectional_f32) { - // batch size = 1 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 1 - // chains = 1 - // sequence length = 1 - // output selection = output sequence and cell - lstm_gpu_chain_test(1, 2, 4, 2, 1, 1, 1, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_no_stack_bidirectional_f32) { - // batch size = 2 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 1 - // chains = 2 - // sequence length = 5 - // output selection = output sequence and cell - lstm_gpu_chain_test(2, 2, 4, 2, 1, 2, 5, lstm_output_selection::sequence_cell); -} - -TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32) { - // batch size = 2 - // input size = 2 - // hidden size = 4 - // directions = 2 - // layers = 4 - // chains = 2 - // sequence length = 5 - // output selection = output sequence and cell - lstm_gpu_chain_test(2, 2, 4, 2, 4, 2, 5, lstm_output_selection::sequence_cell); -} - -// FP16 Half precision tests -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); -} - -TEST(lstm_gpu, generic_lstm_f16) { - generic_lstm_gpu_test(1, 7, 
1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f16) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); -} - -// bidirectional support -TEST(lstm_gpu, generic_lstm_bi_bias_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); -} - -// multi-layer support -TEST(lstm_gpu, generic_lstm_stacked_seq_f16) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f16) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); -} - -// TODO: Add tests for the following: -// integration testing using multi-layer and chained LSTMs -// LSTMs single input -// optional activation list - -#ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_test_f32_cached) { - generic_lstm_gemm_gpu_test(5, 1, 1, 1024, 1024, true, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_bias_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 1, 256, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_f32_cached) { - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, gemv_bfyx_1x64_lstm_gemm_no_hidden_bias_f32_cached) 
{ - generic_lstm_gemm_gpu_test(1, 1, 1, 64, 2, false, false, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f32_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_biasf32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, true, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, true, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_hidden_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, true, false, false, true); -} - -TEST(lstm_custom_gpu, generic_lstm_custom_no_bias_hidden_cell_f32_cached) { - generic_lstm_custom_gpu_test(3, 1, 3, 3, 2, false, false, false, true); -} - -TEST(lstm_gpu, generic_lstm_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0, true); -} - -TEST(lstm_gpu, generic_lstm_input_forget_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f32_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f32_cached) { - default_offset_type = 
lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f32_cached) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, false, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f32_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_no_seq_f32_cached) { - generic_lstm_gpu_test(4, 1, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f32_cached) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_bi_f32_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_bi_f32_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, output_test_sequence_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence, 1, true); -} - -TEST(lstm_gpu, output_test_hidden_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden, 1, true); -} - -TEST(lstm_gpu, output_test_hidden_cell_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 1, true); -} - -TEST(lstm_gpu, output_test_sequence_cell_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 1, true); -} - -TEST(lstm_gpu, output_test_sequence_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence, 2, true); -} - -TEST(lstm_gpu, output_test_hidden_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden, 2, true); -} - -TEST(lstm_gpu, output_test_hidden_cell_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::hidden_cell, 2, true); -} - -TEST(lstm_gpu, output_test_sequence_cell_bi_f32_cached) { - lstm_gpu_output_test(lstm_output_selection::sequence_cell, 2, true); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_f32_cached) { - lstm_gpu_format_test(cldnn::format::bfyx, 1, true); -} - -TEST(lstm_gpu, lstm_gpu_format_bfyx_bi_f32_cached) { - lstm_gpu_format_test(cldnn::format::bfyx, 2, true); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_f32_cached) { - lstm_gpu_format_test(cldnn::format::fyxb, 1, true); -} - -TEST(lstm_gpu, lstm_gpu_format_fyxb_bi_f32_cached) { - lstm_gpu_format_test(cldnn::format::fyxb, 2, true); -} - -TEST(lstm_gpu, lstm_users_f32_cached) { - lstm_gpu_users_test(true); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_cached) { - lstm_gpu_concatenated_input_test(1, 2, 2, 1, 1, 1, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_concatenated_input_multi_layer_cached) { - lstm_gpu_concatenated_input_test(5, 5, 2, 1, 1, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_chained_unidirectional_f32_cached) { - lstm_gpu_chain_test(1, 2, 4, 1, 1, 2, 1, lstm_output_selection::sequence_cell, true); -} - -TEST(lstm_gpu, generic_lstm_chained_bidirectional_f32_cached) { - lstm_gpu_chain_test(1, 2, 4, 2, 1, 1, 1, lstm_output_selection::sequence_cell, 
true); -} - -TEST(lstm_gpu, generic_lstm_chained_no_stack_bidirectional_f32_cached) { - lstm_gpu_chain_test(2, 2, 4, 2, 1, 2, 5, lstm_output_selection::sequence_cell, true); -} - -TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32_cached) { - lstm_gpu_chain_test(2, 2, 4, 2, 4, 2, 5, lstm_output_selection::sequence_cell, true); -} - -// FP16 Half precision tests -TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); -} - -TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); -} - -TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_test_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); -} - -TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); -} - -TEST(lstm_gpu, generic_lstm_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); -} - -TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); -} - -TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16_cached) { - default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); - default_offset_type = lstm_weights_order::iofz; -} - -TEST(lstm_gpu, generic_lstm_canonical_f16_cached) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); -} - -// bidirectional support 
-TEST(lstm_gpu, generic_lstm_bi_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); -} - -TEST(lstm_gpu, generic_lstm_stacked_seq_f16_cached) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); -} -#endif -TEST(lstm_gpu, generic_lstm_stacked_bi_f16_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp deleted file mode 100644 index 4cec9e2b18aac5..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" - -#include -#include -#include - -using namespace cldnn; -using namespace ::tests; - -template -struct pyramid_roi_align_typed_test : testing::Test { - const data_types data_type = ov::element::from(); - using Type = T; - - void execute(bool is_caching_test) { - auto& engine = get_test_engine(); - - const int rois_num = 3; - const int output_size = 2; - const int sampling_points = 2; - const int starting_level = 2; - const int P2_scale = 1; - const int P3_scale = 2; - const int P4_scale = 4; - const int P5_scale = 8; - const int P2_size = 8; - const int P3_size = P2_size * P2_scale / P3_scale; - const int P4_size = P2_size * P2_scale / P4_scale; - const int P5_size = P2_size * P2_scale / P5_scale; - - std::vector rois_data = { - Type(0.f), Type(0.f), Type(1.f), Type(1.f), - Type(0.f), Type(0.f), Type(0.5f), Type(0.5f), - Type(0.5f), Type(0.5f), Type(0.75f), Type(0.75f) - }; - - std::vector P2_data = { - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(0.f), Type(1.f), Type(2.f), Type(3.f), Type(4.f), Type(5.f), Type(6.f), Type(7.f), - Type(8.f), Type(9.f), Type(10.f), Type(11.f), Type(12.f), Type(13.f), Type(14.f), Type(15.f), - }; - - std::vector P3_data = { - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - Type(9.f), Type(13.f), Type(17.f), Type(21.f), - }; - - std::vector P4_data = { - Type(11.f), Type(19.f), - Type(11.f), Type(19.f), - }; - - std::vector P5_data = { - Type(15.f) - }; - - auto rois_lay = layout(this->data_type, format::bfyx, tensor(batch(rois_num), feature(4))); - auto P2_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P2_size, P2_size)); - auto P3_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P3_size, P3_size)); - auto P4_lay = layout(this->data_type, format::bfyx, tensor(1, 1, P4_size, P4_size)); - auto P5_lay = 
layout(this->data_type, format::bfyx, tensor(1, 1, P5_size, P5_size)); - - auto rois_mem = engine.allocate_memory(rois_lay); - auto P2_mem = engine.allocate_memory(P2_lay); - auto P3_mem = engine.allocate_memory(P3_lay); - auto P4_mem = engine.allocate_memory(P4_lay); - auto P5_mem = engine.allocate_memory(P5_lay); - - tests::set_values(rois_mem, rois_data); - tests::set_values(P2_mem, P2_data); - tests::set_values(P3_mem, P3_data); - tests::set_values(P4_mem, P4_data); - tests::set_values(P5_mem, P5_data); - - topology topo; - topo.add(data("P2", P2_mem)); - topo.add(data("P3", P3_mem)); - topo.add(data("P4", P4_mem)); - topo.add(data("P5", P5_mem)); - topo.add(input_layout("rois", rois_lay)); - topo.add(pyramid_roi_align("pyramid", - input_info("rois"), - input_info("P2"), - input_info("P3"), - input_info("P4"), - input_info("P5"), - output_size, - sampling_points, - { P2_scale, P3_scale, P4_scale, P5_scale }, - starting_level)); - - cldnn::network::ptr net = get_network(engine, topo, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - - net->set_input_data("rois", rois_mem); - - std::vector expected_out = { - // RoI 0,0 - 1,1 from P4 - 14.f, 18.f, 14.f, 18.f, - // RoI 0,0 - 0.5,0.5 from P3 - 11.25f, 14.25f, 11.25f, 14.25f, - // RoI 0.5,0.5 - 0.75,0.75 from P2 - 12.15625f, 13.03125f, 7.40625f, 8.28125f, - }; - - auto result = net->execute(); - - auto out_mem = result.at("pyramid").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); - - ASSERT_EQ(expected_out.size(), out_ptr.size()); - for (size_t i = 0; i < expected_out.size(); ++i) { - ASSERT_EQ(expected_out[i], static_cast(out_ptr[i])) << "at i = " << i; - } - } -}; -using pyramid_roi_align_types = testing::Types; - -TYPED_TEST_SUITE(pyramid_roi_align_typed_test, pyramid_roi_align_types); - -TYPED_TEST(pyramid_roi_align_typed_test, smoke_4levels) { - this->execute(false); -} - -TYPED_TEST(pyramid_roi_align_typed_test, smoke_4levels_cached) { - this->execute(true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp index 194fdb524bf313..d645f46fe080d0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/removing_output_node_test.cpp @@ -89,7 +89,7 @@ void test_multiple_outputs(bool is_caching_test) { ASSERT_EQ(output_ptr2[i], out_vec[i]); } -TEST(removing_output_node, multiple_outputs) { +TEST(removing_output_node, DISABLED_multiple_outputs) { // Issue 129991 test_multiple_outputs(false); } @@ -164,7 +164,7 @@ TEST(removing_output_node, output_node_optimization) { } #ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(removing_output_node, multiple_outputs_cached) { +TEST(removing_output_node, DISABLED_multiple_outputs_cached) { // Issue 129991 test_multiple_outputs(true); } #endif diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp deleted file mode 100644 index 6aea709c0fe496..00000000000000 --- a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils.h" -#include "random_generator.hpp" - -#include -#include -#include -#include - -#include -#include - -using namespace cldnn; -using namespace ::tests; - -template -void check_feature_map(T* output_ptr, std::vector 
&input_vec, size_t batch_num, size_t feature_num, size_t y_size, size_t x_size, size_t feature_id, size_t factor) -{ - for (size_t b = 0; b < batch_num; ++b) { //B - for (size_t y = 0; y < y_size; ++y) { //Y - for (size_t x = 0; x < x_size; ++x) { //X - auto linear_id = x + x_size * (y + y_size * (feature_id + feature_num * b)); - auto output_linear_id = x + x_size * (y + y_size * b); - ASSERT_EQ(output_ptr[output_linear_id], input_vec[linear_id] * factor); - } - } - } -} - -template -void split_test(int batch_num, int feature_num, int x_size, int y_size, std::vector split_offsets, - bool is_caching_test) -{ - auto& engine = get_test_engine(); - cldnn::tensor reference_input_size = { batch_num, feature_num, x_size, y_size }; - - cldnn::memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, reference_input_size }); - std::vector > input_ids_offsets; - - topology topology; - topology.add(input_layout("input", input->get_layout())); - - // lambda expression to create the primitive id for the splits - auto create_split_id = [](size_t splitNum) { - std::stringstream ss; - ss << std::setw(5) << std::setfill('0') << splitNum; - - return ss.str(); - }; - - // Create the splits with the split ids for the topology - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) - { - input_ids_offsets.push_back({ create_split_id(splitNum), split_offsets[splitNum]}); - } - - topology.add(split("split", input_info("input"), input_ids_offsets)); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); - network->set_input_data("input", input); - - auto outputs = network->execute(); - - // The number of network outputs should match the expected number of splits - ASSERT_EQ(outputs.size(), size_t(split_offsets.size())); - - std::vector expected_sizes; - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) // Calculate the expected sizes - { - cldnn::tensor size; - - if (splitNum < (split_offsets.size() - 1)) - { - size = split_offsets[splitNum + 1] - split_offsets[splitNum]; - } - else - { - size = reference_input_size - split_offsets[splitNum]; - } - - // For all the other dimensions, copy from the split_input - for (int dimension = 0; dimension < cldnn::tensor_dim_max; dimension++) - { - size.raw[dimension] - = (size.raw[dimension] == 0) ?
reference_input_size.raw[dimension] : size.raw[dimension]; - } - - expected_sizes.push_back(size); - } - - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (size_t splitNum = 0; splitNum < split_offsets.size(); splitNum++) - { - primitive_id split_id = "split:" + create_split_id(splitNum); - cldnn::memory::ptr output = outputs.at(split_id).get_memory(); - auto prim = output->get_layout(); - ASSERT_EQ(prim.get_tensor(), expected_sizes[splitNum]); - cldnn::mem_lock output_ptr(output, get_test_stream()); - - // Output tensor size - auto output_batch = prim.batch(); - auto output_feature = prim.feature(); - auto output_x = prim.spatial(0); - auto output_y = prim.spatial(1); - - // Input offsets, starting from which we will compare the output - auto input_batch_offset = split_offsets[splitNum].batch[0]; - auto input_feature_offset = split_offsets[splitNum].feature[0]; - auto input_y_offset = split_offsets[splitNum].spatial[1]; - auto input_x_offset = split_offsets[splitNum].spatial[0]; - - // iterator to iterate through input buffer - auto input_batch_itr = input_batch_offset; - auto input_feature_itr = input_feature_offset; - auto input_y_itr = input_y_offset; - auto input_x_itr = input_x_offset; - - for (auto b = 0; b < output_batch; ++b) { // B - - // reset the input feature iterator - input_feature_itr = input_feature_offset; - for (auto f = 0; f < output_feature; f++) { // F - - // reset the input y iterator - input_y_itr = input_y_offset; - for (auto y = 0; y < output_y; y++) { // Y - - // reset the input x iterator - input_x_itr = input_x_offset; - for (auto x = 0; x < output_x; x++) { // X - auto linear_id = input_x_itr + x_size * (input_y_itr + y_size * (input_feature_itr + feature_num * input_batch_itr)); // index in input - auto output_linear_id = x + output_x * (y + output_y * (f + output_feature * b)); // index in output - ASSERT_EQ(output_ptr[output_linear_id], input_vec[linear_id]); - input_x_itr++; // update the input x iterator - } - input_y_itr++; // update the input y iterator - } - input_feature_itr++; // update the input feature iterator - } - input_batch_itr++; // update the input batch iterator - } - } -} - -TEST(split_gpu_f32, split_1d_uneven_2_splits) { - - // Input : 2x4x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_1d_uneven_2_splits) { - - // Input : 2x4x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, basic_split_concat_optimization) { - - auto& engine = get_test_engine(); - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ 1, 25, 1, 256 } }); - tests::set_random_values(input); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - std::vector> offsets; - std::vector inputs; - for (int i = 0; i < 25; i++) - { - auto id = "crop_" + std::to_string(i); - inputs.push_back(input_info("split:" + 
id)); - offsets.push_back({ id, {0, i, 0, 0} }); - } - - topology.add(split("split", input_info("input"), offsets)); - topology.add(concatenation("concat", inputs, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::f32)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - auto output = outputs.at("output").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (int i = 0; i < 25*256; ++i) - { - ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - -TEST(split_gpu_i64, basic_split_concat_optimization) { - - auto& engine = get_test_engine(); - - auto input = engine.allocate_memory({ data_types::i64,format::bfyx,{ 1, 25, 1, 256 } }); - tests::set_random_values(input); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - std::vector> offsets; - std::vector inputs; - for (int i = 0; i < 25; i++) - { - auto id = "crop_" + std::to_string(i); - inputs.push_back(input_info("split:" + id)); - offsets.push_back({ id, {0, i, 0, 0} }); - } - - topology.add(split("split", input_info("input"), offsets)); - topology.add(concatenation("concat", inputs, 1)); - topology.add(reorder("output", input_info("concat"), format::bfyx, data_types::i64)); - - ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::optimize_data(true)); - network network(engine, topology, config); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - auto output = outputs.at("output").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); - - for (int i = 0; i < 25*256; ++i) - { - ASSERT_EQ(output_ptr[i], input_ptr[i]); - } -} - -TEST(split_gpu_f32, split_1d_uneven_3_splits) { - - // Input : 2x8x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 4, 0, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_1d_uneven_3_splits) { - - // Input : 2x8x3x3 - // Output1 : 2x1x3x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 4, 0, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_2d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x6x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, 
split_2d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x6x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_2d_uneven_3_split3) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - // id: "out2", offsets: { 0, 4, 7, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_2d_uneven_3_split3) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x3 - // Output2 : 2x3x3x3 - // Output3 : 2x4x3x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 0 } - // id: "out2", offsets: { 0, 4, 7, 0 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_3d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x7x6x2 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_3d_uneven_2_splits) { - - // Input : 2x8x10x3 - // Output1 : 2x1x4x1 - // Output2 : 2x7x6x2 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, split_3d_uneven_3_splits) { - - // Input : 2x8x10x5 - // Output1 : 2x1x4x1 - // Output2 : 2x6x4x1 - // Output3 : 2x1x2x1 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - // id: "out2", offsets: { 0, 7, 8, 2 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_i64, split_3d_uneven_3_splits) { - - // Input : 2x8x10x5 - // Output1 : 2x1x4x1 - // Output2 : 2x6x4x1 - // Output3 : 2x1x2x1 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 4, 1 } - // id: "out2", offsets: { 0, 7, 8, 2 } - - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, false); -} - -TEST(split_gpu_f32, 
basic_in2x3x2x2_split_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0", { 0, 0, 0, 0 } }, - { "out1", { 0, 1, 0, 0 } }, - { "out2", { 0, 2, 0, 0 } } - } )); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "split:out" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, 1); - } -} - -TEST(split_gpu_i64, basic_in2x3x2x2_split_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::i64,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0", { 0, 0, 0, 0 } }, - { "out1", { 0, 1, 0, 0 } }, - { "out2", { 0, 2, 0, 0 } } - } )); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "split:out" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, 1); - } -} - -TEST(split_gpu_f32, basic_in2x3x2x2_split_scale_feature_bfyx) { - // Input : 6x3x4x3 - // 3 x Outputs: 6x1x4x3 - // Split params: - // id: "out0", offsets: { 0, 0, 0, 0 } - // id: "out1", offsets: { 0, 1, 0, 0 } - // id: "out2", offsets: { 0, 2, 0, 0 } - // Additional scale layer at the end - - auto& engine = get_test_engine(); - - auto batch_num = 6; - auto feature_num = 3; - auto x_size = 4; - auto y_size = 3; - - auto input = engine.allocate_memory({ data_types::f32,format::bfyx,{ batch_num, feature_num, x_size, y_size } }); - auto scale_input0 = engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - auto scale_input1 = engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - auto scale_input2 = 
engine.allocate_memory({ data_types::f32, format::bfyx,{ 1, 1, 1, 1 } }); - - topology topology; - topology.add(input_layout("input", input->get_layout())); - topology.add(input_layout("scale_input0", scale_input0->get_layout())); - topology.add(input_layout("scale_input1", scale_input1->get_layout())); - topology.add(input_layout("scale_input2", scale_input2->get_layout())); - topology.add(split("split", input_info("input"), - { - { "out0",{ 0, 0, 0, 0 } }, - { "out1",{ 0, 1, 0, 0 } }, - { "out2",{ 0, 2, 0, 0 } } - })); - topology.add(eltwise("scale0", { input_info("split:out0"), input_info("scale_input0") }, eltwise_mode::prod)); - topology.add(eltwise("scale1", { input_info("split:out1"), input_info("scale_input1") }, eltwise_mode::prod)); - topology.add(eltwise("scale2", { input_info("split:out2"), input_info("scale_input2") }, eltwise_mode::prod)); - - std::vector scale_input_vec0 = { 1.f }; - set_values(scale_input0, scale_input_vec0); - std::vector scale_input_vec1 = { 2.f }; - set_values(scale_input1, scale_input_vec1); - std::vector scale_input_vec2 = { 3.f }; - set_values(scale_input2, scale_input_vec2); - - tests::random_generator rg(GET_SUITE_NAME); - std::vector input_vec = rg.generate_random_1d(batch_num * feature_num * y_size * x_size, -10, 10); - set_values(input, input_vec); - - network network(engine, topology, get_test_default_config(engine)); - - network.set_input_data("input", input); - network.set_input_data("scale_input0", scale_input0); - network.set_input_data("scale_input1", scale_input1); - network.set_input_data("scale_input2", scale_input2); - - auto outputs = network.execute(); - - ASSERT_EQ(outputs.size(), size_t(3)); - - for (unsigned int i = 0; i < 3; i++) - { - auto split_id = "scale" + std::to_string(i); - auto output = outputs.at(split_id).get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); - check_feature_map(output_ptr.data(), input_vec, batch_num, feature_num, y_size, x_size, i, i + 1); - } -} - -#ifdef RUN_ALL_MODEL_CACHING_TESTS -TEST(split_gpu_f32, split_1d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_1d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 4; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_1d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_1d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 3; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 0, 0}, - {0, 4, 0, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_2d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, 
split_2d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_2d_uneven_3_split3_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_2d_uneven_3_split3_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 0}, - {0, 4, 7, 0}, - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_3d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_i64, split_3d_uneven_2_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} - -TEST(split_gpu_f32, split_3d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} -#endif -TEST(split_gpu_i64, split_3d_uneven_3_splits_cached) { - auto batch_num = 2; - auto feature_num = 8; - auto x_size = 10; - auto y_size = 3; - std::vector split_offsets = { - {0, 0, 0, 0}, - {0, 1, 4, 1}, - {0, 7, 8, 2} - }; - - split_test(batch_num, feature_num, x_size, y_size, split_offsets, true); -} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp index 827a649159d6ae..809f81d263d686 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/strided_slice_gpu_test.cpp @@ -2779,7 +2779,7 @@ TEST_F(strided_slice_cpu_impl_constants, test_2x2x4x3_stride) { this->test_2x2x4x3_stride(false, impl_types::cpu); } -TEST_F(strided_slice_cpu_impl_constants, test_2x2x4x1_new_axis_mask) { +TEST_F(strided_slice_cpu_impl_constants, DISABLED_test_2x2x4x1_new_axis_mask) { // Issue 129991 this->test_2x2x4x1_new_axis_mask(false, impl_types::cpu); } diff --git a/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp new file mode 100644 index 00000000000000..3973b7701108f5 --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/transformations/clamp_fp16_output_test.cpp @@ -0,0 +1,110 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include + +#include +#include +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/type/element_type.hpp" +#include +#include "openvino/op/clamp.hpp" +#include "openvino/op/reshape.hpp" +#include +#include +#include + +#include "common_test_utils/ov_test_utils.hpp" + 
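+// ClampFP16Output inserts a Clamp between a fp16 MatMul and the Softmax that
+// consumes it: fp16 overflows to +/-Inf above 65504, and a single Inf reaching
+// Softmax makes the whole row NaN during normalization, so the MatMul output is
+// clamped to the finite fp16 range [lowest(), max()] = [-65504, 65504] first.
+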
+using namespace testing;
+using namespace ov::intel_gpu;
+
+TEST_F(TransformationTestsF, ClampFp16OutputTest1) {
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 3, 2, 2 });
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 1, 2, 2 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(matmul, 1);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 });
+        manager.register_pass<ClampFP16Output>();
+    }
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 3, 2, 2 });
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 1, 2, 2 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto min = static_cast<double>(std::numeric_limits<ov::float16>::lowest());
+        auto max = static_cast<double>(std::numeric_limits<ov::float16>::max());
+        auto clamp = std::make_shared<ov::op::v0::Clamp>(matmul, min, max);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(clamp, 1);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 });
+    }
+    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
+}
+
+TEST_F(TransformationTestsF, ClampFp16OutputTest2) {
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 3, 2, 2 });
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 1, 2, 2 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto target_shape = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ 2 }, { 3, 4 });
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(matmul, target_shape, false);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(reshape, 1);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 });
+        manager.register_pass<ClampFP16Output>();
+    }
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 3, 2, 2 });
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 1, 2, 2 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto min = static_cast<double>(std::numeric_limits<ov::float16>::lowest());
+        auto max = static_cast<double>(std::numeric_limits<ov::float16>::max());
+        auto clamp = std::make_shared<ov::op::v0::Clamp>(matmul, min, max);
+        auto target_shape = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ 2 }, { 3, 4 });
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(clamp, target_shape, false);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(reshape, 1);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 });
+    }
+    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
+}
+
+TEST_F(TransformationTestsF, ClampFp16OutputTest3) {
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 3, 2, 2 });
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{ 1, 2, 2 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(matmul, 1);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1, input2 });
+        manager.register_pass<ClampFP16Output>();
+    }
+    {
+        model_ref = model->clone(); // not changed due to f32 precision
+    }
+    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
+}
+
+
+TEST_F(TransformationTestsF, ClampFp16OutputTest4) {
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{ 3, 2, 2 });
+        auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 1, 2, 2 }, { 1 });
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input1, input2, true, false);
+        auto softmax = std::make_shared<ov::op::v8::Softmax>(matmul, 1);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{ softmax }, ov::ParameterVector{ input1 });
+        manager.register_pass<ClampFP16Output>();
+    }
+    {
+        model_ref = model->clone(); // Not changed due to 
const input2
+    }
+    comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
+}
diff --git a/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp
new file mode 100644
index 00000000000000..0440918e9f8caf
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/unit/transformations/fc_convert_fusion_test.cpp
@@ -0,0 +1,66 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <string>
+
+#include <openvino/core/model.hpp>
+#include <openvino/pass/manager.hpp>
+#include "common_test_utils/ov_test_utils.hpp"
+#include <transformations/utils/utils.hpp>
+
+#include <plugin/transformations/fc_convert_fusion.hpp>
+
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/parameter.hpp"
+#include "intel_gpu/op/fully_connected.hpp"
+#include "intel_gpu/op/fully_connected_compressed.hpp"
+
+using namespace testing;
+using namespace ov::intel_gpu;
+
+TEST_F(TransformationTestsF, FullyConnectedConvertFusionTest1) {
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, 16 });
+        auto weights_const = ov::op::v0::Constant::create(ov::element::u8, ov::Shape{ 32, 16 }, { 1 });
+        auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 });
+        auto zp_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 });
+        auto fc_compressed = std::make_shared<op::FullyConnectedCompressed>(input, weights_const, scale_const, zp_const);
+        auto convert = std::make_shared<ov::op::v0::Convert>(fc_compressed, ov::element::f32);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{convert}, ov::ParameterVector{input});
+        manager.register_pass<FullyConnectedConvertFusion>();
+    }
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, 16 });
+        auto weights_const = ov::op::v0::Constant::create(ov::element::u8, ov::Shape{ 32, 16 }, { 1 });
+        auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 });
+        auto zp_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{ 32, 1 }, { 1 });
+        auto fc_compressed = std::make_shared<op::FullyConnectedCompressed>(input, weights_const, scale_const, zp_const, ov::element::f32);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{ fc_compressed }, ov::ParameterVector{ input });
+    }
+}
+
+TEST_F(TransformationTestsF, FullyConnectedConvertFusionTest2) {
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{3, 2, 2});
+        auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{2, 2}, {1});
+        auto matmul = std::make_shared<op::FullyConnected>(input1, input2);
+        auto convert = std::make_shared<ov::op::v0::Convert>(matmul, ov::element::f32);
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{convert}, ov::ParameterVector{input1});
+        manager.register_pass<FullyConnectedConvertFusion>();
+    }
+    {
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{3, 2, 2});
+        auto input2 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{2, 2}, {1});
+        auto matmul = std::make_shared<op::FullyConnected>(input1, input2, ov::element::f32);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{ matmul }, ov::ParameterVector{ input1 });
+    }
+}
diff --git a/src/plugins/proxy/src/remote_tensor.cpp b/src/plugins/proxy/src/remote_tensor.cpp
index 49dc25fcebd92d..b8fe5237ebe107 100644
--- a/src/plugins/proxy/src/remote_tensor.cpp
+++ b/src/plugins/proxy/src/remote_tensor.cpp
@@ -10,7 +10,6 @@
 #include "openvino/runtime/itensor.hpp"
 #include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/so_ptr.hpp"
-#include "remote_utils.hpp"
 
 namespace {
 std::shared_ptr<ov::proxy::RemoteTensor> cast_tensor(const ov::SoPtr<ov::ITensor>& tensor) {
@@ -68,15 +67,6 @@ ov::SoPtr<ov::ITensor> ov::proxy::RemoteTensor::get_hardware_tensor(const ov::SoPtr<ov::ITensor>& tensor, bool unwrap) {
     if (auto remote_tensor 
= std::dynamic_pointer_cast(tensor._ptr)) hw_tensor = remote_tensor->m_tensor; - if (unwrap) { - if (auto wrapper = std::dynamic_pointer_cast(hw_tensor._ptr)) { - auto blob = ov::get_hardware_blob(wrapper->blob.get()); - if (auto tensor_holder = dynamic_cast(blob)) { - hw_tensor = tensor_holder->get_tensor(); - } - } - } - return hw_tensor; } diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp index d96ea739d58c03..a72549a81eaa15 100644 --- a/src/plugins/template/src/plugin.cpp +++ b/src/plugins/template/src/plugin.cpp @@ -128,7 +128,16 @@ std::shared_ptr ov::template_plugin::Plugin::import_model( const ov::AnyMap& properties) const { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::import_model"); - auto fullConfig = Configuration{properties, m_cfg}; + // check ov::loaded_from_cache property and erase it due to not needed any more. + auto _properties = properties; + const auto& it = _properties.find(ov::loaded_from_cache.name()); + bool loaded_from_cache = false; + if (it != _properties.end()) { + loaded_from_cache = it->second.as(); + _properties.erase(it); + } + + auto fullConfig = Configuration{_properties, m_cfg}; // read XML content std::string xmlString; std::uint64_t dataSize = 0; @@ -154,7 +163,7 @@ std::shared_ptr ov::template_plugin::Plugin::import_model( context, get_executor_manager()->get_idle_cpu_streams_executor(streamsExecutorConfig), fullConfig, - true); + loaded_from_cache); return compiled_model; } // ! [plugin:import_model_with_remote] diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp index d5f5a3a4f19b96..ed8621d0351a3e 100644 --- a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp +++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp @@ -105,6 +105,22 @@ void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, threshold, abs_threshold); break; + case ov::element::f8e4m3: + LayerTestsUtils::LayerTestsCommon::Compare( + refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); + break; + case ov::element::f8e5m2: + LayerTestsUtils::LayerTestsCommon::Compare( + refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); + break; case ov::element::f32: LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), outBlob.data(), diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp index 2add89bdbdaf78..4c2c6711d0ae63 100644 --- a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp +++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp @@ -6,6 +6,7 @@ #include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" #include "openvino/runtime/allocator.hpp" #include "openvino/runtime/core.hpp" #include "openvino/runtime/tensor.hpp" diff --git a/src/plugins/template/tests/functional/op_reference/constant.cpp b/src/plugins/template/tests/functional/op_reference/constant.cpp index ababb046e9c2b6..8178544a7d940a 100644 --- a/src/plugins/template/tests/functional/op_reference/constant.cpp +++ b/src/plugins/template/tests/functional/op_reference/constant.cpp @@ -201,6 +201,50 @@ std::vector generateConstantDefinedTypeParams() { std::vector{0x4000000000000001, 
0x4000000000000002}, std::vector{0x4000000000000001, 0x4000000000000002}, "tensor_constant_int64"), + ConstantParams( + {3, 9}, + element::Type_t::f8e4m3, + element::Type_t::f8e4m3, + std::vector{4.75f, 4.5f, -5.25f, 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, + 0.6f, 0.7f, 0.8f, 0.9f, 1.f, -0.0f, -0.1f, -0.2f, -0.3f, + -0.4f, -0.5f, -0.6f, -0.7f, -0.8f, -0.9f, -1.f, 0.001953125f, 448.f}, + std::vector{5.0f, 4.5f, -5.0f, 0.0f, 0.1015625f, 0.203125f, 0.3125f, + 0.40625f, 0.5f, 0.625f, 0.6875f, 0.8125f, 0.875f, 1.f, + -0.f, -0.1015625f, -0.203125f, -0.3125f, -0.40625f, -0.5f, -0.625f, + -0.6875f, -0.8125f, -0.875f, -1.f, 0.001953125f, 448.f}, + "tensor_constant_f8e4m3"), + ConstantParams({3, 9}, + element::Type_t::f8e5m2, + element::Type_t::f8e5m2, + std::vector{4.75f, 4.5f, + -5.25f, 0.0f, + 0.1f, 0.2f, + 0.3f, 0.4f, + 0.5f, 0.6f, + 0.7f, 0.8f, + 0.9f, 1.f, + -0.0f, -0.1f, + -0.2f, -0.3f, + -0.4f, -0.5f, + -0.6f, -0.7f, + -0.8f, -0.9f, + -1.f, 0.0000152587890625f, + 57344.f}, + std::vector{4.75f, 4.5f, + -5.25f, 0.0f, + 0.09375f, 0.1875f, + 0.3125f, 0.375f, + 0.5f, 0.625f, + 0.75f, 0.75f, + 0.875f, 1.f, + -0.f, -0.09375f, + -0.1875f, -0.3125f, + -0.375f, -0.5f, + -0.625f, -0.75f, + -0.75f, -0.875f, + -1.f, 0.0000152587890625f, + 57344.f}, + "tensor_constant_f8e5m2"), }; return constantParams; } diff --git a/src/plugins/template/tests/functional/op_reference/convert.cpp b/src/plugins/template/tests/functional/op_reference/convert.cpp index b6195744c9c6f3..461daa56d80b14 100644 --- a/src/plugins/template/tests/functional/op_reference/convert.cpp +++ b/src/plugins/template/tests/functional/op_reference/convert.cpp @@ -57,6 +57,163 @@ INSTANTIATE_TEST_SUITE_P( std::numeric_limits::infinity(), -std::numeric_limits::infinity()}, std::vector{0, 1, 1, 0, 1, 1, 1, 1, 1}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3, 7}, + ov::element::f32, + ov::element::f8e5m2, + std::vector{ + 0.017578125f, 0.021484375f, 0.025390625f, 0.029296875f, 0.03515625f, 0.0703125f, 0.140625f, + 0.28125f, 0.5625f, 1.125f, 1.625f, 1.875f, 2.25f, 3.75f, + 4.5f, 9.f, 18.f, 36.f, 72.f, 144.f, 288.f}, + std::vector{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, + 0.125f, 0.25f, 0.5f, 1.f, 1.5, 2.f, + 2.f, 4.f, 4.f, 8.f, 16.f, 32.f, + 64.f, 128.f, 256.f}), + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3, 7}, + ov::element::f8e5m2, + ov::element::f32, + std::vector{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, + 0.125f, 0.25f, 0.5f, 1.f, 1.5, 2.f, + 2.f, 4.f, 4.f, 8.f, 16.f, 32.f, + 64.f, 128.f, 256.f}, + std::vector{0.015625f, 0.0234375f, 0.0234375f, 0.03125f, 0.03125f, 0.0625f, 0.125f, + 0.25f, 0.5f, 1.f, 1.5, 2.f, 2.f, 4.f, + 4.f, 8.f, 16.f, 32.f, 64.f, 128.f, 256.f}), + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 7}, + ov::element::f16, + ov::element::f8e5m2, + std::vector{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}, + std::vector{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}), + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 7}, + ov::element::f8e5m2, + ov::element::f16, + std::vector{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}, + std::vector{0.f, -0.f, 0.5f, 1.5f, 2.5f, 1.5f, 3.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 3, 5}, + ov::element::f16, + ov::element::f8e4m3, + std::vector{0.0f, + 0.1f, + 0.2f, + 0.3f, + 0.4f, + 0.5f, + 0.6f, + 0.7f, + 0.8f, + 0.9f, + 1.f, + 1.5f, + 2.5f, + 1.5f, + 3.5f}, + std::vector{0.f, + 0.1015625f, + 0.203125f, + 0.3125f, + 0.40625f, + 0.5f, + 0.625f, + 0.6875f, 
+ 0.8125f, + 0.875f, + 1.f, + 1.5f, + 2.5f, + 1.5f, + 3.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3}, + + ov::element::f8e4m3, + ov::element::f16, + std::vector{0.5f, 1.5f, 0.f}, + std::vector{0.5f, 1.5f, 0.f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3}, + + ov::element::f8e4m3, + ov::element::f8e4m3, + std::vector{0.5f, 1.5f, 0.f}, + std::vector{0.5f, 1.5f, 0.f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 2}, + + ov::element::f8e5m2, + ov::element::f8e5m2, + std::vector{0.5f, 1.5f}, + std::vector{0.5f, 1.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 2}, + + ov::element::f32, + ov::element::f32, + std::vector{0.5f, 1.5f}, + std::vector{0.5f, 1.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 2}, + + ov::element::f8e4m3, + ov::element::f32, + std::vector{0.5f, 1.5f}, + std::vector{0.5f, 1.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 2}, + + ov::element::f8e4m3, + ov::element::f16, + std::vector{0.5f, 1.5f}, + std::vector{0.5f, 1.5f}), + + ConvertParams(ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3, 2}, + ov::element::f8e4m3, + ov::element::f32, + std::vector{ + 0.5f, + 1.5f, + 0.5f, + 2.5f, + 1.5f, + 3.5f, + }, + std::vector{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 3.5f}), + + ConvertParams( + ConversionTypes::CONVERT, + ov::PartialShape{1, 1, 3, 5}, + ov::element::f32, + ov::element::f8e4m3, + std:: + vector{0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}, + std::vector{0.5f, + 1.5f, + 0.5f, + 2.5f, + 1.5f, + 0.5f, + 3.5f, + 2.5f, + 0.5f, + 0.5f, + 2.5f, + 0.5f, + 0.5f, + 0.5f, + 1.5f}), + // destination bf16 ConvertParams( ConversionTypes::CONVERT, diff --git a/src/plugins/template/tests/functional/op_reference/sign.cpp b/src/plugins/template/tests/functional/op_reference/sign.cpp index ae08360aa38eac..67ba7a81f19e66 100644 --- a/src/plugins/template/tests/functional/op_reference/sign.cpp +++ b/src/plugins/template/tests/functional/op_reference/sign.cpp @@ -59,36 +59,48 @@ TEST_P(ReferenceSignLayerTest, CompareWithHardcodedRefs) { Exec(); } -INSTANTIATE_TEST_SUITE_P(smoke_Sign_With_Hardcoded_Refs, - ReferenceSignLayerTest, - ::testing::Values(SignParams(PartialShape{6}, - element::f32, - element::f32, - std::vector{1, -2, 0, -4.8f, 4.8f, -0.0f}, - std::vector{1, -1, 0, -1, 1, 0}), - SignParams(PartialShape{6}, - element::f16, - element::f16, - std::vector{1, -2, 0, -4.8f, 4.8f, -0.0f}, - std::vector{1, -1, 0, -1, 1, 0}), - SignParams(PartialShape{6}, - element::u64, - element::u64, - std::vector{1, 2, 0, 4, 4, 0}, - std::vector{1, 1, 0, 1, 1, 0}), - SignParams(PartialShape{6}, - element::u32, - element::u32, - std::vector{1, 2, 0, 4, 4, 0}, - std::vector{1, 1, 0, 1, 1, 0}), - SignParams(PartialShape{6}, - element::i32, - element::i32, - std::vector{1, -2, 0, -4, 4, -0}, - std::vector{1, -1, 0, -1, 1, 0}), - SignParams(PartialShape{6}, - element::i64, - element::i64, - std::vector{1, -2, 0, -4, 4, -0}, - std::vector{1, -1, 0, -1, 1, 0})), - ReferenceSignLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_Sign_With_Hardcoded_Refs, + ReferenceSignLayerTest, + ::testing::Values( + SignParams(PartialShape{6}, + element::f32, + element::f32, + std::vector{1, -2, 0, -4.8f, 4.8f, -0.0f}, + std::vector{1, -1, 0, -1, 1, 0}), + SignParams(PartialShape{7}, + element::f32, + element::f32, + std::vector{1, -2, 0, std::numeric_limits::quiet_NaN(), -4.8f, 4.8f, -0.0f}, + 
std::vector<float>{1, -1, 0, std::numeric_limits<float>::quiet_NaN(), -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::f16,
+                   element::f16,
+                   std::vector<ov::float16>{1, -2, 0, -4.8f, 4.8f, -0.0f},
+                   std::vector<ov::float16>{1, -1, 0, -1, 1, 0}),
+        SignParams(PartialShape{7},
+                   element::f16,
+                   element::f16,
+                   std::vector<ov::float16>{1, -2, 0, std::numeric_limits<ov::float16>::quiet_NaN(), -4.8f, 4.8f, -0.0f},
+                   std::vector<ov::float16>{1, -1, 0, std::numeric_limits<ov::float16>::quiet_NaN(), -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::u64,
+                   element::u64,
+                   std::vector<uint64_t>{1, 2, 0, 4, 4, 0},
+                   std::vector<uint64_t>{1, 1, 0, 1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::u32,
+                   element::u32,
+                   std::vector<uint32_t>{1, 2, 0, 4, 4, 0},
+                   std::vector<uint32_t>{1, 1, 0, 1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::i32,
+                   element::i32,
+                   std::vector<int32_t>{1, -2, 0, -4, 4, -0},
+                   std::vector<int32_t>{1, -1, 0, -1, 1, 0}),
+        SignParams(PartialShape{6},
+                   element::i64,
+                   element::i64,
+                   std::vector<int64_t>{1, -2, 0, -4, 4, -0},
+                   std::vector<int64_t>{1, -1, 0, -1, 1, 0})),
+    ReferenceSignLayerTest::getTestCaseName);
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp
index 5622be5f16cce5..d279e2154c2bce 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp
@@ -4,6 +4,7 @@
 
 #include "behavior/compiled_model/properties.hpp"
 
+#include "ie_plugin_config.hpp"
 #include "openvino/runtime/properties.hpp"
 
 using namespace ov::test::behavior;
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
index f24cf6449a02a7..2c9e058b1c42f8 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp
@@ -33,4 +33,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_Template,
                          ::testing::Combine(::testing::ValuesIn(TestTemplateTargets),
                                             ::testing::ValuesIn(TemplateConfigs)),
                          CompileModelLoadFromMemoryTestBase::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_Template,
+                         CompileModelLoadFromCacheTest,
+                         ::testing::Combine(::testing::ValuesIn(TestTemplateTargets),
+                                            ::testing::ValuesIn(TemplateConfigs)),
+                         CompileModelLoadFromCacheTest::getTestCaseName);
 }  // namespace
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
index bfc64eb0454962..f6d55269b6cbf2 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
@@ -4,6 +4,7 @@
 
 #include "behavior/ov_plugin/properties_tests.hpp"
 
+#include "ie_plugin_config.hpp"
 #include "openvino/runtime/properties.hpp"
 
 using namespace ov::test::behavior;
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp
index 858ba1f0d7b254..8ae5896dd84498 100644
--- 
a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -65,54 +65,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(ov::test::utils::DEVICE_TEMPLATE)); -// -// IE Class SetConfig -// - -class IEClassSetConfigTestHETERO : public BehaviorTestsUtils::IEClassNetworkTest, - public BehaviorTestsUtils::IEPluginTestBase { - void SetUp() override { - IEClassNetworkTest::SetUp(); - IEPluginTestBase::SetUp(); - } -}; - -TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) { - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = p.as(); - - ASSERT_TRUE(dump); - } - - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = p.as(); - - ASSERT_FALSE(dump); - } - - { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO")); - ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT))); - bool dump = p.as(); - - ASSERT_TRUE(dump); - } -} - // // IE Class GetConfig // diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 0743b5837f2dab..288877da4c7bfa 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -121,6 +121,9 @@ std::vector disabledTestPatterns() { R"(.*eltwiseOpType=Mod_secondaryInputType=PARAMETER_opType=VECTOR_NetType=(f16|f32).*)", // Interpreter backend doesn't implement evaluate method for OP Multiply (by GroupNormalizationDecomposition) R"(.*ReferenceGroupNormalization.*_f64*)", + // Issue: 128924 + R"(.*OVClassModelTestP/OVClassModelTestP.ImportModelWithNullContextThrows.*)", + }; #ifdef _WIN32 diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp deleted file mode 100644 index e691495452d01c..00000000000000 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess_legacy.cpp +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "base_reference_cnn_test.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "shared_test_classes/single_layer/convert_color_i420.hpp" -#include "shared_test_classes/single_layer/convert_color_nv12.hpp" - -using namespace ov; -using namespace ov::preprocess; -using namespace reference_tests; -namespace { - -class ReferencePreprocessLegacyTest : public testing::Test, public ReferenceCNNTest { -public: - void SetUp() 
override { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - } -}; - -} // namespace - -static std::shared_ptr create_simple_function(element::Type type, const PartialShape& shape) { - auto data1 = std::make_shared(type, shape); - data1->set_friendly_name("input1"); - data1->get_output_tensor(0).set_names({"tensor_input1", "input1"}); - auto c = op::v0::Constant::create(type, {1}, {0}); - auto op = std::make_shared(data1, c); - op->set_friendly_name("Add0"); - auto res = std::make_shared(op); - res->set_friendly_name("Result1"); - res->get_output_tensor(0).set_names({"tensor_output1", "Result1", "Add0"}); - return std::make_shared(ResultVector{res}, ParameterVector{data1}); -} - -TEST_F(ReferencePreprocessLegacyTest, mean) { - function = create_simple_function(element::f32, Shape{1, 3, 2, 2}); - auto p = PrePostProcessor(function); - p.input().preprocess().mean(1.f); - p.build(); - - auto f2 = create_simple_function(element::f32, Shape{1, 3, 2, 2}); - legacy_network = InferenceEngine::CNNNetwork(f2); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.init(3); - preProcess[0]->meanValue = 1; - preProcess[1]->meanValue = 1; - preProcess[2]->meanValue = 1; - preProcess[0]->stdScale = 1; - preProcess[1]->stdScale = 1; - preProcess[2]->stdScale = 1; - preProcess.setVariant(InferenceEngine::MEAN_VALUE); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, mean_scale) { - function = create_simple_function(element::f32, Shape{1, 3, 20, 20}); - auto p = PrePostProcessor(function); - p.input().preprocess().scale(2.f); - p.build(); - - auto f2 = create_simple_function(element::f32, Shape{1, 3, 20, 20}); - legacy_network = InferenceEngine::CNNNetwork(f2); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.init(3); - preProcess[0]->meanValue = 0; - preProcess[1]->meanValue = 0; - preProcess[2]->meanValue = 0; - preProcess[0]->stdScale = 2; - preProcess[1]->stdScale = 2; - preProcess[2]->stdScale = 2; - preProcess.setVariant(InferenceEngine::MEAN_VALUE); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, resize) { - function = create_simple_function(element::f32, Shape{1, 3, 5, 5}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, 5, 5}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - p.input().tensor().set_layout("NCHW").set_spatial_static_shape(42, 30); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR); - p.input().model().set_layout("NCHW"); - p.build(); - - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, bgrx_to_bgr) { - const int h = 160; - const int w = 160; - auto rgbx_input = std::vector(h * w * 4, 0); - for (auto i = 0; i < h * w * 4; i++) { - rgbx_input[i] = i % 256; - } - function = create_simple_function(element::f32, Shape{1, 3, h, w}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, h, w}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - auto& input = p.input(); - input.tensor().set_color_format(ColorFormat::BGRX).set_element_type(element::u8); - input.preprocess().convert_color(ColorFormat::BGR); - input.model().set_layout("NCHW"); - function = p.build(); - inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data()); - - InferenceEngine::TensorDesc 
rgbx_plane_desc(InferenceEngine::Precision::U8, - {1, 4, h, w}, - InferenceEngine::Layout::NHWC); - legacy_network.getInputsInfo().begin()->second->setLayout(InferenceEngine::NHWC); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setColorFormat(InferenceEngine::ColorFormat::BGRX); - legacy_input_blobs["input1"] = InferenceEngine::make_shared_blob(rgbx_plane_desc, rgbx_input.data()); - - Exec(); -} - -TEST_F(ReferencePreprocessLegacyTest, rgbx_to_bgr) { - const int h = 160; - const int w = 160; - auto rgbx_input = std::vector(h * w * 4, 0); - for (auto i = 0; i < h * w * 4; i++) { - rgbx_input[i] = i % 256; - } - function = create_simple_function(element::f32, Shape{1, 3, h, w}); - auto f2 = create_simple_function(element::f32, Shape{1, 3, h, w}); - legacy_network = InferenceEngine::CNNNetwork(f2); - - auto p = PrePostProcessor(function); - auto& input = p.input(); - input.tensor().set_color_format(ColorFormat::RGBX).set_element_type(element::u8); - input.preprocess().convert_color(ColorFormat::BGR); - input.model().set_layout("NCHW"); - function = p.build(); - inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data()); - - InferenceEngine::TensorDesc rgbx_plane_desc(InferenceEngine::Precision::U8, - {1, 4, h, w}, - InferenceEngine::Layout::NHWC); - legacy_network.getInputsInfo().begin()->second->setLayout(InferenceEngine::NHWC); - auto& preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess(); - preProcess.setColorFormat(InferenceEngine::ColorFormat::RGBX); - legacy_input_blobs["input1"] = InferenceEngine::make_shared_blob(rgbx_plane_desc, rgbx_input.data()); - - Exec(); -} diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp similarity index 58% rename from src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp rename to src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp index 3fc385eb81701c..2d89f454c82db5 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/compiled_model/import_export.cpp @@ -1,17 +1,15 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "behavior/compiled_model/import_export.hpp" -#include "ov_api_conformance_helpers.hpp" -#include "ie_plugin_config.hpp" -#include +#include "behavior/compiled_model/import_export.hpp" +#include "common_test_utils/test_constants.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; namespace { - const std::vector ovExecGraphInfoElemTypes = { ov::element::i8, ov::element::i16, @@ -27,7 +25,6 @@ const std::vector ovExecGraphInfoElemTypes = { ov::element::bf16, ov::element::boolean, }; - INSTANTIATE_TEST_SUITE_P(ov_compiled_model, OVCompiledGraphImportExportTest, ::testing::Combine( @@ -35,9 +32,29 @@ INSTANTIATE_TEST_SUITE_P(ov_compiled_model, ::testing::Values(targetDevice), ::testing::Values(pluginConfig)), OVCompiledGraphImportExportTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P( ov_compiled_model, 
OVClassCompiledModelImportExportTestP, ::testing::Values(targetDevice)); -} // namespace +const std::vector nPrc = { + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, + ov::element::f16, + ov::element::f32, + ov::element::f64, + ov::element::bf16, +}; + +INSTANTIATE_TEST_SUITE_P(ov_compiled_model, + OVCompiledModelGraphUniqueNodeNamesTest, + ::testing::Combine(::testing::ValuesIn(nPrc), + ::testing::Values(ov::Shape{1, 2, 5, 5}), + ::testing::Values(targetDevice)), + OVCompiledModelGraphUniqueNodeNamesTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp index 426e8a262573ed..5401768ed9cc2a 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/caching_tests.cpp @@ -65,4 +65,14 @@ INSTANTIATE_TEST_SUITE_P(ov_plugin_floating_point, CompileModelCacheTestBase, ::testing::Values(pluginConfig)), CompileModelCacheTestBase::getTestCaseName); +const std::vector default_properties = { + {ov::enable_profiling(false)} +}; + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CompileModelCacheRuntimePropertiesTestBase, + ::testing::Combine( + ::testing::Values(targetDevice), + ::testing::ValuesIn(ov::test::conformance::generate_ov_configs(default_properties))), + CompileModelCacheRuntimePropertiesTestBase::getTestCaseName); + } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp new file mode 100644 index 00000000000000..2d565d04df0dee --- /dev/null +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/core_threading_tests.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "behavior/ov_plugin/core_threading.hpp" +#include "ov_api_conformance_helpers.hpp" + +using namespace ov::test::behavior; +using namespace ov::test::conformance; + +namespace { + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CoreThreadingTest, + testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + CoreThreadingTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(ov_plugin, + CoreThreadingTestsWithIter, + testing::Combine(testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + testing::Values(4), + testing::Values(50)), + CoreThreadingTestsWithIter::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(ov_plugin, CoreThreadingTestsWithCacheEnabled, + testing::Combine(testing::Values(std::tuple{targetDevice, {{ov::enable_profiling(false)}}}), + testing::Values(20), + testing::Values(10)), + CoreThreadingTestsWithCacheEnabled::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp index 1302cbcd11c020..1d654443bee0bb 100644 --- 
a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp @@ -112,6 +112,6 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(targetDevice)); INSTANTIATE_TEST_SUITE_P( - ov_plugin_remove_mandatory, OVBasicPropertiesTestsP, + ov_plugin_mandatory, OVBasicPropertiesTestsP, ::testing::ValuesIn(generate_ov_pairs_plugin_name_by_device())); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp new file mode 100644 index 00000000000000..2ace60f5425dfb --- /dev/null +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/version.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_api_conformance_helpers.hpp" +#include "behavior/ov_plugin/version.hpp" + +using namespace ov::test::behavior; +using namespace ov::test::conformance; + +namespace { + +INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, + VersionTests, + ::testing::Values(targetDevice), + VersionTests::getTestCaseName); + +} \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp index 95311b492f3f59..a8646cd3881a66 100644 --- a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp @@ -6,6 +6,7 @@ #include "ov_behavior_test_utils.hpp" +#include "ie_core.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index bfbd7437668efb..413d3cecebeb86 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -2,7 +2,6 @@ // SPDX-License-Identifcorer: Apache-2.0 // -#include #include #include #include @@ -13,6 +12,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/concat.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "openvino/runtime/tensor.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" @@ -367,10 +367,10 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - EXPECT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + EXPECT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -420,7 +420,7 @@ 
TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast<float>(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast<float>(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; EXPECT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -429,7 +429,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector<std::string> origFromExecLayerSep = ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp index 6a611b6cb2a60b..bfee36762478ef 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/import_export.hpp @@ -4,15 +4,14 @@ #include -#include -#include #include "base/ov_behavior_test_utils.hpp" -#include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/common_utils.hpp" #include "common_test_utils/file_utils.hpp" - -#include "functional_test_utils/plugin_cache.hpp" +#include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/exec_model_info.hpp" namespace ov { namespace test { @@ -49,9 +48,9 @@ class OVCompiledGraphImportExportTest : public testing::WithParamInterface<OVCompiledGraphImportExportTestParams>, - std::tie(elementType, target_device, configuration) = this->GetParam(); // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED(); + std::tie(elementType, target_device, configuration) = this->GetParam(); APIBaseTest::SetUp(); } @@ -317,6 +316,75 @@ TEST_P(OVClassCompiledModelImportExportTestP, smoke_ImportNetworkNoThrowWithDevi OV_ASSERT_NO_THROW(executableNetwork.create_infer_request()); } +// +// GetRuntimeModel +// +typedef std::tuple<ov::element::Type, ov::Shape, std::string> + OVCompiledModelGraphUniqueNodeNamesTestParams; + +class OVCompiledModelGraphUniqueNodeNamesTest + : public testing::WithParamInterface<OVCompiledModelGraphUniqueNodeNamesTestParams>, + public OVCompiledNetworkTestBase { +public: + static std::string getTestCaseName(testing::TestParamInfo<OVCompiledModelGraphUniqueNodeNamesTestParams> obj) { + ov::element::Type netPrecision; + ov::Shape inputShapes; + std::string targetDevice; + std::tie(netPrecision, inputShapes, targetDevice) = obj.param; + std::replace(targetDevice.begin(), targetDevice.end(), ':', '_'); + + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; + result << "netPRC=" << netPrecision.to_string() << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); + } + + void SetUp() override { + ov::Shape inputShape; + ov::element::Type netPrecision; + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + std::tie(netPrecision, inputShape, target_device) = this->GetParam(); + + APIBaseTest::SetUp(); + + ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape(inputShape))}; + auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{1}); + auto split = std::make_shared<ov::op::v1::Split>(params[0], split_axis_op, 2); + + auto concat = std::make_shared<ov::op::v0::Concat>(split->outputs(), 1); + + ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)}; + model = std::make_shared<ov::Model>(results, params, "SplitConvConcat"); + } + +protected: + std::shared_ptr<ov::Model> model; +}; + +TEST_P(OVCompiledModelGraphUniqueNodeNamesTest, CheckUniqueNodeNames) { + std::shared_ptr<ov::Core> core = ov::test::utils::PluginCache::get().core(); + auto compiled_model = core->compile_model(model, target_device); + auto exec_graph = compiled_model.get_runtime_model(); + + std::unordered_set<std::string> names; + ASSERT_NE(exec_graph, nullptr); + + for (const auto& op : exec_graph->get_ops()) { + ASSERT_TRUE(names.find(op->get_friendly_name()) == names.end()) + << "Node with name " << op->get_friendly_name() << " already exists"; + names.insert(op->get_friendly_name()); + + const auto& rtInfo = op->get_rt_info(); + auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE); + ASSERT_NE(rtInfo.end(), it); + } +}; + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp index db7b2ba376a3db..13904d2fa055ce 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "base/behavior_test_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/file_utils.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "openvino/core/model.hpp" #include "openvino/op/relu.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" @@ -183,10 +183,10 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + ASSERT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -236,7 +236,7 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast<float>(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast<float>(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; ASSERT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -244,7 +244,7 @@ TEST_P(ExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector<std::string> origFromExecLayerSep = 
ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp index cf55d7514b2330..5f9fa25bf2c65b 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp @@ -351,20 +351,4 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThro std::cout << "Exe network name: " << std::endl << networkname << std::endl; } - -TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - setHeteroNetworkAffinity(target_device); - - InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(actualCnnNetwork, heteroDeviceName); - - ASSERT_NO_THROW(p = exeNetwork.GetConfig("TARGET_FALLBACK")); - auto targets = p.as(); - auto expectedTargets = target_device; - - std::cout << "Exe network fallback targets: " << targets << std::endl; - ASSERT_EQ(expectedTargets, targets); -} } // namespace BehaviorTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp deleted file mode 100644 index 92282dc7ecff7d..00000000000000 --- a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "base/behavior_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" - -namespace BehaviorTestsDefinitions { - -using InferRequestSetBlobByTypeParams = std::tuple< - FuncTestUtils::BlobType, // Blob type - std::string, // Device name - std::map // Device config ->; - -class InferRequestSetBlobByType : public testing::WithParamInterface, - public BehaviorTestsUtils::IEInferRequestTestBase { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - using namespace ov::test::utils; - - FuncTestUtils::BlobType BlobType; - std::string targetDevice; - std::map configuration; - std::tie(BlobType, targetDevice, configuration) = obj.param; - std::replace(targetDevice.begin(), targetDevice.end(), ':', '.'); - - std::ostringstream result; - result << "BlobType=" << BlobType << "_"; - result << "Device="<< targetDevice << "_"; - result << "Config=" << configuration; - return result.str(); - } - - void SetUp() override { - std::map config; - std::tie(blobType, target_device, config) = this->GetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED() - APIBaseTest::SetUp(); - std::shared_ptr function = ov::test::utils::make_conv_pool_relu({4, 3, 6, 8}, ov::element::u8); - InferenceEngine::CNNNetwork cnnNetwork(function); - executableNetwork = ie->LoadNetwork(cnnNetwork, target_device, config); - } - -protected: - bool blobTypeIsSupportedByDevice() { - switch (blobType) { - case FuncTestUtils::BlobType::Memory: - return true; - case FuncTestUtils::BlobType::Compound: - case FuncTestUtils::BlobType::Remote: - return false; - case 
FuncTestUtils::BlobType::Batched: { - auto supported_metrics = ie->GetMetric(target_device, METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>(); - if (std::find(supported_metrics.begin(), supported_metrics.end(), - METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) { - return false; - } - - auto optimization_caps = - ie->GetMetric(target_device, METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>(); - return std::find(optimization_caps.begin(), optimization_caps.end(), - METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end(); - } - default: - IE_THROW() << "Test does not support the blob kind"; - } - } - - FuncTestUtils::BlobType blobType; - InferenceEngine::ExecutableNetwork executableNetwork; - std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie(); -}; - -TEST_P(InferRequestSetBlobByType, setInputBlobsByType) { - // Create InferRequest - auto req = executableNetwork.CreateInferRequest(); - for (const auto &input : executableNetwork.GetInputsInfo()) { - const auto &info = input.second; - auto blob = FuncTestUtils::createBlobByType(info->getTensorDesc(), blobType); - if (blobTypeIsSupportedByDevice()) { - EXPECT_NO_THROW(req.SetBlob(info->name(), blob)); - } else { - EXPECT_THROW(req.SetBlob(info->name(), blob), InferenceEngine::Exception); - } - } -} -} // namespace BehaviorTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index a0e360c596922c..b8f6f691cf5737 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -5,34 +5,34 @@ #include -#include "exec_graph_info.hpp" #include "base/ov_behavior_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/runtime/exec_model_info.hpp" #include "pugixml.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { namespace test { namespace behavior { -typedef std::tuple< - ov::element::Type_t, // Element type - std::string, // Device name - ov::AnyMap // Config -> OVExecGraphImportExportTestParams; +typedef std::tuple<ov::element::Type, std::string, ov::AnyMap> + OVExecGraphImportExportTestParams; class OVExecGraphImportExportTest : public testing::WithParamInterface<OVExecGraphImportExportTestParams>, public OVCompiledNetworkTestBase { - public: +public: static std::string getTestCaseName(testing::TestParamInfo<OVExecGraphImportExportTestParams> obj); void SetUp() override; void TearDown() override; - protected: +protected: std::shared_ptr<ov::Core> core = utils::PluginCache::get().core(); ov::AnyMap configuration; - ov::element::Type_t elementType; + ov::element::Type elementType; std::shared_ptr<ov::Model> function; }; @@ -78,4 +78,4 @@ class OVExecGraphSerializationTest : public testing::WithParamInterface<std::string> diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp #include -#include #include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/file_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/runtime/tensor.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" #include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" -#include "common_test_utils/subgraph_builders/concat_with_params.hpp" #include "common_test_utils/subgraph_builders/single_split.hpp" #include "common_test_utils/subgraph_builders/split_concat.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace test { @@ -291,10 +291,10 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoBeforeExecution) { }; // Each layer from the execGraphInfo network must have PM data option set - EXPECT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); + EXPECT_EQ("not_executed", getExecValue(ov::exec_model_info::PERF_COUNTER)); // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); if (origFromExecLayer.empty()) { constCnt++; } else { @@ -343,7 +343,7 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // At least one layer in the topology should be executed and have valid perf counter value try { - float x = static_cast<float>(std::atof(getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str())); + float x = static_cast<float>(std::atof(getExecValue(ov::exec_model_info::PERF_COUNTER).c_str())); std::cout << "TIME: " << x << std::endl; EXPECT_GE(x, 0.0f); hasOpWithValidTime = true; @@ -352,7 +352,7 @@ TEST_P(OVExecutableNetworkBaseTest, CheckExecGraphInfoAfterExecution) { // Parse origin layer names (fused/merged layers) from the executable graph // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); + auto origFromExecLayer = getExecValue(ov::exec_model_info::ORIGINAL_NAMES); std::vector<std::string> origFromExecLayerSep = ov::test::utils::splitStringByDelimiter(origFromExecLayer); if (origFromExecLayer.empty()) { constCnt++; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp index d9a64036e8c668..a306d741276241 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp @@ -59,7 +59,6 @@ using OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS = O using OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported = OVCompiledModelClassBaseTestP; using OVClassExecutableNetworkGetConfigTest = OVCompiledModelClassBaseTestP; using OVClassExecutableNetworkSetConfigTest = OVCompiledModelClassBaseTestP; -using OVClassExecutableNetworkGetConfigTest = OVCompiledModelClassBaseTestP; class OVClassExecutableNetworkGetMetricTestForSpecificConfig : public OVClassNetworkTest, diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp new file mode 100644 index 00000000000000..d9b93c2f8352f5 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/memory_states.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "base/behavior_test_utils.hpp" +#include "common_test_utils/test_common.hpp" + +namespace ov { +namespace test { +namespace behavior { + +using memoryStateParams = std::tuple<std::shared_ptr<ov::Model>, // Model to work with + std::vector<std::string>, // Memory States to query + std::string, // Target device name + ov::AnyMap>; // device configuration + +class OVInferRequestVariableStateTest : public testing::WithParamInterface<memoryStateParams>, + public OVInferRequestTestBase {
public: + static std::string getTestCaseName(const testing::TestParamInfo<memoryStateParams>& obj); + void SetUp() override; + void TearDown() override; + static std::shared_ptr<ov::Model> get_network(); + +protected: + std::shared_ptr<ov::Model> net; + std::vector<std::string> statesToQuery; + std::string deviceName; + ov::AnyMap configuration; + ov::CompiledModel prepare_network(); +}; +} // namespace behavior +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp index d6c2bafecea453..956f7be1df8824 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp @@ -98,6 +98,24 @@ class CompileModelCacheRuntimePropertiesTestBase void run() override; }; +using CompileModelLoadFromCacheParams = std::tuple<std::string, ov::AnyMap>; +class CompileModelLoadFromCacheTest : public testing::WithParamInterface<CompileModelLoadFromCacheParams>, + virtual public SubgraphBaseTest, + virtual public OVPluginTestBase { + std::string m_cacheFolderName; + std::string m_modelName; + std::string m_weightsName; + +public: + static std::string getTestCaseName(testing::TestParamInfo<CompileModelLoadFromCacheParams> obj); + + void SetUp() override; + void TearDown() override; + void run() override; +}; + using compileModelLoadFromMemoryParams = std::tuple<std::string, ov::AnyMap>; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index bc6df2494a9012..8ac84b3abf0dd2 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -521,12 +521,12 @@ TEST_P(OVClassBasicTestP, SetConfigAllNoThrow) { TEST_P(OVClassBasicTestP, SetGetConfigForTbbTerminateThrows) { ov::Core ie = createCoreWithTemplate(); bool value = false; - ASSERT_NO_THROW(ie.set_property({ov::force_tbb_terminate(true)})); - ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_NO_THROW(ie.set_property(target_device, {ov::force_tbb_terminate(true)})); + ASSERT_NO_THROW(value = ie.get_property(ov::force_tbb_terminate.name()).as<bool>()); ASSERT_TRUE(value); - ASSERT_NO_THROW(ie.set_property({{ov::force_tbb_terminate(false)}})); - ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_NO_THROW(ie.set_property(target_device, {ov::force_tbb_terminate(false)})); + ASSERT_NO_THROW(value = ie.get_property(ov::force_tbb_terminate.name()).as<bool>()); ASSERT_FALSE(value); } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp index 56c4e0f9e6230d..b7aebac307a351 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp @@ -106,8 +106,8 @@ class CoreThreadingTestsWithCacheEnabled : public testing::WithParamInterface - std::shared_ptr<ov::Model> model; - void SetupModel() { + std::vector<std::shared_ptr<ov::Model>> models; + void SetupModels() { 
ov::Core core; std::string ir_with_meta = R"V0G0N( @@ -278,23 +278,33 @@ class CoreThreadingTestsWithCacheEnabled : public testing::WithParamInterface )V0G0N"; ov::Tensor weights = {}; - model = core.read_model(ir_with_meta, weights); + auto model = core.read_model(ir_with_meta, weights); OPENVINO_ASSERT(model); + models.emplace_back(model); // model with cli_parameter + // test model with runtime attributes -- layout + model = ov::test::utils::make_split_multi_conv_concat(); + for (auto& iter : model->get_parameters()) + iter->set_layout("NCHW"); + for (auto& iter : model->get_results()) + iter->set_layout("NHCW"); + models.emplace_back(model); } }; // tested function: set_property, compile_model TEST_P(CoreThreadingTestsWithCacheEnabled, smoke_compilemodel_cache_enabled) { ov::Core core; - SetupModel(); + SetupModels(); core.set_property(target_device, config); core.set_property(ov::cache_dir(cache_path)); - runParallel( - [&]() { - (void)core.compile_model(model, target_device); - }, - numIterations, - numThreads); + for (auto& model : models) { + runParallel( + [&]() { + (void)core.compile_model(model, target_device); + }, + numIterations, + numThreads); + } core.set_property(ov::cache_dir("")); } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp index ff0710e3edc9af..98473b6b9cad6c 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/query_model.hpp @@ -39,6 +39,13 @@ TEST_P(OVClassModelTestP, QueryModelWithKSO) { } } +TEST_P(OVClassModelTestP, ImportModelWithNullContextThrows) { + ov::Core ie = createCoreWithTemplate(); + ov::RemoteContext context; + std::istringstream stream("None"); + ASSERT_THROW(ie.import_model(stream, context, {}), ov::Exception); +} + TEST_P(OVClassQueryModelTest, QueryModelWithMatMul) { ov::Core ie = createCoreWithTemplate(); diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index 2be633a2441166..fdca29e5522b2d 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -308,26 +308,6 @@ TEST_P(IEClassBasicTestP, SetGetConfigForTbbTerminateThrows) { ASSERT_FALSE(value); } -TEST_P(IEClassBasicTestP, SetConfigHeteroTargetFallbackThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - ASSERT_NO_THROW(ie.SetConfig({{"TARGET_FALLBACK", target_device}}, ov::test::utils::DEVICE_HETERO)); -} - -TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - bool value = false; - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::YES}}, - ov::test::utils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); - ASSERT_TRUE(value); - - ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::NO}}, - ov::test::utils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); - ASSERT_FALSE(value); -} - TEST_P(IEClassSpecificDeviceTestSetConfig, SetConfigSpecificDeviceNoThrow) { InferenceEngine::Core ie = 
BehaviorTestsUtils::createIECoreWithTemplate(); @@ -369,13 +349,6 @@ TEST(IEClassBasicTest, smoke_ImportNetworkMultiThrows) { ASSERT_THROW(ie.ImportNetwork("model", ov::test::utils::DEVICE_MULTI), InferenceEngine::NetworkNotRead); } -TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::RemoteContext::Ptr context = nullptr; - std::istringstream stream("None"); - ASSERT_THROW(ie.ImportNetwork(stream, context, {}), InferenceEngine::Exception); -} - // // QueryNetwork // @@ -516,13 +489,6 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) { } } -TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::QueryNetworkResult res; - ASSERT_NO_THROW(res = ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", target_device}})); - ASSERT_LT(0, res.supportedLayersMap.size()); -} - TEST_P(IEClassNetworkTestP, DISABLED_QueryNetworkMultiThrows) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); try { @@ -778,14 +744,6 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) { ASSERT_THROW(p = ie.GetConfig(ov::test::utils::DEVICE_HETERO, "unsupported_config"), InferenceEngine::Exception); } -TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - InferenceEngine::Parameter p; - - ASSERT_THROW(p = ie.GetConfig(ov::test::utils::DEVICE_HETERO + std::string(":") + target_device, HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)), - InferenceEngine::Exception); -} - TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); InferenceEngine::Parameter p; @@ -848,19 +806,6 @@ TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) { // QueryNetwork with HETERO on particular device // -TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - auto deviceIDs = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - if (deviceIDs.empty()) - GTEST_FAIL() << "Incorrect DeviceID number" << std::endl; - ASSERT_NO_THROW(ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + "." 
+ deviceIDs[0] + "," + target_device}})); -} - TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); @@ -891,16 +836,6 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) { ASSERT_THROW(ie.QueryNetwork(actualCnnNetwork, target_device + ".l0"), InferenceEngine::Exception); } -TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.QueryNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + ".100," + target_device}}), InferenceEngine::Exception); -} - // // LoadNetwork // @@ -937,11 +872,6 @@ TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) { ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO + std::string(":") + target_device)); } -TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", target_device}})); -} - TEST_P(IEClassNetworkTestP, LoadNetworkCreateDefaultExecGraphResult) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); auto net = ie.LoadNetwork(actualCnnNetwork, target_device); @@ -1051,150 +981,6 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) { ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, target_device + ".l0"), InferenceEngine::Exception); } -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, "HETERO", - {{"TARGET_FALLBACK", target_device + ".100," + ov::test::utils::DEVICE_CPU}}), InferenceEngine::Exception); -} - -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - ASSERT_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, - {{"TARGET_FALLBACK", target_device + "," + ov::test::utils::DEVICE_CPU}, - {CONFIG_KEY(DEVICE_ID), "110"}}), InferenceEngine::Exception); -} - -// -// LoadNetwork with HETERO on MULTI combinations particular device -// - -TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + '.' 
+ device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - std::string targetFallback(ov::test::utils::DEVICE_MULTI + std::string(",") + target_device); - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_HETERO, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", targetFallback}})); -} - -TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + std::string(".") + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, ov::test::utils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_HETERO}, - {"TARGET_FALLBACK", devices}})); -} - -// -// QueryNetwork with HETERO on MULTI combinations particular device -// - -TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + '.' + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - auto function = multinputCnnNetwork.getFunction(); - ASSERT_NE(nullptr, function); - std::unordered_set expectedLayers; - for (auto &&node : function->get_ops()) { - expectedLayers.emplace(node->get_friendly_name()); - } - InferenceEngine::QueryNetworkResult result; - std::string targetFallback(ov::test::utils::DEVICE_MULTI + std::string(",") + target_device); - ASSERT_NO_THROW(result = ie.QueryNetwork(multinputCnnNetwork, ov::test::utils::DEVICE_HETERO, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", targetFallback}})); - - std::unordered_set actualLayers; - for (auto &&layer : result.supportedLayersMap) { - actualLayers.emplace(layer.first); - } - ASSERT_EQ(expectedLayers, actualLayers); -} - -TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { - InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); - - if (!supportsDeviceID(ie, target_device)) { - GTEST_FAIL() << "Device does not support DeviceID" << std::endl; - } - if (!supportsAvaliableDevices(ie, target_device)) { - GTEST_FAIL() << "Device does not support AvailableDevices" << std::endl; - } - std::string devices; - auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += target_device + "." 
+ device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - auto function = multinputCnnNetwork.getFunction(); - ASSERT_NE(nullptr, function); - std::unordered_set expectedLayers; - for (auto &&node : function->get_ops()) { - expectedLayers.emplace(node->get_friendly_name()); - } - InferenceEngine::QueryNetworkResult result; - ASSERT_NO_THROW(result = ie.QueryNetwork(multinputCnnNetwork, ov::test::utils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_HETERO}, - {"TARGET_FALLBACK", devices}})); - - std::unordered_set actualLayers; - for (auto &&layer : result.supportedLayersMap) { - actualLayers.emplace(layer.first); - } - ASSERT_EQ(expectedLayers, actualLayers); -} - TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); { diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp index d4dee3c250a83f..244b600204b166 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp @@ -67,16 +67,6 @@ class CoreThreadingTestsBase { } } - void safeAddExtension(InferenceEngine::Core & ie) { - try { - auto extension = std::make_shared( - FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), "template_extension")); - ie.AddExtension(extension); - } catch (const InferenceEngine::Exception & ex) { - ASSERT_STR_CONTAINS(ex.what(), "name: experimental"); - } - } - Config config; }; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp index bbe9239e439d93..0ee4e3af0c6673 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp @@ -8,6 +8,9 @@ #include +#include "common_test_utils/test_common.hpp" +#include "openvino/opsets/opset8.hpp" + using namespace InferenceEngine; namespace HeteroTests { diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp index 463266084624a8..08a50c9f356e40 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/version.hpp @@ -10,7 +10,6 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ie_preprocess.hpp" #include "base/behavior_test_utils.hpp" namespace BehaviorTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp index b0466f52aa47b3..59b8550c5fe22a 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp @@ -16,13 +16,13 @@ class AddTestValues{ ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2; bool broadcast; - 
std::vector precisionOnActivations; - std::vector expectedPrecisions; + std::vector precisionOnActivations; + std::vector expectedPrecisions; }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, AddTestValues > AddTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp index be011e89aaead3..b9a4bd9e71c688 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp @@ -15,8 +15,8 @@ class AssignAndReadValueTransformationParam { }; typedef std::tuple < - ngraph::element::Type, // input precision - ngraph::PartialShape, // input shape + ov::element::Type, // input precision + ov::PartialShape, // input shape size_t, // opset version std::string, // device ov::pass::low_precision::LayerTransformation::Params, // transformation params diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp index 88e6eb9b90f051..a05c52c5ce8ebf 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { class BatchToSpaceTransformationParam { public: - ngraph::PartialShape input_shape; + ov::PartialShape input_shape; std::vector block_shape; std::vector crops_begin; std::vector crops_end; @@ -24,7 +24,7 @@ class BatchToSpaceTransformationParam { }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, BatchToSpaceTransformationParam > BatchToSpaceTransformationParams; @@ -37,7 +37,7 @@ class BatchToSpaceTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp index 50d687b1bd88aa..2462a1ea35ccb6 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp @@ -18,8 +18,8 @@ class ClampTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ClampTransformationParam diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp index da93245b4fe0a4..383bd5fa4bbdcc 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp @@ -24,8 +24,8 @@ class 
ConcatTransformationTestValues { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ConcatTransformationTestValues> ConcatTransformationParams; @@ -34,7 +34,6 @@ class ConcatTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp index 96d57afe7ad62d..e350b0be317b49 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp @@ -18,8 +18,8 @@ class ConcatWithChildAndOutputTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, // target device: CPU, GPU ConcatWithChildAndOutputTransformationParam, ov::pass::low_precision::LayerTransformation::Params // transformation parameters diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp index 4baadf52c0a62d..6d46bac97a8f08 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp @@ -19,8 +19,8 @@ class ConcatWithDifferentChildrenTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, // target device: CPU, GPU ConcatWithDifferentChildrenTransformationParam, ov::pass::low_precision::LayerTransformation::Params // transformation parameters @@ -31,7 +31,6 @@ class ConcatWithDifferentChildrenTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_intermediate_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_intermediate_transformation.hpp index a77069376562b0..7ee3f5d5ac0593 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_intermediate_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_intermediate_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, // target device: CPU, GPU ov::pass::low_precision::LayerTransformation::Params, // transformation parameters bool, // transparent intermediate @@ -25,7 +25,6 @@ class ConcatWithIntermediateTransformation : public 
LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_neighbors_graph_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_neighbors_graph_transformation.hpp index fc55d9f2ec6b4a..ea04d0e074cd79 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_neighbors_graph_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_neighbors_graph_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params> ConcatNeighboringGraphTransformationParams; @@ -22,7 +22,6 @@ class ConcatWithNeighborsGraphTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp index 43f0491dc89305..3384d3be810510 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp @@ -18,8 +18,8 @@ class ConcatWithSplitTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ConcatWithSplitTransformationParam, ov::pass::low_precision::LayerTransformation::Params> ConcatWithSplitTransformationParams; @@ -29,7 +29,6 @@ class ConcatWithSplitTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp index 13aa0a4f26260c..6ef505f9fb2317 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp @@ -42,9 +42,9 @@ class ConvolutionBackpropDataTransformationParam { }; typedef std::tuple< - ngraph::element::Type, // netPrecision - std::pair, // input shape and shape support flag - ngraph::Shape, // outputShape + ov::element::Type, // netPrecision + std::pair, // input shape and shape support flag + ov::Shape, // outputShape std::string, // targetDevice ov::pass::low_precision::LayerTransformation::Params, ConvolutionBackpropDataTransformationParam @@ -59,7 +59,7 
@@ class ConvolutionBackpropDataTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp index 8475b146af9b0f..23c2ff21deda39 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp @@ -46,8 +46,8 @@ inline std::ostream& operator<<(std::ostream& out, const ConvolutionQDqTransform } typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ConvolutionQDqTransformationParam @@ -62,7 +62,7 @@ class ConvolutionQDqTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp index 851c8f57cb4789..2558b6948b3f88 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp @@ -24,8 +24,8 @@ class ConvolutionTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::Shape, + ov::element::Type, + ov::Shape, std::string, ov::pass::low_precision::LayerTransformation::Params, ConvolutionTransformationParam @@ -40,7 +40,7 @@ class ConvolutionTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp index 9f919e5cdd5195..dd79d53053ee0f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp @@ -21,8 +21,8 @@ class ConvolutionWIthIncorrectWeightsParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::Shape, + ov::element::Type, + ov::Shape, std::string, ov::pass::low_precision::LayerTransformation::Params, ConvolutionWIthIncorrectWeightsParam diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp index 45b1947bb37c57..1061832ee35d24 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp @@ -8,12 +8,13 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" +#include "openvino/op/depth_to_space.hpp" namespace LayerTestsDefinitions { 
typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::op::v0::DepthToSpace::DepthToSpaceMode, size_t> DepthToSpaceTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp index e98af3352dae83..2ec87aee0b9874 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp @@ -30,8 +30,8 @@ class ElementwiseBranchSelectionTestValues{ }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ElementwiseBranchSelectionTestValues, std::string @@ -45,7 +45,7 @@ class ElementwiseBranchSelectionTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp index bc49cae1cb5755..76bf6127799b42 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp @@ -7,7 +7,6 @@ #include #include -#include #include "ov_lpt_models/common/add.hpp" #include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" @@ -19,7 +18,7 @@ class EliminateFakeQuantizeTransformationTestValues { public: class Actual { public: - ngraph::element::Type precisionBefore; + ov::element::Type precisionBefore; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData1; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData2; }; @@ -31,7 +30,7 @@ class EliminateFakeQuantizeTransformationTestValues { size_t int8_convolutions; }; - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; Actual actual; Expected expected; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp index 27140d9bd07c54..5705bb0821ae27 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp @@ -13,8 +13,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeOnData> FakeQuantizeAndAvgPoolTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp index 499e4bd686b887..2a31898b978304 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp @@ -13,8 +13,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeOnData> FakeQuantizeAndMaxPoolTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp index e17076acb062c6..4872d1b477fc9c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp @@ -21,8 +21,8 @@ class FakeQuantizeAndTwoOutputBranchesWithConvolution { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, FakeQuantizeAndTwoOutputBranchesWithConvolution diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp index fa4077ece6cd44..f9c04f0d21cfd0 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp @@ -26,7 +26,7 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizePrecisionSe class FakeQuantizePrecisionSelectionTransformationExpectedValues { public: - ngraph::element::Type fakeQuantizeOnDataOutPrecision; + ov::element::Type fakeQuantizeOnDataOutPrecision; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; @@ -37,8 +37,8 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizePrecisionSe class FakeQuantizePrecisionSelectionTransformationTestValues { public: - std::vector precisionsOnActivations; - std::vector precisionsOnActivationForLimitedOperation; + std::vector precisionsOnActivations; + std::vector precisionsOnActivationForLimitedOperation; bool operationBeforeLimitedOperationIsPrecisionTransparent; FakeQuantizePrecisionSelectionTransformationActualValues actual; FakeQuantizePrecisionSelectionTransformationExpectedValues expected; @@ -49,8 +49,8 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizePrecisionSe } typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, FakeQuantizePrecisionSelectionTransformationTestValues> 
FakeQuantizeTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp index 1e07645754f014..7240f8d9e32e6b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp @@ -19,8 +19,8 @@ class FakeQuantizeTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, FakeQuantizeTransformationParam, @@ -34,7 +34,7 @@ class FakeQuantizeTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp index 78557a195a84e2..cf2fe6802800e7 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp @@ -49,8 +49,8 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeWithNotOpti // ngraph::builder::subgraph::FakeQuantizeOnData typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, FakeQuantizeWithNotOptimalTransformationTestValues> FakeQuantizeTransformationParams; @@ -63,7 +63,7 @@ class FakeQuantizeWithNotOptimalTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp index f598aeed560f67..224fa417a3e9ce 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fully_connected_transformation.hpp @@ -6,19 +6,18 @@ #include #include -#include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" class MatMulShapes { public: - ngraph::PartialShape inputA; - ngraph::PartialShape inputB; + ov::PartialShape inputA; + ov::PartialShape inputB; bool transposeA; bool transposeB; }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, MatMulShapes, std::string, ov::pass::low_precision::LayerTransformation::Params> FullyConnectedTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp index 2c16fea5538e8c..590958687a440b 100644 --- 
a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp @@ -6,7 +6,6 @@ #include -#include #include "ov_lpt_models/common/add.hpp" #include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" @@ -18,15 +17,15 @@ class FuseDequantizeToFakeQuantizeTransformationTestValues { public: class Actual { public: - ngraph::element::Type precisionBeforeAdd; + ov::element::Type precisionBeforeAdd; ngraph::builder::subgraph::Add add; - ngraph::element::Type precisionBeforeDequantization; + ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantization; - ngraph::element::Type precisionAfterDequantization; + ov::element::Type precisionAfterDequantization; ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant fakeQuantizeOnData; }; - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; Actual actual; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp index 14e98de6a43844..29c11ed750a86f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeOnData> FuseFakeQuantizeAndScaleShiftTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp index 9153c8cdf6d6f3..dea9f20c799c9b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp @@ -6,7 +6,6 @@ #include -#include #include "ov_lpt_models/common/add.hpp" #include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" @@ -22,7 +21,7 @@ class FuseMultiplyToFakeQuantizeTransformationTestValues { ngraph::builder::subgraph::DequantizationOperations dequantization; }; - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; Actual actual; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp index e6128bb9ebf44f..9cb05fe3610fd8 100644 --- 
a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp @@ -6,7 +6,6 @@ #include -#include #include "ov_lpt_models/common/add.hpp" #include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" @@ -22,7 +21,7 @@ class FuseSubtractToFakeQuantizeTransformationTestValues { ngraph::builder::subgraph::DequantizationOperations dequantization; }; - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; Actual actual; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp index 1dee4f8157940e..d297d20762277e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp @@ -14,18 +14,18 @@ namespace LayerTestsDefinitions { class GatherTransformationTestValues { public: - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; std::vector gatherIndicesShape; std::vector gatherIndicesValues; std::vector axis; int64_t batch_dims; ov::pass::low_precision::LayerTransformation::Params params; - ngraph::element::Type precisionBeforeFq; + ov::element::Type precisionBeforeFq; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData; }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, GatherTransformationTestValues, int> GatherTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/gemm_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/gemm_transformation.hpp index 231f7bae9c85ff..b3b0fd28593f04 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/gemm_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/gemm_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params> GemmTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp index 09e3d90210414f..f6da842fd7f630 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp @@ -41,10 +41,10 @@ class GroupConvolutionTransformationParam { }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, ov::pass::low_precision::LayerTransformation::Params, - std::pair<ngraph::PartialShape, ngraph::PartialShape>, + std::pair<ov::PartialShape, ov::PartialShape>, GroupConvolutionTransformationParam, bool // add precision preserved operation > GroupConvolutionTransformationParams; @@ -58,7 +58,7 @@ class GroupConvolutionTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace
LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp index f23036150c0ee5..f2ca698b9cb6f3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp @@ -50,8 +50,8 @@ inline std::ostream& operator<<(std::ostream& out, const GroupConvolutionQDqTran } typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, GroupConvolutionQDqTransformationParam @@ -66,7 +66,7 @@ class GroupConvolutionQDqTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/interpolate_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/interpolate_transformation.hpp index fd731780c13782..c66ba47b65881e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/interpolate_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/interpolate_transformation.hpp @@ -13,7 +13,7 @@ namespace LayerTestsDefinitions { class interpAttributes { public: - ngraph::AxisSet axes; + ov::AxisSet axes; std::string mode; bool align_corners; bool antialias; @@ -24,7 +24,7 @@ class interpAttributes { interpAttributes() = default; - interpAttributes(const ngraph::AxisSet& axes, + interpAttributes(const ov::AxisSet& axes, const std::string& mode, const bool& align_corners, const bool& antialias, @@ -36,8 +36,8 @@ class interpAttributes { }; typedef std::tuple< - ngraph::element::Type, - std::pair<ngraph::PartialShape, ngraph::PartialShape>, + ov::element::Type, + std::pair<ov::PartialShape, ov::PartialShape>, std::string, interpAttributes> InterpolateTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp index 285c599591d7ca..2a6f49591ab2b9 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp @@ -15,17 +15,17 @@ namespace LayerTestsDefinitions { class MatMulTransformationTestValues { public: - ngraph::Shape inputShape1; + ov::Shape inputShape1; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData1; - ngraph::Shape inputShape2; + ov::Shape inputShape2; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData2; std::string expectedKernelName; std::string expectedRuntimePrecision; }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, MatMulTransformationTestValues> MatMulTransformationParams; @@ -34,11 +34,10 @@ class MatMulTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; - void Run() override; + void run()
override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp index 7eaab1f0d93f4b..c005df22247de1 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { class MatMulWithConstantTransformationTestValues { public: - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant fqOnData; ngraph::builder::subgraph::Constant weights; @@ -31,7 +31,7 @@ class MatMulWithConstantTransformationTestValues { }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, MatMulWithConstantTransformationTestValues> MatMulWithConstantTransformationParams; @@ -40,12 +40,11 @@ class MatMulWithConstantTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp index 55b5f35f2b101f..ecddcb1a3ba2d8 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp @@ -19,8 +19,8 @@ class MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues { }; typedef std::tuple< - ngraph::element::Type, - std::pair<ngraph::PartialShape, ngraph::Shape>, + ov::element::Type, + std::pair<ov::PartialShape, ov::Shape>, std::string, MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues > MatMulWithOptimizedConstantFakeQuantizeTransformationTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp index 7b0bdf2e5603bf..2e4ee88ec43c99 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp @@ -30,8 +30,8 @@ class MoveFakeQuantizeTransformationParam { }; typedef std::tuple < - ngraph::element::Type, - std::vector<ngraph::PartialShape>, + ov::element::Type, + std::vector<ov::PartialShape>, std::string, ov::pass::low_precision::LayerTransformation::Params, bool, @@ -47,7 +47,7 @@ class MoveFakeQuantizeTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp
index 6bd801816daefa..c6e2df72d195db 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp @@ -39,7 +39,7 @@ class MultiplyToGroupConvolutionTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp index 06ac05ecb03b46..f79ce40f645604 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp @@ -19,13 +19,13 @@ class MultiplyTestValues { bool broadcast2; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeAfter; - ngraph::element::Type expectedPrecisions; + ov::element::Type expectedPrecisions; bool secondInputIsConstant; }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, MultiplyTestValues > MultiplyTransformationParams; @@ -38,7 +38,7 @@ class MultiplyTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp index 0f5bcc77779bcd..529046f07be87c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp @@ -18,8 +18,8 @@ class MultiplyWithOneParentTransformationValues { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, MultiplyWithOneParentTransformationValues > MultiplyWithOneParentTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp index 3c3bd8dd29461e..deaf0a630bbdb3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp @@ -13,8 +13,8 @@ namespace LayerTestsDefinitions { typedef std::tuple < - ngraph::element::Type, - std::pair<ngraph::PartialShape, ngraph::Shape>, + ov::element::Type, + std::pair<ov::PartialShape, ov::Shape>, std::string, std::vector, bool, diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers.hpp index 9f7da614c30462..543f06724c64bd 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers.hpp @@ -16,7 +16,6 @@ class
OutputLayers : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat.hpp index f111bbfa1fc2fd..7bbe793bbc1535 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat.hpp @@ -16,7 +16,6 @@ class OutputLayersConcat : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat_multi_channel.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat_multi_channel.hpp index 48c4a33d79f7d6..12a133d0825435 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat_multi_channel.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_concat_multi_channel.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - InferenceEngine::Precision, - InferenceEngine::SizeVector, + ov::element::Type, + ov::Shape, std::string, ov::pass::low_precision::LayerTransformation::Params, bool> OutputLayersHandlingInTransformationsParams; @@ -23,7 +23,6 @@ class OutputLayersConcatMultiChannel : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp index 17fb3a7663a385..4028d4ef0b0b9e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp @@ -19,8 +19,8 @@ class PadTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, ov::op::PadMode, std::string, ov::pass::low_precision::LayerTransformation::Params, @@ -35,6 +35,6 @@ class PadTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp index d0256c905685e2..0377e34675a084 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp @@ -18,8 +18,8 @@ class PReluTestValues { }; typedef std::tuple< - 
ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, PReluTestValues> PReluTransformationParams; @@ -28,7 +28,6 @@ class PReluTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp index abceb85983bc8c..43270feebfed0c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { class PullReshapeThroughDequantizationTestValues { public: - ngraph::element::Type precisionBeforeDequantization; + ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant fakeQuantizeOnData; ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; ngraph::builder::subgraph::Constant weights; @@ -28,18 +28,18 @@ class PullReshapeThroughDequantizationTestValues { ngraph::builder::subgraph::DequantizationOperations::Multiply multiply; ngraph::builder::subgraph::Transpose transpose; ngraph::builder::subgraph::Reshape reshape2; - ngraph::element::Type precisionAfterOperation; + ov::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; std::string operationName; std::string expectedKernelType; }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, - ngraph::Shape, + ov::Shape, PullReshapeThroughDequantizationTestValues> PullReshapeThroughDequantizationParams; class PullReshapeThroughDequantizationTransformation : @@ -50,7 +50,7 @@ class PullReshapeThroughDequantizationTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp index 6e9e9f375ff62e..6f64a37f727368 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp @@ -37,9 +37,9 @@ class RecurrentCellTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - std::vector<ngraph::PartialShape>, - std::vector<ngraph::Shape>, + ov::element::Type, + std::vector<ov::PartialShape>, + std::vector<ov::Shape>, std::string, ov::pass::low_precision::LayerTransformation::Params, RecurrentCellTransformationParam @@ -54,7 +54,7 @@ class RecurrentCellTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp
b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp index 9dabccfbdb65f0..f68742e0e164cf 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp @@ -19,8 +19,8 @@ class ReduceMaxTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ReduceMaxTransformationParam @@ -34,6 +34,6 @@ class ReduceMaxTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp index bef0cf1e823528..1200fcc1ed0175 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp @@ -31,8 +31,8 @@ class ReduceMeanTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ReduceMeanTransformationParam @@ -46,6 +46,6 @@ class ReduceMeanTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp index 14a6e3b043902f..a412c3c07cb30a 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp @@ -19,8 +19,8 @@ class ReduceMinTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ReduceMinTransformationParam @@ -34,6 +34,6 @@ class ReduceMinTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp index 0da5e29777f79b..7c36cc5658f444 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp @@ -19,8 +19,8 @@ class ReduceSumTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ReduceSumTransformationParam @@ -34,6 +34,6 @@ class ReduceSumTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace 
LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp index 25219937997769..8f95935d3d5651 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp @@ -18,8 +18,8 @@ class ReluTestValues { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ReluTestValues> ReluTransformationParams; @@ -28,7 +28,6 @@ class ReluTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp index 02fa5b75ed802c..119c2b86723041 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { class ReshapeTransformationParam { public: - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; std::vector reshapeConstValues; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; std::string layerType; @@ -22,7 +22,7 @@ class ReshapeTransformationParam { }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, ov::pass::low_precision::LayerTransformation::Params, ReshapeTransformationParam @@ -36,7 +36,7 @@ class ReshapeTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp index cd54ec38c3c122..ed379426535bcb 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp @@ -22,8 +22,8 @@ class ShuffleChannelsTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, ShuffleChannelsTransformationParam @@ -37,7 +37,7 @@ class ShuffleChannelsTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp index 95884e9e8f1422..6de5eca158b70f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp +++ 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { class SpaceToBatchTransformationParam { public: - ngraph::PartialShape input_shape; + ov::PartialShape input_shape; std::vector block_shape; std::vector pads_begin; std::vector pads_end; @@ -24,7 +24,7 @@ class SpaceToBatchTransformationParam { }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, SpaceToBatchTransformationParam > SpaceToBatchTransformationParams; @@ -37,7 +37,7 @@ class SpaceToBatchTransformation : protected: void SetUp() override; - void Run() override; + void run() override; }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp index 5fede273161e71..97d5b1464dc448 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp @@ -16,8 +16,8 @@ class SplitTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, SplitTransformationParam @@ -28,7 +28,6 @@ class SplitTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp index 0a5579d13d68bd..5db76a7e028668 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp @@ -16,13 +16,13 @@ class SqueezeTransformationParam { public: ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; std::vector squeezeAxes; - ngraph::PartialShape shape; + ov::PartialShape shape; }; std::string stringifySqueezeArgs(const std::vector& axes); typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, ov::pass::low_precision::LayerTransformation::Params, SqueezeTransformationParam @@ -32,7 +32,6 @@ class SqueezeTransformation : public testing::WithParamInterface, public LayerTestsUtils::LayerTransformation { public: - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp index 13d255ef575e63..695e47c2edd9d0 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp @@ -23,8 +23,8 @@ class StridedSliceTransformationParam { }; typedef std::tuple< - 
ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, StridedSliceTransformationParam diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp index 2ac54b9b98a7c3..decc2bce66f069 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp @@ -14,8 +14,8 @@ namespace LayerTestsDefinitions { class SubtractMultiplyToMultiplyAddTransformationTestValues { public: - ngraph::PartialShape inputShape; - ngraph::element::Type precision; + ov::PartialShape inputShape; + ov::element::Type precision; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_transformation.hpp index 803d330c65b587..90abaf00531305 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params > SubtractTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_after_matmul_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_after_matmul_transformation.hpp index ee96f953344ebc..86801d31d514a0 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_after_matmul_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_after_matmul_transformation.hpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, bool, diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp index 52e164d05f96d4..2f27b7ec8cb87f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp @@ -14,15 +14,15 @@ namespace LayerTestsDefinitions { class TransposeTransformationTestValues { public: - ngraph::PartialShape inputShape; + ov::PartialShape inputShape; std::vector transposeConstValues; ov::pass::low_precision::LayerTransformation::Params params; - ngraph::element::Type precisionBeforeFq; + ov::element::Type precisionBeforeFq; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData; }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, 
TransposeTransformationTestValues> TransposeTransformationParams; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp index 0d726ee4eda42b..d66018cd13bf86 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp @@ -16,11 +16,11 @@ class UnsqueezeTransformationParam { public: ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; std::vector unsqueezeAxes; - ngraph::PartialShape shape; + ov::PartialShape shape; }; typedef std::tuple< - ngraph::element::Type, + ov::element::Type, std::string, ov::pass::low_precision::LayerTransformation::Params, UnsqueezeTransformationParam @@ -30,7 +30,6 @@ class UnsqueezeTransformation : public testing::WithParamInterface, public LayerTestsUtils::LayerTransformation { public: - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp index 11b0b67e1e9ed1..069014107bc73f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp @@ -16,8 +16,8 @@ class VariadicSplitTransformationParam { }; typedef std::tuple< - ngraph::element::Type, - ngraph::PartialShape, + ov::element::Type, + ov::PartialShape, std::string, ov::pass::low_precision::LayerTransformation::Params, VariadicSplitTransformationParam @@ -28,7 +28,6 @@ class VariadicSplitTransformation : public LayerTestsUtils::LayerTransformation { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; }; diff --git a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp deleted file mode 100644 index 3b2f7e40aacab9..00000000000000 --- a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "openvino/runtime/intel_gpu/ocl/ocl.hpp" -#include "openvino/runtime/core.hpp" -#include "openvino/runtime/properties.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include -#include "common_test_utils/ov_tensor_utils.hpp" - -TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity) { - auto ie = ov::Core(); - using namespace ov::preprocess; - auto p = PrePostProcessor(fn_ptr); - p.input().tensor().set_element_type(ov::element::i8); - p.input().preprocess().convert_element_type(ov::element::f32); - - auto function = p.build(); - ov::CompiledModel exec_net; - try { - exec_net = ie.compile_model(function, device_names, ov::hint::allow_auto_batching(false)); - } catch (...) 
{ - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test - return; - } - std::vector inf_req_shared = {}; - auto input = function->get_parameters().at(0); - auto output = function->get_results().at(0); - auto fakeImageData = ov::test::utils::create_and_fill_tensor(input->get_element_type(), input->get_shape()); - auto inf_req_regular = exec_net.create_infer_request(); - inf_req_regular.set_tensor(input, fakeImageData); - // infer using system memory - inf_req_regular.infer(); - auto output_tensor_regular = inf_req_regular.get_tensor(output); - auto imSize = ov::shape_size(input->get_shape()); - std::vector contexts = {}; - std::vector cldnn_tensor = {}; - for (auto& iter : device_lists) { - try { - auto cldnn_context = ie.get_default_context(iter).as(); - contexts.push_back(cldnn_context); - cl_context ctx = cldnn_context; - auto ocl_instance = std::make_shared(ctx); - cl_int err; - cl::Buffer shared_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, imSize, NULL, &err); - { - void* buffer = fakeImageData.data(); - ocl_instance->_queue.enqueueWriteBuffer(shared_buffer, true, 0, imSize, buffer); - } - cldnn_tensor.emplace_back(cldnn_context.create_tensor(input->get_element_type(), input->get_shape(), shared_buffer)); - } catch(...) { - // device does not support remote context - continue; - } - } - for (size_t i = 0; i < cldnn_tensor.size(); i++) { - auto temprequest = exec_net.create_infer_request(); - temprequest.set_input_tensor(cldnn_tensor.at(i)); - inf_req_shared.emplace_back(temprequest); - } - for (size_t i = 0; i < inf_req_shared.size(); i++) - inf_req_shared.at(i).start_async(); - for (size_t i = 0; i < inf_req_shared.size(); i++) - inf_req_shared.at(i).wait(); - - // compare results - for (size_t i = 0; i < inf_req_shared.size(); i++) { - auto output_tensor_shared = inf_req_shared.at(i).get_tensor(output); - - { - ASSERT_EQ(output->get_element_type(), ov::element::f32); - ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); - ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr); - } - } -} diff --git a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp deleted file mode 100644 index 16676330103e0c..00000000000000 --- a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_tests.hpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "ie_core.hpp" -#include "base/multi/multi_helpers.hpp" -#include "functional_test_utils/plugin_cache.hpp" - -TEST_P(MultiDevice_SupportTest, canCreateContextThenRequestThenBlobsAndInfer) { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - InferenceEngine::CNNNetwork net(fn_ptr); - net.getInputsInfo().begin()->second->setLayout(InferenceEngine::Layout::NCHW); - net.getInputsInfo().begin()->second->setPrecision(InferenceEngine::Precision::U8); - - auto ie = PluginCache::get().ie(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - auto exec_net = ie->LoadNetwork(net, device_names, configs); - if (expected_status) { - std::shared_ptr ctx; - ASSERT_NE(ctx = exec_net.GetContext(), nullptr); - 
InferenceEngine::InferRequest req = exec_net.CreateInferRequest(); - ASSERT_TRUE(req); - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - for (auto i : inputInfo) { - auto rblob = InferenceEngine::make_shared_blob(i.second->getTensorDesc(), ctx); - rblob->allocate(); - req.SetBlob(i.first, rblob); - } - ASSERT_NO_THROW(req.StartAsync()); - ASSERT_EQ(req.Wait(InferenceEngine::InferRequest::RESULT_READY), InferenceEngine::StatusCode::OK); - - } else { - ASSERT_THROW(exec_net.GetContext(), InferenceEngine::NotImplemented); - } -} diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp index e9aa3517cafcc7..dad263771c7e36 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp @@ -9,7 +9,7 @@ namespace ov { namespace test { -TEST_P(ConstantResultSubgraphTest, CompareWithRefs) { +TEST_P(ConstantResultSubgraphTest, Inference) { run(); } diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp deleted file mode 100644 index d41ceef9ed5a08..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/constant_result.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(ConstantResultSubgraphTest, CompareWithRefs) { - Run(); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp index 40123974846ea3..7600b2eac579d7 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp @@ -6,18 +6,10 @@ #include "shared_test_classes/subgraph/parameter_result.hpp" -namespace SubgraphTestsDefinitions { - -TEST_P(ParameterResultSubgraphTestLegacyApi, CompareWithRefs) { - Run(); -} - -} // namespace SubgraphTestsDefinitions - namespace ov { namespace test { -TEST_P(ParameterResultSubgraphTest, CompareWithRefs) { +TEST_P(ParameterResultSubgraphTest, Inference) { run(); } diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/reduce_eltwise.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/reduce_eltwise.hpp index 1fac483210afd5..dc16665db9d884 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/reduce_eltwise.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/reduce_eltwise.hpp @@ -6,10 +6,10 @@ #include "shared_test_classes/subgraph/reduce_eltwise.hpp" -namespace SubgraphTestsDefinitions { - -TEST_P(ReduceEltwiseTest, CompareWithRefs) { - Run(); +namespace ov { +namespace test { +TEST_P(ReduceEltwiseTest, Inference) { + run(); }; - -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp index cc1c7c49225a30..cddf185ec05f67 100644 --- 
a/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp @@ -6,9 +6,10 @@ #include "shared_test_classes/subgraph/scaleshift.hpp" -namespace SubgraphTestsDefinitions { - -TEST_P(ScaleShiftLayerTest, CompareWithRefs){ - Run(); +namespace ov { +namespace test { +TEST_P(ScaleShiftLayerTest, Inference){ + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/models/custom_template_op.prototxt b/src/tests/functional/plugin/shared/models/custom_template_op.prototxt index 38e0fec48b6dd1..6a8f28425108d0 100644 --- a/src/tests/functional/plugin/shared/models/custom_template_op.prototxt +++ b/src/tests/functional/plugin/shared/models/custom_template_op.prototxt @@ -5,13 +5,8 @@ graph { input: "A" output: "Y" name: "operation" - op_type: "Template" - domain: "custom_domain" - attribute { - name: "add" - type: INT - i: 11 - } + op_type: "Identity" + domain: "" } name: "test_graph" input { diff --git a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp index 5e2c3bbfd9ea80..9436b8426cda00 100644 --- a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties_hetero.cpp @@ -49,7 +49,7 @@ TEST_P(OVClassHeteroCompiledModelGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricN ov::Any heteroConfigValue = heteroExeNetwork.get_property(deviceConf); ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf && + if (ov::internal::exclusive_async_requests.name() != deviceConf && ov::supported_properties.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); diff --git a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp index 38072636f3d370..cebdc2ded6775d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "common_test_utils/test_common.hpp" -#include #include "behavior/executable_network/exec_graph_info.hpp" +#include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/exec_model_info.hpp" + namespace ExecutionGraphTests { const char serialize_test_model[] = R"V0G0N( @@ -490,7 +491,7 @@ TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) { names.insert(op->get_friendly_name()); const auto & rtInfo = op->get_rt_info(); - auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE); + auto it = rtInfo.find(ov::exec_model_info::LAYER_TYPE); ASSERT_NE(rtInfo.end(), it); } }; diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp index ad1bac678f81de..76da288eb2cab0 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"behavior/infer_request/memory_states.hpp" - #include +#include "behavior/infer_request/memory_states.hpp" #include "blob_factory.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/sigmoid.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/sigmoid.hpp" namespace BehaviorTestsDefinitions { std::string InferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo& obj) { @@ -36,8 +35,8 @@ void InferRequestVariableStateTest::SetUp() { } InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() { - ngraph::Shape shape = {1, 200}; - ngraph::element::Type type = ngraph::element::f32; + ov::Shape shape = {1, 200}; + ov::element::Type type = ov::element::f32; auto input = std::make_shared(type, shape); auto mem_i1 = std::make_shared(type, shape, 0); @@ -60,8 +59,7 @@ InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() { mem_w2->add_control_dependency(mem_r2); sigm->add_control_dependency(mem_w2); - auto function = - std::make_shared(ngraph::NodeVector{sigm}, ngraph::ParameterVector{input}, "addOutput"); + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "addOutput"); return InferenceEngine::CNNNetwork{function}; } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp index c05b5540243984..2e098ed80884af 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/get_metric.cpp @@ -261,7 +261,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet ov::Any heteroConfigValue = heteroExeNetwork.get_property(deviceConf); ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf && + if (ov::internal::exclusive_async_requests.name() != deviceConf && ov::supported_properties.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); @@ -298,7 +298,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); // HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) { + if (ov::internal::exclusive_async_requests.name() != deviceConf) { std::stringstream strm; deviceConfigValue.print(strm); strm << " "; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp new file mode 100644 index 00000000000000..b075da8365ddad --- /dev/null +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/memory_states.cpp @@ -0,0 +1,270 @@ +// // Copyright (C) 2018-2023 Intel Corporation +// // SPDX-License-Identifier: Apache-2.0 +// // + +#include "behavior/ov_infer_request/memory_states.hpp" + +#include "base/behavior_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/sigmoid.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVInferRequestVariableStateTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::ostringstream result; + std::shared_ptr net; + 
std::string deviceName; + std::vector statesToQuery; + ov::AnyMap configuration; + std::tie(net, statesToQuery, deviceName, configuration) = obj.param; + result << "targetDevice=" << deviceName; + if (!configuration.empty()) { + using namespace ov::test::utils; + for (auto& configItem : configuration) { + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + } + } + return result.str(); +} + +void OVInferRequestVariableStateTest::SetUp() { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + std::tie(net, statesToQuery, deviceName, configuration) = GetParam(); + OVInferRequestTestBase::SetUp(); +} + +void OVInferRequestVariableStateTest::TearDown() { + OVInferRequestTestBase::TearDown(); +} + +std::shared_ptr OVInferRequestVariableStateTest::get_network() { + ov::Shape shape = {1, 200}; + ov::element::Type type = ov::element::f32; + + auto input = std::make_shared(type, shape); + auto mem_i1 = std::make_shared(type, shape, 0); + auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); + auto mul1 = std::make_shared(mem_r1, input); + + auto mem_i2 = std::make_shared(type, shape, 0); + auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); + auto mul2 = std::make_shared(mem_r2, mul1); + auto mem_w2 = std::make_shared(mul2, "c_1-3"); + + auto mem_w1 = std::make_shared(mul2, "r_1-3"); + auto sigm = std::make_shared(mul2); + sigm->set_friendly_name("sigmod_state"); + sigm->get_output_tensor(0).set_names({"sigmod_state"}); + mem_r1->set_friendly_name("Memory_1"); + mem_r1->get_output_tensor(0).set_names({"Memory_1"}); + mem_w1->add_control_dependency(mem_r1); + sigm->add_control_dependency(mem_w1); + + mem_r2->set_friendly_name("Memory_2"); + mem_r2->get_output_tensor(0).set_names({"Memory_2"}); + mem_w2->add_control_dependency(mem_r2); + sigm->add_control_dependency(mem_w2); + + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "add_output"); + return function; +} + +ov::CompiledModel OVInferRequestVariableStateTest::prepare_network() { + net->add_output("Memory_1"); + net->add_output("Memory_2"); + ov::Core core = createCoreWithTemplate(); + return core.compile_model(net, deviceName, configuration); +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_QueryState) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + + auto states = infer_req.query_state(); + ASSERT_TRUE(states.size() == 2) << "Incorrect number of VariableStates"; + + for (auto&& state : states) { + auto name = state.get_name(); + ASSERT_TRUE(std::find(statesToQuery.begin(), statesToQuery.end(), name) != statesToQuery.end()) + << "State " << name << "expected to be in memory states but it is not!"; + } +} + +TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_SetState) { + auto executable_net = prepare_network(); + auto infer_req = executable_net.create_infer_request(); + + const float new_state_val = 13.0f; + for (auto&& state : infer_req.query_state()) { + state.reset(); + auto state_val = state.get_state(); + auto element_count = state_val.get_size(); + auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count})); + std::fill_n(state_tensor.data(), element_count, new_state_val); + state.set_state(state_tensor); + } + + for (auto&& state : infer_req.query_state()) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = 
+    for (auto&& state : infer_req.query_state()) {
+        auto last_state = state.get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+        for (int i = 0; i < last_state_size; i++) {
+            EXPECT_NEAR(new_state_val, last_state_data[i], 1e-5);
+        }
+    }
+}
+
+TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_Reset) {
+    auto executable_net = prepare_network();
+    auto infer_req = executable_net.create_infer_request();
+
+    const float new_state_val = 13.0f;
+    for (auto&& state : infer_req.query_state()) {
+        state.reset();
+        auto state_val = state.get_state();
+        auto element_count = state_val.get_size();
+
+        auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count}));
+        std::fill_n(state_tensor.data<float>(), element_count, new_state_val);
+        state.set_state(state_tensor);
+    }
+
+    infer_req.query_state().front().reset();
+
+    auto states = infer_req.query_state();
+    for (int i = 0; i < states.size(); ++i) {
+        auto last_state = states[i].get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+        if (i == 0) {
+            for (int j = 0; j < last_state_size; ++j) {
+                EXPECT_NEAR(0, last_state_data[j], 1e-5);
+            }
+        } else {
+            for (int j = 0; j < last_state_size; ++j) {
+                EXPECT_NEAR(new_state_val, last_state_data[j], 1e-5);
+            }
+        }
+    }
+}
+
+TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_2infers_set) {
+    auto executable_net = prepare_network();
+    auto infer_req = executable_net.create_infer_request();
+    auto infer_req2 = executable_net.create_infer_request();
+
+    const float new_state_val = 13.0f;
+    for (auto&& state : infer_req.query_state()) {
+        state.reset();
+        auto state_val = state.get_state();
+        auto element_count = state_val.get_size();
+
+        auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count}));
+        std::fill_n(state_tensor.data<float>(), element_count, new_state_val);
+        state.set_state(state_tensor);
+    }
+    for (auto&& state : infer_req2.query_state()) {
+        state.reset();
+    }
+
+    auto states = infer_req.query_state();
+    auto states2 = infer_req2.query_state();
+    for (int i = 0; i < states.size(); ++i) {
+        auto last_state = states[i].get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+
+        for (int j = 0; j < last_state_size; ++j) {
+            EXPECT_NEAR(13.0f, last_state_data[j], 1e-5);
+        }
+    }
+    for (int i = 0; i < states2.size(); ++i) {
+        auto last_state = states2[i].get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+
+        for (int j = 0; j < last_state_size; ++j) {
+            EXPECT_NEAR(0, last_state_data[j], 1e-5);
+        }
+    }
+}
+
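+// The two requests below share one compiled model but must keep independent
+// states: request 1 is reset (states = 0), so after infer() every Multiply
+// yields 0 and the output is Sigmoid(0) = 0.5, while request 2 keeps its
+// preset value of 13 because it never runs.
+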
+TEST_P(OVInferRequestVariableStateTest, inferreq_smoke_VariableState_2infers) {
+    auto executable_net = prepare_network();
+    auto infer_req = executable_net.create_infer_request();
+    auto infer_req2 = executable_net.create_infer_request();
+    const float new_state_val = 13.0f;
+
+    // set the input data for the network
+    auto input = executable_net.input();
+    auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape());
+    infer_req.set_tensor(input, tensor);
+
+    // initial state for 2nd infer request
+    for (auto&& state : infer_req2.query_state()) {
+        auto state_val = state.get_state();
+        auto element_count = state_val.get_size();
+
+        auto state_tensor = ov::Tensor(state_val.get_element_type(), ov::Shape({1, element_count}));
+        std::fill_n(state_tensor.data<float>(), element_count, new_state_val);
+        state.set_state(state_tensor);
+    }
+
+    // reset state for 1st infer request
+    for (auto&& state : infer_req.query_state()) {
+        state.reset();
+    }
+
+    infer_req.infer();
+    auto states = infer_req.query_state();
+    auto states2 = infer_req2.query_state();
+    // check the output and state of 1st request
+    auto output_tensor = infer_req.get_tensor("sigmod_state");
+    auto output_data = output_tensor.data();
+    auto data = static_cast<float*>(output_data);
+    for (int i = 0; i < output_tensor.get_size(); i++) {
+        EXPECT_NEAR(0.5f, data[i], 1e-5);
+    }
+    for (int i = 0; i < states.size(); ++i) {
+        auto last_state = states[i].get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+
+        for (int j = 0; j < last_state_size; ++j) {
+            EXPECT_NEAR(0.0, last_state_data[j], 1e-5);
+        }
+    }
+
+    // check the states of the 2nd request
+    for (int i = 0; i < states2.size(); ++i) {
+        auto last_state = states2[i].get_state();
+        auto last_state_size = last_state.get_size();
+        auto last_state_data = static_cast<float*>(last_state.data());
+
+        ASSERT_TRUE(last_state_size != 0) << "State size should not be 0";
+
+        for (int j = 0; j < last_state_size; ++j) {
+            EXPECT_NEAR(new_state_val, last_state_data[j], 1e-5);
+        }
+    }
+}
+
+}  // namespace behavior
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp
index 8ff25a6b3771e8..5b1231df8e382d 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp
@@ -4,6 +4,7 @@
 
 #include "behavior/ov_infer_request/wait.hpp"
 #include "openvino/runtime/exception.hpp"
+#include "ie_plugin_config.hpp"
 
 namespace ov {
 namespace test {
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp
index 88ff16545f2f2e..6136823f7e09e4 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp
@@ -444,6 +444,68 @@ TEST_P(CompileModelCacheRuntimePropertiesTestBase, CanLoadFromFileWithoutExcepti
     run();
 }
 
+std::string CompileModelLoadFromCacheTest::getTestCaseName(
+    testing::TestParamInfo<compileModelLoadFromCacheParams> obj) {
+    auto param = obj.param;
+    auto deviceName = std::get<0>(param);
+    auto configuration = std::get<1>(param);
+    std::ostringstream result;
+    std::replace(deviceName.begin(), deviceName.end(), ':', '.');
+    result << "device_name=" << deviceName << "_";
+    for (auto& iter : configuration) {
+        result << "_" << iter.first << "_" << iter.second.as<std::string>() << "_";
+    }
+    return result.str();
+}
+
+void CompileModelLoadFromCacheTest::SetUp() {
+    ovModelWithName funcPair;
+    std::tie(targetDevice, configuration) = GetParam();
+    target_device = targetDevice;
+    APIBaseTest::SetUp();
+    std::stringstream ss;
+    std::string filePrefix = ov::test::utils::generateTestFilePrefix();
+    ss << "testCache_" << filePrefix;
+    m_modelName = ss.str() + ".xml";
+    m_weightsName = ss.str() + ".bin";
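+    // The cache folder name built below embeds the generated file prefix and
+    // every config entry, so runs with different properties never share cached blobs.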
<< iter.first << "_" << iter.second.as() << "_"; + } + m_cacheFolderName = ss.str(); + core->set_property(ov::cache_dir()); + ov::pass::Manager manager; + manager.register_pass(m_modelName, m_weightsName); + manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f32)); +} + +void CompileModelLoadFromCacheTest::TearDown() { + ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob"); + ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache"); + ov::test::utils::removeIRFiles(m_modelName, m_weightsName); + std::remove(m_cacheFolderName.c_str()); + core->set_property(ov::cache_dir()); + APIBaseTest::TearDown(); +} + +void CompileModelLoadFromCacheTest::run() { + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + core->set_property(ov::cache_dir(m_cacheFolderName)); + compiledModel = core->compile_model(m_modelName, targetDevice, configuration); + EXPECT_EQ(false, compiledModel.get_property(ov::loaded_from_cache.name()).as()); + + std::stringstream strm; + compiledModel.export_model(strm); + ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); + EXPECT_EQ(false, importedCompiledModel.get_property(ov::loaded_from_cache.name()).as()); + + compiledModel = core->compile_model(m_modelName, targetDevice, configuration); + EXPECT_EQ(true, compiledModel.get_property(ov::loaded_from_cache.name()).as()); +} + +TEST_P(CompileModelLoadFromCacheTest, CanGetCorrectLoadedFromCacheProperty) { + run(); +} + std::string CompileModelLoadFromMemoryTestBase::getTestCaseName( testing::TestParamInfo obj) { auto param = obj.param; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index 56ffa5a1be4448..6db5526b33e941 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -6,6 +6,7 @@ #include "behavior/ov_plugin/properties_tests.hpp" #include "openvino/runtime/properties.hpp" +#include "ie_plugin_config.hpp" #include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { @@ -266,14 +267,6 @@ std::vector OVPropertiesTestsWithCompileModelProps::getRWMandatoryPr res.push_back({{ov::enable_profiling(false)}}); } - if (props.empty() || std::find(props.begin(), props.end(), ov::log::level.name()) != props.end()) { - ov::log::Level log_levels[] = {ov::log::Level::NO , ov::log::Level::ERR, ov::log::Level::WARNING, - ov::log::Level::INFO, ov::log::Level::DEBUG, ov::log::Level::TRACE}; - for (auto &log_level : log_levels) { - res.push_back({ov::log::level(log_level)}); - } - } - if (props.empty() || std::find(props.begin(), props.end(), ov::streams::num.name()) != props.end()) { res.push_back({ov::streams::num(3)}); } @@ -346,6 +339,14 @@ std::vector OVPropertiesTestsWithCompileModelProps::getRWOptionalPro res.push_back({ov::enable_mmap(false)}); } + if (props.empty() || std::find(props.begin(), props.end(), ov::log::level.name()) != props.end()) { + ov::log::Level log_levels[] = {ov::log::Level::NO , ov::log::Level::ERR, ov::log::Level::WARNING, + ov::log::Level::INFO, ov::log::Level::DEBUG, ov::log::Level::TRACE}; + for (auto &log_level : log_levels) { + res.push_back({ov::log::level(log_level)}); + } + } + return res; } diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp index 
5e257bee4903db..e8a0640f563459 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp @@ -9,7 +9,6 @@ #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" #include -#include "ie_algorithm.hpp" #include "common_test_utils/subgraph_builders/split_conv_concat.hpp" #include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" #include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp" diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp index aba0d057a4320e..ee78994247f238 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp @@ -2,28 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include - -#include "exec_graph_info.hpp" +#include "execution_graph_tests/runtime_precision.hpp" #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/ov_plugin_cache.hpp" -#include "functional_test_utils/skip_tests_config.hpp" #include "common_test_utils/node_builders/binary_convolution.hpp" -#include "common_test_utils/node_builders/eltwise.hpp" #include "common_test_utils/node_builders/constant.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" +#include "functional_test_utils/ov_plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/runtime/exec_model_info.hpp" -#include "execution_graph_tests/runtime_precision.hpp" +#include +#include +#include +#include +#include +#include namespace ExecutionGraphTests { std::shared_ptr makeEltwiseFunction(const std::vector& inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 2); + OPENVINO_ASSERT(inputPrecisions.size() == 2); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4}), @@ -38,7 +37,7 @@ std::shared_ptr makeEltwiseFunction(const std::vector makeFakeQuantizeReluFunction(const std::vector& inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 1); + OPENVINO_ASSERT(inputPrecisions.size() == 1); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4})}; auto inputLowNode = ov::test::utils::deprecated::make_constant(ov::element::f32, {1, 1, 1, 1}, {0}); @@ -56,7 +55,7 @@ std::shared_ptr makeFakeQuantizeReluFunction(const std::vector makeFakeQuantizeBinaryConvolutionFunction(const std::vector &inputPrecisions) { - IE_ASSERT(inputPrecisions.size() == 1); + OPENVINO_ASSERT(inputPrecisions.size() == 1); ov::ParameterVector inputs{std::make_shared(inputPrecisions[0], ov::Shape{1, 16, 5, 4})}; auto inputLowNode = ov::test::utils::deprecated::make_constant(ov::element::f32, {1, 1, 1, 1}, {1}); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp index 3faf532bdebfaa..afcb3053933f4a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp @@ -8,25 +8,24 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include 
"ov_lpt_models/add.hpp" #include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { std::string AddTransformation::getTestCaseName(const testing::TestParamInfo< AddTransformationParams>& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); AddTestValues param; std::tie(netPrecision, inputShapes, targetDevice, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - (param.broadcast ? "_broadcast" : ""); + result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << + (param.broadcast ? "_broadcast" : ""); for (const auto& elem : param.precisionOnActivations) { result << "_" << elem << "_"; } @@ -53,11 +52,20 @@ std::string AddTransformation::getTestCaseName(const testing::TestParamInfo< Add } void AddTransformation::SetUp() { - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + abs_threshold = 1.1; + rel_threshold = 3; + ov::element::Type precision; + ov::PartialShape inputShape; AddTestValues param; std::tie(precision, inputShape, targetDevice, param) = this->GetParam(); + ov::PartialShape inputShape2 = inputShape; + if (param.broadcast) { + inputShape2[2] = 1; + inputShape2[3] = 1; + } + init_input_shapes({ inputShape, inputShape2 }); + function = ngraph::builder::subgraph::AddFunction::getOriginal( precision, inputShape, param.broadcast, param.fakeQuantize1, param.fakeQuantize2); @@ -66,7 +74,7 @@ void AddTransformation::SetUp() { } TEST_P(AddTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp index 2c55bd00187bd6..83a63907427896 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp @@ -6,15 +6,14 @@ #include #include #include -#include #include "ov_lpt_models/assign_and_read_value.hpp" namespace LayerTestsDefinitions { std::string AssignAndReadValueTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; size_t opset; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; @@ -22,19 +21,21 @@ std::string AssignAndReadValueTransformation::getTestCaseName(const testing::Tes std::tie(netPrecision, inputShape, opset, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" << - param.fakeQuantize << "_" << opset; + result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" << + param.fakeQuantize << "_" << opset; return result.str(); } void AssignAndReadValueTransformation::SetUp() { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; size_t opset; 
ov::pass::low_precision::LayerTransformation::Params params; AssignAndReadValueTransformationParam param; std::tie(netPrecision, inputShape, opset, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::AssignAndReadValueFunction::getOriginal( netPrecision, inputShape, @@ -43,7 +44,7 @@ void AssignAndReadValueTransformation::SetUp() { } TEST_P(AssignAndReadValueTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp index 5b0e5997147a86..e221666dc1808e 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp @@ -6,15 +6,14 @@ #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_lpt_models/batch_to_space.hpp" namespace LayerTestsDefinitions { std::string BatchToSpaceTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type input_type; + ov::element::Type input_type; std::string target_device; BatchToSpaceTransformationParam param; std::tie(input_type, target_device, param) = obj.param; @@ -25,10 +24,14 @@ std::string BatchToSpaceTransformation::getTestCaseName(const testing::TestParam } void BatchToSpaceTransformation::SetUp() { - ngraph::element::Type input_type; + abs_threshold = 1.1; + + ov::element::Type input_type; BatchToSpaceTransformationParam param; std::tie(input_type, targetDevice, param) = this->GetParam(); + init_input_shapes(param.input_shape); + function = ngraph::builder::subgraph::BatchToSpaceFunction::get( param.input_shape, input_type, @@ -38,8 +41,8 @@ void BatchToSpaceTransformation::SetUp() { param.crops_end); } -void BatchToSpaceTransformation::Run() { - LayerTestsCommon::Run(); +void BatchToSpaceTransformation::run() { + LayerTransformation::run(); const auto params = std::get<2>(GetParam()); auto expected_type = params.expected_kernel_type; @@ -48,13 +51,13 @@ void BatchToSpaceTransformation::Run() { expected_type = "f16"; } - const auto actual_type = getRuntimePrecisionByType(params.layer_type); + const auto actual_type = get_runtime_precision_by_type(params.layer_type); EXPECT_EQ(actual_type, expected_type); } TEST_P(BatchToSpaceTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp index 8f14935ee8ba95..c6eb973f776160 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp @@ -6,35 +6,38 @@ #include #include #include -#include #include "ov_lpt_models/clamp.hpp" namespace LayerTestsDefinitions { std::string ClampTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; std::string targetDevice; 
ov::pass::low_precision::LayerTransformation::Params params; ClampTransformationParam param;; std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" << - param.fakeQuantize << "_" << + result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" << + param.fakeQuantize << "_" << "min=" << param.clampLowConst << "max=" << param.clampHighConst; return result.str(); } void ClampTransformation::SetUp() { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + abs_threshold = 1.1; + + ov::element::Type netPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; ClampTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::ClampFunction::getOriginal( netPrecision, inputShape, @@ -44,7 +47,7 @@ void ClampTransformation::SetUp() { } TEST_P(ClampTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp index 392b14b2416587..c5a87037f349fa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp @@ -8,17 +8,16 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/concat.hpp" namespace LayerTestsDefinitions { std::string ConcatTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type precision; - ngraph::PartialShape inputShapes; + ov::element::Type precision; + ov::PartialShape inputShapes; std::string targetDevice; ConcatTransformationTestValues testValues; std::tie(precision, inputShapes, targetDevice, testValues) = obj.param; @@ -26,31 +25,32 @@ std::string ConcatTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); - - const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 
2.f : 3.f); - return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); -} - void ConcatTransformation::SetUp() { - ngraph::PartialShape inputShape; - ngraph::element::Type precision; + abs_threshold = 0.1; + rel_threshold = 4.2; + + ov::PartialShape inputShape; + ov::element::Type precision; ConcatTransformationTestValues testValues; std::tie(precision, inputShape, targetDevice, testValues) = this->GetParam(); + std::vector inputs; + if (testValues.input_constant1 == nullptr) { + inputs.push_back(inputShape); + } + if (testValues.input_constant2 == nullptr) { + inputs.push_back(inputShape); + } + init_input_shapes(inputs); + function = ngraph::builder::subgraph::ConcatFunction::getOriginal( precision, inputShape, @@ -63,7 +63,7 @@ void ConcatTransformation::SetUp() { } TEST_P(ConcatTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp index adbef1bcbf7aea..ce6005c5328e8d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp @@ -8,20 +8,16 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/builders.hpp" #include "ov_lpt_models/concat.hpp" -using namespace InferenceEngine; -using namespace InferenceEngine::details; - namespace LayerTestsDefinitions { std::string ConcatWithChildAndOutputTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ConcatWithChildAndOutputTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; @@ -29,8 +25,8 @@ std::string ConcatWithChildAndOutputTransformation::getTestCaseName(const testin std::ostringstream result; result << - getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - param.fqOnData1 << param.fqOnData2; + get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << + param.fqOnData1 << param.fqOnData2; return result.str(); } @@ -46,18 +42,23 @@ std::string ConcatWithChildAndOutputTransformation::getTestCaseName(const testin */ void ConcatWithChildAndOutputTransformation::SetUp() { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + rel_threshold = 5; + abs_threshold = 0.1; + + ov::element::Type netPrecision; + ov::PartialShape inputShapes; ConcatWithChildAndOutputTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); + init_input_shapes({ inputShapes, inputShapes }); + function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithChildAndOutput( netPrecision, inputShapes, param.fqOnData1, param.fqOnData2); } TEST_P(ConcatWithChildAndOutputTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp index 0114a1ed06c1aa..9312eeaedd5504 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -8,20 +8,16 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/builders.hpp" #include "ov_lpt_models/concat.hpp" -using namespace InferenceEngine; -using namespace InferenceEngine::details; - namespace LayerTestsDefinitions { std::string ConcatWithDifferentChildrenTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ConcatWithDifferentChildrenTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; @@ -29,37 +25,29 @@ std::string ConcatWithDifferentChildrenTransformation::getTestCaseName(const tes std::ostringstream result; result << - getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - "_axis_" << param.axis << "_" << param.fqOnData1 << param.fqOnData2; + get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << + "_axis_" << param.axis << "_" << param.fqOnData1 << param.fqOnData2; return result.str(); } -InferenceEngine::Blob::Ptr ConcatWithDifferentChildrenTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; - std::string targetDevice; - ConcatWithDifferentChildrenTransformationParam param; - ov::pass::low_precision::LayerTransformation::Params params; - std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); - - const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 
2.f : 3.f); - return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); -} - void ConcatWithDifferentChildrenTransformation::SetUp() { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + abs_threshold = 0.1; + + ov::element::Type netPrecision; + ov::PartialShape inputShapes; ConcatWithDifferentChildrenTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); + init_input_shapes({ inputShapes, inputShapes }); + function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren( netPrecision, inputShapes, param.axis, param.fqOnData1, param.fqOnData2); } TEST_P(ConcatWithDifferentChildrenTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp index 55d9b7b6b2577f..6a75449709eed6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -8,20 +8,16 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/builders.hpp" #include "ov_lpt_models/concat.hpp" -using namespace InferenceEngine; -using namespace InferenceEngine::details; - namespace LayerTestsDefinitions { std::string ConcatWithIntermediateTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; bool transparentIntermediate; @@ -30,25 +26,13 @@ std::string ConcatWithIntermediateTransformation::getTestCaseName(const testing: std::ostringstream result; result << - getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - (transparentIntermediate ? "" : "_notTransparentIntermediate") << + get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << + (transparentIntermediate ? "" : "_notTransparentIntermediate") << (multichannel ? "_multichannel" : ""); return result.str(); } -InferenceEngine::Blob::Ptr ConcatWithIntermediateTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; - std::string targetDevice; - ov::pass::low_precision::LayerTransformation::Params trasformationParams; - bool transparentIntermediate; - bool multichannel; - std::tie(netPrecision, inputShape, targetDevice, trasformationParams, transparentIntermediate, multichannel) = this->GetParam(); - - const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 
2.f : 3.f); - return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); -} /* * FQ FQ @@ -59,23 +43,36 @@ InferenceEngine::Blob::Ptr ConcatWithIntermediateTransformation::GenerateInput(c */ void ConcatWithIntermediateTransformation::SetUp() { - ngraph::element::Type ngPrecision; - ngraph::PartialShape inputShape; + abs_threshold = 0.1; + + ov::element::Type ngPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params trasformationParams; bool transparentIntermediate; bool multichannel; std::tie(ngPrecision, inputShape, targetDevice, trasformationParams, transparentIntermediate, multichannel) = this->GetParam(); + ov::PartialShape inputShape1 = inputShape; + if (inputShape1[2].is_static() && transparentIntermediate) { + inputShape1[2] = inputShape1[2].get_length() - 2; + } + + if (inputShape1[3].is_static() && transparentIntermediate) { + inputShape1[3] = inputShape1[3].get_length() - 2; + } + + init_input_shapes({ inputShape1, inputShape }); + function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithIntermediate( ngPrecision, inputShape, transparentIntermediate, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }); + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }); } TEST_P(ConcatWithIntermediateTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp index b81799f933e242..65bfa89181328d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp @@ -8,57 +8,46 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/builders.hpp" #include "ov_lpt_models/concat.hpp" namespace LayerTestsDefinitions { std::string ConcatWithNeighborsGraphTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type precision; - ngraph::PartialShape inputShapes; + ov::element::Type precision; + ov::PartialShape inputShapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; std::tie(precision, inputShapes, targetDevice, params) = obj.param; - return getTestCaseNameByParams(precision, inputShapes, targetDevice, params); + return get_test_case_name_by_params(precision, inputShapes, targetDevice, params); } -InferenceEngine::Blob::Ptr ConcatWithNeighborsGraphTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; - std::string targetDevice; - ov::pass::low_precision::LayerTransformation::Params params; - std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); - - if ((info.name() != "input1") && (info.name() != "input2") && (info.name() != "input3")) { - IE_THROW() << "unexpected input name " << info.name(); - } - const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 
2.f : 3.f); - return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); -} void ConcatWithNeighborsGraphTransformation::SetUp() { - threshold = 2.e-2; - ngraph::element::Type ngPrecision; - ngraph::PartialShape inputShape; + rel_threshold = 0.1; + abs_threshold = 0.1; + ov::element::Type ngPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; std::tie(ngPrecision, inputShape, targetDevice, params) = this->GetParam(); + init_input_shapes({ inputShape, inputShape, inputShape }); + function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithNeighbors( ngPrecision, inputShape, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }, - { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 3.f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }, + { 256ul, ov::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 3.f} }, "concat", ""); } TEST_P(ConcatWithNeighborsGraphTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp index c12ba682644daf..c7e59167aad0cb 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp @@ -8,41 +8,26 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_models/builders.hpp" #include "ov_lpt_models/concat.hpp" -using namespace InferenceEngine; -using namespace InferenceEngine::details; - namespace LayerTestsDefinitions { std::string ConcatWithSplitTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ConcatWithSplitTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, param, params) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << param.fqOnData1 << "_" << param.fqOnData2; + result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << param.fqOnData1 << "_" << param.fqOnData2; return result.str(); } -InferenceEngine::Blob::Ptr ConcatWithSplitTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; - std::string targetDevice; - ConcatWithSplitTransformationParam param; - ov::pass::low_precision::LayerTransformation::Params params; - std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); - - const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 
2.f : 3.f); - return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k); -} /* * FQ FQ @@ -53,12 +38,20 @@ InferenceEngine::Blob::Ptr ConcatWithSplitTransformation::GenerateInput(const In */ void ConcatWithSplitTransformation::SetUp() { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + abs_threshold = 0.1; + rel_threshold = 4.2; + + ov::element::Type netPrecision; + ov::PartialShape inputShapes; ConcatWithSplitTransformationParam param; ov::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam(); + auto inputShape1 = inputShapes; + const size_t numSplit = 2; + inputShape1[1] = inputShape1[1].get_length() / numSplit; + init_input_shapes({ inputShape1, inputShapes }); + function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithSplitedIntermediate( netPrecision, inputShapes, @@ -68,7 +61,7 @@ void ConcatWithSplitTransformation::SetUp() { } TEST_P(ConcatWithSplitTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp index fd4e9f4fdf1550..d61cd20dac8560 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -13,17 +13,17 @@ namespace LayerTestsDefinitions { std::string ConvolutionBackpropDataTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - std::pair inputShape; - ngraph::Shape outputShape; + ov::element::Type netPrecision; + std::pair inputShape; + ov::Shape outputShape; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionBackpropDataTransformationParam param; std::tie(netPrecision, inputShape, outputShape, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape.first, targetDevice, params) << "_" << - outputShape << "_" << + result << get_test_case_name_by_params(netPrecision, inputShape.first, targetDevice, params) << "_" << + outputShape << "_" << param.fakeQuantizeOnData << "_" << param.fakeQuantizeOnWeights << "_" << param.dequantizationOnWeights; @@ -31,19 +31,24 @@ std::string ConvolutionBackpropDataTransformation::getTestCaseName(const testing } void ConvolutionBackpropDataTransformation::SetUp() { - threshold = 0.1f; + rel_threshold = 3.1; + abs_threshold = 800.1; - ngraph::element::Type netPrecision; - std::pair inputShapeAndHandling; - ngraph::Shape outputShape; + ov::element::Type netPrecision; + std::pair inputShapeAndHandling; + ov::Shape outputShape; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionBackpropDataTransformationParam param; std::tie(netPrecision, inputShapeAndHandling, outputShape, targetDevice, params, param) = this->GetParam(); - std::shared_ptr weights; + + std::shared_ptr weights; const auto inputShape = inputShapeAndHandling.first; - ngraph::Shape weightsShape(4, 1ul); + + init_input_shapes(inputShape); + + ov::Shape weightsShape(4, 1ul); weightsShape[0] = inputShape[1].get_length(); weightsShape[1] = inputShape[1].get_length() / 2; @@ -67,20 
+72,20 @@ void ConvolutionBackpropDataTransformation::SetUp() { weights); } -void ConvolutionBackpropDataTransformation::Run() { - LayerTestsCommon::Run(); +void ConvolutionBackpropDataTransformation::run() { + LayerTransformation::run(); const auto inputShape = std::get<1>(GetParam()); if (inputShape.second) { const auto params = std::get<5>(GetParam()); - const auto actualType = getRuntimePrecision(params.layerName); + const auto actualType = get_runtime_precision(params.layerName); EXPECT_EQ(actualType, params.expectedKernelType); } } TEST_P(ConvolutionBackpropDataTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp index 202576fed9ecee..1ff7b6cf06f6f5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -21,27 +20,30 @@ namespace LayerTestsDefinitions { std::string ConvolutionQDqTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionQDqTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << param; + result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << param; return result.str(); } void ConvolutionQDqTransformation::SetUp() { - // threshold = 0.1f; + rel_threshold = 0.1; + abs_threshold = 12.8; - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionQDqTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::FakeQuantizeAndConvolutionFunction::get( netPrecision, inputShape, @@ -57,17 +59,17 @@ void ConvolutionQDqTransformation::SetUp() { this->configuration[ov::hint::inference_precision.name()] = "f32"; } -void ConvolutionQDqTransformation::Run() { - LayerTestsCommon::Run(); +void ConvolutionQDqTransformation::run() { + LayerTransformation::run(); const auto params = std::get<4>(GetParam()); - const auto actualType = getRuntimePrecisionByType(params.layerName); + const auto actualType = get_runtime_precision_by_type(params.layerName); EXPECT_EQ(actualType, params.expectedKernelType); } TEST_P(ConvolutionQDqTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index 
8d524a19c8c5a9..0bdda925817f3f 100755 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -21,30 +20,33 @@ namespace LayerTestsDefinitions { std::string ConvolutionTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << - "_rank=" << inputShape.rank().get_length() << + result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << + "_rank=" << inputShape.rank().get_length() << "D_fq_on_data={" << param.fakeQuantizeOnData << "}_fq_on_weights={" << param.fakeQuantizeOnWeights << "}"; return result.str(); } void ConvolutionTransformation::SetUp() { - threshold = 0.1f; + rel_threshold = 1.0e+10; + abs_threshold = 1.4; - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::FakeQuantizeAndConvolutionFunction::get( netPrecision, inputShape, @@ -53,13 +55,13 @@ void ConvolutionTransformation::SetUp() { param.fakeQuantizeOnWeights); } -void ConvolutionTransformation::Run() { - LayerTestsCommon::Run(); +void ConvolutionTransformation::run() { + LayerTransformation::run(); const auto params = std::get<4>(GetParam()); - const auto actualPrecision = getRuntimePrecisionByType(params.layerName); + const auto actualPrecision = get_runtime_precision_by_type(params.layerName); auto expectedPrecision = params.expectedKernelType; - if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) { + if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) { expectedPrecision = "FP16"; } EXPECT_EQ(actualPrecision, expectedPrecision); @@ -67,7 +69,7 @@ void ConvolutionTransformation::Run() { TEST_P(ConvolutionTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index f4d1806ac47a57..f40dd478fdc3e5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -21,30 +20,33 @@ namespace LayerTestsDefinitions { std::string 
ConvolutionWIthIncorrectWeightsTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionWIthIncorrectWeightsParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << - (param.isCorrect ? "_correct_weights" : "_incorrect_weights") << + result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << + (param.isCorrect ? "_correct_weights" : "_incorrect_weights") << (param.fakeQuantizeOnData.empty() ? "_noFqOnActivations" : "") << (param.fakeQuantizeOnWeights.empty() ? "_noFqOnWeights" : ""); return result.str(); } void ConvolutionWIthIncorrectWeightsTransformation::SetUp() { - threshold = 0.1f; + rel_threshold = 0.1; + abs_threshold = 16.1; - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShape; + ov::element::Type netPrecision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; ConvolutionWIthIncorrectWeightsParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::ConvolutionFunction::getOriginalWithIncorrectWeights( inputShape, netPrecision, @@ -54,7 +56,7 @@ void ConvolutionWIthIncorrectWeightsTransformation::SetUp() { } TEST_P(ConvolutionWIthIncorrectWeightsTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index ef1348d619ce46..af65c153c13e4c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -2,30 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "low_precision_transformations/depth_to_space_transformation.hpp" - +#include #include +#include #include #include -#include -#include #include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" - -#include "ov_models/pass/convert_prc.hpp" -#include "ov_models/builders.hpp" - -#include -#include -#include -#include -#include -#include - +#include "low_precision_transformations/depth_to_space_transformation.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/depth_to_space.hpp" +#include "openvino/pass/constant_folding.hpp" #include "ov_lpt_models/depth_to_space.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "transformations/common_optimizations/depth_to_space_fusion.hpp" +#include "transformations/init_node_info.hpp" +#include "transformations/utils/utils.hpp" namespace LayerTestsDefinitions { @@ -35,8 +30,8 @@ std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParam {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, 
}; - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + ov::element::Type precision; + ov::PartialShape inputShape; std::string targetDevice; ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; @@ -44,18 +39,20 @@ std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParam std::tie(precision, inputShape, targetDevice, mode, blockSize) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(precision, inputShape, targetDevice, params) << - "_" << names[mode] << "_" << blockSize; + result << get_test_case_name_by_params(precision, inputShape, targetDevice, params) << + "_" << names[mode] << "_" << blockSize; return result.str(); } void DepthToSpaceTransformation::SetUp() { - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + ov::element::Type precision; + ov::PartialShape inputShape; ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; std::tie(precision, inputShape, targetDevice, mode, blockSize) = this->GetParam(); + init_input_shapes(inputShape); + if (inputShape.rank().is_dynamic() || inputShape.rank().get_length() != 4) { IE_THROW() << "not supported input shape size " << inputShape.rank(); } @@ -64,7 +61,7 @@ void DepthToSpaceTransformation::SetUp() { } TEST_P(DepthToSpaceTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp index f21455b9b45c5c..a21727902d6c64 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -7,14 +7,14 @@ #include #include -#include +#include "transformations/init_node_info.hpp" #include "ov_lpt_models/add.hpp" namespace LayerTestsDefinitions { std::string ElementwiseBranchSelectionTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); ElementwiseBranchSelectionTestValues param; @@ -22,8 +22,8 @@ std::string ElementwiseBranchSelectionTransformation::getTestCaseName(const test std::tie(netPrecision, inputShapes, targetDevice, param, elementwiseType) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << - "_elementwiseType_" << elementwiseType; + result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << + "_elementwiseType_" << elementwiseType; auto toString = [](const ngraph::builder::subgraph::FakeQuantizeOnData& fqOnData) -> std::string { if (fqOnData.empty()) { @@ -48,12 +48,14 @@ std::string ElementwiseBranchSelectionTransformation::getTestCaseName(const test } void ElementwiseBranchSelectionTransformation::SetUp() { - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + ov::element::Type precision; + ov::PartialShape inputShape; ElementwiseBranchSelectionTestValues param; std::string elementwiseType; std::tie(precision, inputShape, targetDevice, param, elementwiseType) = 
this->GetParam(); + init_input_shapes({ inputShape, inputShape }); + function = ngraph::builder::subgraph::AddFunction::getOriginalSubgraphWithConvolutions( precision, inputShape, @@ -70,15 +72,15 @@ void ElementwiseBranchSelectionTransformation::SetUp() { ov::pass::InitNodeInfo().run_on_model(function); } -void ElementwiseBranchSelectionTransformation::Run() { - LayerTestsCommon::Run(); +void ElementwiseBranchSelectionTransformation::run() { + LayerTransformation::run(); const auto params = std::get<3>(GetParam()); const auto elementwiseType = std::get<4>(GetParam()); std::vector> expectedReorders = params.expectedReorders; if (!expectedReorders.empty()) { - auto rtInfo = LayerTestsCommon::getRuntimeInfo(); + auto rtInfo = LayerTransformation::get_runtime_info(); for (auto it : rtInfo) { const auto& typeIt = it.second.find("layerType"); const auto type = typeIt->second.as(); @@ -108,14 +110,15 @@ void ElementwiseBranchSelectionTransformation::Run() { } for (auto it : params.expectedPrecisions) { - const auto actualPrecision = getRuntimePrecisionByFusedName(it.first == "eltwise" ? elementwiseType : it.first); + const auto actualPrecision = get_runtime_precision_by_fused_name( + it.first == "eltwise" ? elementwiseType : it.first); ASSERT_EQ(it.second, actualPrecision) << "actual precision for operation '" << it.first << "' is not correct"; } } TEST_P(ElementwiseBranchSelectionTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp index 14cb7b9daffe48..9485fea4293053 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include "transformations/init_node_info.hpp" #include "openvino/util/common_util.hpp" #include "ov_lpt_models/fuse_fake_quantize.hpp" @@ -32,6 +32,8 @@ void EliminateFakeQuantizeTransformation::SetUp() { EliminateFakeQuantizeTransformationTestValues testValues; std::tie(targetDevice, testValues) = this->GetParam(); + init_input_shapes(testValues.inputShape); + // Convolution is used in a model as operation with specific precision requirements on data branch // to test the transformation place in LPT pipeline: // markup transformations and FakeQuantize operation decomposition transformation have to handle FakeQuantize as usual @@ -47,12 +49,13 @@ void EliminateFakeQuantizeTransformation::SetUp() { TEST_P(EliminateFakeQuantizeTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + abs_threshold = 2.3; + run(); EliminateFakeQuantizeTransformationTestValues testValues; std::tie(targetDevice, testValues) = this->GetParam(); - const auto& rtInfo = LayerTestsCommon::getRuntimeInfo(); + const auto& rtInfo = LayerTransformation::get_runtime_info(); auto exist = testValues.expected.exist; auto absent = testValues.expected.absent; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index f8e69d84d4dec0..9697080a247e08 100644 --- 
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
index f8e69d84d4dec0..9697080a247e08 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
@@ -8,33 +8,35 @@
 #include
 #include
 #include

-//#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/common/fake_quantize_on_data.hpp"
 #include "ov_lpt_models/avg_pool.hpp"

 namespace LayerTestsDefinitions {

 std::string FakeQuantizeAndAvgPoolTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type precision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize;
     std::tie(precision, inputShapes, targetDevice, params, fakeQuantize) = obj.param;

-    return getTestCaseNameByParams(precision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(precision, inputShapes, targetDevice, params);
 }

 void FakeQuantizeAndAvgPoolTransformation::SetUp() {
-    threshold = 0.5f;
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    rel_threshold = 0.5f;
+    abs_threshold = 1.0;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize;
     std::tie(precision, inputShape, targetDevice, params, fakeQuantize) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::AvgPoolFunction::getOriginal(
         precision,
         inputShape,
@@ -44,7 +46,7 @@ void FakeQuantizeAndAvgPoolTransformation::SetUp() {
 }

 TEST_P(FakeQuantizeAndAvgPoolTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
index 74725c0c965ac9..1e2a07bef51b15 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
@@ -8,32 +8,34 @@
 #include
 #include
 #include

-//#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/common/fake_quantize_on_data.hpp"
 #include "ov_lpt_models/max_pool.hpp"

 namespace LayerTestsDefinitions {

 std::string FakeQuantizeAndMaxPoolTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type precision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize;
     std::tie(precision, inputShapes, targetDevice, params, fakeQuantize) = obj.param;

-    return getTestCaseNameByParams(precision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(precision, inputShapes, targetDevice, params);
 }

 void FakeQuantizeAndMaxPoolTransformation::SetUp() {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize;
     std::tie(precision, inputShape, targetDevice, params, fakeQuantize) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::MaxPoolFunction::getOriginal(
         precision,
         inputShape,
@@ -43,7 +45,7 @@ void FakeQuantizeAndMaxPoolTransformation::SetUp() {
 }

 TEST_P(FakeQuantizeAndMaxPoolTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
index f4949d770044d9..e751ce1ba18240 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -21,8 +20,8 @@
 namespace LayerTestsDefinitions {

 std::string FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::getTestCaseName(
     const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeAndTwoOutputBranchesWithConvolution testValues;
@@ -36,13 +35,17 @@ std::string FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::getTe
 }

 void FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::SetUp() {
-    threshold = 0.1f;
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    rel_threshold = 0.1;
+    abs_threshold = 0.1;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeAndTwoOutputBranchesWithConvolution testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, testValues) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::FakeQuantizeAndTwoOutputBranchesWithConvolutionFunction::getOriginal(
         netPrecision,
         inputShape,
@@ -52,7 +55,7 @@ void FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::SetUp() {
 }

 TEST_P(FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
index 8507c678cd9a9b..21213476d721f7 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
@@ -9,32 +9,35 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 namespace LayerTestsDefinitions {

 std::string FakeQuantizePrecisionSelectionTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizePrecisionSelectionTransformationTestValues testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, testValues) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" << testValues;
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" << testValues;
     return result.str();
 }

 void FakeQuantizePrecisionSelectionTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 0.01;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizePrecisionSelectionTransformationTestValues testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, testValues) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::FakeQuantizePrecisionSelectionFunction::getOriginal(
         netPrecision,
         inputShape,
@@ -48,7 +51,7 @@ void FakeQuantizePrecisionSelectionTransformation::SetUp() {
 }

 TEST_P(FakeQuantizePrecisionSelectionTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
index 00cf867321a4b9..500b732490f997 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
@@ -8,9 +8,8 @@
 #include
 #include
 #include
-#include

-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "low_precision/fuse_subtract_to_fake_quantize.hpp"
 #include "low_precision/fuse_multiply_to_fake_quantize.hpp"
@@ -18,8 +17,8 @@
 namespace LayerTestsDefinitions {

 std::string FakeQuantizeTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeTransformationParam testParams;
@@ -27,19 +26,23 @@
     std::tie(netPrecision, inputShape, targetDevice, params, testParams, isConvertOnConstants) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        isConvertOnConstants << "_" << testParams.fakequantize;
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           isConvertOnConstants << "_" << testParams.fakequantize;
     return result.str();
 }

 void FakeQuantizeTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0e-3;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeTransformationParam testParams;
     bool isConvertOnConstants;
     std::tie(netPrecision, inputShape, targetDevice, params, testParams, isConvertOnConstants) = this->GetParam();

+    init_input_shapes(inputShape);
+
     testParams.fakequantize.addConverts = isConvertOnConstants;

     function = ngraph::builder::subgraph::FakeQuantizeFunction::getOriginal(
@@ -52,13 +55,13 @@ void FakeQuantizeTransformation::SetUp() {
     ov::pass::InitNodeInfo().run_on_model(function);
 }

-void FakeQuantizeTransformation::Run() {
-    LayerTestsCommon::Run();
+void FakeQuantizeTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<4>(GetParam());
-    const auto actualPrecision = getRuntimePrecisionByType(params.layerName);
+    const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
     auto expectedPrecision = params.expectedKernelType;
-    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
         expectedPrecision = "FP16";
     }

@@ -67,7 +70,7 @@

 TEST_P(FakeQuantizeTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
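Reviewer note: several migrated run() overrides query the kernel precision of a layer in the compiled model via get_runtime_precision_by_type / get_runtime_precision. A sketch of what such a helper conceptually does — walk the runtime (exec) graph and read rt_info. The rt_info key names "layerType" and "runtimePrecision" follow the exec-graph convention and are assumptions here; this is not the shared test utility itself:

#include <memory>
#include <string>
#include "openvino/runtime/compiled_model.hpp"

// Return the runtime precision of the first op whose exec-graph layer type matches.
std::string runtime_precision_by_type(const ov::CompiledModel& compiled, const std::string& layer_type) {
    for (const auto& node : compiled.get_runtime_model()->get_ops()) {
        const auto& rt = node->get_rt_info();
        const auto type_it = rt.find("layerType");
        if (type_it != rt.end() && type_it->second.as<std::string>() == layer_type) {
            const auto prec_it = rt.find("runtimePrecision");
            if (prec_it != rt.end())
                return prec_it->second.as<std::string>();
        }
    }
    return {};
}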
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
index e2ce31b6ecbe47..9beaad990b66ed 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
@@ -8,34 +8,37 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/fake_quantize_and_convolution.hpp"

 namespace LayerTestsDefinitions {

 std::string FakeQuantizeWithNotOptimalTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeWithNotOptimalTransformationTestValues testValues;
     std::tie(netPrecision, inputShapes, targetDevice, params, testValues) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << "_" << testValues;
+    result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << "_" << testValues;
     return result.str();
 }

 void FakeQuantizeWithNotOptimalTransformation::SetUp() {
+    abs_threshold = 4;
+    rel_threshold = 2778;
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    ngraph::PartialShape inputShape;
-    ngraph::element::Type netPrecision;
+    ov::PartialShape inputShape;
+    ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
     FakeQuantizeWithNotOptimalTransformationTestValues testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, testValues) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::FakeQuantizeAndConvolutionFunction::get(
         netPrecision,
         inputShape,
@@ -49,16 +52,17 @@ void FakeQuantizeWithNotOptimalTransformation::SetUp() {
         testValues.dequantizationAfter);
 }

-void FakeQuantizeWithNotOptimalTransformation::Run() {
-    LayerTestsCommon::Run();
+void FakeQuantizeWithNotOptimalTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecisionByType("Convolution");
+    const auto actualType = get_runtime_precision_by_type("Convolution");

     EXPECT_EQ(actualType, params.expectedPrecision);
 }

 TEST_P(FakeQuantizeWithNotOptimalTransformation, CompareWithRefImpl) {
-    Run();
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
index 7beabbd522c785..37f5a3bd11ee93 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -22,7 +21,7 @@
 namespace LayerTestsDefinitions {

 std::string FullyConnectedTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
+    ov::element::Type precision;
     MatMulShapes shapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
@@ -30,8 +29,8 @@

     std::ostringstream result;
     result <<
-        getTestCaseNameByParams(precision, shapes.inputA, targetDevice, params) <<
-        shapes.inputB << "_" <<
+        get_test_case_name_by_params(precision, shapes.inputA, targetDevice, params) <<
+        shapes.inputB << "_" <<
         shapes.transposeA << "_" <<
         shapes.transposeB;

@@ -39,11 +38,15 @@
 }

 void FullyConnectedTransformation::SetUp() {
-    ngraph::element::Type precision;
+    abs_threshold = 0.6;
+
+    ov::element::Type precision;
     MatMulShapes shapes;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(precision, shapes, targetDevice, params) = this->GetParam();

+    init_input_shapes({ shapes.inputA, shapes.inputB });
+
     function = ngraph::builder::subgraph::MatMulFunction::getOriginal(
         precision,
         shapes.inputA,
@@ -53,7 +56,8 @@ void FullyConnectedTransformation::SetUp() {
 }

 TEST_P(FullyConnectedTransformation, CompareWithRefImpl) {
-    Run();
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
index 595d8b06ebfa53..3cc6532cae5652 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -23,26 +22,29 @@
 namespace LayerTestsDefinitions {

 std::string FuseConvertTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
     std::string targetDevice;
-    ngraph::PartialShape shape;
-    ngraph::element::Type precision;
+    ov::PartialShape shape;
+    ov::element::Type precision;
     auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8();
     ngraph::builder::subgraph::DequantizationOperations deqOperations;
     bool constInput;
     std::tie(precision, shape, targetDevice, deqOperations, constInput) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(precision, shape, targetDevice, params) <<
+    result << get_test_case_name_by_params(precision, shape, targetDevice, params) <<
         "_" << deqOperations << "_" << constInput;
     return result.str();
 }

 void FuseConvertTransformation::SetUp() {
-    ngraph::PartialShape shape;
-    ngraph::element::Type precision;
+    abs_threshold = 0.01;
+    ov::PartialShape shape;
+    ov::element::Type precision;
     ngraph::builder::subgraph::DequantizationOperations deqOperations;
     bool constInput;
     std::tie(precision, shape, targetDevice, deqOperations, constInput) = this->GetParam();

+    init_input_shapes(constInput ? std::vector<ov::PartialShape>{ shape } : std::vector<ov::PartialShape>{ shape, shape });
+
     function = ngraph::builder::subgraph::FuseConvertFunction::getWithFQ(
         shape,
         precision,
@@ -51,7 +53,7 @@ void FuseConvertTransformation::SetUp() {
 }

 TEST_P(FuseConvertTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
index b814cb1ec4fecb..087b3f2a515a46 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
@@ -9,7 +9,7 @@
 #include
 #include

-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "ov_lpt_models/fuse_fake_quantize.hpp"

 namespace LayerTestsDefinitions {
@@ -33,9 +33,13 @@
 }

 void FuseDequantizeToFakeQuantizeTransformation::SetUp() {
+    abs_threshold = 0.1;
+
     FuseDequantizeToFakeQuantizeTransformationTestValues testValues;
     std::tie(targetDevice, testValues) = this->GetParam();

+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::FuseFakeQuantizeFunction::getOriginal(
         testValues.inputShape,
         testValues.actual.precisionBeforeAdd,
@@ -48,7 +52,7 @@ void FuseDequantizeToFakeQuantizeTransformation::SetUp() {
 }

 TEST_P(FuseDequantizeToFakeQuantizeTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
index b8525a25817d79..e8943eeed5b2fc 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
@@ -8,15 +8,14 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 namespace LayerTestsDefinitions {

 std::string FuseFakeQuantizeAndScaleShiftTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData;
@@ -28,12 +27,15 @@
 }

 void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.8;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData;
     std::tie(netPrecision, inputShape, targetDevice, params, fakeQuantizeOnData) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::FuseFakeQuantizeAndScaleShiftFunction::getOriginal(
         netPrecision,
         inputShape,
@@ -43,7 +45,7 @@ void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() {
 }

 TEST_P(FuseFakeQuantizeAndScaleShiftTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp
index 150ddebadddd31..bb21a147a11b38 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp
@@ -9,7 +9,7 @@
 #include
 #include

-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp"

 namespace LayerTestsDefinitions {
@@ -30,6 +30,8 @@ void FuseMultiplyToFakeQuantizeTransformation::SetUp() {
     FuseMultiplyToFakeQuantizeTransformationTestValues testValues;
     std::tie(targetDevice, testValues) = this->GetParam();

+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::FuseMultiplyToFakeQuantizeFunction::get(
         testValues.inputShape,
         testValues.actual.fakeQuantizeOnData,
@@ -39,7 +41,7 @@ void FuseMultiplyToFakeQuantizeTransformation::SetUp() {
 }

 TEST_P(FuseMultiplyToFakeQuantizeTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp
index db9fc21b08d74d..fc4e077b9d2b58 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp
@@ -9,7 +9,7 @@
 #include
 #include

-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp"

 namespace LayerTestsDefinitions {
@@ -30,6 +30,8 @@ void FuseSubtractToFakeQuantizeTransformation::SetUp() {
     FuseSubtractToFakeQuantizeTransformationTestValues testValues;
     std::tie(targetDevice, testValues) = this->GetParam();

+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::FuseSubtractToFakeQuantizeFunction::get(
         testValues.inputShape,
         testValues.actual.fakeQuantizeOnData,
@@ -39,7 +41,7 @@ void FuseSubtractToFakeQuantizeTransformation::SetUp() {
 }

 TEST_P(FuseSubtractToFakeQuantizeTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp
index 8858cbdf1f2980..7a0db9bcca4dfa 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp
@@ -8,15 +8,14 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/gather.hpp"

 namespace LayerTestsDefinitions {

 std::string GatherTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
+    ov::element::Type precision;
     std::string targetDevice;
     GatherTransformationTestValues testValues;
     int opset_version;
@@ -33,11 +32,13 @@ std::string GatherTransformation::getTestCaseName(const testing::TestParamInfoGetParam();
+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::GatherFunction::getOriginal(
         testValues.inputShape,
         testValues.gatherIndicesShape,
@@ -50,7 +51,7 @@ void GatherTransformation::SetUp() {
 }

 TEST_P(GatherTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
index 9b6c8fbe3bb300..130685f1bc2f5a 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -22,23 +21,27 @@
 namespace LayerTestsDefinitions {

 std::string GemmTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShape, targetDevice, params) = obj.param;

-    return getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params);
+    return get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params);
 }

 void GemmTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 17;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();

-    const float low = 0.f; // params.precisionsOnActivations[0] == ngraph::element::u8 ? 0.f : -128.f;
-    const float high = 255.f; // params.precisionsOnActivations[0] == ngraph::element::u8 ? 255.f : 127.f;
+    init_input_shapes({ inputShape, inputShape });
+
+    const float low = 0.f; // params.precisionsOnActivations[0] == ov::element::u8 ? 0.f : -128.f;
+    const float high = 255.f; // params.precisionsOnActivations[0] == ov::element::u8 ? 255.f : 127.f;

     function = ngraph::builder::subgraph::MatMulFunction::getOriginal(
         netPrecision,
@@ -48,7 +51,7 @@ void GemmTransformation::SetUp() {
 }

 TEST_P(GemmTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp
index e2857490ebaae8..6462255744ab1d 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -20,18 +19,18 @@
 namespace LayerTestsDefinitions {

 std::string GroupConvolutionTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
+    ov::element::Type netPrecision;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
-    std::pair inputShapes;
+    std::pair inputShapes;
     GroupConvolutionTransformationParam param;
     bool addPrecisionPreserved;
     std::tie(netPrecision, targetDevice, params, inputShapes, param, addPrecisionPreserved) = obj.param;

     std::ostringstream result;
     result <<
-        getTestCaseNameByParams(netPrecision, inputShapes.first, targetDevice, params) << "_" <<
-        inputShapes.first.rank().get_length() << "D_" <<
+        get_test_case_name_by_params(netPrecision, inputShapes.first, targetDevice, params) << "_" <<
+        inputShapes.first.rank().get_length() << "D_" <<
         inputShapes.first << "_" <<
         inputShapes.second << "_" <<
         param.group << "_" <<
@@ -44,15 +43,18 @@
 }

 void GroupConvolutionTransformation::SetUp() {
-    threshold = 0.1f;
+    rel_threshold = 0.1;
+    abs_threshold = 0.1;

-    ngraph::element::Type netPrecision;
+    ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
-    std::pair inputShapes;
+    std::pair inputShapes;
     GroupConvolutionTransformationParam param;
     bool addPrecisionPreserved;
     std::tie(netPrecision, targetDevice, params, inputShapes, param, addPrecisionPreserved) = this->GetParam();

+    init_input_shapes(inputShapes.first);
+
     while (param.fakeQuantizeOnData.constantShape.size() > inputShapes.first.size()) {
         param.fakeQuantizeOnData.constantShape.pop_back();
     }
@@ -68,14 +70,14 @@ void GroupConvolutionTransformation::SetUp() {
         addPrecisionPreserved);
 }

-void GroupConvolutionTransformation::Run() {
-    LayerTestsCommon::Run();
+void GroupConvolutionTransformation::run() {
+    LayerTransformation::run();

     const auto param = std::get<4>(GetParam());
     if (!param.layerName.empty()) {
-        const auto actualPrecision = getRuntimePrecisionByType(param.layerName);
+        const auto actualPrecision = get_runtime_precision_by_type(param.layerName);
         auto expectedPrecision = param.expectedKernelType;
-        if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+        if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
             expectedPrecision = "FP16";
         }
         EXPECT_EQ(actualPrecision, expectedPrecision);
@@ -84,7 +86,7 @@ void GroupConvolutionTransformation::Run() {

 TEST_P(GroupConvolutionTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
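Reviewer note: the same expected-precision adjustment recurs in FakeQuantizeTransformation::run(), GroupConvolutionTransformation::run() and several overrides below. Factored out, it is just the following; this helper is hypothetical — the tests inline the logic rather than sharing it:

#include <string>
#include "openvino/core/type/element_type.hpp"

// When the tested model runs in f16, an "FP32" kernel expectation maps to "FP16".
std::string expected_kernel_precision(std::string expected, ov::element::Type model_precision) {
    if (expected == "FP32" && model_precision == ov::element::f16)
        expected = "FP16";
    return expected;
}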
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
index 1ffff8920255fa..9678ecfdff3850 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -21,27 +20,30 @@
 namespace LayerTestsDefinitions {

 std::string GroupConvolutionQDqTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     GroupConvolutionQDqTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << param;
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << param;
     return result.str();
 }

 void GroupConvolutionQDqTransformation::SetUp() {
-    // threshold = 0.1f;
+    abs_threshold = 153.7;

-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     GroupConvolutionQDqTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam();

+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::FakeQuantizeAndConvolutionFunction::get(
         netPrecision,
         inputShape,
@@ -55,17 +57,17 @@ void GroupConvolutionQDqTransformation::SetUp() {
         {}, {}, {}, param.reshape, {}, "GroupConvolution", param.multiplyAfter);
 }

-void GroupConvolutionQDqTransformation::Run() {
-    LayerTestsCommon::Run();
+void GroupConvolutionQDqTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);

     EXPECT_EQ(actualType, params.expectedKernelType);
 }

 TEST_P(GroupConvolutionQDqTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp
index c3ca18b5bf485c..950886b65a1230 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp
@@ -8,9 +8,8 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/interpolate.hpp"

 namespace LayerTestsDefinitions {
@@ -29,16 +28,16 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& values) {
 }

 std::string InterpolateTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    std::pair shapes;
+    ov::element::Type precision;
+    std::pair shapes;
     std::string targetDevice;
     interpAttributes attributes;
     auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8();
     std::tie(precision, shapes, targetDevice, attributes) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(precision, shapes.first, targetDevice, params) << "_" <<
-        shapes.second << "_" <<
+    result << get_test_case_name_by_params(precision, shapes.first, targetDevice, params) << "_" <<
+           shapes.second << "_" <<
         attributes.align_corners << "_" <<
         attributes.antialias << "_" <<
         attributes.axes << "_" <<
@@ -49,12 +48,13 @@
 }

 void InterpolateTransformation::SetUp() {
-    SetRefMode(LayerTestsUtils::RefMode::IE);
-    ngraph::element::Type precision;
-    std::pair shapes;
+    ov::element::Type precision;
+    std::pair shapes;
     interpAttributes attributes;
     std::tie(precision, shapes, targetDevice, attributes) = this->GetParam();

+    init_input_shapes(shapes.first);
+
     ov::op::v0::Interpolate::Attributes interpAttrs;
     interpAttrs.axes = attributes.axes;
     interpAttrs.mode = attributes.mode;
@@ -67,7 +67,7 @@ void InterpolateTransformation::SetUp() {
 }

 TEST_P(InterpolateTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
index 1c0c7ac7a7050a..e7c8a06bf41fa1 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
@@ -2,26 +2,25 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "low_precision_transformations/mat_mul_transformation.hpp"
-
+#include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
-#include

-#include "ngraph/op/op.hpp"
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "low_precision_transformations/mat_mul_transformation.hpp"
-#include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/mat_mul.hpp"
+#include "ov_models/subgraph_builders.hpp"

 namespace LayerTestsDefinitions {

 std::string MatMulTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     MatMulTransformationTestValues testValues;
     std::tie(precision, inputShape, targetDevice, testValues) = obj.param;
@@ -38,32 +37,17 @@ std::string MatMulTransformation::getTestCaseName(const testing::TestParamInfoGetParam();
+
+    init_input_shapes({ testValues.inputShape1, testValues.inputShape2 });
+
     function = ngraph::builder::subgraph::MatMulFunction::getOriginal(
         precision,
         testValues.inputShape1,
@@ -74,19 +58,19 @@ void MatMulTransformation::SetUp() {
     ov::pass::InitNodeInfo().run_on_model(function);
 }

-void MatMulTransformation::Run() {
+void MatMulTransformation::run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()

-    LayerTestsCommon::Run();
+    LayerTransformation::run();

     const auto params = std::get<3>(GetParam());
-    const auto actualType = getRuntimePrecision(params.expectedKernelName);
+    const auto actualType = get_runtime_precision(params.expectedKernelName);

     EXPECT_EQ(actualType, params.expectedRuntimePrecision);
 }

 TEST_P(MatMulTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp
index 5708daf3b43ca4..9ab4c619a9ae7a 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp
@@ -2,25 +2,25 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "low_precision_transformations/mat_mul_with_constant_transformation.hpp"
-
+#include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
-#include

-#include "ngraph/op/op.hpp"
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"
 #include "low_precision_transformations/mat_mul_transformation.hpp"
-#include "ov_models/subgraph_builders.hpp"
+#include "low_precision_transformations/mat_mul_with_constant_transformation.hpp"
 #include "ov_lpt_models/mat_mul.hpp"
+#include "ov_models/subgraph_builders.hpp"

 namespace LayerTestsDefinitions {

 std::string MatMulWithConstantTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
+    ov::element::Type precision;
     std::string targetDevice;
     MatMulWithConstantTransformationTestValues testValues;
     std::tie(precision, targetDevice, testValues) = obj.param;
@@ -37,31 +37,16 @@ std::string MatMulWithConstantTransformation::getTestCaseName(const testing::Tes
     return result.str();
 }

-InferenceEngine::Blob::Ptr MatMulWithConstantTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    if ((info.name() != "input1") && (info.name() != "input2")) {
-        IE_THROW() << "unexpected layer name " << info.name();
-    }
-
-    size_t low;
-    size_t high;
-    if (info.name() == "input1") {
-        low = 1ul;
-        high = 5ul;
-    } else if (info.name() == "input2") {
-        low = 5ul;
-        high = 10ul;
-    } else {
-        IE_THROW() << "unexpected input name " << info.name();
-    }
-
-    return FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), high - low, low, 1ul);
-}

 void MatMulWithConstantTransformation::SetUp() {
-    ngraph::element::Type precision;
+    abs_threshold = 1.0e-3;
+
+    ov::element::Type precision;
     MatMulWithConstantTransformationTestValues testValues;
     std::tie(precision, targetDevice, testValues) = this->GetParam();

+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::MatMulFunction::getOriginal(
         precision,
         testValues.inputShape,
@@ -73,13 +58,13 @@ void MatMulWithConstantTransformation::SetUp() {
     ov::pass::InitNodeInfo().run_on_model(function);
 }

-void MatMulWithConstantTransformation::Run() {
-    LayerTestsCommon::Run();
+void MatMulWithConstantTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<2>(GetParam());
-    const auto actualPrecision = getRuntimePrecisionByType(params.layerName);
+    const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
     auto expectedPrecision = params.expectedKernelType;
-    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
         expectedPrecision = "FP16";
     }
     EXPECT_EQ(actualPrecision, expectedPrecision);
@@ -87,7 +72,7 @@ void MatMulWithConstantTransformation::Run() {

 TEST_P(MatMulWithConstantTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
index f807061e96c622..eac38bdfa4429b 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -22,8 +21,8 @@
 namespace LayerTestsDefinitions {

 std::string MatMulWithOptimizedConstantFq::getTestCaseName(
     const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    std::pair shapes;
+    ov::element::Type netPrecision;
+    std::pair shapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param;
@@ -40,14 +39,17 @@ std::string MatMulWithOptimizedConstantFq::getTestCaseName(
 }

 void MatMulWithOptimizedConstantFq::SetUp() {
-    threshold = 0.01f;
+    rel_threshold = 0.01;
+    abs_threshold = 2.1;

-    ngraph::element::Type precision;
-    std::pair shapes;
+    ov::element::Type precision;
+    std::pair shapes;
     ov::pass::low_precision::LayerTransformation::Params params;
     MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param;
     std::tie(precision, shapes, targetDevice, param) = this->GetParam();

+    init_input_shapes({ shapes.first, shapes.second });
+
     function = ngraph::builder::subgraph::MatMulWithOptimizedConstantFakeQuantizeFunction::getOriginal(
         precision,
         shapes.first,
@@ -57,7 +59,7 @@ void MatMulWithOptimizedConstantFq::SetUp() {
 }

 TEST_P(MatMulWithOptimizedConstantFq, CompareWithRefImpl) {
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
index 4ea5038a026036..b68e336dffcaa9 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
@@ -19,8 +18,8 @@
 namespace LayerTestsDefinitions {

 std::string MoveFakeQuantizeTransformation::getTestCaseName(testing::TestParamInfo obj) {
-    ngraph::element::Type netPrecision;
-    std::vector inputShape;
+    ov::element::Type netPrecision;
+    std::vector inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     bool oneInputWithSplit;
@@ -28,8 +27,8 @@ std::string MoveFakeQuantizeTransformation::getTestCaseName(testing::TestParamIn
     std::tie(netPrecision, inputShape, targetDevice, params, oneInputWithSplit, param) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape[0], targetDevice, params) <<
-        "SPLIT:" << oneInputWithSplit << "_" <<
+    result << get_test_case_name_by_params(netPrecision, inputShape[0], targetDevice, params) <<
+           "SPLIT:" << oneInputWithSplit << "_" <<
         "OP:" << param.operation << "_" <<
         "FQ:" << param.fakeQuantizeAfter << "_" <<
         "DQ:" << param.dequantizationAfter;
@@ -37,13 +36,34 @@
 }

 void MoveFakeQuantizeTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    std::vector inputShapes;
+    abs_threshold = 1.1;
+
+    ov::element::Type netPrecision;
+    std::vector inputShapes;
     ov::pass::low_precision::LayerTransformation::Params params;
     bool oneInputWithSplit;
     MoveFakeQuantizeTransformationParam param;
     std::tie(netPrecision, inputShapes, targetDevice, params, oneInputWithSplit, param) = this->GetParam();

+    if (oneInputWithSplit) {
+        auto newInputShape = inputShapes[0];
+        int channels = 0;
+        bool channelsWasIdentified = false;
+        for (const auto inputShape : inputShapes) {
+            if (inputShape[param.axis].is_static()) {
+                channels += inputShape[param.axis].get_length();
+                channelsWasIdentified = true;
+            }
+        }
+
+        if (channelsWasIdentified) {
+            newInputShape[param.axis] = channels;
+        }
+
+        init_input_shapes(newInputShape);
+    } else {
+        init_input_shapes(inputShapes);
+    }
+
     function = ngraph::builder::subgraph::MoveFakeQuantize::get(
         netPrecision,
         inputShapes,
@@ -61,13 +81,13 @@ void MoveFakeQuantizeTransformation::SetUp() {
         oneInputWithSplit);
 }

-void MoveFakeQuantizeTransformation::Run() {
-    LayerTestsCommon::Run();
+void MoveFakeQuantizeTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<5>(GetParam());
-    const auto actualPrecision = getRuntimePrecisionByType(params.layerName);
+    const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
     auto expectedPrecision = params.expectedKernelType;
-    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
         expectedPrecision = "FP16";
     }
     EXPECT_EQ(actualPrecision, expectedPrecision);
@@ -75,7 +95,7 @@ void MoveFakeQuantizeTransformation::Run() {

 TEST_P(MoveFakeQuantizeTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp
index ae2affdb42e4f4..edfb371e2d3363 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include

-#include

 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -23,15 +22,15 @@
 namespace LayerTestsDefinitions {

 std::string MultiplyToGroupConvolutionTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
     std::string targetDevice;
-    ngraph::element::Type precision;
-    ngraph::PartialShape shape;
+    ov::element::Type precision;
+    ov::PartialShape shape;
     auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8();
     MultiplyToGroupConvolutionTransformationParam param;
     std::tie(precision, shape, targetDevice, param) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(precision, shape, targetDevice, params) << "_" <<
-        param.fqOnData << "_" <<
+    result << get_test_case_name_by_params(precision, shape, targetDevice, params) << "_" <<
+           param.fqOnData << "_" <<
         param.constant << "_" <<
         param.layerName << "_" <<
         param.expectedKernelType << "_" <<
@@ -40,11 +39,13 @@
 }

 void MultiplyToGroupConvolutionTransformation::SetUp() {
-    ngraph::PartialShape shape;
-    ngraph::element::Type precision;
+    ov::PartialShape shape;
+    ov::element::Type precision;
     MultiplyToGroupConvolutionTransformationParam param;
     std::tie(precision, shape, targetDevice, param) = this->GetParam();

+    init_input_shapes(shape);
+
     function = ngraph::builder::subgraph::MultiplyToGroupConvolutionFunction::getOriginal(
         precision,
         shape,
@@ -53,13 +54,13 @@ void MultiplyToGroupConvolutionTransformation::SetUp() {
         param.parentHasOneConsumer);
 }

-void MultiplyToGroupConvolutionTransformation::Run() {
-    LayerTestsCommon::Run();
+void MultiplyToGroupConvolutionTransformation::run() {
+    LayerTransformation::run();

     const auto param = std::get<3>(GetParam());
-    const auto actualPrecision = getRuntimePrecision(param.layerName);
+    const auto actualPrecision = get_runtime_precision(param.layerName);
     auto expectedPrecision = param.expectedKernelType;
-    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
         expectedPrecision = "FP16";
     }
     EXPECT_EQ(actualPrecision, expectedPrecision);
@@ -67,7 +68,7 @@ void MultiplyToGroupConvolutionTransformation::Run() {

 TEST_P(MultiplyToGroupConvolutionTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
index 6aa3f96aeb0871..cec8ccdace32bb 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
@@ -8,8 +8,7 @@
 #include
 #include
 #include

-#include
-#include <transformations/init_node_info.hpp>
+#include "transformations/init_node_info.hpp"

 #include "ov_lpt_models/multiply_partial_function.hpp"
 #include "ov_models/subgraph_builders.hpp"
@@ -18,16 +17,16 @@
 namespace LayerTestsDefinitions {

 std::string MultiplyTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type precision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8();
     MultiplyTestValues param;
     std::tie(precision, inputShapes, targetDevice, param) = obj.param;

     std::ostringstream result;
-    result << getTestCaseNameByParams(precision, inputShapes, targetDevice, params) <<
-        (param.broadcast1 ? "_broadcast1" : "") <<
+    result << get_test_case_name_by_params(precision, inputShapes, targetDevice, params) <<
+           (param.broadcast1 ? "_broadcast1" : "") <<
         (param.broadcast2 ? "_broadcast2" : "");

     result << "_" << param.expectedPrecisions << "_";
@@ -51,11 +50,34 @@
 }

 void MultiplyTransformation::SetUp() {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 0.1;
+
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     MultiplyTestValues param;
     std::tie(precision, inputShape, targetDevice, param) = this->GetParam();

+    auto inputShape1 = inputShape;
+    if (param.broadcast1) {
+        inputShape1[2] = 1;
+        inputShape1[3] = 1;
+    }
+
+    ov::PartialShape inputShape2;
+    if (param.secondInputIsConstant) {
+        inputShape2 = {};
+    } else {
+        inputShape2 = inputShape;
+        if (param.broadcast2) {
+            inputShape2[2] = 1;
+            inputShape2[3] = 1;
+        }
+    }
+
+    init_input_shapes(
+        param.secondInputIsConstant ?
+            std::vector<ov::PartialShape>{ inputShape1 } :
+            std::vector<ov::PartialShape>{ inputShape1, inputShape2 });
+
     function = ngraph::builder::subgraph::MultiplyPartialFunction::get(
         precision,
         inputShape,
@@ -69,20 +91,20 @@ void MultiplyTransformation::SetUp() {
     ov::pass::InitNodeInfo().run_on_model(function);
 }

-void MultiplyTransformation::Run() {
-    LayerTestsCommon::Run();
+void MultiplyTransformation::run() {
+    LayerTransformation::run();

     const auto params = std::get<3>(GetParam());

-    auto to_string = [](const ngraph::element::Type& precision) -> std::string {
+    auto to_string = [](const ov::element::Type& precision) -> std::string {
         switch (precision) {
-            case ngraph::element::f32: {
+            case ov::element::f32: {
                 return "f32";
             }
-            case ngraph::element::i8: {
+            case ov::element::i8: {
                 return "i8";
             }
-            case ngraph::element::u8: {
+            case ov::element::u8: {
                 return "u8";
             }
             default: {
@@ -92,13 +114,13 @@ void MultiplyTransformation::Run() {
     };

     const auto expectedFqPrecision = to_string(params.expectedPrecisions);
-    const auto actualFqPrecision = getRuntimePrecision("multiply");
+    const auto actualFqPrecision = get_runtime_precision("multiply");
     EXPECT_EQ(expectedFqPrecision, actualFqPrecision);
 }

 TEST_P(MultiplyTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };

 }  // namespace LayerTestsDefinitions
ov::pass::low_precision::LayerTransformation::Params params; MultiplyWithOneParentTransformationValues values; std::tie(netPrecision, inputShape, targetDevice, values) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::MultiplyWithOneParentFunction::getOriginal(netPrecision, inputShape, values.fakeQuantize); } TEST_P(MultiplyWithOneParentTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index e28afed0f47de4..9dc6735efc664d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -23,26 +22,29 @@ namespace LayerTestsDefinitions { std::string MVNTransformation::getTestCaseName(const testing::TestParamInfo& obj) { std::string targetDevice; - ngraph::PartialShape shape; - ngraph::element::Type precision; + ov::PartialShape shape; + ov::element::Type precision; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); - ngraph::AxisSet reductionAxes; + ov::AxisSet reductionAxes; bool normalizeVariance; std::tie(precision, shape, targetDevice, reductionAxes, normalizeVariance) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(precision, shape, targetDevice, params) << - "_" << reductionAxes << "_" << normalizeVariance; + result << get_test_case_name_by_params(precision, shape, targetDevice, params) << + "_" << reductionAxes << "_" << normalizeVariance; return result.str(); } void MVNTransformation::SetUp() { - ngraph::PartialShape shape; - ngraph::element::Type precision; - ngraph::AxisSet reductionAxes; + abs_threshold = 0.1; + ov::PartialShape shape; + ov::element::Type precision; + ov::AxisSet reductionAxes; bool normalizeVariance; std::tie(precision, shape, targetDevice, reductionAxes, normalizeVariance) = this->GetParam(); + init_input_shapes(shape); + function = ngraph::builder::subgraph::MVNFunction::getOriginal( precision, shape, @@ -51,7 +53,7 @@ void MVNTransformation::SetUp() { } TEST_P(MVNTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp index fda8edf0d18513..27f1b24c6c6ce6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -22,8 +21,8 @@ namespace LayerTestsDefinitions { std::string NormalizeL2Transformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - std::pair shapes; + ov::element::Type netPrecision; + std::pair shapes; std::string targetDevice; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); std::vector axes; @@ 
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp
index fda8edf0d18513..27f1b24c6c6ce6 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -22,8 +21,8 @@ namespace LayerTestsDefinitions {
 
 std::string NormalizeL2Transformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    std::pair shapes;
+    ov::element::Type netPrecision;
+    std::pair shapes;
     std::string targetDevice;
     auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8();
     std::vector axes;
@@ -33,36 +32,40 @@ std::string NormalizeL2Transformation::getTestCaseName(const testing::TestParamI
     std::ostringstream result;
     result << netPrecision << "_" <<
-        shapes.first << "_" <<
-        shapes.second << "_" <<
-        targetDevice << "_" <<
-        toString(params) << "_" <<
-        "_axes" << axes.size() <<
+           shapes.first << "_" <<
+           shapes.second << "_" <<
+           targetDevice << "_" <<
+           to_string(params) << "_" <<
+           "_axes" << axes.size() <<
         (fuseMultiply ? "_multiply" : "") <<
         (shift ? "_shift" : "");
     return result.str();
 }
 
 void NormalizeL2Transformation::SetUp() {
-    threshold = 3.e-3;
-    std::pair shapes;
-    ngraph::element::Type precision;
+    rel_threshold = 0.1;
+    abs_threshold = 0.1;
+
+    std::pair shapes;
+    ov::element::Type precision;
     std::vector axes;
     bool fuseMultiply;
     bool shift;
     std::tie(precision, shapes, targetDevice, axes, fuseMultiply, shift) = this->GetParam();
 
+    init_input_shapes(shapes.first);
+
     function = ngraph::builder::subgraph::NormalizeL2Function::getOriginal(
         precision,
         shapes,
-        ngraph::element::u8,
+        ov::element::u8,
         axes,
         fuseMultiply,
         shift);
 }
 
 TEST_P(NormalizeL2Transformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
index 95fe14627c70d1..9908346881f580 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -23,32 +22,15 @@ namespace LayerTestsDefinitions {
 
 std::string OutputLayersConcat::getTestCaseName(const testing::TestParamInfo& obj) {
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::SizeVector inputShapes;
+    ov::element::Type netPrecision;
+    ov::Shape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param;
 
-    return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params);
 }
 
-InferenceEngine::Blob::Ptr OutputLayersConcat::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    InferenceEngine::SizeVector inputShape;
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();
-
-    if ((info.name() != "input1") && (info.name() != "input2")) {
-        IE_THROW() << "unexpected input name " << info.name();
-    }
-    const float k = (info.name() == "input1") ? 1.f : 2.f;
-
-    const float low = 0.f / k;
-    const float hight = 255.f / k;
-    InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul);
-    return input;
-}
 
 /*
 * FQ1 FQ2
@@ -64,14 +46,24 @@ InferenceEngine::Blob::Ptr OutputLayersConcat::GenerateInput(const InferenceEngi
 */
 
 void OutputLayersConcat::SetUp() {
-    InferenceEngine::SizeVector inputShape1;
-    InferenceEngine::Precision netPrecision;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape1, targetDevice, params) = this->GetParam();
+    abs_threshold = 4.1;
 
-    auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1));
+    ov::Shape inputShape1;
+    ov::element::Type ngPrecision;
+    ov::pass::low_precision::LayerTransformation::Params params;
+    std::tie(ngPrecision, inputShape1, targetDevice, params) = this->GetParam();
+
+    init_input_shapes({
+        ov::PartialShape(inputShape1),
+        ov::PartialShape(std::vector({
+            static_cast(inputShape1[0]),
+            static_cast(inputShape1[1] * 2ul),
+            static_cast(inputShape1[2]),
+            static_cast(inputShape1[3])
+        }))
+    });
+
+    const auto input1 = std::make_shared(ngPrecision, ov::Shape(inputShape1));
     input1->set_friendly_name("input1");
 
     const auto fakeQuantize1 = ov::test::utils::make_fake_quantize(
@@ -80,8 +72,8 @@ void OutputLayersConcat::SetUp() {
     fakeQuantize1->set_friendly_name("fakeQuantize1");
 
     ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout";
-    const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] };
-    const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2));
+    const ov::Shape inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] };
+    const auto input2 = std::make_shared(ngPrecision, ov::Shape(inputShape2));
     input2->set_friendly_name("input2");
 
     const auto fakeQuantize2 = ov::test::utils::make_fake_quantize(
@@ -90,13 +82,13 @@ void OutputLayersConcat::SetUp() {
     fakeQuantize2->set_friendly_name("fakeQuantize2");
 
     const std::shared_ptr concat = std::make_shared(
-        ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1);
+        ov::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1);
     concat->set_friendly_name("concat");
 
     const float k = 1.f;
     const auto weights = ov::op::v0::Constant::create(
         ngPrecision,
-        ngraph::Shape{ inputShape1[1ul] + inputShape2[1ul], inputShape1[1ul] + inputShape2[1ul], 1ul, 1ul },
+        ov::Shape{ inputShape1[1ul] + inputShape2[1ul], inputShape1[1ul] + inputShape2[1ul], 1ul, 1ul },
         std::vector((inputShape1[1ul] + inputShape2[1ul]) * (inputShape1[1ul] + inputShape2[1ul]), 1ul));
     weights->set_friendly_name("weights");
 
     const auto fakeQuantizeOnWeights = ov::test::utils::make_fake_quantize(
@@ -107,23 +99,23 @@ void OutputLayersConcat::SetUp() {
     const std::shared_ptr convolution = std::make_shared(
         concat->output(0),
         fakeQuantizeOnWeights,
-        ngraph::Strides{ 1ul, 1ul },
-        ngraph::CoordinateDiff{ 0, 0 },
-        ngraph::CoordinateDiff{ 0, 0 },
-        ngraph::Strides{ 1ul, 1ul });
+        ov::Strides{ 1ul, 1ul },
+        ov::CoordinateDiff{ 0, 0 },
+        ov::CoordinateDiff{ 0, 0 },
+        ov::Strides{ 1ul, 1ul });
     convolution->set_friendly_name("convolution");
 
-    ngraph::ResultVector results {
+    ov::ResultVector results {
         std::make_shared(concat),
         std::make_shared(convolution),
         std::make_shared(fakeQuantize2)
     };
 
-    function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling");
+    function = std::make_shared(results, ov::ParameterVector { input1, input2 }, "OutputLayersHandling");
 }
 
 TEST_P(OutputLayersConcat, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
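Note what disappears in this file and the two that follow: the per-test GenerateInput overrides that filled each input with a deterministic ramp over a scaled [low, high] interval. The new base class generates inputs itself; where a test still needs custom ranges, the corresponding hook is the generate_inputs virtual. The override below is an illustrative reconstruction of the deleted behaviour, not code from this PR; ExampleTest is a placeholder, and the exact create_and_fill_tensor overload (range/start_from arguments) is an assumption:

    #include "common_test_utils/ov_tensor_utils.hpp"

    // Illustrative only: mirrors the removed GenerateInput logic in the ov::test API.
    void ExampleTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
        inputs.clear();
        const auto& params = function->get_parameters();
        for (size_t i = 0; i < params.size(); ++i) {
            const float k = (i == 0) ? 1.f : 2.f;   // per-input scaling, as in the old code
            const float low = 0.f / k;
            const float high = 255.f / k;
            auto tensor = ov::test::utils::create_and_fill_tensor(
                params[i]->get_element_type(),
                targetInputStaticShapes[i],
                static_cast<uint32_t>(high - low),  // range = high - low (assumed overload)
                low);                               // start_from = low
            inputs.insert({params[i], tensor});
        }
    }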
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp
index 732d93ddde145d..11c386afa40093 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -22,8 +21,8 @@ namespace LayerTestsDefinitions {
 
-std::pair outputLayersHandlingInTransformationsForConcatMultiChannelGetInterval(const std::vector& precisions) {
-    const bool unsignedInterval = std::find(precisions.begin(), precisions.end(), ngraph::element::u8) != precisions.end();
+std::pair outputLayersHandlingInTransformationsForConcatMultiChannelGetInterval(const std::vector& precisions) {
+    const bool unsignedInterval = std::find(precisions.begin(), precisions.end(), ov::element::u8) != precisions.end();
     const float low = unsignedInterval ? 0.f : -128.f;
     const float hight = unsignedInterval ? 255.f : 127.f;
     return std::make_pair(low, hight);
@@ -31,34 +30,15 @@ std::pair outputLayersHandlingInTransformationsForConcatMultiChann
 
 std::string OutputLayersConcatMultiChannel::getTestCaseName(
     const testing::TestParamInfo& obj) {
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::SizeVector inputShapes;
+    ov::element::Type netPrecision;
+    ov::Shape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param;
 
-    return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params);
 }
 
-InferenceEngine::Blob::Ptr OutputLayersConcatMultiChannel::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    InferenceEngine::SizeVector inputShape;
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();
-
-    if ((info.name() != "input1") && (info.name() != "input2")) {
-        IE_THROW() << "unexpected input name " << info.name();
-    }
-    const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 2.f : 3.f);
-
-    const auto interval = outputLayersHandlingInTransformationsForConcatMultiChannelGetInterval({ ngraph::element::u8, ngraph::element::i8 });
-    const float low = interval.first / k;
-    const float hight = interval.second / k;
-
-    InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul);
-    return input;
-}
 
 /*
 * FQ1 FQ2
@@ -74,48 +54,48 @@ InferenceEngine::Blob::Ptr OutputLayersConcatMultiChannel::GenerateInput(const I
 */
 
 void OutputLayersConcatMultiChannel::SetUp() {
-    threshold = 0.05;
+    rel_threshold = 0.05;
 
-    InferenceEngine::SizeVector inputShape1;
-    InferenceEngine::Precision netPrecision;
+    ov::Shape inputShape1;
+    ov::element::Type ngPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape1, targetDevice, params) = this->GetParam();
+    std::tie(ngPrecision, inputShape1, targetDevice, params) = this->GetParam();
 
-    auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    const ov::Shape inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] };
+    init_input_shapes({ov::PartialShape(inputShape1), ov::PartialShape(inputShape2)});
 
-    const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1));
+    const auto input1 = std::make_shared(ngPrecision, ov::Shape(inputShape1));
     input1->set_friendly_name("input1");
 
     const auto fakeQuantize1 = ov::test::utils::make_fake_quantize(input1->output(0), ngPrecision, 256ul, { 1ul });
     fakeQuantize1->set_friendly_name("fakeQuantize1");
 
     ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout";
-    const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] };
-    const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2));
+    const auto input2 = std::make_shared(ngPrecision, ov::Shape(inputShape2));
     input2->set_friendly_name("input2");
 
     const auto fakeQuantize2 = ov::test::utils::make_fake_quantize(input2->output(0), ngPrecision, 256ul, { 1ul });
     fakeQuantize2->set_friendly_name("fakeQuantize2");
 
     const std::shared_ptr concat = std::make_shared(
-        ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1);
+        ov::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1);
     concat->set_friendly_name("concat");
 
-    auto const1 = ov::op::v0::Constant::create(ngPrecision, ngraph::Shape{ 1, 1, 1, 1 }, { 1 });
+    auto const1 = ov::op::v0::Constant::create(ngPrecision, ov::Shape{ 1, 1, 1, 1 }, { 1 });
     std::shared_ptr convolution = std::make_shared(concat, const1);
     convolution->set_friendly_name("convolution");
 
-    ngraph::ResultVector results {
+    ov::ResultVector results {
         std::make_shared(concat),
         std::make_shared(convolution),
         std::make_shared(fakeQuantize2)
     };
 
-    function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling");
+    function = std::make_shared(results, ov::ParameterVector { input1, input2 }, "OutputLayersHandling");
 }
 
 TEST_P(OutputLayersConcatMultiChannel, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp
index ea03aded58c1c8..737c0c8bd364af 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -23,38 +22,25 @@ namespace LayerTestsDefinitions {
 
 std::string OutputLayers::getTestCaseName(const testing::TestParamInfo& obj) {
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::SizeVector inputShapes;
+    ov::element::Type netPrecision;
+    ov::Shape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param;
 
-    return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params);
 }
 
-InferenceEngine::Blob::Ptr OutputLayers::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    InferenceEngine::SizeVector inputShape;
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();
-
-    const float k = 1.f;
-    const float low = 0.f / k;
-    const float hight = 255.f / k;
-
-    InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul);
-    return input;
-}
 
 void OutputLayers::SetUp() {
-    InferenceEngine::SizeVector inputShape;
-    InferenceEngine::Precision netPrecision;
+    ov::Shape inputShape;
+    ov::element::Type ngPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
-    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();
-    auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    std::tie(ngPrecision, inputShape, targetDevice, params) = this->GetParam();
+
+    init_input_shapes(ov::PartialShape(inputShape));
 
-    const auto input = std::make_shared(ngPrecision, ngraph::Shape(inputShape));
+    const auto input = std::make_shared(ngPrecision, ov::Shape(inputShape));
     input->set_friendly_name("input");
 
     const float k = 1.f;
@@ -65,7 +51,7 @@ void OutputLayers::SetUp() {
 
     const auto weights = ov::op::v0::Constant::create(
         ngPrecision,
-        ngraph::Shape{ inputShape[1ul], inputShape[1ul], 1ul, 1ul },
+        ov::Shape{ inputShape[1ul], inputShape[1ul], 1ul, 1ul },
         std::vector(inputShape[1ul] * inputShape[1ul], 1ul));
     weights->set_friendly_name("weights");
 
     const auto fakeQuantizeOnWeights = ov::test::utils::make_fake_quantize(
@@ -76,22 +62,22 @@ void OutputLayers::SetUp() {
     std::shared_ptr convolution = std::make_shared(
         fakeQuantizeOnActivations,
         fakeQuantizeOnWeights,
-        ngraph::Strides{ 1ul, 1ul },
-        ngraph::CoordinateDiff{ 0, 0 },
-        ngraph::CoordinateDiff{ 0, 0 },
-        ngraph::Strides{ 1ul, 1ul });
+        ov::Strides{ 1ul, 1ul },
+        ov::CoordinateDiff{ 0, 0 },
+        ov::CoordinateDiff{ 0, 0 },
+        ov::Strides{ 1ul, 1ul });
     convolution->set_friendly_name("convolution");
 
-    ngraph::ResultVector results {
+    ov::ResultVector results {
         std::make_shared(convolution),
         std::make_shared(fakeQuantizeOnActivations)
     };
-    function = std::make_shared(results, ngraph::ParameterVector { input }, "OutputLayersHandling");
+    function = std::make_shared(results, ov::ParameterVector { input }, "OutputLayersHandling");
 }
 
 TEST_P(OutputLayers, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp
index da6e3ba2af3666..279bf12252d7b7 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp
@@ -6,15 +6,14 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/pad.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string PadTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::op::PadMode padMode;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
@@ -22,7 +21,7 @@ std::string PadTransformation::getTestCaseName(const testing::TestParamInfoGetParam();
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::PadFunction::get(
         inputShape,
         netPrecision,
@@ -47,11 +49,11 @@ void PadTransformation::SetUp() {
         param.padValue);
 }
 
-void PadTransformation::Run() {
-    LayerTestsCommon::Run();
+void PadTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<5>(GetParam());
-    const auto actualPrecision = getRuntimePrecisionByType(params.layerName);
+    const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
     const auto expectedPrecision = params.expectedKernelType;
 
     EXPECT_EQ(actualPrecision, expectedPrecision);
@@ -59,7 +61,7 @@
 
 TEST_P(PadTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
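pad_transformation.cpp above establishes the pattern every remaining Run()-to-run() override in this diff follows: delegate to the base run(), then assert the runtime precision of the transformed layer. Condensed for reference (ExamplePadTransformation is a placeholder; LayerTransformation::run() and get_runtime_precision_by_type are the renamed helpers introduced by this PR):

    // Sketch of the run() override pattern used throughout this diff.
    void ExamplePadTransformation::run() {
        LayerTransformation::run();  // compile, infer, compare with the reference

        // The expected kernel precision rides along in the test parameters.
        const auto params = std::get<5>(GetParam());
        const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
        EXPECT_EQ(actualPrecision, params.expectedKernelType);
    }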
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp
index eb28b5dbf5750c..b798285eb3068d 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp
@@ -8,16 +8,15 @@
 #include
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/prelu.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string PReluTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     PReluTestValues testValues;
     std::tie(precision, inputShape, targetDevice, testValues) = obj.param;
@@ -31,34 +30,23 @@ std::string PReluTransformation::getTestCaseName(const testing::TestParamInfoGetParam();
-
-    const auto fqOnData = testValues.fakeQuantize;
-    return FuncTestUtils::createAndFillBlobConsistently(
-        info.getTensorDesc(),
-        static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]),
-        static_cast(fqOnData.empty() ? -12.5f : fqOnData.outputLowValues[0]),
-        1ul);
-}
 
 void PReluTransformation::SetUp() {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     PReluTestValues testValues;
     std::tie(precision, inputShape, targetDevice, testValues) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::PReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize);
 
     ov::pass::InitNodeInfo().run_on_model(function);
 }
 
 TEST_P(PReluTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
+
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp
index 4b8f6713f8839a..a0215a4ddb3ba8 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -21,17 +20,17 @@ namespace LayerTestsDefinitions {
 
 std::string PullReshapeThroughDequantizationTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
-    ngraph::Shape elementwiseConstantShapes;
+    ov::Shape elementwiseConstantShapes;
     PullReshapeThroughDequantizationTestValues testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, elementwiseConstantShapes, testValues) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        inputShape << "_" <<
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           inputShape << "_" <<
         elementwiseConstantShapes << "_" <<
         testValues.precisionBeforeDequantization << "_" <<
         testValues.dequantizationOnActivations << "_" <<
@@ -42,15 +41,17 @@ std::string PullReshapeThroughDequantizationTransformation::getTestCaseName(cons
 }
 
 void PullReshapeThroughDequantizationTransformation::SetUp() {
-    // threshold = 0.1f;
+    abs_threshold = 1.0e-3;
 
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
-    ngraph::Shape elementwiseConstantShapes;
+    ov::Shape elementwiseConstantShapes;
     PullReshapeThroughDequantizationTestValues testValues;
     std::tie(netPrecision, inputShape, targetDevice, params, elementwiseConstantShapes, testValues) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     // to prevent test cases increasing let's parameterize test by dequantization shape and
     // initialize values here
     if (!testValues.dequantizationOnWeights.subtract.empty()) {
@@ -79,17 +80,17 @@ void PullReshapeThroughDequantizationTransformation::SetUp() {
         "GroupConvolution");
 }
 
-void PullReshapeThroughDequantizationTransformation::Run() {
-    LayerTestsCommon::Run();
+void PullReshapeThroughDequantizationTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<5>(GetParam());
-    const auto actualType = getRuntimePrecision(params.operationName);
+    const auto actualType = get_runtime_precision(params.operationName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(PullReshapeThroughDequantizationTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp
index c377c370c76597..7d261aaf48ace8 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
@@ -19,17 +18,17 @@ namespace LayerTestsDefinitions {
 
 std::string RecurrentCellTransformation::getTestCaseName(testing::TestParamInfo obj) {
-    ngraph::element::Type netPrecision;
-    std::vector activationsShape;
-    std::vector weightsShape;
+    ov::element::Type netPrecision;
+    std::vector activationsShape;
+    std::vector weightsShape;
     std::string targetDevice;
     RecurrentCellTransformationParam param;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, activationsShape, weightsShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, activationsShape[0], targetDevice, params) <<
-        "FQ_X_" << param.fakeQuantize_X << "_" <<
+    result << get_test_case_name_by_params(netPrecision, activationsShape[0], targetDevice, params) <<
+           "FQ_X_" << param.fakeQuantize_X << "_" <<
         "DQ_X_" << param.dequantization_X << "_" <<
         "FQ_W_" << param.fakeQuantize_W << "_" <<
         "DQ_W_" << param.dequantization_W;
@@ -37,14 +36,18 @@ std::string RecurrentCellTransformation::getTestCaseName(testing::TestParamInfo<
 }
 
 void RecurrentCellTransformation::SetUp() {
-    ngraph::element::Type precision;
-    std::vector activations_shapes;
-    std::vector weights_shapes;
+    abs_threshold = 0.01;
+
+    ov::element::Type precision;
+    std::vector activations_shapes;
+    std::vector weights_shapes;
     RecurrentCellTransformationParam param;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(precision, activations_shapes, weights_shapes, targetDevice, params, param) = this->GetParam();
 
+    init_input_shapes(activations_shapes);
+
     function = ngraph::builder::subgraph::RecurrentCellFunction::get(precision,
                                                                      activations_shapes,
                                                                      weights_shapes,
@@ -69,23 +72,21 @@ void RecurrentCellTransformation::SetUp() {
     });
 }
 
-void RecurrentCellTransformation::Run() {
-    LayerTestsCommon::Run();
-
-    if (!executableNetwork)
-        return;
+void RecurrentCellTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<5>(GetParam());
-    const auto actualPrecision = getRuntimePrecisionByType(params.layerName);
+    const auto actualPrecision = get_runtime_precision_by_type(params.layerName);
     auto expectedPrecision = params.expectedKernelType;
-    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ngraph::element::f16) {
+    if (expectedPrecision == "FP32" && std::get<0>(GetParam()) == ov::element::f16) {
         expectedPrecision = "FP16";
     }
     EXPECT_EQ(actualPrecision, expectedPrecision);
 }
 
 TEST_P(RecurrentCellTransformation, CompareWithRefImpl) {
-    Run();
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
index 9980d12ac8199b..d919f18466e0b3 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
@@ -6,23 +6,22 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/reduce.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string ReduceMaxTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMaxTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
     for (const auto& elem : param.constantValues) {
         result << elem << "_";
     }
@@ -31,12 +30,15 @@ std::string ReduceMaxTransformation::getTestCaseName(const testing::TestParamInf
 }
 
 void ReduceMaxTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.1;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMaxTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam();
 
+    init_input_shapes(inputShape);
+
     ngraph::builder::subgraph::DequantizationOperations::Convert convert;
     ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
     ngraph::builder::subgraph::DequantizationOperations dequantizationAfter;
@@ -52,17 +54,17 @@ void ReduceMaxTransformation::SetUp() {
         dequantizationAfter);
 }
 
-void ReduceMaxTransformation::Run() {
-    LayerTestsCommon::Run();
+void ReduceMaxTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(ReduceMaxTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
index 35f54b7cc8a128..0ea01a9a51aa6d 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
@@ -6,7 +6,6 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/reduce.hpp"
@@ -20,16 +19,16 @@ ReduceMeanOperation::ReduceMeanOperation(const std::vector& constantVal
 }
 
 std::string ReduceMeanTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMeanTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantize <<
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantize <<
         param.convert <<
         param.dequantizationBefore <<
         (param.reduceMean.keepDims ? "_keepDims_" : "");
@@ -43,12 +42,15 @@ std::string ReduceMeanTransformation::getTestCaseName(const testing::TestParamIn
 }
 
 void ReduceMeanTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 4.1;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMeanTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::ReduceFunction::get(
         netPrecision,
         inputShape,
@@ -60,17 +62,17 @@ void ReduceMeanTransformation::SetUp() {
         param.dequantizationAfter);
 }
 
-void ReduceMeanTransformation::Run() {
-    LayerTestsCommon::Run();
+void ReduceMeanTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(ReduceMeanTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
index a1b3b9cb24fb28..af2552960ce113 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
@@ -6,23 +6,22 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/reduce.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string ReduceMinTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMinTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
     for (const auto& elem : param.constantValues) {
         result << elem << "_";
     }
@@ -31,12 +30,15 @@ std::string ReduceMinTransformation::getTestCaseName(const testing::TestParamInf
 }
 
 void ReduceMinTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 0.1;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceMinTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam();
 
+    init_input_shapes(inputShape);
+
     ngraph::builder::subgraph::DequantizationOperations::Convert convert;
     ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
     ngraph::builder::subgraph::DequantizationOperations dequantizationAfter;
@@ -52,17 +54,17 @@ void ReduceMinTransformation::SetUp() {
         dequantizationAfter);
 }
 
-void ReduceMinTransformation::Run() {
-    LayerTestsCommon::Run();
+void ReduceMinTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(ReduceMinTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
index 14d9e3cc97f0a6..3ad7869fd08d89 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
@@ -6,23 +6,22 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/reduce.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string ReduceSumTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceSumTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantize << (param.keepDims ? "_keepDims_" : "") << "_reduce_axis_";
     for (const auto& elem : param.constantValues) {
         result << elem << "_";
     }
@@ -31,12 +30,16 @@ std::string ReduceSumTransformation::getTestCaseName(const testing::TestParamInf
 }
 
 void ReduceSumTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 4.1;
+
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReduceSumTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam();
 
+    init_input_shapes(inputShape);
+
     ngraph::builder::subgraph::DequantizationOperations::Convert convert;
     ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
     ngraph::builder::subgraph::DequantizationOperations dequantizationAfter;
@@ -52,17 +55,17 @@ void ReduceSumTransformation::SetUp() {
         dequantizationAfter);
 }
 
-void ReduceSumTransformation::Run() {
-    LayerTestsCommon::Run();
+void ReduceSumTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(ReduceSumTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp
index b0acf0f885852d..ab7fe4cfed7457 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp
@@ -8,16 +8,15 @@
 #include
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/relu.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string ReluTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ReluTestValues testValues;
     std::tie(precision, inputShape, targetDevice, testValues) = obj.param;
@@ -31,34 +30,23 @@ std::string ReluTransformation::getTestCaseName(const testing::TestParamInfoGetParam();
-
-    const auto fqOnData = testValues.fakeQuantize;
-    return FuncTestUtils::createAndFillBlobConsistently(
-        info.getTensorDesc(),
-        static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]),
-        static_cast(fqOnData.empty() ? -12.5f : fqOnData.outputLowValues[0]),
-        1ul);
-}
 
 void ReluTransformation::SetUp() {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     ReluTestValues testValues;
     std::tie(precision, inputShape, targetDevice, testValues) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::ReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize);
 
     ov::pass::InitNodeInfo().run_on_model(function);
 }
 
 TEST_P(ReluTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp
index 7a45da7385e478..5addda86377624 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp
@@ -6,23 +6,22 @@
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/reshape.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string ReshapeTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
+    ov::element::Type netPrecision;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReshapeTransformationParam param;
     std::tie(netPrecision, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << netPrecision << "_" << targetDevice << "_" << toString(params) <<
-        "_" << param.inputShape << "_" << param.fakeQuantize << "_{";
+    result << netPrecision << "_" << targetDevice << "_" << to_string(params) <<
+           "_" << param.inputShape << "_" << param.fakeQuantize << "_{";
     for (size_t i = 0; i < param.reshapeConstValues.size(); ++i) {
         result << param.reshapeConstValues[i];
         if (i != (param.reshapeConstValues.size() - 1ul)) {
@@ -34,11 +33,13 @@ std::string ReshapeTransformation::getTestCaseName(const testing::TestParamInfo<
 }
 
 void ReshapeTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
+    ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
     ReshapeTransformationParam param;
     std::tie(netPrecision, targetDevice, params, param) = this->GetParam();
 
+    init_input_shapes(param.inputShape);
+
     function = ngraph::builder::subgraph::ReshapeFunction::getOriginal(
         param.inputShape,
         param.reshapeConstValues,
@@ -46,11 +47,11 @@ void ReshapeTransformation::SetUp() {
         param.fakeQuantize);
 }
 
-void ReshapeTransformation::Run() {
-    LayerTestsCommon::Run();
+void ReshapeTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<3>(GetParam());
-    auto actualPrecision = getRuntimePrecisionByType(params.layerType);
+    auto actualPrecision = get_runtime_precision_by_type(params.layerType);
     const auto expectedPrecision = params.expectedKernelType;
     if ((expectedPrecision == "FP32") && (actualPrecision == "FP16")) {
         actualPrecision = "FP32";
     }
@@ -60,7 +61,7 @@
 
 TEST_P(ReshapeTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
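ReshapeTransformation::run() above, like RecurrentCellTransformation::run() earlier and SpaceToBatchTransformation::run() below, papers over the same device behaviour: a kernel requested in FP32 may legitimately execute in FP16, so one side of the comparison is rewritten before the EXPECT_EQ. The idiom, pulled out as a standalone helper purely for clarity (hypothetical; not part of this PR):

    #include <string>

    // Accept FP16 execution wherever FP32 was expected; everything else must match exactly.
    std::string normalize_runtime_precision(std::string actual, const std::string& expected) {
        if (expected == "FP32" && actual == "FP16")
            actual = "FP32";
        return actual;
    }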
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp
index b1405b805828e3..f8897324b32054 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -19,26 +18,29 @@ namespace LayerTestsDefinitions {
 
 std::string ShuffleChannelsTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     ShuffleChannelsTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantizeOnData << "_axis_" << param.axis << "_group_" << param.group;
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantizeOnData << "_axis_" << param.axis << "_group_" << param.group;
     return result.str();
 }
 
 void ShuffleChannelsTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     ShuffleChannelsTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::ShuffleChannelsFunction::getOriginal(
         netPrecision,
         inputShape,
@@ -47,17 +49,17 @@ void ShuffleChannelsTransformation::SetUp() {
         param.group);
 }
 
-void ShuffleChannelsTransformation::Run() {
-    LayerTestsCommon::Run();
+void ShuffleChannelsTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<4>(GetParam());
-    const auto actualType = getRuntimePrecision(params.layerName);
+    const auto actualType = get_runtime_precision(params.layerName);
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
 TEST_P(ShuffleChannelsTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp
index e8887dcf4902da..2136c6ca9aa7da 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp
@@ -6,15 +6,14 @@
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/space_to_batch.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string SpaceToBatchTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type input_type;
+    ov::element::Type input_type;
     std::string target_device;
     SpaceToBatchTransformationParam param;
     std::tie(input_type, target_device, param) = obj.param;
@@ -25,10 +24,13 @@ std::string SpaceToBatchTransformation::getTestCaseName(const testing::TestParam
 }
 
 void SpaceToBatchTransformation::SetUp() {
-    ngraph::element::Type input_type;
+    abs_threshold = 1.1;
+    ov::element::Type input_type;
     SpaceToBatchTransformationParam param;
     std::tie(input_type, targetDevice, param) = this->GetParam();
 
+    init_input_shapes(param.input_shape);
+
     function = ngraph::builder::subgraph::SpaceToBatchFunction::get(
         param.input_shape,
         input_type,
@@ -38,8 +40,8 @@ void SpaceToBatchTransformation::SetUp() {
         param.pads_end);
 }
 
-void SpaceToBatchTransformation::Run() {
-    LayerTestsCommon::Run();
+void SpaceToBatchTransformation::run() {
+    LayerTransformation::run();
 
     const auto params = std::get<2>(GetParam());
     auto expected_type = params.expected_kernel_type;
@@ -48,13 +50,13 @@ void SpaceToBatchTransformation::run() {
         expected_type = "f16";
     }
 
-    const auto actual_type = getRuntimePrecisionByType(params.layer_type);
+    const auto actual_type = get_runtime_precision_by_type(params.layer_type);
     EXPECT_EQ(actual_type, expected_type);
 }
 
 TEST_P(SpaceToBatchTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp
index 161db8402514fb..297e186bb4bd05 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp
@@ -8,50 +8,37 @@
 #include
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "low_precision/split.hpp"
 #include "ov_lpt_models/split.hpp"
 
 namespace LayerTestsDefinitions {
 
 std::string SplitTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     SplitTransformationParam param;
     std::tie(netPrecision, inputShapes, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << "_" <<
-        param.fakeQuantize << "_axis=" << param.splitedAxis << "_n_splits=" << param.numSplit;
+    result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << "_" <<
+           param.fakeQuantize << "_axis=" << param.splitedAxis << "_n_splits=" << param.numSplit;
     return result.str();
 }
 
-InferenceEngine::Blob::Ptr SplitTransformation::GenerateInput(const InferenceEngine::InputInfo& info) const {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
-    std::string targetDevice;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    SplitTransformationParam param;
-    std::tie(precision, inputShape, targetDevice, params, param) = this->GetParam();
-    const auto& fqOnData = param.fakeQuantize;
-
-    return FuncTestUtils::createAndFillBlobConsistently(
-        info.getTensorDesc(),
-        static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]),
-        static_cast(fqOnData.empty() ? -12.5f : fqOnData.outputLowValues[0]),
-        1ul);
-}
 
 void SplitTransformation::SetUp() {
-    ngraph::element::Type precision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0;
+    ov::element::Type precision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     SplitTransformationParam param;
     std::tie(precision, inputShape, targetDevice, params, param) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::SplitFunction::getOriginal(
         precision,
         inputShape,
@@ -61,6 +48,6 @@ void SplitTransformation::SetUp() {
 }
 
 TEST_P(SplitTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
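The migrated SetUp() bodies replace the single legacy threshold with explicit abs_threshold and, occasionally, rel_threshold values (0.1, 1.0, 4.1, ...). A sketch of the acceptance rule this implies, assuming the conventional mixed absolute/relative comparison; the exact formula the ov::test comparator applies is an assumption here, not taken from this PR:

    #include <algorithm>
    #include <cmath>

    // Hedged sketch: element-wise acceptance under mixed tolerances.
    bool close_enough(float actual, float expected, float abs_thr, float rel_thr) {
        const float diff = std::fabs(actual - expected);
        return diff <= std::max(abs_thr, rel_thr * std::fabs(expected));
    }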
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp
index 4a6a22d4fdcee4..ab3ddef713e929 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp
@@ -2,18 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
-#include
-#include "ngraph/op/op.hpp"
-#include
+#include "transformations/init_node_info.hpp"
 #include "low_precision_transformations/squeeze_transformation.hpp"
-#include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/squeeze.hpp"
+#include "ov_models/subgraph_builders.hpp"
 
 namespace LayerTestsDefinitions {
 
@@ -29,33 +30,17 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& valu
     return os;
 }
 
-InferenceEngine::Blob::Ptr SqueezeTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    ngraph::element::Type netPrecision;
-    ov::pass::low_precision::LayerTransformation::Params params;
-    SqueezeTransformationParam squeezeParam;
-    std::string targetDevice;
-
-    std::tie(netPrecision, targetDevice, params, squeezeParam) = this->GetParam();
-
-    const ngraph::builder::subgraph::FakeQuantizeOnData& fqOnData = squeezeParam.fakeQuantize;
-
-    return FuncTestUtils::createAndFillBlobConsistently(
-        info.getTensorDesc(),
-        static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]),
-        static_cast(fqOnData.empty() ? -12.5f : fqOnData.outputLowValues[0]),
-        1ul);
-}
 
 std::string SqueezeTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
+    ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::string targetDevice;
     SqueezeTransformationParam squeezeParam;
     std::tie(netPrecision, targetDevice, params, squeezeParam) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, squeezeParam.shape, targetDevice, params) << "_" <<
-        squeezeParam.fakeQuantize << "_" <<
+    result << get_test_case_name_by_params(netPrecision, squeezeParam.shape, targetDevice, params) << "_" <<
+           squeezeParam.fakeQuantize << "_" <<
         squeezeParam.squeezeAxes << "_" <<
         params.updatePrecisions << "_" <<
         squeezeParam.shape;
@@ -63,12 +48,15 @@ std::string SqueezeTransformation::getTestCaseName(const testing::TestParamInfo<
     return result.str();
 }
 
 void SqueezeTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
+    abs_threshold = 0.2;
+    ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
     SqueezeTransformationParam squeezeParam;
     std::tie(netPrecision, targetDevice, params, squeezeParam) = this->GetParam();
 
+    init_input_shapes(squeezeParam.shape);
+
     function = ngraph::builder::subgraph::SqueezeFunction::getOriginal(
         netPrecision,
         squeezeParam.shape,
@@ -79,7 +67,7 @@ void SqueezeTransformation::SetUp() {
 }
 
 TEST_P(SqueezeTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp
index 7208df27695db4..d8e96595f2cf78 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp
@@ -6,7 +6,6 @@
 #include
 #include
 #include
-#include
 
 #include "ov_lpt_models/strided_slice.hpp"
@@ -25,28 +24,31 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& va
 }
 
 std::string StridedSliceTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     StridedSliceTransformationParam param;;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = obj.param;
 
     std::ostringstream result;
-    result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
-        param.fakeQuantize << "_" << param.begin << "_" << param.beginMask << "_" <<
+    result << get_test_case_name_by_params(netPrecision, inputShape, targetDevice, params) << "_" <<
+           param.fakeQuantize << "_" << param.begin << "_" << param.beginMask << "_" <<
         param.end << "_" << param.endMask << "_" << param.strides << "_" <<
         param.newAxisMask << param.shrinkAxisMask << "_" << param.elipsisMask;
     return result.str();
 }
 
 void StridedSliceTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 1.0;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     StridedSliceTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::StridedSliceFunction::getOriginal(
         netPrecision,
         inputShape,
@@ -62,7 +64,7 @@ void StridedSliceTransformation::SetUp() {
 }
 
 TEST_P(StridedSliceTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp
index bfa10aa4b75b29..ed10cac15c6fc9 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp
@@ -8,9 +8,8 @@
 #include
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/subtract_multiply_to_multiply_add.hpp"
 
 namespace LayerTestsDefinitions {
@@ -33,6 +32,8 @@ void SubtractMultiplyToMultiplyAddTransformation::SetUp() {
     SubtractMultiplyToMultiplyAddTransformationTestValues testValues;
     std::tie(targetDevice, testValues) = this->GetParam();
 
+    init_input_shapes(testValues.inputShape);
+
     function = ngraph::builder::subgraph::SubtractMultiplyToMultiplyAddFunction::getOriginal(
         testValues.inputShape,
         testValues.precision,
@@ -40,7 +41,7 @@ void SubtractMultiplyToMultiplyAddTransformation::SetUp() {
 }
 
 TEST_P(SubtractMultiplyToMultiplyAddTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp
index 1866315bffb0f0..8c0c341611d249 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp
@@ -8,9 +8,8 @@
 #include
 #include
 #include
-#include
-#include
+#include "transformations/init_node_info.hpp"
 
 #include "ov_lpt_models/subtract.hpp"
@@ -18,26 +17,29 @@ namespace LayerTestsDefinitions {
 
 std::string SubtractTransformation::getTestCaseName(const testing::TestParamInfo& obj) {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShapes;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShapes;
     std::string targetDevice;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param;
 
-    return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params);
+    return get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params);
 }
 
 void SubtractTransformation::SetUp() {
-    ngraph::element::Type netPrecision;
-    ngraph::PartialShape inputShape;
+    abs_threshold = 0.1;
+    ov::element::Type netPrecision;
+    ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
     std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();
 
+    init_input_shapes(inputShape);
+
     function = ngraph::builder::subgraph::SubtractFunction::getOriginal(netPrecision, inputShape);
 }
 
 TEST_P(SubtractTransformation, CompareWithRefImpl) {
-    Run();
+    run();
 };
 
 }  // namespace LayerTestsDefinitions
diff --git
a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp index fa466880570f07..2339e6b2d11865 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -9,7 +9,6 @@ #include #include -#include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -22,8 +21,8 @@ namespace LayerTestsDefinitions { std::string TransposeAfterMatMulTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; bool perTensor; @@ -31,25 +30,28 @@ std::string TransposeAfterMatMulTransformation::getTestCaseName(const testing::T std::tie(netPrecision, inputShapes, targetDevice, params, perTensor, transposeChannelDim) = obj.param; std::ostringstream result; - result << netPrecision << "_" << targetDevice << "_" << toString(params) << - (perTensor ? "_perTensor" : "_perChannel") << + result << netPrecision << "_" << targetDevice << "_" << to_string(params) << + (perTensor ? "_perTensor" : "_perChannel") << (transposeChannelDim ? "_transposeChannelDim" : "_notTransposeChannelDim"); return result.str(); } void TransposeAfterMatMulTransformation::SetUp() { - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + abs_threshold = 0.6; + ov::element::Type precision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; bool perTensor; bool transposeChannelDim; std::tie(precision, inputShape, targetDevice, params, perTensor, transposeChannelDim) = this->GetParam(); + init_input_shapes({ inputShape, inputShape }); + function = ngraph::builder::subgraph::TransposeAfterMatMulFunction::getOriginal(precision, inputShape); } TEST_P(TransposeAfterMatMulTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp index ef7fc3ba6f52f0..116d54e37751e4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp @@ -8,15 +8,14 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "ov_lpt_models/transpose.hpp" namespace LayerTestsDefinitions { std::string TransposeTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type precision; + ov::element::Type precision; std::string targetDevice; TransposeTransformationTestValues testValues; std::tie(precision, targetDevice, testValues) = obj.param; @@ -31,10 +30,12 @@ std::string TransposeTransformation::getTestCaseName(const testing::TestParamInf } void TransposeTransformation::SetUp() { - ngraph::element::Type precision; + ov::element::Type precision; TransposeTransformationTestValues testValues; std::tie(precision, targetDevice, testValues) 
= this->GetParam(); + init_input_shapes(testValues.inputShape); + function = ngraph::builder::subgraph::TransposeFunction::getOriginal( testValues.inputShape, testValues.transposeConstValues, @@ -43,7 +44,7 @@ void TransposeTransformation::SetUp() { } TEST_P(TransposeTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index 3eb1626f23daf4..df8280db1a2b02 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -2,18 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include +#include +#include #include #include #include #include -#include -#include "ngraph/op/op.hpp" -#include +#include "transformations/init_node_info.hpp" #include "low_precision_transformations/unsqueeze_transformation.hpp" -#include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/unsqueeze.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { @@ -29,33 +30,17 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& valu return os; } -InferenceEngine::Blob::Ptr UnsqueezeTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { - ngraph::element::Type netPrecision; - ov::pass::low_precision::LayerTransformation::Params params; - UnsqueezeTransformationParam squeezeParam; - std::string targetDevice; - - std::tie(netPrecision, targetDevice, params, squeezeParam) = this->GetParam(); - - const ngraph::builder::subgraph::FakeQuantizeOnData& fqOnData = squeezeParam.fakeQuantize; - - return FuncTestUtils::createAndFillBlobConsistently( - info.getTensorDesc(), - static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]), - static_cast(fqOnData.empty() ? 
-12.5f : fqOnData.outputLowValues[0]), - 1ul); -} std::string UnsqueezeTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; + ov::element::Type netPrecision; ov::pass::low_precision::LayerTransformation::Params params; std::string targetDevice; UnsqueezeTransformationParam unsqueezeParam; std::tie(netPrecision, targetDevice, params, unsqueezeParam) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, unsqueezeParam.shape, targetDevice, params) << "_" << - unsqueezeParam.fakeQuantize << "_" << + result << get_test_case_name_by_params(netPrecision, unsqueezeParam.shape, targetDevice, params) << "_" << + unsqueezeParam.fakeQuantize << "_" << unsqueezeParam.unsqueezeAxes << "_" << params.updatePrecisions << "_" << unsqueezeParam.shape; @@ -63,12 +48,16 @@ std::string UnsqueezeTransformation::getTestCaseName(const testing::TestParamInf return result.str(); } void UnsqueezeTransformation::SetUp() { - ngraph::element::Type netPrecision; + abs_threshold = 1.0; + rel_threshold = 31.0; + ov::element::Type netPrecision; ov::pass::low_precision::LayerTransformation::Params params; UnsqueezeTransformationParam unsqueezeParam; std::tie(netPrecision, targetDevice, params, unsqueezeParam) = this->GetParam(); + init_input_shapes(unsqueezeParam.shape); + function = ngraph::builder::subgraph::UnsqueezeFunction::getOriginal( netPrecision, unsqueezeParam.shape, @@ -79,7 +68,7 @@ void UnsqueezeTransformation::SetUp() { } TEST_P(UnsqueezeTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp index 4124c033807f4e..ea410a89e677d7 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp @@ -8,24 +8,23 @@ #include #include #include -#include -#include +#include "transformations/init_node_info.hpp" #include "low_precision/variadic_split.hpp" #include "ov_lpt_models/variadic_split.hpp" namespace LayerTestsDefinitions { std::string VariadicSplitTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::element::Type netPrecision; - ngraph::PartialShape inputShapes; + ov::element::Type netPrecision; + ov::PartialShape inputShapes; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; VariadicSplitTransformationParam param; std::tie(netPrecision, inputShapes, targetDevice, params, param) = obj.param; std::ostringstream result; - result << getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) << "_" << - param.fakeQuantize << "_axis=" << param.splitedAxis << "_splitLengths={ "; + result << get_test_case_name_by_params(netPrecision, inputShapes, targetDevice, params) << "_" << + param.fakeQuantize << "_axis=" << param.splitedAxis << "_splitLengths={ "; for (size_t i = 0; i < param.splitLengths.size(); ++i) { result << param.splitLengths[i]; if (i != (param.splitLengths.size() - 1ul)) { @@ -36,29 +35,17 @@ std::string VariadicSplitTransformation::getTestCaseName(const testing::TestPara return result.str(); } -InferenceEngine::Blob::Ptr VariadicSplitTransformation::GenerateInput(const InferenceEngine::InputInfo& info) const { - 
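The GenerateInput overrides deleted here (and for the unsqueeze test above) existed only to keep input data inside the FakeQuantize interval. In the 2.0 test base that job moves to the shared generator, which later in this patch (generate_inputs.cpp) learns to read the interval straight from the FQ constants. The equivalent call with the tensor utilities, with type, shape and interval purely illustrative:

```cpp
// Illustrative values; in the patch the range comes from the FQ constants.
ov::Tensor input = ov::test::utils::create_and_fill_tensor_real_distribution(
    ov::element::f32, ov::Shape{1, 3, 16, 16},
    /*min=*/-12.8f, /*max=*/12.7f, /*seed=*/0);
```
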
ngraph::element::Type precision; - ngraph::PartialShape inputShape; - std::string targetDevice; - ov::pass::low_precision::LayerTransformation::Params params; - VariadicSplitTransformationParam param; - std::tie(precision, inputShape, targetDevice, params, param) = this->GetParam(); - const auto& fqOnData = param.fakeQuantize; - - return FuncTestUtils::createAndFillBlobConsistently( - info.getTensorDesc(), - static_cast(fqOnData.empty() ? 25.f : fqOnData.outputHighValues[0] - fqOnData.outputLowValues[0]), - static_cast(fqOnData.empty() ? -12.5f : fqOnData.outputLowValues[0]), - 1ul); -} void VariadicSplitTransformation::SetUp() { - ngraph::element::Type precision; - ngraph::PartialShape inputShape; + abs_threshold = 1.0; + ov::element::Type precision; + ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; VariadicSplitTransformationParam param; std::tie(precision, inputShape, targetDevice, params, param) = this->GetParam(); + init_input_shapes(inputShape); + function = ngraph::builder::subgraph::VariadicSplitFunction::getOriginal( precision, inputShape, @@ -68,6 +55,6 @@ void VariadicSplitTransformation::SetUp() { } TEST_P(VariadicSplitTransformation, CompareWithRefImpl) { - Run(); + run(); }; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/snippets/add.cpp b/src/tests/functional/plugin/shared/src/snippets/add.cpp index b0a695b0097106..5c4542f516bf5a 100644 --- a/src/tests/functional/plugin/shared/src/snippets/add.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/add.cpp @@ -106,8 +106,7 @@ std::string AddPair::getTestCaseName(testing::TestParamInfo class BenchmarkLayerTest : public BaseLayerTest { static_assert(std::is_base_of::value, - "BaseLayerTest should inherit from LayerTestsUtils::LayerTestsCommon"); + "BaseLayerTest should inherit from ov::test::SubgraphBaseTest"); public: static constexpr int kDefaultNumberOfAttempts = 100; @@ -226,6 +226,7 @@ class BenchmarkLayerTest : public BaseLayerTest { } void validate() override { + infer(); for (const auto& res : curr_bench_results_) { const auto& node_type_name = res.first; const auto curr_time = static_cast(res.second); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 8558504cbf6a14..eb29fa1094c984 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -11,8 +11,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp index 05e5fa6874c289..20806c90f728c3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/low_precision_transformations/layer_transformation.hpp @@ -11,11 +11,11 @@ #include #include -#include -#include +#include "ov_ops/type_relaxed.hpp" #include "low_precision/layer_transformation.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" +#include 
"shared_test_classes/base/ov_subgraph.hpp" +#include "functional_test_utils/skip_tests_config.hpp" namespace LayerTestsUtils { @@ -30,35 +30,39 @@ class LayerTransformationParamsNGraphFactory { class LayerTransformationParamsFactory : public LayerTransformationParamsNGraphFactory { }; -class LayerTransformation : virtual public LayerTestsUtils::LayerTestsCommon { +class LayerTransformation : virtual public ov::test::SubgraphBaseTest { protected: LayerTransformation(); - static InferenceEngine::Blob::Ptr GenerateInput( - const ngraph::element::Type precision, - const InferenceEngine::TensorDesc& tensorDesc, - const float k = 1.f); + static std::pair get_quantization_interval(ov::element::Type precision); - static std::pair getQuantizationInterval(const ngraph::element::Type precision); + static std::string to_string(const ov::pass::low_precision::LayerTransformation::Params& params); - static std::string toString(const ov::pass::low_precision::LayerTransformation::Params& params); - - static std::string getTestCaseNameByParams( - const InferenceEngine::Precision precision, - const InferenceEngine::SizeVector& inputShapes, + static std::string get_test_case_name_by_params( + ov::element::Type precision, + const ov::PartialShape& inputShapes, const std::string& targetDevice, const ov::pass::low_precision::LayerTransformation::Params& params); - static std::string getTestCaseNameByParams( - const ngraph::element::Type precision, - const ngraph::PartialShape& inputShapes, - const std::string& targetDevice, - const ov::pass::low_precision::LayerTransformation::Params& params); + // get runtime precision by operation friendly name + std::string get_runtime_precision(const std::string& layerName); + + // get runtime precision by operation type + std::string get_runtime_precision_by_type(const std::string& layerType); + + // get runtime precision by operation friendly name which can be fused + std::string get_runtime_precision_by_fused_name(const std::string& layerName); + + std::map get_runtime_info(); + + void init_input_shapes(const ov::PartialShape& shape); + + void init_input_shapes(const std::vector& shapes); }; typedef std::tuple< - InferenceEngine::Precision, - InferenceEngine::SizeVector, + ov::element::Type, + ov::Shape, std::string, ov::pass::low_precision::LayerTransformation::Params> LayerTransformationParams; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp index e5d8a854ffa087..4e92c0d8187502 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp @@ -5,11 +5,10 @@ #pragma once #include -#include #include #include +#include -#include "ngraph/op/detection_output.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp index 2574eef28c6f7e..0e7cf8de26d7a2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + 
+#include #include #include #include -#include -#include - +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,7 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" - +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp index 218fb3028f67e0..08761d7110d809 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + +#include #include #include #include -#include -#include - +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,6 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { using priorBoxSpecificParams = std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp index 60642609388e4a..1f35f829f5d61a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp @@ -4,16 +4,18 @@ #pragma once -#include -#include -#include +#include + +#include #include #include #include -#include -#include - +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" #include "ie_core.hpp" #include "ie_precision.hpp" @@ -23,6 +25,8 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp index b979473ead34d1..4cac8b28298c85 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp @@ -9,8 +9,6 @@ #include #include -#include "openvino/core/type/element_type.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { @@ -24,8 +22,7 @@ typedef std::tuple - constResultParams; + > constResultParams; class ConstantResultSubgraphTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { @@ -41,28 +38,3 @@ class ConstantResultSubgraphTest : public testing::WithParamInterface - constResultParams; - -class 
ConstantResultSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - void createGraph(const ConstantSubgraphType& type, - const InferenceEngine::SizeVector& inputShape, - const InferenceEngine::Precision& inputPrecision); - -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp index 5384d369b7b725..8bb69e5db0d3f8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp @@ -9,7 +9,6 @@ #include #include -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { @@ -18,28 +17,15 @@ namespace test { using parameterResultParams = std::tuple; // Device name -class ParameterResultSubgraphTestBase : public testing::WithParamInterface { +class ParameterResultSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); -protected: - std::shared_ptr createModel(const ov::PartialShape& shape); -}; - -class ParameterResultSubgraphTest : public ParameterResultSubgraphTestBase, virtual public ov::test::SubgraphBaseTest { protected: void SetUp() override; + std::shared_ptr createModel(const ov::PartialShape& shape); }; } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { - -class ParameterResultSubgraphTestLegacyApi : public ov::test::ParameterResultSubgraphTestBase, - virtual public LayerTestsUtils::LayerTestsCommon { -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp index e0b305c7e54369..703a3eb27e7cec 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp @@ -8,29 +8,25 @@ #include #include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/test_constants.hpp" -namespace SubgraphTestsDefinitions { +#include "shared_test_classes/base/ov_subgraph.hpp" +namespace ov { +namespace test { using ReduceEltwiseParamsTuple = typename std::tuple< - std::vector, // Input shapes - std::vector, // Axis to reduce order - ov::test::utils::OpType, // Scalar or vector type axis - bool, // Keep dims - InferenceEngine::Precision, // Network precision - std::string>; // Device name + ov::Shape, // Input shapes + std::vector, // Axis to reduce order + ov::test::utils::OpType, // Scalar or vector type axis + bool, // Keep dims + ov::element::Type, // Network precision + std::string>; // Device name -class ReduceEltwiseTest: - public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ +class ReduceEltwiseTest: public testing::WithParamInterface, + virtual 
public ov::test::SubgraphBaseStaticTest{ public: - std::shared_ptr fn; static std::string getTestCaseName(const testing::TestParamInfo &obj); protected: void SetUp() override; }; - -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp index 497c9a1c964e3c..ac72c943319c2a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp @@ -8,9 +8,32 @@ #include #include #include + #include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/test_constants.hpp" +namespace ov { +namespace test { +using ScaleShiftParamsTuple = typename std::tuple< + std::vector, //input shapes + ov::element::Type, //Model type + std::string, //Device name + std::vector, //scale + std::vector>; //shift + +class ScaleShiftLayerTest: + public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest{ +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov + +// legacy impl for npu repo namespace SubgraphTestsDefinitions { using ScaleShiftParamsTuple = typename std::tuple< diff --git a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp index 13b30787755769..ae9524bd2f41a1 100644 --- a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp +++ b/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp @@ -144,15 +144,9 @@ void LayerTestsCommon::QueryNetwork() { std::set actual; for (auto&& res : queryNetworkResult.supportedLayersMap) { - std::shared_ptr ctx = nullptr; - try { - // Try to take fully specified name from the context to match it with query network result for devices that support remote contexts - ctx = core->GetDefaultContext(targetDevice); - ASSERT_EQ(res.second, ctx->getDeviceName()); - } catch (...) 
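The layer_transformation.hpp hunk above replaces the two getTestCaseNameByParams overloads with a single ov:: variant and adds the runtime-precision queries; their implementations follow below. Hypothetical use inside a fixture deriving from LayerTestsUtils::LayerTransformation, with the layer type and expected precision illustrative only:

```cpp
TEST_P(FooTransformation, CheckRuntimePrecision) {
    run();
    // run() populates compiledModel; the helper walks its runtime graph.
    EXPECT_EQ("u8", get_runtime_precision_by_type("Convolution"));
}
```
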
{ - // otherwise, compare with originally used device name - ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice); - } + // compare with originally used device name + ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice); + actual.insert(res.first); } ASSERT_EQ(expected, actual); diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp index e625cc2bdbfe6c..cacdd7df76badb 100644 --- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp +++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp @@ -34,30 +34,19 @@ ov::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNG } LayerTransformation::LayerTransformation() { - threshold = 0.05; - auto& configuration = GetConfiguration(); + rel_threshold = 1.1; + abs_threshold = 1.0e-4; configuration[PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE] = PluginConfigParams::YES; } -InferenceEngine::Blob::Ptr LayerTransformation::GenerateInput( - const ngraph::element::Type precision, - const InferenceEngine::TensorDesc& tensorDesc, - const float k) { - const auto interval = getQuantizationInterval(precision); - const float low = interval.first / k; - const float hight = interval.second / k; - - return FuncTestUtils::createAndFillBlobConsistently(tensorDesc, hight - low, static_cast(low), 1ul); -} - -std::pair LayerTransformation::getQuantizationInterval(const ngraph::element::Type precision) { - const bool unsignedInterval = precision == ngraph::element::u8; +std::pair LayerTransformation::get_quantization_interval(ov::element::Type precision) { + const bool unsignedInterval = precision == ov::element::u8; const float low = unsignedInterval ? 0.f : -128.f; const float hight = unsignedInterval ? 
255.f : 127.f; return std::make_pair(low, hight); } -std::string LayerTransformation::toString(const ov::pass::low_precision::LayerTransformation::Params& params) { +std::string LayerTransformation::to_string(const ov::pass::low_precision::LayerTransformation::Params& params) { using namespace ov::pass::low_precision; std::ostringstream result; result << @@ -67,24 +56,96 @@ std::string LayerTransformation::toString(const ov::pass::low_precision::LayerTr return result.str(); } -std::string LayerTransformation::getTestCaseNameByParams( - const InferenceEngine::Precision precision, - const InferenceEngine::SizeVector& inputShapes, +std::string LayerTransformation::get_test_case_name_by_params( + ov::element::Type precision, + const ov::PartialShape& inputShapes, const std::string& targetDevice, const ov::pass::low_precision::LayerTransformation::Params& params) { std::ostringstream result; - result << precision.name() << "_" << ngraph::Shape(inputShapes) << "_" << targetDevice << "_" << toString(params); + result << precision << "_" << inputShapes << "_" << targetDevice << "_" << to_string(params); return result.str(); } -std::string LayerTransformation::getTestCaseNameByParams( - const ngraph::element::Type precision, - const ngraph::PartialShape& inputShapes, - const std::string& targetDevice, - const ov::pass::low_precision::LayerTransformation::Params& params) { - std::ostringstream result; - result << precision << "_" << inputShapes << "_" << targetDevice << "_" << toString(params); - return result.str(); +namespace { +template +std::string find_node_by_runtime_precision(const ov::CompiledModel& execNet, IsNodeF is_node_f) { + const std::shared_ptr& execFunction = execNet.get_runtime_model(); + + for (const auto& op : execFunction->get_ops()) { + if (!is_node_f(op)) + continue; + const ov::RTMap& rtInfo = op->get_rt_info(); + const auto& it = rtInfo.find("runtimePrecision"); + OPENVINO_ASSERT(it != rtInfo.end(), "Runtime precision is not found for node: ", op->get_friendly_name()); + return it->second.as(); + } + + return ""; +} +} // namespace + +std::string LayerTransformation::get_runtime_precision(const std::string& layerName) { + auto is_node_f = [layerName](const std::shared_ptr& op) { + return op->get_friendly_name() == layerName; + }; + return find_node_by_runtime_precision(compiledModel, is_node_f); +} + +std::string LayerTransformation::get_runtime_precision_by_type(const std::string& layerType) { + auto is_node_f = [layerType](const std::shared_ptr& op) { + const auto& rtInfo = op->get_rt_info(); + const auto& typeIt = rtInfo.find("layerType"); + + OPENVINO_ASSERT(typeIt != rtInfo.end(), "Layer is not found for type: ", layerType); + return typeIt->second.as() == layerType; + }; + return find_node_by_runtime_precision(compiledModel, is_node_f); +} + +namespace { +bool has_layer(const std::string& names, const std::string& layer_name) { + size_t beginPosition = 0ul; + size_t endPosition; + while ((endPosition = names.find(',', beginPosition)) != std::string::npos) { + if (names.substr(beginPosition, endPosition - beginPosition) == layer_name) + return true; + beginPosition = endPosition + 1; + } + + return names.substr(beginPosition, endPosition - beginPosition) == layer_name; +} +} // namespace + +std::string LayerTransformation::get_runtime_precision_by_fused_name(const std::string& layerName) { + auto is_node_f = [layerName](const std::shared_ptr& op) { + const auto& rtInfo = op->get_rt_info(); + + const auto& nameIt = rtInfo.find("originalLayersNames"); + 
OPENVINO_ASSERT(nameIt != rtInfo.end(), "originalLayersNames is not found for node: ", layerName); + return has_layer(nameIt->second.as(), layerName); + }; + return find_node_by_runtime_precision(compiledModel, is_node_f); +} + +std::map LayerTransformation::get_runtime_info() { + const ov::CompiledModel& execNet = compiledModel; + const std::shared_ptr& function = execNet.get_runtime_model(); + + std::map runtimeInfo; + for (const auto& op : function->get_ops()) { + runtimeInfo[op->get_friendly_name()] = op->get_rt_info(); + } + return runtimeInfo; +} + +void LayerTransformation::init_input_shapes(const ov::PartialShape& shape) { + std::pair> input_shapes(shape, { shape.to_shape() }); + SubgraphBaseTest::init_input_shapes({ input_shapes }); +} + +void LayerTransformation::init_input_shapes(const std::vector& shapes) { + auto input_shapes = ov::test::static_partial_shapes_to_test_representation(shapes); + SubgraphBaseTest::init_input_shapes(input_shapes); } } // namespace LayerTestsUtils diff --git a/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp b/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp index d6bf5c5b487c42..433da771cb2b6d 100644 --- a/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp +++ b/src/tests/functional/shared_test_classes/src/base/snippets_test_utils.cpp @@ -3,8 +3,9 @@ // #include "shared_test_classes/base/snippets_test_utils.hpp" + #include "functional_test_utils/skip_tests_config.hpp" -#include "exec_graph_info.hpp" +#include "openvino/runtime/exec_model_info.hpp" namespace ov { namespace test { @@ -17,7 +18,7 @@ void SnippetsTestsCommon::validateNumSubgraphs() { size_t num_subgraphs = 0; size_t num_nodes = 0; for (const auto &op : compiled_model->get_ops()) { - auto layer_type = op->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + auto layer_type = op->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); // todo: Ignore reorders only after (Const or Inputs) or before outputs. // Alternatively, force plain layouts for convolutions, matmuls, FCs, etc., so reorders won't be inserted. 
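Both the LPT helpers implemented above and the snippets check below lean on the same mechanism: the compiled model exposes a runtime graph whose nodes carry rt_info entries such as runtimePrecision, LAYER_TYPE and originalLayersNames. A minimal standalone query in the same spirit, assuming only an ov::CompiledModel:

```cpp
#include <string>
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/exec_model_info.hpp"

// Mirrors the checks above: walk the runtime model and count executed
// "Subgraph" nodes via the LAYER_TYPE rt_info entry.
size_t count_subgraphs(const ov::CompiledModel& compiled) {
    size_t num_subgraphs = 0;
    for (const auto& op : compiled.get_runtime_model()->get_ops()) {
        const auto& rt = op->get_rt_info();
        if (rt.at(ov::exec_model_info::LAYER_TYPE).as<std::string>() == "Subgraph")
            ++num_subgraphs;
    }
    return num_subgraphs;
}
```
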
if (layer_type == "Const" || diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 9c70fc47f3c2ff..6a982171cc6bcd 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -2,6 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include +#include +#include + #include "shared_test_classes/base/utils/generate_inputs.hpp" #include "openvino/op/ops.hpp" @@ -246,6 +250,41 @@ ov::Tensor generate(const std::shared_ptr& node, return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData); } +namespace { +template +bool get_const_value(const std::shared_ptr& node, float& value, const GetItemF& get_item_func) { + auto const_node = ov::as_type_ptr(node); + if (!const_node) + return false; + + auto const_value = const_node->cast_vector(); + + const auto it = get_item_func(const_value); + if (it == const_value.end()) { + return false; + } + value = *it; + return true; +} + +using Vec = std::vector; +bool get_fq_scalar_range(const std::shared_ptr &node, float& min_value, float& max_value) { + auto get_min_value = [](const Vec& v) { + return std::min_element(v.begin(), v.end()); + }; + if (!get_const_value(node->get_input_node_shared_ptr(1), min_value, get_min_value)) + return false; + + auto get_max_value = [](const Vec& v) { + return std::max_element(v.begin(), v.end()); + }; + if (!get_const_value(node->get_input_node_shared_ptr(2), max_value, get_max_value)) + return false; + + return true; +} +} // namespace + ov::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, @@ -297,6 +336,11 @@ ov::Tensor generate(const std::shared_ptr& node, case 4: return ov::test::utils::create_tensor(elemType, targetShape, outputHighData, outputHighData.size()); default: { + float min_value = {}, max_value = {}; + if (get_fq_scalar_range(node, min_value, max_value)) { + return ov::test::utils::create_and_fill_tensor_real_distribution(elemType, targetShape, min_value, max_value, 0); + } + InputGenerateData inGenData; inGenData.range = 10.f; inGenData.resolution = 1.0f; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp index 10f744cc31d3f1..0f4737e49f5733 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp @@ -230,8 +230,8 @@ void ActivationDynamicLayerTest::Run() { // make each parameter dimension dynamic with range {1 .. 
prev_dim * 2} for (const auto& parameter : params) { auto& dynamic_pshape = parameter->get_partial_shape(); - NGRAPH_CHECK(dynamic_pshape.rank().is_static(), - "tests are not prepared to work with dynamically ranked inputs"); + OPENVINO_ASSERT(dynamic_pshape.rank().is_static(), + "tests are not prepared to work with dynamically ranked inputs"); for (size_t i = 0; i < dynamic_pshape.rank().get_length(); ++i) { if (static_dims.count(i)) continue; @@ -244,7 +244,7 @@ void ActivationDynamicLayerTest::Run() { function->validate_nodes_and_infer_types(); const auto& results = function->get_results(); - NGRAPH_CHECK(results.size() == 1); + OPENVINO_ASSERT(results.size() == 1); ASSERT_EQ(results[0]->get_output_partial_shape(0), output_shape); // no inference and checks are done here -- just shape check because we miss CNNNetwork functionality // to handle dynamic inputs-outputs and test functionality to generate blob of a certain shape diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp index 95105c34b9a91a..95f9beabbed829 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp @@ -1,13 +1,12 @@ // Copyright (C) 2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/eye.hpp" - #include #include #include #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/eye.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp index cec0846756b65b..d8ed69769fc839 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/memory.hpp" - #include #include -#include #include #include "ngraph/pass/low_latency.hpp" +#include "openvino/op/util/variable.hpp" #include "openvino/op/util/variable_context.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/memory.hpp" using namespace ngraph; using ov::op::v1::Add; @@ -50,9 +50,9 @@ void MemoryTest::SetUp() { ApplyLowLatency(); } - auto hostTensor = std::make_shared(ngPrc, inputShape); + auto tensor = ov::Tensor(ngPrc, inputShape); auto variable_context = ov::op::util::VariableContext(); - auto variable_value = std::make_shared(hostTensor); + auto variable_value = std::make_shared(tensor); variable_context.set_variable_value(function->get_variable_by_id("v0"), variable_value); eval_context["VariableContext"] = variable_context; } @@ -116,21 +116,19 @@ std::vector>> MemoryTest::Cal auto referenceInputs = std::vector>(inputs.size()); auto refInputsTypes = std::vector(inputs.size()); - HostTensorVector inputTensors; + ov::TensorVector inputTensors; for (auto& input : inputs) { const auto& dataSize = input->byteSize(); const auto& tensorDesc = input->getTensorDesc(); auto memory = InferenceEngine::as(input); - IE_ASSERT(memory); + OPENVINO_ASSERT(memory); const auto lockedMemory = memory->wmap(); const auto buffer = lockedMemory.as(); - auto hostTensor = - std::make_shared(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(tensorDesc.getPrecision()), - tensorDesc.getDims()); - hostTensor->write(buffer, dataSize); 
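The memory.cpp hunk above is the core of the HostTensor retirement: an ov::Tensor owns a typed buffer, so raw bytes move with std::memcpy instead of HostTensor::write/read, and ov::Model::evaluate consumes plain ov::TensorVector. Sketched in isolation, with the model, input type/shape and source buffer assumed:

```cpp
#include <cstring>
#include "openvino/core/model.hpp"
#include "openvino/runtime/tensor.hpp"

// Assumes "model" has a single f32 input of shape {1, 10} and that src points
// to src_bytes of matching host data; a sketch of the migrated MemoryTest flow.
void evaluate_on_host(const std::shared_ptr<ov::Model>& model,
                      const void* src, size_t src_bytes) {
    ov::TensorVector inputs;
    inputs.emplace_back(ov::element::f32, ov::Shape{1, 10});
    std::memcpy(inputs.back().data(), src, src_bytes);

    // Outputs may be default-constructed; evaluate() fills them in.
    ov::TensorVector outputs(model->get_results().size());
    ov::EvaluationContext ctx;  // would carry the VariableContext for ReadValue/Assign
    model->evaluate(outputs, inputs, ctx);
}
```
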
- inputTensors.push_back(hostTensor); + inputTensors.emplace_back(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(tensorDesc.getPrecision()), + tensorDesc.getDims()); + std::memcpy(inputTensors.back().data(), buffer, dataSize); } // evaluate method is not implemented for TI op. @@ -139,19 +137,14 @@ std::vector>> MemoryTest::Cal manager.run_passes(function); const auto& outInfo = executableNetwork.GetOutputsInfo(); - HostTensorVector outputTensors(outInfo.size()); - for (auto& outTensor : outputTensors) { - outTensor = std::make_shared(); - } - OPENVINO_SUPPRESS_DEPRECATED_START + ov::TensorVector outputTensors(outInfo.size()); function->evaluate(outputTensors, inputTensors, eval_context); - OPENVINO_SUPPRESS_DEPRECATED_END std::vector>> outputs(outInfo.size()); for (size_t idx = 0; idx < outInfo.size(); ++idx) { - outputs[idx].first = outputTensors[idx]->get_element_type(); - outputs[idx].second.resize(outputTensors[idx]->get_size_in_bytes()); - outputTensors[idx]->read(outputs[idx].second.data(), outputTensors[idx]->get_size_in_bytes()); + outputs[idx].first = outputTensors[idx].get_element_type(); + outputs[idx].second.resize(outputTensors[idx].get_byte_size()); + std::memcpy(outputs[idx].second.data(), outputTensors[idx].data(), outputTensors[idx].get_byte_size()); } return outputs; } @@ -188,14 +181,14 @@ void MemoryTest::CreateTIFunc() { void MemoryTest::CreateCommonFunc() { ov::ParameterVector param{std::make_shared(ngPrc, ov::Shape(inputShape))}; const auto variable_info = targetDevice == ov::test::utils::DEVICE_GPU - ? VariableInfo{Shape{inputShape}, ngPrc, "v0"} - : VariableInfo{inputShape, ngPrc, "v0"}; - auto variable = std::make_shared(variable_info); + ? ov::op::util::VariableInfo{Shape{inputShape}, ngPrc, "v0"} + : ov::op::util::VariableInfo{inputShape, ngPrc, "v0"}; + auto variable = std::make_shared(variable_info); auto read_value = CreateReadValueOp(param.at(0), variable); auto add = std::make_shared(read_value, param.at(0)); auto assign = CreateAssignOp(add, variable); auto res = std::make_shared(add); - function = std::make_shared(ResultVector{res}, SinkVector{assign}, param, "TestMemory"); + function = std::make_shared(ResultVector{res}, ov::SinkVector{assign}, param, "TestMemory"); } void MemoryTest::ApplyLowLatency() { @@ -213,4 +206,3 @@ void MemoryTest::ApplyLowLatency() { } } // namespace LayerTestsDefinitions - diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index eedb35fe746ac2..acc382398bb344 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -5,6 +5,7 @@ #include "shared_test_classes/single_layer/reverse.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/single_layer/reverse.hpp" using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 87e02e82dd7f70..9f23a9719455ec 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -6,6 +6,9 @@ #include "ov_models/builders.hpp" #include "openvino/core/enum_names.hpp" +#include "openvino/opsets/opset3.hpp" +#include "ov_models/builders.hpp" +#include 
"shared_test_classes/single_layer/roi_align.hpp" using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp index 3a67c205ddec24..374d5aa69fe121 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp @@ -120,7 +120,7 @@ namespace LayerTestsDefinitions { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, sequence_axis); } else { - NGRAPH_CHECK(false, "Bidirectional case is not supported."); + OPENVINO_ASSERT(false, "Bidirectional case is not supported."); } tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[1]); @@ -169,7 +169,7 @@ namespace LayerTestsDefinitions { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { - NGRAPH_CHECK(false, "Bidirectional case is not supported."); + OPENVINO_ASSERT(false, "Bidirectional case is not supported."); } tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]); @@ -214,7 +214,7 @@ namespace LayerTestsDefinitions { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { - NGRAPH_CHECK(false, "Bidirectional case is not supported."); + OPENVINO_ASSERT(false, "Bidirectional case is not supported."); } tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp index a7ecf6121175f3..50c502141927b3 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp @@ -75,58 +75,3 @@ void ConstantResultSubgraphTest::SetUp() { } } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { - -std::string ConstantResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - ConstantSubgraphType type; - InferenceEngine::SizeVector IS; - InferenceEngine::Precision inputPrecision; - std::string targetDevice; - - std::tie(type, IS, inputPrecision, targetDevice) = obj.param; - std::ostringstream result; - result << "SubgraphType=" << type << "_"; - result << "IS=" << ov::test::utils::vec2str(IS) << "_"; - result << "inPrc=" << inputPrecision << "_"; - result << "Device=" << targetDevice; - return result.str(); -} - -void ConstantResultSubgraphTest::createGraph(const ConstantSubgraphType& type, - const InferenceEngine::SizeVector& inputShape, - const InferenceEngine::Precision& inputPrecision) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - - ov::ParameterVector params; - ov::ResultVector results; - switch (type) { - case ConstantSubgraphType::SINGLE_COMPONENT: { - auto input = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input)); - break; - } - case ConstantSubgraphType::SEVERAL_COMPONENT: { - auto input1 = 
ov::test::utils::deprecated::make_constant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input1)); - auto input2 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input2)); - break; - } - default: { - throw std::runtime_error("Unsupported constant graph type"); - } - } - function = std::make_shared(results, params, "ConstResult"); -} - -void ConstantResultSubgraphTest::SetUp() { - ConstantSubgraphType type; - InferenceEngine::SizeVector IS; - InferenceEngine::Precision inputPrecision; - std::tie(type, IS, inputPrecision, targetDevice) = this->GetParam(); - - createGraph(type, IS, inputPrecision); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp index 2e184d9289c254..4f65fd88357d77 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp @@ -76,10 +76,10 @@ void MemoryLSTMCellTest::SetUp() { auto permute_in = std::make_shared(unsqueeze_input, permute_in_params); auto cell_memory_constant = ov::test::utils::deprecated::make_constant(element_type, cell_memory_dims, cell_memory_init); - auto var_cell = - std::make_shared(VariableInfo{PartialShape(cell_memory_dims), element_type, "cell_state_1"}); - auto var_hidden = - std::make_shared(VariableInfo{PartialShape(cell_memory_dims), element_type, "hidden_state_1"}); + auto var_cell = std::make_shared( + ov::op::util::VariableInfo{PartialShape(cell_memory_dims), element_type, "cell_state_1"}); + auto var_hidden = std::make_shared( + ov::op::util::VariableInfo{PartialShape(cell_memory_dims), element_type, "hidden_state_1"}); auto cell_memory_read = std::make_shared(cell_memory_constant, var_cell); auto hidden_memory_constant = diff --git a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp index 901a00ef841c8b..07f942798669c9 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp @@ -2,13 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/subgraph/mul_conv_fusion.hpp" - #include "common_test_utils/graph_comparator.hpp" -#include "openvino/core/validation_util.hpp" -#include "openvino/pass/manager.hpp" #include "common_test_utils/node_builders/constant.hpp" +#include "openvino/pass/manager.hpp" +#include "shared_test_classes/subgraph/mul_conv_fusion.hpp" #include "transformations/common_optimizations/mul_conv_fusion.hpp" +#include "validation_util.hpp" namespace ov { namespace test { @@ -82,7 +81,7 @@ void MulConvFusion::SetUp() { std::shared_ptr conv; if (conv_type == ov::op::v1::Convolution::get_type_info_static()) { weights = std::make_shared(weights, mul_const); - weights = ov::get_constant_from_source(weights); + weights = ov::util::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp index 1bf29f54c76b1a..d93b83d75388ab 100644 --- 
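One detail of the mul_conv_fusion change above deserves a note: the helper moved from ov::get_constant_from_source to ov::util::get_constant_from_source ("validation_util.hpp"), and it is what collapses weights * mul_const into a single Constant before the convolution is built. In isolation, assuming weights and mul_const are ov::Output<ov::Node> values and that the Multiply opset version matches the test:

```cpp
// get_constant_from_source() returns nullptr when folding is impossible,
// which the test treats as a failure.
auto product = std::make_shared<ov::op::v1::Multiply>(weights, mul_const);
std::shared_ptr<ov::op::v0::Constant> folded =
    ov::util::get_constant_from_source(product);
ASSERT_NE(nullptr, folded);
```
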
a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp @@ -7,7 +7,7 @@ namespace ov { namespace test { -std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::TestParamInfo& obj) { +std::string ParameterResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { ov::test::InputShape inShape; std::string targetDevice; std::tie(inShape, targetDevice) = obj.param; @@ -22,7 +22,7 @@ std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::Test return result.str(); } -std::shared_ptr ParameterResultSubgraphTestBase::createModel(const ov::PartialShape& shape) { +std::shared_ptr ParameterResultSubgraphTest::createModel(const ov::PartialShape& shape) { auto parameter = std::make_shared(ov::element::f32, shape); const ngraph::ResultVector results{std::make_shared(parameter)}; ngraph::ParameterVector params = {parameter}; @@ -41,15 +41,3 @@ void ParameterResultSubgraphTest::SetUp() { } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { -void ParameterResultSubgraphTestLegacyApi::SetUp() { - ov::test::InputShape inShape; - std::tie(inShape, targetDevice) = this->GetParam(); - - OPENVINO_ASSERT(inShape.first.is_static()); - - function = createModel(inShape.first); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp index c852fefdb401b7..05ed5e6c744128 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/subgraph/parameter_shapeof_result.hpp" - #include +#include "shared_test_classes/subgraph/parameter_shapeof_result.hpp" + namespace SubgraphTestsDefinitions { std::string ParameterShapeOfResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index f5c141432f9d9c..f354f14d874dd6 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -7,6 +7,7 @@ #include "ov_models/builders.hpp" #include "common_test_utils/node_builders/constant.hpp" #include "ov_models/utils/ov_helpers.hpp" +#include "ie_common.h" #include "common_test_utils/node_builders/fake_quantize.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp index 48d03880089670..684efb24ae38d3 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp @@ -171,7 +171,7 @@ void QuantizedConvolutionBatchNorm::TearDown() { auto get_layer_type = [] (const std::shared_ptr& node) -> const std::string& { const auto& rt_info = node->get_rt_info(); auto it = rt_info.find(ov::exec_model_info::LAYER_TYPE); - IE_ASSERT(it != 
rt_info.end()); + OPENVINO_ASSERT(it != rt_info.end()); return it->second.as(); }; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp index d9b52b1415b4b6..dbbbd559ea7ae7 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp @@ -2,40 +2,48 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" #include "shared_test_classes/subgraph/reduce_eltwise.hpp" + +#include "common_test_utils/node_builders/constant.hpp" #include "common_test_utils/node_builders/eltwise.hpp" +#include "common_test_utils/test_enums.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" -namespace SubgraphTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/reduce_sum.hpp" + +namespace ov { +namespace test { std::string ReduceEltwiseTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShapes; + ov::Shape inputShapes; std::vector axes; ov::test::utils::OpType opType; bool keepDims; - InferenceEngine::Precision netPrecision; + ov::element::Type type; std::string targetName; - std::tie(inputShapes, axes, opType, keepDims, netPrecision, targetName) = obj.param; + std::tie(inputShapes, axes, opType, keepDims, type, targetName) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; result << "axes=" << ov::test::utils::vec2str(axes) << "_"; result << "opType=" << opType << "_"; if (keepDims) result << "KeepDims_"; - result << "netPRC=" << netPrecision.name() << "_"; + result << "netPRC=" << type.get_type_name() << "_"; result << "targetDevice=" << targetName; return result.str(); } void ReduceEltwiseTest::SetUp() { - std::vector inputShape; + ov::Shape inputShape; std::vector axes; ov::test::utils::OpType opType; bool keepDims; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, axes, opType, keepDims, netPrecision, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; + ov::element::Type type; + std::tie(inputShape, axes, opType, keepDims, type, targetDevice) = this->GetParam(); + + ov::ParameterVector params{std::make_shared(type, inputShape)}; std::vector shapeAxes; switch (opType) { @@ -51,17 +59,19 @@ void ReduceEltwiseTest::SetUp() { default: FAIL() << "Reduce op doesn't support operation type: " << opType; } - auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + auto reductionAxesNode = std::dynamic_pointer_cast( + std::make_shared(ov::element::i64, ov::Shape(shapeAxes), axes)); auto reduce = std::make_shared(params[0], reductionAxesNode, keepDims); std::vector constShape(reduce.get()->get_output_partial_shape(0).rank().get_length(), 1); ASSERT_GT(constShape.size(), 2); constShape[2] = inputShape.back(); - auto constant = ov::test::utils::deprecated::make_constant(ngPrc, constShape, {}, true); - auto eltw = ov::test::utils::make_eltwise(reduce, constant, ngraph::helpers::EltwiseTypes::MULTIPLY); - ngraph::ResultVector results{std::make_shared(eltw)}; - function = std::make_shared(results, params, "ReduceEltwise"); + 
+    auto constant_tensor = ov::test::utils::create_and_fill_tensor(type, constShape);
+    auto constant = std::make_shared<ov::op::v0::Constant>(constant_tensor);
+    auto eltw = ov::test::utils::make_eltwise(reduce, constant, ov::test::utils::EltwiseTypes::MULTIPLY);
+    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(eltw)};
+    function = std::make_shared<ov::Model>(results, params, "ReduceEltwise");
 }
-}  // namespace SubgraphTestsDefinitions
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
index 36ea849035635c..cda8494ecd99df 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
@@ -2,8 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
 #include "shared_test_classes/subgraph/reshape_permute_reshape.hpp"
+#include "openvino/util/common_util.hpp"

 namespace SubgraphTestsDefinitions {
     std::string ReshapePermuteReshape::getTestCaseName(const testing::TestParamInfo &obj) {
@@ -23,7 +23,7 @@ namespace SubgraphTestsDefinitions {
         std::vector<std::vector<size_t>> inputs;
         InferenceEngine::Precision netPrecision;
         std::tie(inputs, netPrecision, targetDevice) = this->GetParam();
-        const std::size_t input_dim = InferenceEngine::details::product(inputs[0]);
+        const std::size_t input_dim = ov::util::product(inputs[0]);
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         std::vector<size_t> shape_input{1, input_dim};
         ov::ParameterVector input {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shape_input))};
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
index 5f2a36a584769a..ec29d122715a65 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
@@ -4,6 +4,50 @@

 #include "shared_test_classes/subgraph/scaleshift.hpp"

+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/add.hpp"
+
+namespace ov {
+namespace test {
+std::string ScaleShiftLayerTest::getTestCaseName(const testing::TestParamInfo &obj) {
+    std::vector<ov::Shape> inputShapes;
+    ov::element::Type type;
+    std::string targetName;
+    std::vector<float> scale, shift;
+    std::tie(inputShapes, type, targetName, scale, shift) = obj.param;
+    std::ostringstream results;
+
+    results << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
+    results << "Scale=" << ov::test::utils::vec2str(scale) << "_";
+    results << "Shift=" << ov::test::utils::vec2str(shift) << "_";
+    results << "netPRC=" << type.get_type_name() << "_";
+    results << "targetDevice=" << targetName << "_";
+    return results.str();
+}
+
+void ScaleShiftLayerTest::SetUp() {
+    std::vector<ov::Shape> inputShapes;
+    ov::element::Type type;
+    std::vector<float> scale, shift;
+    std::tie(inputShapes, type, targetDevice, scale, shift) = this->GetParam();
+    auto paramsShape = ov::Shape{1};
+    if (inputShapes.size() > 1)
+        paramsShape = inputShapes[1];
+
+    ov::ParameterVector paramsIn{std::make_shared<ov::op::v0::Parameter>(type, inputShapes[0])};
+    auto mul_const = std::make_shared<ov::op::v0::Constant>(type, paramsShape, scale);
+    auto mul = std::make_shared<ov::op::v1::Multiply>(paramsIn[0], mul_const);
+    auto add_const = std::make_shared<ov::op::v0::Constant>(type, paramsShape, shift);
+    auto add = std::make_shared<ov::op::v1::Add>(mul, add_const);
+    function = std::make_shared<ov::Model>(add, paramsIn, "scale_shift");
+}
+}  // namespace test
+}  // namespace ov
+
+// legacy impl for npu repo
 namespace SubgraphTestsDefinitions {
 std::string ScaleShiftLayerTest::getTestCaseName(const testing::TestParamInfo &obj) {
     std::vector<std::vector<size_t>> inputShapes;
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp
index 6398f540bd5f5c..bc5567d288c132 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp
@@ -23,7 +23,7 @@ class DequantizationOperations {
             return equal(value);
         }

-        ov::element::Type outPrecision = element::undefined;
+        ov::element::Type outPrecision = ov::element::undefined;
         bool addDequantizationAttribute = true;
     private:
         bool isEmpty;
@@ -120,7 +120,7 @@ inline std::ostream& operator<<(std::ostream& out, const DequantizationOperation
     if (convert.empty()) {
         return out << "{}";
     }
-    return out << "_" << (convert.outPrecision != element::undefined ? convert.outPrecision.get_type_name() : "");
+    return out << "_" << (convert.outPrecision != ov::element::undefined ? convert.outPrecision.get_type_name() : "");
 }

 inline std::ostream& operator<<(std::ostream& out, const DequantizationOperations::Subtract& subtract) {
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp
index 3eebe42611a01e..e58a56c609a924 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp
@@ -36,14 +36,13 @@ class MatMulFunction {
         const ov::Shape& inputShape2,
         const FakeQuantizeOnData& fqOnData2);

-    static std::shared_ptr<ov::Model> getOriginal(
-        const element::Type netPrecision,
-        const ov::PartialShape& inputShape1,
-        const ov::element::Type precisionBeforeDequantization1,
-        const DequantizationOperations& dequantization1,
-        const ov::PartialShape& inputShape2,
-        const ov::element::Type precisionBeforeDequantization2,
-        const DequantizationOperations& dequantization2);
+    static std::shared_ptr<ov::Model> getOriginal(const ov::element::Type netPrecision,
+                                                  const ov::PartialShape& inputShape1,
+                                                  const ov::element::Type precisionBeforeDequantization1,
+                                                  const DequantizationOperations& dequantization1,
+                                                  const ov::PartialShape& inputShape2,
+                                                  const ov::element::Type precisionBeforeDequantization2,
+                                                  const DequantizationOperations& dequantization2);

     static std::shared_ptr<ov::Model> getOriginal(
         const ov::element::Type precision,
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp
index 8c57f2a3a2fd92..5b4e1b700874b2 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp
@@ -14,9 +14,9 @@ namespace subgraph {
 class MVNFunction {
 public:
     static std::shared_ptr<ov::Model> getOriginal(
-        const element::Type precision,
+        const ov::element::Type precision,
         const ov::PartialShape& inputShape,
-        const AxisSet& reductionAxes,
+        const ov::AxisSet& reductionAxes,
         const bool& normalizeVariance,
         const ov::element::Type precisionBeforeDequantization,
         const ngraph::builder::subgraph::DequantizationOperations& dequantization,
@@ -25,13 +25,13 @@ class MVNFunction {
     static std::shared_ptr<ov::Model> getOriginal(
         const ov::element::Type precision,
         const ov::PartialShape& inputShape,
-        const AxisSet& reductionAxes,
+        const ov::AxisSet& reductionAxes,
         const bool& normalizeVariance);

     static std::shared_ptr<ov::Model> getReference(
-        const element::Type precision,
+        const ov::element::Type precision,
         const ov::PartialShape& inputShape,
-        const AxisSet& reductionAxes,
+        const ov::AxisSet& reductionAxes,
         const bool& normalizeVariance,
         const ov::element::Type precisionBeforeDequantization,
         const ngraph::builder::subgraph::DequantizationOperations& dequantizationBefore,
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp
index a3670af138f1ae..f77704b025b52d 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp
@@ -19,7 +19,7 @@ namespace subgraph {
 class SplitFunction {
 public:
     static std::shared_ptr<ov::Model> getOriginal(
-        const element::Type& precision,
+        const ov::element::Type& precision,
         const ov::PartialShape& inputShape,
         const ov::element::Type precisionBeforeDequantization,
         const ngraph::builder::subgraph::DequantizationOperations& dequantization,
@@ -34,7 +34,7 @@ class SplitFunction {
         const size_t numSplit);

     static std::shared_ptr<ov::Model> getReference(
-        const element::Type& precision,
+        const ov::element::Type& precision,
         const ov::PartialShape& inputShape,
         const ov::element::Type inputPrecision,
         const ngraph::builder::subgraph::DequantizationOperations& dequantizationBefore,
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp
index 364310482edef3..73c8daa29b5a3a 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp
@@ -17,9 +17,8 @@ class TransformationsAfterSplitFunction {
 public:
     static std::shared_ptr<ov::Model> get(const std::string transformationName);

-    static std::shared_ptr<ov::Node> getLayerByTransformationName(
-        const std::string transformationName,
-        const Output<Node> parent);
+    static std::shared_ptr<ov::Node> getLayerByTransformationName(const std::string transformationName,
+                                                                  const ov::Output<ov::Node> parent);
 };

 }  // namespace subgraph
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp
index 21edaeb9c81522..2f68cf4de340cf 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp
@@ -22,6 +22,9 @@ namespace ngraph {
 namespace builder {
 namespace subgraph {

+using ov::op::util::Variable;
+using ov::op::util::VariableInfo;
+
 std::shared_ptr<ov::Model> AssignAndReadValueFunction::getOriginal(
     const ov::PartialShape& inputShape,
     const element::Type& inputPrecision,
diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp
index 3b6e6c8be448ed..64c2ec8d99576e 100644
--- a/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp
+++ b/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp
@@ -26,7 +26,7 @@ namespace {
 /// \return The vector with axes indexes mapping .
 ///
 std::vector<size_t> get_axes_mapping(const Shape& output_shape, const AxisSet& broadcast_axes) {
-    NGRAPH_CHECK((broadcast_axes.size() <= output_shape.size()));
+    OPENVINO_ASSERT((broadcast_axes.size() <= output_shape.size()));
     std::vector<size_t> axes_mapping(output_shape.size());
     iota(axes_mapping.begin(), axes_mapping.end(), 0);
     for (auto i = broadcast_axes.rbegin(); i != broadcast_axes.rend(); ++i) {
diff --git a/src/tests/ov_helpers/ov_models/src/gather_nd.cpp b/src/tests/ov_helpers/ov_models/src/gather_nd.cpp
index c44f8e640c4fc9..5a1a1c7ea60d2a 100644
--- a/src/tests/ov_helpers/ov_models/src/gather_nd.cpp
+++ b/src/tests/ov_helpers/ov_models/src/gather_nd.cpp
@@ -34,7 +34,7 @@ std::shared_ptr<ov::Node> makeGatherND(const ov::Output<ov::Node>& dataNode,
                 indicesData++;
             }
         }
-        return op::v0::Constant::create(indicesType, indicesShape, indicesValues);
+        return ov::op::v0::Constant::create(indicesType, indicesShape, indicesValues);
     }();

     auto gatherNdNode = std::make_shared<ov::op::v5::GatherND>(dataNode, indices, batchDims);
@@ -64,7 +64,7 @@ std::shared_ptr<ov::Node> makeGatherND8(const ov::Output<ov::Node>& dataNode,
                 indicesData++;
             }
         }
-        return op::v0::Constant::create(indicesType, indicesShape, indicesValues);
+        return ov::op::v0::Constant::create(indicesType, indicesShape, indicesValues);
     }();

     auto gatherNdNode = std::make_shared<ov::op::v8::GatherND>(dataNode, indices, batchDims);
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
index 4f318ef98b3f03..a64ae64f01e9e7 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp
@@ -204,6 +204,20 @@ void inline fill_data_random(T* pointer,
     }
 }

+template <class T>
+void inline fill_data_ptr_real_random_float(T* pointer,
+                                            std::size_t size,
+                                            const float min,
+                                            const float max,
+                                            const int seed) {
+    std::mt19937 gen(seed);
+    std::uniform_real_distribution<float> dist(min, max);
+
+    for (std::size_t i = 0; i < size; i++) {
+        pointer[i] = static_cast<T>(dist(gen));
+    }
+}
+
 template <class T>
 void inline fill_data_random_act_dft(T* pointer,
                                      std::size_t size,
@@ -525,6 +539,14 @@ inline ov::float16 ie_abs(const ov::float16& val) {
     return ov::float16::from_bits(val.to_bits() & 0x7FFF);
 }

+inline ov::float8_e4m3 ie_abs(const ov::float8_e4m3& val) {
+    return ov::float8_e4m3::from_bits(val.to_bits() & 0x7F);
+}
+
+inline ov::float8_e5m2 ie_abs(const ov::float8_e5m2& val) {
+    return ov::float8_e5m2::from_bits(val.to_bits() & 0x7F);
+}
+
 }  // namespace utils
 }  // namespace test
 }  // namespace ov
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp
index cdebe22701d5c0..f4d62900dec31b 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp
@@ -75,9 +75,16 @@ class FunctionsComparator {
         return compare(f, f_ref);
     }

+    void set_accuracy_thresholds(float abs_threshold, float rel_threshold) {
+        m_abs_threshold = abs_threshold;
+        m_rel_threshold = rel_threshold;
+    }
+
 private:
     explicit FunctionsComparator(CmpValues f) noexcept : m_comparison_flags(f) {}
     CmpValues m_comparison_flags;
+    float m_abs_threshold = 1e-7f;
+    float m_rel_threshold = 1e-7f;
 };

 ///
@@ -295,7 +302,10 @@ class Comparator {
     using Result = FunctionsComparator::Result;
     using ComparedNodes = std::pair<ov::Node*, ov::Node*>;

-    explicit Comparator(CmpValues f) : m_comparison_flags(f) {}
+    explicit Comparator(CmpValues f, float abs_threshold = 1e-7f, float rel_threshold = 1e-7f)
+        : m_comparison_flags(f),
+          m_abs_threshold(abs_threshold),
+          m_rel_threshold(rel_threshold) {}

     Result compare(const std::shared_ptr<ov::Model>& f, const std::shared_ptr<ov::Model>& f_ref);
@@ -337,6 +347,9 @@ class Comparator {

     std::queue<ComparedNodes> q;
     std::unordered_set<ov::Node*> used;
+
+    float m_abs_threshold = 1e-7f;
+    float m_rel_threshold = 1e-7f;
 };

 inline namespace tools {
@@ -1014,4 +1027,6 @@ struct AccuracyCheckResult {
 };

 AccuracyCheckResult accuracy_check(const std::shared_ptr<ov::Model>& ref_function,
-                                   const std::shared_ptr<ov::Model>& cur_function);
+                                   const std::shared_ptr<ov::Model>& cur_function,
+                                   float abs_threshold,
+                                   float rel_threshold);
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
index 57f8060e281f02..3c9d97eeb72822 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
@@ -71,6 +71,12 @@ ov::runtime::Tensor create_and_fill_tensor_consistently(const ov::element::Type
                                                         const int32_t start_from,
                                                         const int32_t resolution);

+ov::runtime::Tensor create_and_fill_tensor_real_distribution(const ov::element::Type element_type,
+                                                             const ov::Shape& shape,
+                                                             const float min,
+                                                             const float max,
+                                                             const int seed);
+
 void compare(const ov::Tensor& expected,
              const ov::Tensor& actual,
              const double abs_threshold = std::numeric_limits<double>::max(),
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_test_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_test_utils.hpp
index 87d12fd0cee2ac..ba7b56526284a4 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_test_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_test_utils.hpp
@@ -35,6 +35,10 @@ class TransformationTestsF : public ov::test::TestsCommon {
     ov::pass::Manager manager;
     FunctionsComparator comparator;

+protected:
+    float m_abs_threshold = 5e-4f;
+    float m_rel_threshold = 1e-3f;
+
 private:
     std::shared_ptr m_unh;
     bool m_disable_rt_info_check{false};
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp
index f2b26523c5cf58..7917c22fb6fcf9 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp
@@ -8,11 +8,6 @@
 #include "gmock/gmock-matchers.h"
 #include "gtest/gtest.h"
-#include "ie_blob.h"
-#include "ie_data.h"
-#include "ie_input_info.hpp"
-#include "ie_preprocess.hpp"
-#include "openvino/core/deprecated.hpp"
 #include "openvino/util/pp.hpp"

 inline bool strContains(const std::string& str, const std::string& substr) {
@@ -29,20 +24,6 @@ inline bool strDoesnotContain(const std::string& str, const std::string& substr)

 #define EXPECT_STR_CONTAINS(str, substr) EXPECT_PRED2(&strContains, str, substr)

-#define ASSERT_BLOB_EQ(lhs, rhs) compare_blob(lhs, rhs)
-
-#define ASSERT_DIMS_EQ(lhs, rhs) compare_dims(lhs, rhs)
-
-#define ASSERT_DATA_EQ(lhs, rhs) compare_data(lhs, rhs)
-
-#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) compare_preprocess(lhs, rhs)
-
-#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs)
compare_preprocess_info(lhs, rhs) - -#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) compare_outputs_info(lhs, rhs) - -#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) compare_inputs_info(lhs, rhs) - #define ASSERT_STRINGEQ(lhs, rhs) compare_cpp_strings(lhs, rhs) #define OV_ASSERT_NO_THROW(statement) OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_) @@ -86,67 +67,6 @@ inline bool strDoesnotContain(const std::string& str, const std::string& substr) FAIL() << "Unknown exception"; \ } -OPENVINO_SUPPRESS_DEPRECATED_START -inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) { - ASSERT_EQ(lhs.get(), rhs.get()); - // TODO: add blob specific comparison for general case -} - -inline void compare_dims(const InferenceEngine::SizeVector& lhs, const InferenceEngine::SizeVector& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - for (size_t i = 0; i < lhs.size(); i++) { - ASSERT_EQ(lhs[i], rhs[i]); - } -} - -inline void compare_data(const InferenceEngine::Data& lhs, const InferenceEngine::Data& rhs) { - ASSERT_DIMS_EQ(lhs.getDims(), rhs.getDims()); - ASSERT_STREQ(lhs.getName().c_str(), rhs.getName().c_str()); - ASSERT_EQ(lhs.getPrecision(), rhs.getPrecision()); -} - -inline void compare_preprocess(const InferenceEngine::PreProcessChannel& lhs, - const InferenceEngine::PreProcessChannel& rhs) { - ASSERT_FLOAT_EQ(lhs.meanValue, rhs.meanValue); - ASSERT_FLOAT_EQ(lhs.stdScale, rhs.stdScale); - ASSERT_BLOB_EQ(lhs.meanData, rhs.meanData); -} - -inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo& lhs, - const InferenceEngine::PreProcessInfo& rhs) { - ASSERT_EQ(lhs.getMeanVariant(), rhs.getMeanVariant()); - ASSERT_EQ(lhs.getNumberOfChannels(), rhs.getNumberOfChannels()); - for (size_t i = 0; i < lhs.getNumberOfChannels(); i++) { - ASSERT_PREPROCESS_CHANNEL_EQ(*lhs[i].get(), *rhs[i].get()); - } -} - -inline void compare_outputs_info(const InferenceEngine::OutputsDataMap& lhs, - const InferenceEngine::OutputsDataMap& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - auto i = lhs.begin(); - auto j = rhs.begin(); - - for (size_t k = 0; k != lhs.size(); k++, i++, j++) { - ASSERT_STREQ(i->first.c_str(), j->first.c_str()); - ASSERT_DATA_EQ(*i->second.get(), *j->second.get()); - } -} - -inline void compare_inputs_info(const InferenceEngine::InputsDataMap& lhs, const InferenceEngine::InputsDataMap& rhs) { - ASSERT_EQ(lhs.size(), rhs.size()); - auto i = lhs.begin(); - auto j = rhs.begin(); - - for (size_t k = 0; k != lhs.size(); k++, i++, j++) { - ASSERT_STREQ(i->first.c_str(), j->first.c_str()); - ASSERT_DIMS_EQ(i->second->getTensorDesc().getDims(), j->second->getTensorDesc().getDims()); - ASSERT_PREPROCESS_INFO_EQ(i->second->getPreProcess(), j->second->getPreProcess()); - ASSERT_DATA_EQ(*i->second->getInputData().get(), *j->second->getInputData().get()); - } -} -OPENVINO_SUPPRESS_DEPRECATED_END - inline void compare_cpp_strings(const std::string& lhs, const std::string& rhs) { ASSERT_STREQ(lhs.c_str(), rhs.c_str()); } diff --git a/src/tests/test_utils/common_test_utils/src/data_utils.cpp b/src/tests/test_utils/common_test_utils/src/data_utils.cpp index 61860b6de3e8eb..1b662ed3df9fbc 100644 --- a/src/tests/test_utils/common_test_utils/src/data_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/data_utils.cpp @@ -5,7 +5,6 @@ #include "common_test_utils/data_utils.hpp" #include "blob_factory.hpp" -#include "debug.h" // to allow putting vector into exception string stream #include "ie_blob.h" #include "openvino/core/deprecated.hpp" #include 
"openvino/core/type/element_type_traits.hpp" diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp index 3ec3fa06827e5f..5b4fa456fb7e5c 100644 --- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp +++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp @@ -470,7 +470,8 @@ class CompareSubGraphs { using Result = Comparator::Result; using SubGraphOp = ov::op::util::SubGraphOp; - CompareSubGraphs(Comparator::CmpValues flags) : sub_comparator{flags} {}; + CompareSubGraphs(Comparator::CmpValues flags, float m_abs_threshold, float m_rel_threshold) + : sub_comparator{flags, m_abs_threshold, m_rel_threshold} {}; Result compare(SubGraphOp* sub_lhs, SubGraphOp* sub_rhs, bool compare_in_outs) { const auto lhs_it_no = get_num_iterations(sub_lhs); @@ -706,7 +707,7 @@ Comparator::Result Comparator::compare(const std::shared_ptr& f, cons return msg.empty() ? Result::ok() : Result::error(msg); } else if (should_compare(CmpValues::ACCURACY)) { - auto status = accuracy_check(f_ref, f); + auto status = accuracy_check(f_ref, f, m_abs_threshold, m_rel_threshold); return status.status ? Result::ok() : Result::error(status.message); } else { @@ -729,10 +730,8 @@ Comparator::Result Comparator::compare(ov::Node* node1, ov::Node* node2, std::os const bool subgraph_nodes = subgraph1 && subgraph2; if (subgraph_nodes) { - const auto result = subgraph::detail::CompareSubGraphs{get_comparison_flags()}.compare( - subgraph1, - subgraph2, - should_compare(CmpValues::SUBGRAPH_DESCRIPTORS)); + const auto result = subgraph::detail::CompareSubGraphs{get_comparison_flags(), m_abs_threshold, m_rel_threshold} + .compare(subgraph1, subgraph2, should_compare(CmpValues::SUBGRAPH_DESCRIPTORS)); if (!result.valid) { return result; } @@ -870,7 +869,8 @@ void Comparator::add_nodes_inputs_to_queue(ov::Node* node1, ov::Node* node2) { FunctionsComparator::Result FunctionsComparator::compare(const std::shared_ptr& f, const std::shared_ptr& f_ref) const { - return Comparator(m_comparison_flags).compare(f, f_ref); + auto comparator = Comparator(m_comparison_flags, m_abs_threshold, m_rel_threshold); + return comparator.compare(f, f_ref); } void check_rt_info(const std::shared_ptr& f) { @@ -1017,7 +1017,9 @@ Comparator::Result compare(ov::Node* node1, ov::Node* node2, Comparator::CmpValu } // namespace attributes AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_function, - const std::shared_ptr& cur_function) { + const std::shared_ptr& cur_function, + float abs_threshold, + float rel_threshold) { if (ref_function->is_dynamic() || cur_function->is_dynamic()) { return AccuracyCheckResult{true, ""}; } @@ -1039,7 +1041,7 @@ AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_functio IE_ASSERT(ref_outputs.size() == outputs.size()); for (int i = 0; i < ref_outputs.size(); i++) { - ov::test::utils::compare(ref_outputs[i], outputs[i], 5e-4, 1e-3); + ov::test::utils::compare(ref_outputs[i], outputs[i], abs_threshold, rel_threshold); } } catch (const std::runtime_error& re) { return AccuracyCheckResult{false, re.what()}; diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp index 2377ad37aaa2f4..7743d4b9098455 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp @@ -218,6 +218,46 @@ ov::runtime::Tensor 
     return tensor;
 }

+ov::runtime::Tensor create_and_fill_tensor_real_distribution(const ov::element::Type element_type,
+                                                             const ov::Shape& shape,
+                                                             const float min,
+                                                             const float max,
+                                                             const int seed) {
+    auto tensor = ov::runtime::Tensor{element_type, shape};
+#define CASE(X)                                                                                   \
+    case X:                                                                                       \
+        fill_data_ptr_real_random_float(tensor.data<ov::element_type_traits<X>::value_type>(),   \
+                                        shape_size(shape),                                        \
+                                        min,                                                      \
+                                        max,                                                      \
+                                        seed);                                                    \
+        break;
+    switch (element_type) {
+        CASE(ov::element::Type_t::boolean)
+        CASE(ov::element::Type_t::i8)
+        CASE(ov::element::Type_t::i16)
+        CASE(ov::element::Type_t::i32)
+        CASE(ov::element::Type_t::i64)
+        CASE(ov::element::Type_t::u8)
+        CASE(ov::element::Type_t::u16)
+        CASE(ov::element::Type_t::u32)
+        CASE(ov::element::Type_t::u64)
+        CASE(ov::element::Type_t::bf16)
+        CASE(ov::element::Type_t::f16)
+        CASE(ov::element::Type_t::f32)
+        CASE(ov::element::Type_t::f64)
+    case ov::element::Type_t::u1:
+    case ov::element::Type_t::i4:
+    case ov::element::Type_t::u4:
+        fill_data_ptr_real_random_float(static_cast<uint8_t*>(tensor.data()), tensor.get_byte_size(), min, max, seed);
+        break;
+    default:
+        OPENVINO_THROW("Unsupported element type: ", element_type);
+    }
+#undef CASE
+    return tensor;
+}
+
 ov::runtime::Tensor create_and_fill_tensor_consistently(const ov::element::Type element_type,
                                                         const ov::Shape& shape,
                                                         const uint32_t range,
@@ -352,18 +392,22 @@ void compare(const ov::Tensor& expected,
     for (size_t i = 0; i < shape_size_cnt; ++i) {
         double expected_value = expected_data[i];
         double actual_value = actual_data[i];
+        if (std::isnan(expected_value) && std::isnan(actual_value))
+            continue;
         if (std::isnan(expected_value)) {
             std::ostringstream out_stream;
-            out_stream << "Expected value is NAN on coordinate: " << i;
+            out_stream << "Expected value is NAN but Actual value is not on coordinate: " << i;
             throw std::runtime_error(out_stream.str());
         }
         if (std::isnan(actual_value)) {
             std::ostringstream out_stream;
-            out_stream << "Actual value is NAN on coordinate: " << i;
+            out_stream << "Actual value is NAN but Expected value is not on coordinate: " << i;
             throw std::runtime_error(out_stream.str());
         }
+
         double abs = std::fabs(expected_value - actual_value);
         double rel = expected_value ? (abs / std::fabs(expected_value)) : abs;
+
         abs_error.update(abs, i);
         rel_error.update(rel, i);
     }
diff --git a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
index 18af2ec243a1b3..f3260de55abdf0 100644
--- a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
+++ b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
@@ -71,6 +71,7 @@ void TransformationTestsF::TearDown() {
     if (acc_enabled) {
         OPENVINO_ASSERT(cloned_function != nullptr, "Accuracy cannot be checked.
Cloned Model is not initialized."); auto acc_comparator = FunctionsComparator::no_default(); + acc_comparator.set_accuracy_thresholds(m_abs_threshold, m_rel_threshold); acc_comparator.enable(FunctionsComparator::CmpValues::ACCURACY); auto res = acc_comparator.compare(model, cloned_function); ASSERT_TRUE(res.valid) << res.message; diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp index ceb4ee4aacd072..ea62ed52aaab99 100644 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp +++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp @@ -16,7 +16,6 @@ #include "blob_transform.hpp" #include "common_test_utils/data_utils.hpp" #include "common_test_utils/test_constants.hpp" -#include "ie_compound_blob.h" #include "ie_ngraph_utils.hpp" #include "openvino/runtime/common.hpp" #include "precision_utils.h" @@ -675,7 +674,6 @@ inline short reducePrecisionBitwiseS(const float in) { enum class BlobType { Memory, - Batched, Compound, Remote, }; @@ -684,10 +682,6 @@ inline std::ostream& operator<<(std::ostream& os, BlobType type) { switch (type) { case BlobType::Memory: return os << "Memory"; - case BlobType::Batched: - return os << "Batched"; - case BlobType::Compound: - return os << "Compound"; case BlobType::Remote: return os << "Remote"; default: @@ -695,32 +689,6 @@ inline std::ostream& operator<<(std::ostream& os, BlobType type) { } } -inline InferenceEngine::Blob::Ptr createBlobByType(const InferenceEngine::TensorDesc& td, BlobType blobType) { - switch (blobType) { - case BlobType::Memory: - return createAndFillBlob(td); - case BlobType::Batched: - case BlobType::Compound: { - auto dims = td.getDims(); - const size_t subBlobsNum = dims.front(); - dims[0] = 1; - std::vector subBlobs; - InferenceEngine::TensorDesc subBlobDesc(td.getPrecision(), dims, td.getLayout()); - for (size_t i = 0; i < subBlobsNum; i++) { - subBlobs.push_back(createAndFillBlob(subBlobDesc)); - } - return blobType == BlobType::Batched - ? 
InferenceEngine::make_shared_blob(subBlobs) - : InferenceEngine::make_shared_blob(subBlobs); - } - // TODO: ocl + remote - // case BlobType::Remote: - // return InferenceEngine::as(createAndFillBlob(td)); - default: - IE_THROW() << "Test does not support the blob kind"; - } -} - inline bool checkLayout(InferenceEngine::Layout layout, const std::vector& inputShapes) { bool check = false; switch (layout) { diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp index bdecdd2079a5a9..20ebb92ed0d10e 100644 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp +++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/plugin_cache.hpp @@ -4,11 +4,16 @@ #pragma once -#include #include #include #include +namespace InferenceEngine { + +class Core; + +} // namespace InferenceEngine + class PluginCache { public: std::shared_ptr ie(const std::string& deviceToCheck = std::string()); diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv index 2a7b644c116539..b06fa77c49833b 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv @@ -3,6 +3,7 @@ ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperti ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:},1.0 ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:},1.0 ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_,1.0 +ov_plugin_mandatory/OVClassModelTestP.ImportModelWithNullContextThrows/0,1 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3,1.0 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2,1.0 ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1,1.0 diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv index 1cd7e681a499f4..e67b2c0fa73f01 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv @@ -209,3 +209,4 @@ conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dyn conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=(),0.000352708 conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=(),6.62629e-06 
conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=(),2.89071e-06 +conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=dynamic_IR=e255ef2321233444ce6e4fdeb513a9b271987457aa9bd456948b64f589de1e2b_Device=CPU_Config=(),9.4639279043362649e-05 diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py index c26bf8cd42b309..086c4f59cfb831 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py @@ -87,6 +87,7 @@ def aggregate_test_results(aggregated_results: SubElement, xml_reports: list, if aggregated_device_results is None: aggregated_results.append(xml_device_entry) aggregated_device_results = aggregated_results.find(device_name) + break else: aggregated_device_results.append(xml_results_entry) continue diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py index 454dcff7ca0920..0fecd2990f4e03 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py @@ -1,6 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import math import xml.etree.ElementTree as ET from . import conformance_utils @@ -43,9 +44,9 @@ def update_passrates(results: ET.SubElement, rel_weights={}): rel_all_tests = rel_all_tests_actual if rel_all_tests_expected is None else rel_all_tests_expected k = 1 if rel_all_tests_expected is None else round(rel_all_tests_actual / rel_all_tests_expected) rel_passrate = float(rel_passed_tests * 100 / (k * rel_all_tests)) if rel_all_tests != None and rel_all_tests != 0 else 0 - op.set("passrate", "%.2f"%passrate) + op.set("passrate", f"{math.floor(passrate * 100) / 100}") if rel_all_tests != None and rel_passed_tests != None: - op.set("relative_passrate", "%.2f"%rel_passrate) + op.set("relative_passrate", f"{math.floor(rel_passrate * 100) / 100}") diff --git a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp index 3db56da99c3a0e..9c7e34af62ddc2 100644 --- a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp +++ b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp @@ -7,12 +7,13 @@ #include #include -#include #include #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_constants.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" +#include "ie_core.hpp" +#include "ie_plugin_config.hpp" #include "openvino/util/file_util.hpp" namespace { diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp index 08283dc81fb774..e8a6c9a7a4c04e 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp @@ -28,7 +28,6 @@ class MockIExecutableNetworkInternal : public 
IExecutableNetworkInternal { MOCK_METHOD1(SetConfig, void(const std::map& config)); MOCK_CONST_METHOD1(GetConfig, Parameter(const std::string& name)); MOCK_CONST_METHOD1(GetMetric, Parameter(const std::string& name)); - MOCK_CONST_METHOD0(GetContext, std::shared_ptr(void)); void WrapOstreamExport(std::ostream& networkModel) { IExecutableNetworkInternal::Export(networkModel); } diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp index 11da8a6ea42fd9..03307ae842818d 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp @@ -21,7 +21,6 @@ class MockIInferRequestInternal : public InferenceEngine::IInferRequestInternal MOCK_CONST_METHOD0(GetPerformanceCounts, std::map()); MOCK_METHOD2(SetBlob, void(const std::string&, const InferenceEngine::Blob::Ptr&)); MOCK_METHOD1(GetBlob, InferenceEngine::Blob::Ptr(const std::string&)); - MOCK_CONST_METHOD1(GetPreProcess, const InferenceEngine::PreProcessInfo&(const std::string&)); MOCK_METHOD1(SetCallback, void(std::function)); MOCK_METHOD0(QueryState, std::vector()); MOCK_METHOD0(Cancel, void()); diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp index 958ff721568778..c5df4baefb87dd 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp @@ -39,22 +39,10 @@ class MockIInferencePlugin : public InferenceEngine::IInferencePlugin { MOCK_CONST_METHOD2(GetMetric, InferenceEngine::Parameter(const std::string&, const std::map&)); - MOCK_METHOD1(CreateContext, std::shared_ptr(const InferenceEngine::ParamMap&)); - MOCK_METHOD1(GetDefaultContext, std::shared_ptr(const InferenceEngine::ParamMap&)); - MOCK_METHOD3(LoadNetwork, - std::shared_ptr( - const InferenceEngine::CNNNetwork&, - const std::map&, - const std::shared_ptr&)); MOCK_METHOD2( ImportNetwork, std::shared_ptr(std::istream&, const std::map&)); - MOCK_METHOD3(ImportNetwork, - std::shared_ptr( - std::istream&, - const std::shared_ptr&, - const std::map&)); MOCK_CONST_METHOD2(QueryNetwork, InferenceEngine::QueryNetworkResult(const InferenceEngine::CNNNetwork&, const std::map&)); diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp b/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp index 7b0e59356958f9..c3a2b428acbfe2 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_iexecutable_network.hpp @@ -40,7 +40,6 @@ class MockIExecutableNetwork : public IExecutableNetwork { GetMetric, (const std::string& name, Parameter& result, ResponseDesc* resp), (const, noexcept)); - MOCK_METHOD(StatusCode, GetContext, (RemoteContext::Ptr & pContext, ResponseDesc* resp), (const, noexcept)); }; IE_SUPPRESS_DEPRECATED_END diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp b/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp index 48e42d3f9c4e4f..2677416523e799 100644 --- 
a/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_iinfer_request.hpp @@ -33,7 +33,6 @@ class MockIInferRequest : public IInferRequest { ((std::map&), ResponseDesc*), (const, noexcept)); MOCK_METHOD(StatusCode, GetBlob, (const char*, Blob::Ptr&, ResponseDesc*), (noexcept)); - MOCK_METHOD(StatusCode, GetPreProcess, (const char*, const PreProcessInfo**, ResponseDesc*), (const, noexcept)); MOCK_METHOD(StatusCode, SetBlob, (const char*, const Blob::Ptr&, ResponseDesc*), (noexcept)); MOCK_METHOD(StatusCode, Cancel, (ResponseDesc*), (noexcept)); }; diff --git a/tests/constraints.txt b/tests/constraints.txt index 4e3455dd1b8b27..7a32f3987fa5bb 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -1,5 +1,5 @@ numpy>=1.16.6,<1.27 -attrs==23.1.0 +attrs==23.2.0 distro==1.8.0 h5py>=3.1.0 Jinja2>=2.11.2 diff --git a/tests/layer_tests/common/tf_layer_test_class.py b/tests/layer_tests/common/tf_layer_test_class.py index ab3289d8152b3a..85e703dc95c841 100644 --- a/tests/layer_tests/common/tf_layer_test_class.py +++ b/tests/layer_tests/common/tf_layer_test_class.py @@ -32,6 +32,9 @@ def get_tf_results(self, inputs_dict, model_path): graph_summary = summarize_graph(model_path=model_path) outputs_list = graph_summary["outputs"] + fw_outputs_list = [out + ":0" for out in outputs_list] + if self.use_new_frontend: + outputs_list = fw_outputs_list tf.compat.v1.reset_default_graph() @@ -42,7 +45,7 @@ def get_tf_results(self, inputs_dict, model_path): sess.graph.as_default() tf.compat.v1.import_graph_def(graph_def, name='') - tf_res = sess.run([out + ":0" for out in outputs_list], inputs_dict) + tf_res = sess.run(fw_outputs_list, inputs_dict) result = dict() for i, output in enumerate(outputs_list): diff --git a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py index fab06da4d3ac5c..ea1747b13c268d 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py +++ b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py @@ -71,9 +71,9 @@ def create_tf_model_single_input_output(tmp_dir): return save_to_pb(tf_net, tmp_dir) test_data = [ - {'params_test': {'output': ["Sigmoid_0", "Sigmoid_2"]}, + {'params_test': {'output': ["Sigmoid_0:0", "Sigmoid_2:0"]}, 'params_ref': {'output': "Sigmoid_0,Sigmoid_2"}}, - {'params_test': {'output': ["Sigmoid_0"]}, + {'params_test': {'output': ["Sigmoid_0:0"]}, 'params_ref': {'output': "Sigmoid_0"}}, {'params_test': {'input': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]]}, 'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Input3'}}, diff --git a/tests/layer_tests/ovc_python_api_tests/test_tf.py b/tests/layer_tests/ovc_python_api_tests/test_tf.py index 179de425284416..5e3b0e5f5b2d68 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_tf.py +++ b/tests/layer_tests/ovc_python_api_tests/test_tf.py @@ -1,16 +1,20 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 - +import os +import tempfile import unittest +from pathlib import Path import numpy as np import openvino.runtime as ov import pytest +import tensorflow as tf from openvino.runtime import PartialShape, Model, Dimension -from common.mo_convert_test_class import CommonMOConvertTest +from common import constants from common.layer_test_class import CommonLayerTest -import tensorflow as tf +from common.mo_convert_test_class import CommonMOConvertTest 
+from common.tf_layer_test_class import save_to_pb def create_tf_graph_def(tmp_dir): @@ -1068,4 +1072,136 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, ref_model = params['ref_model'] test_params.update({'input_model': fw_model}) - self._test_by_ref_graph(temp_dir, test_params, ref_model, compare_tensor_names=False) \ No newline at end of file + self._test_by_ref_graph(temp_dir, test_params, ref_model, compare_tensor_names=False) + + +class TestOutputTensorName(unittest.TestCase): + @staticmethod + def create_keras_model_with_named_output(): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + input_names = ["Input1", "Input2"] + input_shape = [1, 2, 3] + + x1 = tf.keras.Input(shape=input_shape, name=input_names[0]) + x2 = tf.keras.Input(shape=input_shape, name=input_names[1]) + y = tf.nn.sigmoid(tf.nn.relu(x1 + x2)) + keras_net = tf.keras.Model(inputs=[x1, x2], outputs=[{"output": y}]) + keras_net.output_names[0] = "output" + + return keras_net + + + @pytest.mark.nightly + @pytest.mark.precommit + def test_tf1_from_file_single_tensor_name(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + Path(constants.out_path).mkdir(parents=True, exist_ok=True) + tmp_dir = tempfile.TemporaryDirectory(dir=constants.out_path).name + + from openvino import convert_model + + model, _, _ = create_tf_graph_def(None) + path = save_to_pb(model, tmp_dir) + + ov_model = convert_model(path) + out_tensors = ov_model.outputs[0].get_names() + + assert len(out_tensors) == 1 + assert list(out_tensors)[0] == "Sigmoid:0" + + out_tensor_name = list(out_tensors)[0] + + ov_model = convert_model(path, output=out_tensor_name) + out_tensors = ov_model.outputs[0].get_names() + + assert len(out_tensors) == 1 + assert list(out_tensors)[0] == "Sigmoid:0" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_tf1_from_memory_single_tensor_name(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + from openvino.tools.ovc import convert_model + + model, _, _ = create_tf_graph_def(None) + + ov_model = convert_model(model) + out_tensors = ov_model.outputs[0].get_names() + + assert len(out_tensors) == 1 + assert list(out_tensors)[0] == "Sigmoid:0" + + out_tensor_name = list(out_tensors)[0] + + ov_model = convert_model(model, output=out_tensor_name) + out_tensors = ov_model.outputs[0].get_names() + + assert len(out_tensors) == 1 + assert list(out_tensors)[0] == "Sigmoid:0" + + + @pytest.mark.nightly + @pytest.mark.precommit + def test_tf2_from_file_single_tensor_name(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + Path(constants.out_path).mkdir(parents=True, exist_ok=True) + tmp_dir = tempfile.TemporaryDirectory(dir=constants.out_path).name + model_path = tmp_dir + os.sep + "model" + + from openvino import convert_model + + model = TestOutputTensorName.create_keras_model_with_named_output() + tf.saved_model.save(model, model_path) + + ov_model = convert_model(model_path) + for output in ov_model.outputs: + out_tensors = output.get_names() + + assert len(out_tensors) == 1 + out_tensor = list(out_tensors)[0] + assert out_tensor == "output" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_tf2_from_memory_single_tensor_name(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + from openvino.tools.ovc import convert_model + + model = TestOutputTensorName.create_keras_model_with_named_output() + + ov_model = convert_model(model) + for output in 
ov_model.outputs: + out_tensors = output.get_names() + + assert len(out_tensors) == 1 + out_tensor = list(out_tensors)[0] + assert out_tensor == "output" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_tf1_output_with_identity(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + from openvino.tools.ovc import convert_model + + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(tf.float32, [2], 'x') + y = tf.compat.v1.placeholder(tf.float32, [2], 'y') + add = tf.add(x, y, name="add") + result1 = tf.identity(add, name="result1") + result2 = tf.identity(add, name="result2") + + tf.compat.v1.global_variables_initializer() + model = sess.graph_def + + ov_model = convert_model(model) + + assert ov_model.outputs[0].get_names() == {"result1:0", "result2:0", "add:0"} + assert ov_model.outputs[1].get_names() == {"result1:0", "result2:0", "add:0"} diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index f76c7b1fa97ba8..7c0051c0feea11 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -295,3 +295,26 @@ def add_ext(front_end, stat): assert tel_stat["send_event"] == 2 assert tel_stat["send_error"] == 0 assert tel_stat["send_stack_trace"] == 0 + + +def test_state_dict_names(): + from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder + + import torchvision + model = torch.hub.load("pytorch/vision", "resnet18", weights="DEFAULT") + decoder = TorchScriptPythonDecoder( + model, example_input=(torch.randn(1, 3, 224, 224),)) + fe_manager = FrontEndManager() + fe = fe_manager.load_by_framework("pytorch") + im = fe.load(decoder) + om = fe.convert(im) + state_dict_keys = set( + name for name in model.state_dict().keys() if "_tracked" not in name) + common_names = set() + for n in om.get_ops(): + if "Constant" in n.get_type_name(): + for name in n.output(0).names: + matches = [k for k in state_dict_keys if name.startswith("self." 
+ k)] + if (len(matches) > 0): + common_names.update(matches) + assert state_dict_keys == common_names, f"Not all names exist:\nstate_dict:{state_dict_keys}" diff --git a/tests/layer_tests/pytorch_tests/test_gcd.py b/tests/layer_tests/pytorch_tests/test_gcd.py new file mode 100644 index 00000000000000..a1b816b853ead4 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_gcd.py @@ -0,0 +1,124 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestGcd(PytorchLayerTest): + def _prepare_input(self): + return self.input_data + + def create_model_tensor_input(self): + class aten_gcd_tensor(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward(self, tensor_a, tensor_b): + return torch.gcd(tensor_a, tensor_b) + + ref_net = None + + return aten_gcd_tensor(), ref_net, "aten::gcd" + + def create_model_int_input(self): + class aten_gcd_int(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward(self, int_a: int, int_b: int): + return torch.tensor(torch.gcd(int_a, int_b)) + + ref_net = None + + return aten_gcd_int(), ref_net, "aten::gcd" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int32), np.array(17, dtype=np.int32)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int32), + np.array([121, 2, 16, 0, 1, 8], dtype=np.int32), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int64(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int64), np.array(17, dtype=np.int64)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor64(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int64), + np.array([121, 2, 16, 0, 1, 8], dtype=np.int64), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_int_diff_dtypes(self, ie_device, precision, ir_version): + self.input_data = (np.array(11, dtype=np.int64), np.array(17, dtype=np.int32)) + self._test( + *self.create_model_int_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_gcd_tensor_diff_dtypes(self, ie_device, precision, ir_version): + self.input_data = ( + np.array([14, 4, 12, 10, 3, 0], dtype=np.int64), + np.array([121, 2, 16, 0, 1, 8], dtype=np.int32), + ) + self._test( + *self.create_model_tensor_input(), + ie_device, + precision, + ir_version, + use_convert_model=True, + trace_model=True + ) diff --git a/tests/layer_tests/pytorch_tests/test_index_add.py b/tests/layer_tests/pytorch_tests/test_index_add.py new file mode 100644 
index 00000000000000..49ce03ab701214 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_index_add.py @@ -0,0 +1,66 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch +from pytorch_layer_test_class import PytorchLayerTest + + +class TestIndexAdd(PytorchLayerTest): + def _prepare_input(self, dtype, out): + if not out: + return (np.ones((3, 3)).astype(dtype),) + return (np.ones((3, 3)).astype(dtype), np.zeros((3, 3)).astype(dtype)) + + def create_model(self, dim, index, src, mode, alpha): + class aten_index_add(torch.nn.Module): + def __init__(self, dim, index, src, mode, alpha): + super(aten_index_add, self).__init__() + self.dim = dim + self.index = index + self.src = src + self.inplace = mode == "inplace" + self.alpha = alpha + if mode == "out": + self.forward = self.forward_out + + def forward(self, x: torch.Tensor): + index = self.index + if self.inplace: + return x.index_add_(self.dim, index, self.src, alpha=self.alpha), x + else: + return torch.index_add(x, self.dim, index, self.src, alpha=self.alpha), x + + def forward_out(self, x: torch.Tensor, out): + index = self.index + return torch.index_add(x, self.dim, index, self.src, out=out, alpha=self.alpha), out + + op_name = "aten::index_add_" if mode == "inplace" else "aten::index_add" + + return aten_index_add(dim, index, src, mode, alpha), None, op_name + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("dim", [0, 1, -1]) + @pytest.mark.parametrize( + "index", + [ + torch.tensor([0, 2, 1]), + torch.tensor([0, 0, 0]) + ], + ) + @pytest.mark.parametrize("src", [torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])]) + @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"]) + @pytest.mark.parametrize("mode", ["inplace", "out", "default"]) + @pytest.mark.parametrize("alpha", [1, -1, 0.5, 0.25]) + def test_scatter_reduce(self, dim, index, src, dtype, mode, alpha, ie_device, precision, ir_version): + if isinstance(src, torch.Tensor): + src = src.to(getattr(torch, dtype)) + self._test( + *self.create_model(dim, index, src, mode, alpha), + ie_device, + precision, + ir_version, + kwargs_to_prepare_input={"dtype": dtype, "out": mode == "out"}, + ) diff --git a/tests/layer_tests/pytorch_tests/test_index_put_.py b/tests/layer_tests/pytorch_tests/test_index_put_.py index e367d2a6d6805d..dc185596a6812a 100644 --- a/tests/layer_tests/pytorch_tests/test_index_put_.py +++ b/tests/layer_tests/pytorch_tests/test_index_put_.py @@ -178,7 +178,6 @@ def test_nonzero_index_put_(self, ie_device, precision, ir_version, input_data, self.indices_0 = indices[0] self.indices_1 = indices[1] self._test(*self.create_model(accumulate), ie_device, precision, ir_version, trace_model=True, use_convert_model=True) - @pytest.mark.nightly @pytest.mark.precommit @@ -190,10 +189,9 @@ def test_nonzero_index_put_different_ranks(self, ie_device, precision, ir_versio self._test(*self.create_model(False), ie_device, precision, ir_version, trace_model=True, use_convert_model=True) - class TestMask_IndexPut(PytorchLayerTest): def _prepare_input(self): - return (np.random.randn(100, 5).astype(np.float32),np.random.randn(100, 5).astype(np.float32)) + return (np.random.randn(100, 5).astype(np.float32), np.random.randn(100, 5).astype(np.float32)) def create_model(self): class aten_index_put_mask(torch.nn.Module): @@ -208,4 +206,28 @@ def forward(self, x, y): @pytest.mark.nightly @pytest.mark.precommit def test_nonzero_index_put_(self, ie_device, precision, 
ir_version):
-        self._test(*self.create_model(), ie_device, precision, ir_version, trace_model=True, use_convert_model=True)
+        self._test(*self.create_model(), ie_device, precision,
+                   ir_version, trace_model=True, use_convert_model=True)
+
+
+class TestMaskKosmos_IndexPut(PytorchLayerTest):
+    def _prepare_input(self):
+        mask = np.random.randint(0, 2, [1, 30]).astype(np.bool_)
+        num = mask.sum()
+        return (np.random.randn(1, 30, 50).astype(np.float32), mask.astype(np.int32), np.random.randn(num, 50).astype(np.float32))
+
+    def create_model(self):
+        class aten_index_put_mask(torch.nn.Module):
+            def forward(self, x, y, z):
+                x[y.to(dtype=torch.bool)] = z
+                return x
+
+        ref_net = None
+
+        return aten_index_put_mask(), ref_net, "aten::index_put_"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_nonzero_kosmos_index_put_(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision,
+                   ir_version, trace_model=True, use_convert_model=True)
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
index 39fb2c62fc63b4..cb2e2df2648266 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
@@ -13,6 +13,11 @@
 # Documentation: https://www.tensorflow.org/api_docs/python/tf/raw_ops/ArgMin
 # https://www.tensorflow.org/api_docs/python/tf/raw_ops/ArgMax

+OPS = {
+    'tf.raw_ops.ArgMax': tf.raw_ops.ArgMax,
+    'tf.raw_ops.ArgMin': tf.raw_ops.ArgMin
+}
+
 class TestArgMinMax(CommonTFLayerTest):
     def _prepare_input(self, inputs_info):
         assert 'input' in inputs_info
@@ -41,24 +46,24 @@ def create_argmin_max_net(self, input_shape, dimension, input_type, output_type,
         return tf_net, ref_net

     test_data = [
-        dict(input_shape=[20], dimension=0),
-        dict(input_shape=[20, 30], dimension=1),
-        dict(input_shape=[2, 30, 3, 4], dimension=2),
+        [[20], 0],
+        [[20, 30], 1],
+        [[2, 30, 3, 4], 2],
     ]

-    @pytest.mark.parametrize("params", test_data)
+    @pytest.mark.parametrize("input_shape, dimension", test_data)
     @pytest.mark.parametrize("input_type", [np.float32, np.int32])
     @pytest.mark.parametrize("output_type", [tf.int32, tf.int64])
-    @pytest.mark.parametrize("op_type", [tf.raw_ops.ArgMax, tf.raw_ops.ArgMin])
+    @pytest.mark.parametrize("op_type", ['tf.raw_ops.ArgMax', 'tf.raw_ops.ArgMin'])
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
     @pytest.mark.xfail(condition=platform.system() == 'Linux' and platform.machine() in ['arm', 'armv7l', 'aarch64', 'arm64', 'ARM64'],
                        reason='Ticket - 126314')
-    def test_argmin_max_net(self, params, input_type, output_type, op_type, ie_device, precision, ir_version, temp_dir,
-                            use_new_frontend):
+    def test_argmin_max_net(self, input_shape, dimension, input_type, output_type, op_type, ie_device, precision, ir_version, temp_dir, use_new_frontend):
+        params = dict(input_shape=input_shape, dimension=dimension)
         self._test(*self.create_argmin_max_net(**params, input_type=input_type,
-                                               output_type=output_type, op_type=op_type),
+                                               output_type=output_type, op_type=OPS[op_type]),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py
index a8f52841929f34..7382f780197924 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py
@@ -7,6 +7,11 @@
 from common.tf_layer_test_class import CommonTFLayerTest

+OPS = {
+    "tf.raw_ops.CheckNumerics": tf.raw_ops.CheckNumerics,
+    "tf.raw_ops.CheckNumericsV2": tf.raw_ops.CheckNumericsV2
+}
+
 class TestCheckNumerics(CommonTFLayerTest):
     def _prepare_input(self, inputs_info):
         assert 'x' in inputs_info
@@ -33,15 +38,16 @@ def create_check_numerics_net(self, input_shape, input_type, op):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[2, 6], input_type=np.float32, op=tf.raw_ops.CheckNumerics),
-        dict(input_shape=[3, 4, 5], input_type=np.float32, op=tf.raw_ops.CheckNumericsV2),
+        [[2, 6], np.float32, 'tf.raw_ops.CheckNumerics'],
+        [[3, 4, 5], np.float32, 'tf.raw_ops.CheckNumericsV2'],
     ]

-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape, input_type, op", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_check_numerics_basic(self, params, ie_device, precision, ir_version, temp_dir,
+    def test_check_numerics_basic(self, input_shape, input_type, op, ie_device, precision, ir_version, temp_dir,
                                   use_new_frontend):
+        params = dict(input_shape=input_shape, input_type=input_type, op=OPS[op])
         self._test(*self.create_check_numerics_net(**params),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py
index 21dafffbf58719..0b63324cc91cef 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py
@@ -9,6 +9,21 @@
 from common.tf_layer_test_class import CommonTFLayerTest

+OPS = {
+    'tf.raw_ops.IRFFT': tf.raw_ops.IRFFT,
+    'tf.raw_ops.IRFFT2D': tf.raw_ops.IRFFT2D,
+    'tf.raw_ops.IRFFT3D': tf.raw_ops.IRFFT3D,
+    'tf.raw_ops.FFT': tf.raw_ops.FFT,
+    'tf.raw_ops.FFT2D': tf.raw_ops.FFT2D,
+    'tf.raw_ops.FFT3D': tf.raw_ops.FFT3D,
+    'tf.raw_ops.IFFT': tf.raw_ops.IFFT,
+    'tf.raw_ops.IFFT2D': tf.raw_ops.IFFT2D,
+    'tf.raw_ops.IFFT3D': tf.raw_ops.IFFT3D,
+    'tf.raw_ops.RFFT': tf.raw_ops.RFFT,
+    'tf.raw_ops.RFFT2D': tf.raw_ops.RFFT2D,
+    'tf.raw_ops.RFFT3D': tf.raw_ops.RFFT3D
+}
+
 class TestComplexFFT(CommonTFLayerTest):
     def _prepare_input(self, inputs_info):
         rng = np.random.default_rng()
@@ -41,30 +56,31 @@ def create_complex_fft_net(self, input_shape, shift_roll, axis_roll, fft_op):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[1, 50, 2], shift_roll=[10, 1], axis_roll=[-2, -1]),
-        dict(input_shape=[4, 20, 3], shift_roll=[2, 10], axis_roll=[0, 1]),
-        dict(input_shape=[1, 50, 50, 2], shift_roll=[10, 20], axis_roll=[-2, -1]),
-        dict(input_shape=[4, 20, 30, 3], shift_roll=[2, 10], axis_roll=[0, 1]),
-        dict(input_shape=[1, 50, 50, 30, 2], shift_roll=[10, 20, 4], axis_roll=[-3, -2, -1]),
-        dict(input_shape=[4, 20, 30, 10, 3], shift_roll=[2, 10], axis_roll=[1, 2]),
+        [[1, 50, 2], [10, 1], [-2, -1]],
+        [[4, 20, 3], [2, 10], [0, 1]],
+        [[1, 50, 50, 2], [10, 20], [-2, -1]],
+        [[4, 20, 30, 3], [2, 10], [0, 1]],
+        [[1, 50, 50, 30, 2], [10, 20, 4], [-3, -2, -1]],
+        [[4, 20, 30, 10, 3], [2, 10], [1, 2]],
     ]

     @pytest.mark.parametrize("fft_op", [
-        tf.raw_ops.FFT, tf.raw_ops.FFT2D, tf.raw_ops.FFT3D,
-        tf.raw_ops.IFFT, tf.raw_ops.IFFT2D, tf.raw_ops.IFFT3D
+        "tf.raw_ops.FFT", "tf.raw_ops.FFT2D", "tf.raw_ops.FFT3D",
+        "tf.raw_ops.IFFT", "tf.raw_ops.IFFT2D", "tf.raw_ops.IFFT3D"
     ])
-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape, shift_roll, axis_roll", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
     @pytest.mark.xfail(condition=platform.system() == 'Linux' and platform.machine() in ['arm', 'armv7l', 'aarch64', 'arm64', 'ARM64'],
                        reason='Ticket - 126314')
-    def test_complex_fft_basic(self, params, fft_op,
+    def test_complex_fft_basic(self, input_shape, shift_roll, axis_roll, fft_op,
                                ie_device, precision, ir_version, temp_dir, use_new_frontend):
+        params = dict(input_shape=input_shape, shift_roll=shift_roll, axis_roll=axis_roll)
         self._test(
-            *self.create_complex_fft_net(**params, fft_op=fft_op),
+            *self.create_complex_fft_net(**params, fft_op=OPS[fft_op]),
             ie_device, precision, ir_version, temp_dir=temp_dir,
             use_new_frontend=use_new_frontend, custom_eps=1e-2)
@@ -95,20 +111,19 @@ def create_complex_abs_net(self, input_shape):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[]),
-        dict(input_shape=[2]),
-        dict(input_shape=[1, 3]),
-        dict(input_shape=[2, 3, 4]),
-        dict(input_shape=[3, 4, 5, 6]),
+        [],
+        [2],
+        [1, 3],
+        [2, 3, 4],
+        [3, 4, 5, 6],
     ]
-
-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_complex_abs_basic(self, params, ie_device, precision, ir_version, temp_dir,
+    def test_complex_abs_basic(self, input_shape, ie_device, precision, ir_version, temp_dir,
                                use_new_frontend):
         self._test(
-            *self.create_complex_abs_net(**params),
+            *self.create_complex_abs_net(input_shape),
             ie_device, precision, ir_version, temp_dir=temp_dir,
             use_new_frontend=use_new_frontend)
@@ -138,18 +153,19 @@ def create_complex_rfft_net(self, input_shape, fft_length, rfft_op):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[1, 3, 20], fft_length=[10], rfft_op=tf.raw_ops.RFFT),
-        dict(input_shape=[1, 3, 20], fft_length=[20], rfft_op=tf.raw_ops.RFFT),
-        dict(input_shape=[1, 3, 20, 40], fft_length=[20, 10], rfft_op=tf.raw_ops.RFFT2D),
-        dict(input_shape=[1, 3, 20, 40], fft_length=[10, 40], rfft_op=tf.raw_ops.RFFT2D),
-        dict(input_shape=[1, 2, 10, 20, 5], fft_length=[2, 5, 3], rfft_op=tf.raw_ops.RFFT3D),
+        [[1, 3, 20], [10], 'tf.raw_ops.RFFT'],
+        [[1, 3, 20], [20], 'tf.raw_ops.RFFT'],
+        [[1, 3, 20, 40], [20, 10], 'tf.raw_ops.RFFT2D'],
+        [[1, 3, 20, 40], [10, 40], 'tf.raw_ops.RFFT2D'],
+        [[1, 2, 10, 20, 5], [2, 5, 3], 'tf.raw_ops.RFFT3D']
     ]

-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape, fft_length, rfft_op", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_complex_rfft_basic(self, params, ie_device, precision, ir_version, temp_dir,
+    def test_complex_rfft_basic(self, input_shape, fft_length, rfft_op, ie_device, precision, ir_version, temp_dir,
                                 use_new_frontend):
+        params = dict(input_shape=input_shape, fft_length=fft_length, rfft_op=OPS[rfft_op])
         self._test(
             *self.create_complex_rfft_net(**params),
             ie_device, precision, ir_version, temp_dir=temp_dir,
@@ -183,19 +199,19 @@ def create_complex_irfft_net(self, input_shape, fft_length, irfft_op):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[1, 3, 20], fft_length=[10], irfft_op=tf.raw_ops.IRFFT),
-        dict(input_shape=[1, 3, 20], fft_length=[20], irfft_op=tf.raw_ops.IRFFT),
-        dict(input_shape=[1, 3, 20, 40], fft_length=[20, 10], irfft_op=tf.raw_ops.IRFFT2D),
-        dict(input_shape=[1, 3, 20, 40], fft_length=[10, 40], irfft_op=tf.raw_ops.IRFFT2D),
-        pytest.param(dict(input_shape=[1, 10, 20, 30, 5], fft_length=[2, 3, 4], irfft_op=tf.raw_ops.IRFFT3D),
-                     marks=pytest.mark.xfail(reason="accuracy-issue-124452"))
+        [[1, 3, 20], [10], 'tf.raw_ops.IRFFT'],
+        [[1, 3, 20], [20], 'tf.raw_ops.IRFFT'],
+        [[1, 3, 20, 40], [20, 10], 'tf.raw_ops.IRFFT2D'],
+        [[1, 3, 20, 40], [10, 40], 'tf.raw_ops.IRFFT2D'],
+        pytest.param([1, 10, 20, 30, 5], [2, 3, 4], 'tf.raw_ops.IRFFT3D',
+                     marks=pytest.mark.xfail(reason="accuracy-issue-124452"))
     ]
-
-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape, fft_length, irfft_op", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_complex_irfft_basic(self, params, ie_device, precision, ir_version, temp_dir,
+    def test_complex_irfft_basic(self, input_shape, fft_length, irfft_op, ie_device, precision, ir_version, temp_dir,
                                  use_new_frontend):
+        params = dict(input_shape=input_shape, fft_length=fft_length, irfft_op=OPS[irfft_op])
         self._test(
             *self.create_complex_irfft_net(**params),
             ie_device, precision, ir_version, temp_dir=temp_dir,
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py
index 4937f34f292312..cd5129d7383b0f 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py
@@ -9,6 +9,12 @@
 from common.tf_layer_test_class import CommonTFLayerTest

+OPS = {
+    'tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel': tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel,
+    'tf.raw_ops.FakeQuantWithMinMaxVars': tf.raw_ops.FakeQuantWithMinMaxVars,
+    'tf.raw_ops.FakeQuantWithMinMaxArgs': tf.raw_ops.FakeQuantWithMinMaxArgs
+}
+
 class TestFakeQuantWithMinMaxVars(CommonTFLayerTest):
     def _prepare_input(self, inputs_info):
         # generate elements so that the input tensor may contain repeating elements
@@ -32,38 +38,36 @@ def create_fake_quant_with_min_max_vars_net(self, inputs_shape, min_value, max_v
     test_basic = [
         # test FakeQuantWithMinMaxVars
-        dict(inputs_shape=[2, 6, 4], min_value=-3, max_value=4, num_bits=None, narrow_range=None),
-        dict(inputs_shape=[3, 2, 1, 5], min_value=-4, max_value=5, num_bits=14, narrow_range=True),
-        dict(inputs_shape=[3, 2, 4], min_value=2, max_value=4, num_bits=10, narrow_range=False),
-        dict(inputs_shape=[1, 2, 3], min_value=-6, max_value=-3, num_bits=8, narrow_range=True),
+        [[2, 6, 4], -3, 4, None, None],
+        [[3, 2, 1, 5], -4, 5, 14, True],
+        [[3, 2, 4], 2, 4, 10, False],
+        [[1, 2, 3], -6, -3, 8, True],
     ]

-    @pytest.mark.parametrize("params", test_basic)
+    @pytest.mark.parametrize("inputs_shape, min_value, max_value, num_bits, narrow_range", test_basic)
     @pytest.mark.parametrize("fake_quant_op", [
-        tf.raw_ops.FakeQuantWithMinMaxVars, tf.raw_ops.FakeQuantWithMinMaxArgs
+        'tf.raw_ops.FakeQuantWithMinMaxVars', 'tf.raw_ops.FakeQuantWithMinMaxArgs'
     ])
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
     @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                        reason='Ticket - 122716')
-    def test_fake_quant_with_min_max_vars_basic(self, params, fake_quant_op, ie_device, precision, ir_version, temp_dir,
-                                                use_new_frontend):
-        self._test(*self.create_fake_quant_with_min_max_vars_net(**params, fake_quant_op=fake_quant_op),
+    def test_fake_quant_with_min_max_vars_basic(self, inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op, ie_device, precision, ir_version, temp_dir, use_new_frontend):
+        params = dict(inputs_shape=inputs_shape, min_value=min_value, max_value=max_value, num_bits=num_bits, narrow_range=narrow_range)
+        self._test(*self.create_fake_quant_with_min_max_vars_net(**params, fake_quant_op=OPS[fake_quant_op]),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)

     test_per_channel_basic = [
-        dict(inputs_shape=[2, 6, 4], min_value=[-4, -3, -5, -8], max_value=[4, 7, 9, 5], num_bits=None,
-             narrow_range=None,
-             fake_quant_op=tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel),
+        [[2, 6, 4], [-4, -3, -5, -8], [4, 7, 9, 5], None, None, 'tf.raw_ops.FakeQuantWithMinMaxVarsPerChannel'],
     ]

-    @pytest.mark.parametrize("params", test_per_channel_basic)
+    @pytest.mark.parametrize("inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op", test_per_channel_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
     @pytest.mark.xfail("104822")
-    def test_fake_quant_with_min_max_vars_per_channel_basic(self, params, ie_device, precision, ir_version, temp_dir,
-                                                            use_new_frontend):
+    def test_fake_quant_with_min_max_vars_per_channel_basic(self, inputs_shape, min_value, max_value, num_bits, narrow_range, fake_quant_op, ie_device, precision, ir_version, temp_dir, use_new_frontend):
+        params = dict(inputs_shape=inputs_shape, min_value=min_value, max_value=max_value, num_bits=num_bits, narrow_range=narrow_range, fake_quant_op=OPS[fake_quant_op])
         self._test(*self.create_fake_quant_with_min_max_vars_net(**params),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py
index 7721e31631af8e..382879490e83e0 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py
@@ -6,6 +6,13 @@
 from common.tf_layer_test_class import CommonTFLayerTest

+OPS = {
+    'tf.raw_ops.Identity': tf.raw_ops.Identity,
+    'tf.raw_ops.PreventGradient': tf.raw_ops.PreventGradient,
+    'tf.raw_ops.Snapshot': tf.raw_ops.Snapshot,
+    'tf.raw_ops.StopGradient': tf.raw_ops.StopGradient,
+}
+
 class TestIdentity(CommonTFLayerTest):
     def create_identity_net(self, input_shape, identity_op):
         tf.compat.v1.reset_default_graph()
@@ -22,17 +29,18 @@ def create_identity_net(self, input_shape, identity_op):
         return tf_net, None

     test_data_basic = [
-        dict(input_shape=[2], identity_op=tf.raw_ops.Identity),
-        dict(input_shape=[2, 3], identity_op=tf.raw_ops.PreventGradient),
-        dict(input_shape=[], identity_op=tf.raw_ops.Snapshot),
-        dict(input_shape=[1, 2, 3], identity_op=tf.raw_ops.StopGradient)
+        [[2], 'tf.raw_ops.Identity'],
+        [[2, 3], 'tf.raw_ops.PreventGradient'],
+        [[], 'tf.raw_ops.Snapshot'],
+        [[1, 2, 3], 'tf.raw_ops.StopGradient']
     ]

-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("input_shape, identity_op", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_identity_basic(self, params, ie_device, precision, ir_version, temp_dir,
+    def test_identity_basic(self, input_shape, identity_op, ie_device, precision, ir_version, temp_dir,
                             use_new_frontend):
+        params = dict(input_shape=input_shape, identity_op=OPS[identity_op])
         self._test(*self.create_identity_net(**params),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py
index 8146226129db62..ac1e43651b7c95 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py
@@ -9,6 +9,11 @@
 from common.tf_layer_test_class import CommonTFLayerTest

+OPS = {
+    'tf.raw_ops.ResizeBilinear': tf.raw_ops.ResizeBilinear,
+    'tf.raw_ops.ResizeNearestNeighbor': tf.raw_ops.ResizeNearestNeighbor,
+}
+
 class TestResize(CommonTFLayerTest):
     def _prepare_input(self, inputs_info):
         assert 'images' in inputs_info, "Test error: inputs_info must contain `x`"
@@ -34,38 +39,27 @@ def create_resize_net(self, images_shape, images_type, size_value, align_corners
     test_data_basic = [
         # ResizeBilinear testing
-        dict(images_shape=[1, 30, 30, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear),
-        dict(images_shape=[1, 30, 30, 3], images_type=tf.float64, size_value=[40, 40], align_corners=False,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear),
-        dict(images_shape=[2, 100, 100, 3], images_type=tf.float32, size_value=[40, 40], align_corners=True,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear),
-        dict(images_shape=[2, 10, 10, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False,
-             half_pixel_centers=True, resize_op=tf.raw_ops.ResizeBilinear),
-        dict(images_shape=[2, 40, 40, 3], images_type=tf.uint8, size_value=[10, 10], align_corners=False,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeBilinear),
-        dict(images_shape=[1, 40, 40, 3], images_type=tf.int32, size_value=[10, 10], align_corners=False,
-             half_pixel_centers=True, resize_op=tf.raw_ops.ResizeBilinear),
+        [[1, 30, 30, 3], tf.float32, [40, 40], False, False, 'tf.raw_ops.ResizeBilinear'],
+        [[1, 30, 30, 3], tf.float64, [40, 40], False, False, 'tf.raw_ops.ResizeBilinear'],
+        [[2, 100, 100, 3], tf.float32, [40, 40], True, False, 'tf.raw_ops.ResizeBilinear'],
+        [[2, 10, 10, 3], tf.float32, [40, 40], False, True, 'tf.raw_ops.ResizeBilinear'],
+        [[2, 40, 40, 3], tf.uint8, [10, 10], False, False, 'tf.raw_ops.ResizeBilinear'],
+        [[1, 40, 40, 3], tf.int32, [10, 10], False, True, 'tf.raw_ops.ResizeBilinear'],
         # ResizeNearestNeighbor testing
-        dict(images_shape=[1, 30, 30, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor),
-        dict(images_shape=[2, 100, 100, 3], images_type=tf.float32, size_value=[40, 40], align_corners=True,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor),
-        dict(images_shape=[2, 10, 10, 3], images_type=tf.float32, size_value=[40, 40], align_corners=False,
-             half_pixel_centers=True, resize_op=tf.raw_ops.ResizeNearestNeighbor),
-        dict(images_shape=[2, 40, 40, 3], images_type=tf.uint8, size_value=[10, 10], align_corners=False,
-             half_pixel_centers=False, resize_op=tf.raw_ops.ResizeNearestNeighbor),
-        dict(images_shape=[1, 40, 40, 3], images_type=tf.int32, size_value=[10, 10], align_corners=False,
-             half_pixel_centers=True, resize_op=tf.raw_ops.ResizeNearestNeighbor),
+        [[1, 30, 30, 3], tf.float32, [40, 40], False, False, 'tf.raw_ops.ResizeNearestNeighbor'],
+        [[2, 100, 100, 3], tf.float32, [40, 40], True, False, 'tf.raw_ops.ResizeNearestNeighbor'],
+        [[2, 10, 10, 3], tf.float32, [40, 40], False, True, 'tf.raw_ops.ResizeNearestNeighbor'],
+        [[2, 40, 40, 3], tf.uint8, [10, 10], False, False, 'tf.raw_ops.ResizeNearestNeighbor'],
+        [[1, 40, 40, 3], tf.int32, [10, 10], False, True, 'tf.raw_ops.ResizeNearestNeighbor'],
     ]

-    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.parametrize("images_shape, images_type, size_value, align_corners, half_pixel_centers, resize_op", test_data_basic)
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
     @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                        reason='Ticket - 122716')
-    def test_resize_basic(self, params, ie_device, precision, ir_version, temp_dir,
-                          use_new_frontend):
+    def test_resize_basic(self, images_shape, images_type, size_value, align_corners, half_pixel_centers, resize_op, ie_device, precision, ir_version, temp_dir, use_new_frontend):
+        params = dict(images_shape=images_shape, images_type=images_type, size_value=size_value, align_corners=align_corners, half_pixel_centers=half_pixel_centers, resize_op=OPS[resize_op])
         self._test(*self.create_resize_net(**params),
                    ie_device, precision, ir_version, temp_dir=temp_dir,
                    use_new_frontend=use_new_frontend)
diff --git a/tests/model_hub_tests/models_hub_common/test_convert_model.py b/tests/model_hub_tests/models_hub_common/test_convert_model.py
index ad09380daeb212..40b8a56a997ee3 100644
--- a/tests/model_hub_tests/models_hub_common/test_convert_model.py
+++ b/tests/model_hub_tests/models_hub_common/test_convert_model.py
@@ -8,6 +8,13 @@
 from openvino import convert_model
 from openvino.runtime import Core

+try:
+    # 129480 - remove try-except when openvino-tokenizers wheel is built in OpenVINO GHA Workflow
+    # noinspection PyUnresolvedReferences
+    import openvino_tokenizers  # do not delete, needed for text models
+except:
+    pass
+
 # set seed to have deterministic input data generation
 # to avoid sporadic issues in inference results
 rng = np.random.default_rng(seed=56190)
diff --git a/tests/model_hub_tests/tf_hub_tests/nightly_models b/tests/model_hub_tests/tf_hub_tests/nightly_models
index 758ef889f64c0d..5767c438f23408 100644
--- a/tests/model_hub_tests/tf_hub_tests/nightly_models
+++ b/tests/model_hub_tests/tf_hub_tests/nightly_models
@@ -1,8 +1,8 @@
-universal-sentence-encoder,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/universal-sentence-encoder/versions/2
+universal-sentence-encoder,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/universal-sentence-encoder/versions/2,xfail,128994 127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows StaticRegexReplace StringSplit StringToHashBucketFast
 imagenet/mobilenet_v1_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-224-classification/versions/2
 imagenet/mobilenet_v2_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-classification/versions/2
 universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2
-universal-sentence-encoder-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/large/versions/2
+universal-sentence-encoder-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/large/versions/2,xfail,128994 127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows StaticRegexReplace StringSplit StringToHashBucketFast
 imagenet/mobilenet_v2_075_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/075-224-classification/versions/2
 movenet/singlepose/lightning,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-lightning/versions/4
 imagenet/mobilenet_v1_025_224/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/025-224-classification/versions/2
@@ -13,7 +13,7 @@ nnlm-en-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/
 bert_en_uncased_L-12_H-768_A-12,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/en-uncased-l-12-h-768-a-12/versions/4
 bert_uncased_L-12_H-768_A-12,https://www.kaggle.com/models/google/bert/frameworks/tensorFlow1/variations/uncased-l-12-h-768-a-12/versions/1,xfail,129153 TimeoutError or killed with a signal 11
 elmo,https://www.kaggle.com/models/google/elmo/frameworks/tensorFlow1/variations/elmo/versions/3
-universal-sentence-encoder-multilingual-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-large/versions/2
+universal-sentence-encoder-multilingual-large,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-large/versions/2,xfail,129830 unsupported operations RaggedTensorToSparse SegmentMean SentencepieceOp SentencepieceTokenizeOp
 small_bert/bert_en_uncased_L-4_H-256_A-4,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-4-h-256-a-4/versions/2
 imagenet/resnet_v2_50/feature_vector,https://www.kaggle.com/models/google/resnet-v2/frameworks/tensorFlow2/variations/50-feature-vector/versions/2
 spice,https://www.kaggle.com/models/google/spice/frameworks/tensorFlow1/variations/spice/versions/2,xfail,128817 Model references undeclared parameters
@@ -28,7 +28,7 @@ efficientnet/lite0/feature-vector,https://www.kaggle.com/models/tensorflow/effic
 i3d-kinetics-400,https://www.kaggle.com/models/deepmind/i3d-kinetics/frameworks/tensorFlow1/variations/400/versions/1
 imagenet/mobilenet_v2_035_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/035-224-classification/versions/2
 tf2-preview/gnews-swivel-20dim,https://www.kaggle.com/models/google/gnews-swivel/frameworks/tensorFlow2/variations/tf2-preview-20dim/versions/1,skip,128989 AttributeError NoneType object has no attribute shape or dtype
-faster_rcnn/openimages_v4/inception_resnet_v2,https://www.kaggle.com/models/google/faster-rcnn-inception-resnet-v2/frameworks/tensorFlow1/variations/faster-rcnn-openimages-v4-inception-resnet-v2/versions/1
+faster_rcnn/openimages_v4/inception_resnet_v2,https://www.kaggle.com/models/google/faster-rcnn-inception-resnet-v2/frameworks/tensorFlow1/variations/faster-rcnn-openimages-v4-inception-resnet-v2/versions/1,xfail,127962 unsupported operation LookupTableFindV2
 imagenet/mobilenet_v2_140_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/140-224-feature-vector/versions/2
 imagenet/mobilenet_v2_100_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-feature-vector/versions/2
 bert_en_cased_L-12_H-768_A-12,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/en-cased-l-12-h-768-a-12/versions/4
@@ -55,12 +55,12 @@ efficientdet/lite0/detection,https://www.kaggle.com/models/tensorflow/efficientd
 small_bert/bert_en_uncased_L-2_H-128_A-2,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-2-h-128-a-2/versions/2
 albert_base,https://www.kaggle.com/models/google/albert/frameworks/tensorFlow1/variations/base/versions/3,skip,129153 TimeoutError or killed with a signal 11
 nnlm-ja-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/ja-dim128/versions/1,skip,120721 AssertionError No signatures for a model
-universal-sentence-encoder-multilingual-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-qa/versions/2
+universal-sentence-encoder-multilingual-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual-qa/versions/2,xfail,129830 unsupported operations RaggedTensorToSparse SegmentMean SentencepieceOp SentencepieceTokenizeOp
 nnlm-ja-dim128-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/ja-dim128-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model
 LaBSE,https://www.kaggle.com/models/google/labse/frameworks/tensorFlow2/variations/labse/versions/2
 nnlm-en-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/en-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model
 resnet_50/feature_vector,https://www.kaggle.com/models/tensorflow/resnet-50/frameworks/tensorFlow2/variations/feature-vector/versions/1
-universal-sentence-encoder-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/qa/versions/2
+universal-sentence-encoder-qa,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/qa/versions/2,xfail,127962 128994 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 StaticRegexReplace StringSplit StringToHashBucketFast
 biggan-deep-256,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/deep-256/versions/1
 efficientdet/lite2/detection,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite2-detection/versions/1
 imagenet/mobilenet_v2_050_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/050-224-classification/versions/2
@@ -119,7 +119,7 @@ imagenet/resnet_v2_50/classification,https://www.kaggle.com/models/google/resnet
 aiy/vision/classifier/birds_V1,https://www.kaggle.com/models/google/aiy/frameworks/tensorFlow1/variations/vision-classifier-birds-v1/versions/1
 MuRIL,https://www.kaggle.com/models/google/muril/frameworks/tensorFlow2/variations/muril/versions/1
 efficientdet/lite1/feature-vector,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite1-feature-vector/versions/1
-random-nnlm-en-dim128,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim128/versions/1
+random-nnlm-en-dim128,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim128/versions/1,xfail,127962 128995 128996 unsupported operations LookupTableFindV2 LookupTableSizeV2 SparseFillEmptyRows SparseSegmentSqrtN StringSplit StringToHashBucketFast
 imagenet/inception_resnet_v2/classification,https://www.kaggle.com/models/google/inception-resnet-v2/frameworks/tensorFlow2/variations/classification/versions/2
 ssd_mobilenet_v2/fpnlite_320x320,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v2/frameworks/tensorFlow2/variations/fpnlite-320x320/versions/1
 centernet/resnet50v1_fpn_512x512,https://www.kaggle.com/models/tensorflow/centernet-resnet/frameworks/tensorFlow2/variations/50v1-fpn-512x512/versions/1
@@ -194,7 +194,7 @@ efficientnet/b7/classification,https://www.kaggle.com/models/tensorflow/efficien
 small_bert/bert_en_uncased_L-12_H-256_A-4,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-12-h-256-a-4/versions/2
 imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-xl-feature-vector/versions/1
 ssd_mobilenet_v1/fpn_640x640,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v1/frameworks/tensorFlow2/variations/fpn-640x640/versions/1
-mil-nce/s3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/s3d/versions/1
+mil-nce/s3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/s3d/versions/1,xfail,127962 129654 128996 unsupported operations LookupTableFindV2 StringLower StringSplitV2
 imagenet/nasnet_mobile/feature_vector,https://www.kaggle.com/models/google/nasnet/frameworks/tensorFlow2/variations/mobile-feature-vector/versions/2
 efficientnet/b4/feature-vector,https://www.kaggle.com/models/google/efficientnet/frameworks/tensorFlow1/variations/b4-feature-vector/versions/1
 imagenet/efficientnet_v2_imagenet21k_m/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-m-feature-vector/versions/2
@@ -225,7 +225,7 @@ imagenet/resnet_v2_101/feature_vector,https://www.kaggle.com/models/google/resne
 imagenet/efficientnet_v2_imagenet21k_l/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-l-feature-vector/versions/2
 imagenet/nasnet_large/classification,https://www.kaggle.com/models/google/nasnet/frameworks/tensorFlow2/variations/large-classification/versions/2
 faster_rcnn/resnet152_v1_1024x1024,https://www.kaggle.com/models/tensorflow/faster-rcnn-resnet-v1/frameworks/tensorFlow2/variations/faster-rcnn-resnet152-v1-1024x1024/versions/1,skip,128695 Inference results mismatch
-vit_s16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-fe/versions/1
+vit_s16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-fe/versions/1,xfail,118324 unsupported operation XlaGather
 zh_segmentation,https://www.kaggle.com/models/google/zh-segmentation/frameworks/tensorFlow1/variations/zh-segmentation/versions/1
 wiki40b-lm-es,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/es/versions/1
 centernet/resnet50v1_fpn_512x512_kpts,https://www.kaggle.com/models/tensorflow/centernet-resnet/frameworks/tensorFlow2/variations/50v1-fpn-512x512-kpts/versions/1
@@ -265,7 +265,7 @@ imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector,https://www.kaggle.c
 humpback_whale,https://www.kaggle.com/models/google/humpback-whale/frameworks/tensorFlow2/variations/humpback-whale/versions/1,skip,120720 InvalidArgumentError Graph execution error
 nnlm-id-dim50,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/id-dim50/versions/1,skip,120721 AssertionError No signatures for a model
 nonsemantic-speech-benchmark/frill,https://www.kaggle.com/models/google/nonsemantic-speech-benchmark/frameworks/tensorFlow2/variations/frill/versions/1
-vit_s16_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-classification/versions/1
+vit_s16_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-s16-classification/versions/1,xfail,118324 unsupported operation XlaGather
 faster_rcnn/resnet152_v1_800x1333,https://www.kaggle.com/models/tensorflow/faster-rcnn-resnet-v1/frameworks/tensorFlow2/variations/faster-rcnn-resnet152-v1-800x1333/versions/1
 bit/s-r152x4,https://www.kaggle.com/models/google/bit/frameworks/tensorFlow2/variations/s-r152x4/versions/1
 imagenet/resnet_v1_101/classification,https://www.kaggle.com/models/google/resnet-v1/frameworks/tensorFlow2/variations/101-classification/versions/2
@@ -300,7 +300,7 @@ efficientnet/b5/feature-vector,https://www.kaggle.com/models/google/efficientnet
 distilbert_multi_cased_preprocess,https://www.kaggle.com/models/jeongukjae/distilbert/frameworks/tensorFlow2/variations/multi-cased-preprocess/versions/2,skip,128989 AttributeError NoneType object has no attribute shape or dtype
 nnlm-de-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/de-dim128/versions/1,skip,120721 AssertionError No signatures for a model
 bertseq2seq/roberta24_gigaword,https://www.kaggle.com/models/google/bertseq2seq/frameworks/tensorFlow1/variations/roberta24-gigaword/versions/1,skip,128817 Model references undeclared parameters
-vit_b8_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-fe/versions/1
+vit_b8_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-fe/versions/1,xfail,118324 unsupported operation XlaGather
 aiy/vision/classifier/insects_V1,https://www.kaggle.com/models/google/aiy/frameworks/tensorFlow1/variations/vision-classifier-insects-v1/versions/1
 bertseq2seq/roberta24_cnndm,https://www.kaggle.com/models/google/bertseq2seq/frameworks/tensorFlow1/variations/roberta24-cnndm/versions/1,skip,128817 Model references undeclared parameters
 movinet/a1/base/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a1-base-kinetics-600-classification/versions/3
@@ -399,12 +399,12 @@ imagenet/mobilenet_v1_050_128/classification,https://www.kaggle.com/models/googl
 tf2-preview/nnlm-de-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-de-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model
 imagenet/mobilenet_v1_100_192/feature_vector,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-192-feature-vector/versions/2
 efficientnet/b6/classification,https://www.kaggle.com/models/tensorflow/efficientnet/frameworks/tensorFlow2/variations/b6-classification/versions/1
-vit_b8_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-classification/versions/1
+vit_b8_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-b8-classification/versions/1,xfail,118324 unsupported operation XlaGather
 universal-sentence-encoder-xling/en-es,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow1/variations/xling-en-es/versions/1
-mil-nce/i3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/i3d/versions/1
-vit_l16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-l16-fe/versions/1
+mil-nce/i3d,https://www.kaggle.com/models/deepmind/mil-nce/frameworks/tensorFlow1/variations/i3d/versions/1,xfail,127962 129654 128996 unsupported operations LookupTableFindV2 StringLower StringSplitV2
+vit_l16_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-l16-fe/versions/1,xfail,118324 unsupported operation XlaGather
 nonsemantic-speech-benchmark/frill-nofrontend,https://www.kaggle.com/models/google/nonsemantic-speech-benchmark/frameworks/tensorFlow2/variations/frill-nofrontend/versions/1
-vit_r50_l32_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r50-l32-fe/versions/1
+vit_r50_l32_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r50-l32-fe/versions/1,xfail,118324 118325 unsupported operation XlaGather XlaReduceWindow
 imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-b2-feature-vector/versions/1
 remote_sensing/so2sat-resnet50,https://www.kaggle.com/models/google/resnet50/frameworks/tensorFlow1/variations/remote-sensing-so2sat-resnet50/versions/1
 imagenet/efficientnet_v2_imagenet21k_ft1k_b3/classification,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-b3-classification/versions/1
@@ -572,7 +572,7 @@ small_bert/bert_uncased_L-2_H-256_A-4,https://www.kaggle.com/models/google/bert/
 edgetpu/vision/autoseg-edgetpu/fused_argmax/s,https://www.kaggle.com/models/google/autoseg-edgetpu/frameworks/tensorFlow2/variations/fused-argmax-s/versions/1
 image_augmentation/flipx_crop_rotate_color,https://www.kaggle.com/models/google/image-augmentation/frameworks/tensorFlow1/variations/flipx-crop-rotate-color/versions/1,skip,128817 Model references undeclared parameters
 circularnet_3,https://www.kaggle.com/models/google/circularnet/frameworks/tensorFlow2/variations/3/versions/1
-random-nnlm-en-dim50,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim50/versions/1
+random-nnlm-en-dim50,https://www.kaggle.com/models/google/random-nnlm-en/frameworks/tensorFlow1/variations/dim50/versions/1,xfail,127962 128996 128995 unsupported operation LookupTableFindV2 LookupTableSizeV2 StringSplit StringToHashBucketFast
 mixer_b16_i21k_classification,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/tensorFlow2/variations/mixer-b16-i21k-classification/versions/1
 imagenet/mobilenet_v1_025_160/quantops/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/025-160-quantops-classification/versions/2,skip,128695 Inference results mismatch
 imagenet/mobilenet_v1_075_224/quantops/feature_vector,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/075-224-quantops-feature-vector/versions/2,skip,128695 Inference results mismatch
diff --git a/tests/model_hub_tests/tf_hub_tests/precommit_models b/tests/model_hub_tests/tf_hub_tests/precommit_models
index 69e2cc40c5594a..2ee145dd0dc29f 100644
--- a/tests/model_hub_tests/tf_hub_tests/precommit_models
+++ b/tests/model_hub_tests/tf_hub_tests/precommit_models
@@ -21,4 +21,6 @@ esrgan-tf2,https://www.kaggle.com/models/kaggle/esrgan-tf2/frameworks/tensorFlow
 film,https://www.kaggle.com/models/google/film/frameworks/tensorFlow2/variations/film/versions/1
 planet/vision/classifier/planet_v2,https://www.kaggle.com/models/google/planet-v2/frameworks/tensorFlow1/variations/planet-vision-classifier-planet-v2/versions/1
 # TF1 models in .pb format
-i3d-rgb,https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/2022.1/i3d-rgb-tf/rgb.frozen.pb
\ No newline at end of file
+i3d-rgb,https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/2022.1/i3d-rgb-tf/rgb.frozen.pb
+# Model with SentencePiece tokenizer, use openvino-tokenizers package
+universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2,skip,129480 - Add openvino-tokenizers wheel build to OpenVINO GHA Workflow
\ No newline at end of file
diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models
index 2cec1a1b744901..fddf1afdcfc172 100644
--- a/tests/model_hub_tests/torch_tests/hf_transformers_models
+++ b/tests/model_hub_tests/torch_tests/hf_transformers_models
@@ -18,7 +18,7 @@ anugunj/omnivore-swinL-in21k,omnivore,skip,Load problem
 apple/mobilevitv2-1.0-imagenet1k-256,mobilevitv2,xfail,Unsupported op aten::col2im
 ArthurZ/jukebox_prior_0,jukebox_prior,skip,Load problem
 ArthurZ/jukebox-vqvae,jukebox_vqvae,skip,Load problem
-ArthurZ/persimmon-8b-base,persimmon,skip,Load problem
+ArthurZ/persimmon-8b-base,persimmon
 ashishpatel26/span-marker-bert-base-fewnerd-coarse-super,span-marker,skip,Load problem
 asi/albert-act-tiny,albert_act,skip,Load problem
 BAAI/AltCLIP,altclip
@@ -140,7 +140,7 @@ hf-internal-testing/tiny-random-mbart,mbart,xfail,Compile error: CPU plug-in doe
 hf-internal-testing/tiny-random-MobileNetV2Model,mobilenet_v2
 hf-internal-testing/tiny-random-mobilevit,mobilevit
 hf-internal-testing/tiny-random-MPNetModel,mpnet
-hf-internal-testing/tiny-random-MptForCausalLM,mpt,skip,Load problem
+hf-internal-testing/tiny-random-MptForCausalLM,mpt
 hf-internal-testing/tiny-random-NllbMoeForConditionalGeneration,nllb_moe,skip,Load problem
 hf-internal-testing/tiny-random-NystromformerModel,nystromformer
 hf-internal-testing/tiny-random-PegasusModel,pegasus,skip,Load problem
@@ -178,7 +178,7 @@ ibm/MoLM-350M-4B,moduleformer,skip,Load problem
 IDEA-CCNL/Randeng-Deltalm-362M-En-Zh,Deltalm,skip,Load problem
 Inderpreet01/seaformer-semantic-segmentation-large,seaformer,skip,Load problem
 Intel/dpt-hybrid-midas,dpt
-Intel/tvp-base,tvp,skip,Load problem
+# Intel/tvp-base,tvp,skip,Load problem # takes too long
 isemmanuelolowe/code-embedder,instruct-codebert,skip,Load problem
 isemmanuelolowe/instruct-codet5-5,instruct-codet5,skip,Load problem
 jaketae/fastspeech2-ljspeech,fastspeech2,skip,Load problem
@@ -412,6 +412,6 @@ youzanai/clip-product-title-chinese,clip_chinese_model,skip,Load problem
 Yova/SmallCapOPT7M,smallcap,skip,Load problem
 yusufani/trclip-vitl14-e10,trclip,skip,Load problem
 yysung53/dpr,text_similarity,skip,Load problem
-Zetatech/pvt-tiny-224,pvt,skip,Load problem
+Zetatech/pvt-tiny-224,pvt
 ZinengTang/tvlt-base,tvlt,xfail,Conversion is failed for aten::cat: Argument element types are inconsistent
 zuppif/resnetd-18,resnetd,skip,Load problem
diff --git a/tests/model_hub_tests/torch_tests/test_torchbench.py b/tests/model_hub_tests/torch_tests/test_torchbench.py
index 425862d14cd6f3..04c2c96de0ae63 100644
--- a/tests/model_hub_tests/torch_tests/test_torchbench.py
+++ b/tests/model_hub_tests/torch_tests/test_torchbench.py
@@ -29,12 +29,9 @@ def setup_class(self):
             f"git clone https://github.com/pytorch/benchmark.git {self.repo_dir.name}")
         subprocess.check_call(
             ["git", "checkout", "850364ac2678b2363f086b7549254b6cb7df2e4d"], cwd=self.repo_dir.name)
-        m_list = get_models_list(self._model_list_path)
-        m_processed_list = [m for m, _, mark, _ in m_list if mark != "skip"]
-        subprocess.check_call(
-            [sys.executable, "install.py"] + m_processed_list, cwd=self.repo_dir.name)

     def load_model(self, model_name, model_link):
+        subprocess.check_call([sys.executable, "install.py"] + [model_name], cwd=self.repo_dir.name)
         sys.path.append(self.repo_dir.name)
         from torchbenchmark import load_model_by_name
         try:
diff --git a/tests/model_hub_tests/torch_tests/torch_utils.py b/tests/model_hub_tests/torch_tests/torch_utils.py
index d92462efaf6521..87c9aeb043f596 100644
--- a/tests/model_hub_tests/torch_tests/torch_utils.py
+++ b/tests/model_hub_tests/torch_tests/torch_utils.py
@@ -59,7 +59,7 @@ def prepare_inputs(self, inputs_info):
         if isinstance(inputs, dict):
             return dict((k, v.numpy()) for k, v in inputs.items())
         else:
-            return [i.numpy() for i in inputs]
+            return flattenize_structure(inputs)

     def convert_model(self, model_obj):
         try:
diff --git a/tests/model_hub_tests/torch_tests/torchbench_models b/tests/model_hub_tests/torch_tests/torchbench_models
index 8634635574b400..6e75f637b71934 100644
--- a/tests/model_hub_tests/torch_tests/torchbench_models
+++ b/tests/model_hub_tests/torch_tests/torchbench_models
@@ -60,7 +60,6 @@ mobilenet_v2_quantized_qat,None
 #nanogpt,None,skip,No install.py is found
 nvidia_deeprecommender,None
 opacus_cifar10,None,skip,Modules that have backward hooks assigned can't be compiled
-phi_1_5,None
 phlippe_densenet,None
 phlippe_resnet,None
 pyhpc_equation_of_state,None,xfail,Accuracy validation failed
diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake
index fc6ef40c64c9bd..78860f2cd86aa3 100644
--- a/thirdparty/dependencies.cmake
+++ b/thirdparty/dependencies.cmake
@@ -323,7 +323,15 @@ if(ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND
         # otherwise, fallback to existing default
         find_package(Protobuf 3.20.3 REQUIRED ${protobuf_config})
     endif()
-    set(PROTOC_EXECUTABLE protobuf::protoc)
+
+    # with newer protobuf versions (4.22 and newer), we use CONFIG first
+    # so, the Protobuf_PROTOC_EXECUTABLE variable must be checked explicitly,
+    # because it's not used in this case (as opposed to the MODULE case)
+    if(Protobuf_VERSION VERSION_GREATER_EQUAL 22 AND DEFINED Protobuf_PROTOC_EXECUTABLE)
+        set(PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})
+    else()
+        set(PROTOC_EXECUTABLE protobuf::protoc)
+    endif()
 else()
     add_subdirectory(thirdparty/protobuf EXCLUDE_FROM_ALL)
 endif()
diff --git a/tools/benchmark_tool/openvino/__init__.py b/tools/benchmark_tool/openvino/__init__.py
index b7dc434f3148cc..1d75589bd2eceb 100644
--- a/tools/benchmark_tool/openvino/__init__.py
+++ b/tools/benchmark_tool/openvino/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2018-2023 Intel Corporation
+# Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

 __path__ = __import__("pkgutil").extend_path(__path__, __name__)
diff --git a/tools/mo/openvino/__init__.py b/tools/mo/openvino/__init__.py
index 90552e0befed68..635bae28670cc6 100644
--- a/tools/mo/openvino/__init__.py
+++ b/tools/mo/openvino/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2018-2023 Intel Corporation
+# Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

 __path__ = __import__("pkgutil").extend_path(__path__, __name__)
diff --git a/tools/openvino_dev/src/openvino/__init__.py b/tools/openvino_dev/src/openvino/__init__.py
index 90552e0befed68..635bae28670cc6 100644
--- a/tools/openvino_dev/src/openvino/__init__.py
+++ b/tools/openvino_dev/src/openvino/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2018-2023 Intel Corporation
+# Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

 __path__ = __import__("pkgutil").extend_path(__path__, __name__)
diff --git a/tools/ovc/openvino/__init__.py b/tools/ovc/openvino/__init__.py
index b7dc434f3148cc..1d75589bd2eceb 100644
--- a/tools/ovc/openvino/__init__.py
+++ b/tools/ovc/openvino/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2018-2023 Intel Corporation
+# Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

 __path__ = __import__("pkgutil").extend_path(__path__, __name__)